1 /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
2 /* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
3 /* ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the License.
16 * The Original Code is [Open Source Virtual Machine.].
18 * The Initial Developer of the Original Code is
19 * Adobe System Incorporated.
20 * Portions created by the Initial Developer are Copyright (C) 2004-2006
21 * the Initial Developer. All Rights Reserved.
26 * Alternatively, the contents of this file may be used under the terms of
27 * either the GNU General Public License Version 2 or later (the "GPL"), or
28 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 * in which case the provisions of the GPL or the LGPL are applicable instead
30 * of those above. If you wish to allow use of your version of this file only
31 * under the terms of either the GPL or the LGPL, and not to allow others to
32 * use your version of this file under the terms of the MPL, indicate your
33 * decision by deleting the provisions above and replace them with the notice
34 * and other provisions required by the GPL or the LGPL. If you do not delete
35 * the provisions above, a recipient may use your version of this file under
36 * the terms of any one of the MPL, the GPL or the LGPL.
38 * ***** END LICENSE BLOCK ***** */
44 GCLargeAlloc::GCLargeAlloc(GC
* gc
) : m_gc(gc
)
47 m_startedFinalize
= false;
48 #ifdef MMGC_MEMORY_PROFILER
53 #if defined DEBUG || defined MMGC_MEMORY_PROFILER
54 void* GCLargeAlloc::Alloc(size_t originalSize
, size_t requestSize
, int flags
)
56 void* GCLargeAlloc::Alloc(size_t requestSize
, int flags
)
60 m_gc
->heap
->CheckForOOMAbortAllocation();
62 GCHeap::CheckForAllocSizeOverflow(requestSize
, sizeof(LargeBlock
)+GCHeap::kBlockSize
);
64 int blocks
= (int)((requestSize
+sizeof(LargeBlock
)+GCHeap::kBlockSize
-1) / GCHeap::kBlockSize
);
65 uint32_t computedSize
= blocks
*GCHeap::kBlockSize
- sizeof(LargeBlock
);
67 // Allocation must be signalled before we allocate because no GC work must be allowed to
68 // come between an allocation and an initialization - if it does, we may crash, as
69 // GCFinalizedObject subclasses may not have a valid vtable, but the GC depends on them
70 // having it. In principle we could signal allocation late but only set the object
71 // flags after signaling, but we might still cause trouble for the profiler, which also
72 // depends on non-interruptibility.
74 m_gc
->SignalAllocWork(computedSize
);
76 LargeBlock
*block
= (LargeBlock
*) m_gc
->AllocBlock(blocks
, GC::kGCLargeAllocPageFirst
,
77 (flags
&GC::kZero
) != 0, (flags
&GC::kCanFail
) != 0);
82 gcbits_t flagbits0
= 0;
83 gcbits_t flagbits1
= 0;
84 flagbits0
|= ((flags
&GC::kFinalize
) != 0) ? kFinalizable
: 0;
85 flagbits1
|= ((flags
&GC::kContainsPointers
) != 0) ? kContainsPointers
: 0;
86 flagbits1
|= ((flags
&GC::kRCObject
) != 0) ? kRCObject
: 0;
87 block
->gc
= this->m_gc
;
89 block
->next
= m_blocks
;
90 block
->size
= computedSize
;
92 block
->bitsShift
= 12; // Always use bits[0]
94 block
->bits
= block
->flags
;
97 item
= (void*)(block
+1);
99 if(m_gc
->collecting
&& !m_startedFinalize
)
102 block
->flags
[0] = flagbits0
;
103 block
->flags
[1] = flagbits1
;
106 if (flags
& GC::kZero
)
108 // AllocBlock should take care of this
109 for(int i
=0, n
=(int)(requestSize
/sizeof(int)); i
<n
; i
++) {
110 if(((int*)item
)[i
] != 0)
117 GCHeap
* heap
= GCHeap::GetGCHeap();
118 if(heap
->HooksEnabled()) {
119 size_t userSize
= block
->size
- DebugSize();
120 #ifdef MMGC_MEMORY_PROFILER
121 m_totalAskSize
+= originalSize
;
122 heap
->AllocHook(GetUserPointer(item
), originalSize
, userSize
);
124 heap
->AllocHook(GetUserPointer(item
), 0, userSize
);
133 void GCLargeAlloc::Free(const void *item
)
136 // RCObject have contract that they must clean themselves, since they
137 // have to scan themselves to decrement other RCObjects they might as well
138 // clean themselves too, better than suffering a memset later
139 if(IsRCObject(GetUserPointer(item
)))
140 m_gc
->RCObjectZeroCheck((RCObject
*)GetUserPointer(item
));
143 LargeBlock
*b
= GetLargeBlock(item
);
145 // We can't allow free'ing something during Sweeping, otherwise alloc counters
146 // get decremented twice and destructors will be called twice.
147 GCAssert(m_gc
->collecting
== false || m_gc
->marking
== true);
148 if (m_gc
->marking
&& (m_gc
->collecting
|| IsProtectedAgainstFree(b
))) {
149 m_gc
->AbortFree(GetUserPointer(item
));
153 m_gc
->policy
.signalFreeWork(b
->size
);
156 GCHeap
* heap
= GCHeap::GetGCHeap();
157 if(heap
->HooksEnabled())
159 const void* p
= GetUserPointer(item
);
160 size_t userSize
= GC::Size(p
);
161 #ifdef MMGC_MEMORY_PROFILER
162 if(heap
->GetProfiler())
163 m_totalAskSize
-= heap
->GetProfiler()->GetAskSize(p
);
165 heap
->FinalizeHook(p
, userSize
);
166 heap
->FreeHook(p
, userSize
, uint8_t(GCHeap::GCFreedPoison
));
170 if(b
->flags
[0] & kHasWeakRef
)
171 m_gc
->ClearWeakRef(GetUserPointer(item
));
173 LargeBlock
**prev
= &m_blocks
;
179 m_gc
->FreeBlock(b
, b
->GetNumBlocks());
182 prev
= (LargeBlock
**)(&(*prev
)->next
);
184 GCAssertMsg(false, "Bad free!");
187 void GCLargeAlloc::ClearMarks()
189 LargeBlock
*block
= m_blocks
;
191 block
->flags
[0] &= ~(kMark
|kQueued
);
196 void GCLargeAlloc::Finalize()
198 m_startedFinalize
= true;
199 LargeBlock
**prev
= &m_blocks
;
201 LargeBlock
*b
= *prev
;
202 if ((b
->flags
[0] & kMark
) == 0) {
203 GCAssert((b
->flags
[0] & kQueued
) == 0);
206 // Large blocks may be allocated by finalizers for large blocks, creating contention
207 // for the block list. Yet the block list must be live, since eg GetUsageInfo may be
208 // called by the finalizers (or their callees).
210 // Unlink the block from the list early to avoid contention.
216 if (b
->flags
[0] & kFinalizable
) {
217 GCFinalizedObject
*obj
= (GCFinalizedObject
*) item
;
218 obj
= (GCFinalizedObject
*) GetUserPointer(obj
);
219 obj
->~GCFinalizedObject();
221 if(b
->flags
[1] & kRCObject
) {
222 gc
->RCObjectZeroCheck((RCObject
*)obj
);
226 if(b
->flags
[0] & kHasWeakRef
) {
227 gc
->ClearWeakRef(GetUserPointer(item
));
231 if(m_gc
->heap
->HooksEnabled())
233 #ifdef MMGC_MEMORY_PROFILER
234 if(GCHeap::GetGCHeap()->GetProfiler())
235 m_totalAskSize
-= GCHeap::GetGCHeap()->GetProfiler()->GetAskSize(GetUserPointer(item
));
238 m_gc
->heap
->FinalizeHook(GetUserPointer(item
), b
->size
- DebugSize());
242 // The block is not empty until now, so now add it.
243 gc
->AddToLargeEmptyBlockList(b
);
247 b
->flags
[0] &= ~(kMark
|kQueued
);
248 prev
= (LargeBlock
**)(&b
->next
);
250 m_startedFinalize
= false;
253 GCLargeAlloc::~GCLargeAlloc()
260 bool GCLargeAlloc::ConservativeGetMark(const void *item
, bool bogusPointerReturnValue
)
262 if(!IsLargeBlock(item
))
263 return bogusPointerReturnValue
;
264 return (GetLargeBlock(item
)->flags
[0] & kMark
) != 0;
268 bool GCLargeAlloc::IsWhite(const void *item
)
270 if(!IsLargeBlock(item
))
272 return (GetLargeBlock(item
)->flags
[0] & (kMark
|kQueued
)) == 0;
276 void GCLargeAlloc::GetUsageInfo(size_t& totalAskSize
, size_t& totalAllocated
)
281 LargeBlock
*block
= m_blocks
;
283 totalAllocated
+= block
->size
;
287 #ifdef MMGC_MEMORY_PROFILER
288 totalAskSize
+= m_totalAskSize
;