2 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
3 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "Collector.h"
25 #include "CallFrame.h"
26 #include "CodeBlock.h"
27 #include "CollectorHeapIterator.h"
28 #include "Interpreter.h"
30 #include "JSGlobalObject.h"
32 #include "JSONObject.h"
36 #include "MarkStack.h"
43 #include <wtf/FastMalloc.h>
44 #include <wtf/HashCountedSet.h>
45 #include <wtf/UnusedParam.h>
46 #include <wtf/VMTags.h>
50 #include <mach/mach_init.h>
51 #include <mach/mach_port.h>
52 #include <mach/task.h>
53 #include <mach/thread_act.h>
54 #include <mach/vm_map.h>
56 #elif PLATFORM(SYMBIAN)
61 #elif PLATFORM(WIN_OS)
84 #if HAVE(PTHREAD_NP_H)
85 #include <pthread_np.h>
90 #include <sys/procfs.h>
// NOTE(review): extraction-damaged region — upstream line numbers are embedded at
// line starts and statements are wrapped mid-token; verify against upstream
// WebKit JavaScriptCore Collector.cpp before editing.
// Debug switch: when 1, force a full GC on every allocation (normally 0).
97 #define COLLECT_ON_EVERY_ALLOCATION 0
103 // tunable parameters
// Factor by which the block-pointer array grows when full (see allocateBlock).
105 const size_t GROWTH_FACTOR
= 2;
// The block array shrinks when fewer than 1/LOW_WATER_FACTOR of its slots are used.
106 const size_t LOW_WATER_FACTOR
= 4;
// Minimum headroom (in cells) kept after a collection before another GC triggers.
107 const size_t ALLOCATIONS_PER_COLLECTION
= 3600;
108 // This value has to be a macro to be used in max() without introducing
109 // a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
// Minimum capacity (in slots) of the per-heap block-pointer array.
110 #define MIN_ARRAY_SIZE (static_cast<size_t>(14))
112 #if PLATFORM(SYMBIAN)
113 const size_t MAX_NUM_BLOCKS
= 256; // Max size of collector heap set to 16 MB
// Symbian-only: chunk heap from which all CollectorBlocks are carved (see Heap ctor).
114 static RHeap
* userChunk
= 0;
// NOTE(review): extraction-damaged fragment of the Heap::Thread record — the
// enclosing class/struct declaration and some member lines are missing; verify
// against upstream Collector.cpp.
// PlatformThread is the OS-native thread handle used to suspend/resume and to
// read another thread's registers during conservative marking.
117 #if ENABLE(JSC_MULTIPLE_THREADS)
120 typedef mach_port_t PlatformThread
;
121 #elif PLATFORM(WIN_OS)
122 typedef HANDLE PlatformThread
;
// Thread: one node of the Heap's singly-linked list of registered threads.
// Records the pthread identity, the platform handle, and (presumably, via
// `base`) the thread's stack base — TODO confirm the missing member lines.
127 Thread(pthread_t pthread
, const PlatformThread
& platThread
, void* base
)
128 : posixThread(pthread
)
129 , platformThread(platThread
)
135 pthread_t posixThread
;
136 PlatformThread platformThread
;
// NOTE(review): extraction-damaged — some initializer-list and body lines are
// missing; verify against upstream Collector.cpp.
// Heap constructor: zero-initializes both collector heaps (primary for general
// JSCells, number for JSNumber cells) and allocates the first block of each.
142 Heap::Heap(JSGlobalData
* globalData
)
144 #if ENABLE(JSC_MULTIPLE_THREADS)
145 , m_registeredThreads(0)
146 , m_currentThreadRegistrar(0)
148 , m_globalData(globalData
)
152 #if PLATFORM(SYMBIAN)
153 // Symbian OpenC supports mmap but currently not the MAP_ANON flag.
154 // Using fastMalloc() does not properly align blocks on 64k boundaries
155 // and previous implementation was flawed/incomplete.
156 // UserHeap::ChunkHeap allows allocation of continuous memory and specification
157 // of alignment value for (symbian) cells within that heap.
159 // Clarification and mapping of terminology:
160 // RHeap (created by UserHeap::ChunkHeap below) is a continuous memory chunk,
161 // which can dynamically grow up to 8 MB,
162 // that holds all CollectorBlocks of this session (static).
163 // Each symbian cell within RHeap maps to a 64kb aligned CollectorBlock.
164 // JSCell objects are maintained as usual within CollectorBlocks.
166 userChunk
= UserHeap::ChunkHeap(0, 0, MAX_NUM_BLOCKS
* BLOCK_SIZE
, BLOCK_SIZE
, BLOCK_SIZE
)
;
170 #endif // PLATFORM(SYMBIAN)
// Start each heap with one pre-allocated block so allocation never sees an
// empty heap.
172 memset(&primaryHeap
, 0, sizeof(CollectorHeap
));
173 allocateBlock
<PrimaryHeap
>();
175 memset(&numberHeap
, 0, sizeof(CollectorHeap
));
177 allocateBlock
<NumberHeap
>();
// NOTE(review): extraction-damaged fragments of ~Heap() and Heap::destroy() —
// the function headers, braces, and several body lines are missing; verify
// against upstream Collector.cpp.
183 // The destroy function must already have been called, so assert this.
184 ASSERT(!m_globalData
);
// destroy(): tears down the heap under the JS lock.
189 JSLock
lock(SilenceAssertionsOnly
);
194 // The global object is not GC protected at this point, so sweeping may delete it
195 // (and thus the global data) before other objects that may use the global data.
196 RefPtr
<JSGlobalData
> protect(m_globalData
);
198 delete m_markListSet
;
// Release every block of both heaps.
201 freeBlocks
<PrimaryHeap
>();
202 freeBlocks
<NumberHeap
>();
204 #if ENABLE(JSC_MULTIPLE_THREADS)
// Remove the pthread TLS key used for automatic thread unregistration, then
// walk and free the registered-thread list (continuation lines missing here).
205 if (m_currentThreadRegistrar
) {
206 int error
= pthread_key_delete(m_currentThreadRegistrar
);
207 ASSERT_UNUSED(error
, !error
);
210 MutexLocker
registeredThreadsLock(m_registeredThreadsMutex
);
211 for (Heap::Thread
* t
= m_registeredThreads
; t
;) {
212 Heap::Thread
* next
= t
->next
;
// NOTE(review): extraction-damaged — several `#if PLATFORM(...)` guards, braces,
// and statements are missing (upstream numbering jumps, e.g. 222→225); verify
// against upstream Collector.cpp before editing.
// allocateBlock<heapType>: allocates one BLOCK_SIZE-byte, BLOCK_SIZE-aligned
// CollectorBlock using the platform allocator, initializes its cells, and
// appends it to the heap's block array (growing the array if needed).
221 template <HeapType heapType
>
222 NEVER_INLINE CollectorBlock
* Heap::allocateBlock()
// Darwin: vm_map gives naturally aligned, zero-filled pages tagged for Instruments.
225 vm_address_t address
= 0;
226 vm_map(current_task(), &address
, BLOCK_SIZE
, BLOCK_OFFSET_MASK
, VM_FLAGS_ANYWHERE
| VM_TAG_FOR_COLLECTOR_MEMORY
, MEMORY_OBJECT_NULL
, 0, FALSE
, VM_PROT_DEFAULT
, VM_PROT_DEFAULT
, VM_INHERIT_DEFAULT
);
227 #elif PLATFORM(SYMBIAN)
228 // Allocate a 64 kb aligned CollectorBlock
229 unsigned char* mask
= reinterpret_cast<unsigned char*>(userChunk
->Alloc(BLOCK_SIZE
));
232 uintptr_t address
= reinterpret_cast<uintptr_t>(mask
);
233 #elif PLATFORM(WINCE)
234 void* address
= VirtualAlloc(NULL
, BLOCK_SIZE
, MEM_COMMIT
| MEM_RESERVE
, PAGE_READWRITE
);
235 #elif PLATFORM(WIN_OS)
// Windows: aligned malloc does not zero, hence the explicit memset below.
237 void* address
= __mingw_aligned_malloc(BLOCK_SIZE
, BLOCK_SIZE
);
239 void* address
= _aligned_malloc(BLOCK_SIZE
, BLOCK_SIZE
);
241 memset(address
, 0, BLOCK_SIZE
);
242 #elif HAVE(POSIX_MEMALIGN)
244 posix_memalign(&address
, BLOCK_SIZE
, BLOCK_SIZE
);
247 #if ENABLE(JSC_MULTIPLE_THREADS)
248 #error Need to initialize pagesize safely.
// Fallback: over-allocate with mmap, then munmap the misaligned head/tail to
// leave a BLOCK_SIZE-aligned region.
250 static size_t pagesize
= getpagesize();
253 if (BLOCK_SIZE
> pagesize
)
254 extra
= BLOCK_SIZE
- pagesize
;
256 void* mmapResult
= mmap(NULL
, BLOCK_SIZE
+ extra
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
| MAP_ANON
, -1, 0);
257 uintptr_t address
= reinterpret_cast<uintptr_t>(mmapResult
);
260 if ((address
& BLOCK_OFFSET_MASK
) != 0)
261 adjust
= BLOCK_SIZE
- (address
& BLOCK_OFFSET_MASK
);
264 munmap(reinterpret_cast<char*>(address
), adjust
);
267 munmap(reinterpret_cast<char*>(address
+ adjust
+ BLOCK_SIZE
), extra
- adjust
);
// Stamp the block with its heap type and reset mark bits (last-cell sentinel set).
274 CollectorBlock
* block
= reinterpret_cast<CollectorBlock
*>(address
);
276 block
->type
= heapType
;
277 clearMarkBits
<heapType
>(block
);
279 // heapAllocate assumes that it's safe to call a destructor on any cell in the primary heap.
280 if (heapType
!= NumberHeap
) {
281 Structure
* dummyMarkableCellStructure
= m_globalData
->dummyMarkableCellStructure
.get();
282 for (size_t i
= 0; i
< HeapConstants
<heapType
>::cellsPerBlock
; ++i
)
283 new (block
->cells
+ i
) JSCell(dummyMarkableCellStructure
);
286 // Add block to blocks vector.
288 CollectorHeap
& heap
= heapType
== PrimaryHeap
? primaryHeap
: numberHeap
;
289 size_t numBlocks
= heap
.numBlocks
;
290 if (heap
.usedBlocks
== numBlocks
) {
// Grow the pointer array geometrically; cap prevents size_t overflow in the
// fastRealloc byte count below.
291 static const size_t maxNumBlocks
= ULONG_MAX
/ sizeof(CollectorBlock
*) / GROWTH_FACTOR
;
292 if (numBlocks
> maxNumBlocks
)
294 numBlocks
= max(MIN_ARRAY_SIZE
, numBlocks
* GROWTH_FACTOR
);
295 heap
.numBlocks
= numBlocks
;
296 heap
.blocks
= static_cast<CollectorBlock
**>(fastRealloc(heap
.blocks
, numBlocks
* sizeof(CollectorBlock
*)));
298 heap
.blocks
[heap
.usedBlocks
++] = block
;
// NOTE(review): extraction-damaged — braces, `#if` guards and some statements
// are missing; verify against upstream Collector.cpp.
// freeBlock<heapType>(size_t): releases the block at index `block`, running
// destructors for primary-heap cells first, then compacts the block array by
// swapping in the last block and shrinking the array when far under-used.
303 template <HeapType heapType
>
304 NEVER_INLINE
void Heap::freeBlock(size_t block
)
306 CollectorHeap
& heap
= heapType
== PrimaryHeap
? primaryHeap
: numberHeap
;
// Number-heap cells have no meaningful destructors, so only iterate primary.
308 if (heapType
!= NumberHeap
) {
309 ObjectIterator
<heapType
> it(heap
, block
);
310 ObjectIterator
<heapType
> end(heap
, block
+ 1);
311 for ( ; it
!= end
; ++it
)
314 freeBlock(heap
.blocks
[block
]);
316 // swap with the last block so we compact as we go
317 heap
.blocks
[block
] = heap
.blocks
[heap
.usedBlocks
- 1];
// Shrink the pointer array once usage drops below the low-water mark.
320 if (heap
.numBlocks
> MIN_ARRAY_SIZE
&& heap
.usedBlocks
< heap
.numBlocks
/ LOW_WATER_FACTOR
) {
321 heap
.numBlocks
= heap
.numBlocks
/ GROWTH_FACTOR
;
322 heap
.blocks
= static_cast<CollectorBlock
**>(fastRealloc(heap
.blocks
, heap
.numBlocks
* sizeof(CollectorBlock
*)));
// freeBlock(CollectorBlock*): returns one block's memory to the platform
// allocator that allocateBlock obtained it from.
326 NEVER_INLINE
void Heap::freeBlock(CollectorBlock
* block
)
329 vm_deallocate(current_task(), reinterpret_cast<vm_address_t
>(block
), BLOCK_SIZE
);
330 #elif PLATFORM(SYMBIAN)
331 userChunk
->Free(reinterpret_cast<TAny
*>(block
));
332 #elif PLATFORM(WINCE)
333 VirtualFree(block
, 0, MEM_RELEASE
);
334 #elif PLATFORM(WIN_OS)
336 __mingw_aligned_free(block
);
338 _aligned_free(block
);
340 #elif HAVE(POSIX_MEMALIGN)
343 munmap(reinterpret_cast<char*>(block
), BLOCK_SIZE
);
// freeBlocks<heapType>: releases every block (always index 0 because freeBlock
// compacts), frees the pointer array, and zeroes the CollectorHeap struct.
347 template <HeapType heapType
>
348 void Heap::freeBlocks()
350 CollectorHeap
& heap
= heapType
== PrimaryHeap
? primaryHeap
: numberHeap
;
352 while (heap
.usedBlocks
)
353 freeBlock
<heapType
>(0);
354 fastFree(heap
.blocks
);
355 memset(&heap
, 0, sizeof(CollectorHeap
));
// NOTE(review): extraction-damaged — some body lines (braces, the GC trigger
// itself) are missing; verify against upstream Collector.cpp.
// recordExtraCost: accounts for non-cell memory retained by heap objects (e.g.
// large strings/arrays) so that GC frequency reflects true memory pressure.
358 void Heap::recordExtraCost(size_t cost
)
360 // Our frequency of garbage collection tries to balance memory use against speed
361 // by collecting based on the number of newly created values. However, for values
362 // that hold on to a great deal of memory that's not in the form of other JS values,
363 // that is not good enough - in some cases a lot of those objects can pile up and
364 // use crazy amounts of memory without a GC happening. So we track these extra
365 // memory costs. Only unusually large objects are noted, and we only keep track
366 // of this extra cost until the next GC. In garbage collected languages, most values
367 // are either very short lived temporaries, or have extremely long lifetimes. So
368 // if a large value survives one garbage collection, there is not much point to
369 // collecting more frequently as long as it stays alive.
370 // NOTE: we target the primaryHeap unconditionally as JSNumber doesn't modify cost
372 if (primaryHeap
.extraCost
> maxExtraCost
&& primaryHeap
.extraCost
> primaryHeap
.usedBlocks
* BLOCK_SIZE
/ 2) {
373 // If the last iteration through the heap deallocated blocks, we need
374 // to clean up remaining garbage before marking. Otherwise, the conservative
375 // marking mechanism might follow a pointer to unmapped memory.
376 if (primaryHeap
.didShrink
)
377 sweep
<PrimaryHeap
>();
380 primaryHeap
.extraCost
+= cost
;
// NOTE(review): extraction-damaged — the do/while loop headers, the slow-path
// collection call and several braces are missing (numbering jumps 397→399,
// 417→422); verify against upstream Collector.cpp.
// heapAllocate<heapType>: the allocation fast path. Scans forward from
// (nextBlock, nextCell) for an unmarked (i.e. garbage) cell, destructs the old
// occupant on the primary heap, and returns the recycled cell. When the scan
// reaches the end of the heap it falls through to the slow path (collect and
// retry — missing from this extraction).
383 template <HeapType heapType
> ALWAYS_INLINE
void* Heap::heapAllocate(size_t s
)
385 typedef typename HeapConstants
<heapType
>::Block Block
;
386 typedef typename HeapConstants
<heapType
>::Cell Cell
;
388 CollectorHeap
& heap
= heapType
== PrimaryHeap
? primaryHeap
: numberHeap
;
390 ASSERT(JSLock::lockCount() > 0);
391 ASSERT(JSLock::currentThreadIsHoldingLock());
392 ASSERT_UNUSED(s
, s
<= HeapConstants
<heapType
>::cellSize
);
394 ASSERT(heap
.operationInProgress
== NoOperation
);
395 ASSERT(heapType
== PrimaryHeap
|| heap
.extraCost
== 0);
397 #if COLLECT_ON_EVERY_ALLOCATION
399 ASSERT(heap
.operationInProgress
== NoOperation
);
404 // Fast case: find the next garbage cell and recycle it.
407 ASSERT(heap
.nextBlock
< heap
.usedBlocks
);
408 Block
* block
= reinterpret_cast<Block
*>(heap
.blocks
[heap
.nextBlock
]);
410 ASSERT(heap
.nextCell
< HeapConstants
<heapType
>::cellsPerBlock
);
411 if (!block
->marked
.get(heap
.nextCell
>> HeapConstants
<heapType
>::bitmapShift
)) { // Always false for the last cell in the block
412 Cell
* cell
= block
->cells
+ heap
.nextCell
;
// Primary-heap cells need their previous occupant destructed; operationInProgress
// guards against reentrant allocation during that destructor.
413 if (heapType
!= NumberHeap
) {
414 heap
.operationInProgress
= Allocation
;
415 JSCell
* imp
= reinterpret_cast<JSCell
*>(cell
);
417 heap
.operationInProgress
= NoOperation
;
422 } while (++heap
.nextCell
!= HeapConstants
<heapType
>::cellsPerBlock
);
424 } while (++heap
.nextBlock
!= heap
.usedBlocks
);
426 // Slow case: reached the end of the heap. Mark live objects and start over.
// NOTE(review): extraction-damaged — braces and a few statements are missing;
// verify against upstream Collector.cpp.
// resizeBlocks<heapType>: after a collection, grows or shrinks the heap so its
// capacity is between minBlockCount (live cells + headroom) and maxBlockCount
// (25% slack above that minimum).
432 template <HeapType heapType
>
433 void Heap::resizeBlocks()
435 CollectorHeap
& heap
= heapType
== PrimaryHeap
? primaryHeap
: numberHeap
;
437 heap
.didShrink
= false;
439 size_t usedCellCount
= markedCells
<heapType
>();
440 size_t minCellCount
= usedCellCount
+ max(ALLOCATIONS_PER_COLLECTION
, usedCellCount
);
441 size_t minBlockCount
= (minCellCount
+ HeapConstants
<heapType
>::cellsPerBlock
- 1) / HeapConstants
<heapType
>::cellsPerBlock
;
443 size_t maxCellCount
= 1.25f
* minCellCount
;
444 size_t maxBlockCount
= (maxCellCount
+ HeapConstants
<heapType
>::cellsPerBlock
- 1) / HeapConstants
<heapType
>::cellsPerBlock
;
446 if (heap
.usedBlocks
< minBlockCount
)
447 growBlocks
<heapType
>(minBlockCount
);
448 else if (heap
.usedBlocks
> maxBlockCount
)
449 shrinkBlocks
<heapType
>(maxBlockCount
);
// growBlocks: allocate blocks until the heap reaches neededBlocks.
452 template <HeapType heapType
>
453 void Heap::growBlocks(size_t neededBlocks
)
455 CollectorHeap
& heap
= heapType
== PrimaryHeap
? primaryHeap
: numberHeap
;
456 ASSERT(heap
.usedBlocks
< neededBlocks
);
457 while (heap
.usedBlocks
< neededBlocks
)
458 allocateBlock
<heapType
>();
// shrinkBlocks: free fully-empty blocks until the heap is down to neededBlocks.
// The always-set sentinel bit on each block's last cell must be cleared first
// so isEmpty() can report truthfully, then restored afterwards.
461 template <HeapType heapType
>
462 void Heap::shrinkBlocks(size_t neededBlocks
)
464 CollectorHeap
& heap
= heapType
== PrimaryHeap
? primaryHeap
: numberHeap
;
465 ASSERT(heap
.usedBlocks
> neededBlocks
);
467 // Clear the always-on last bit, so isEmpty() isn't fooled by it.
468 for (size_t i
= 0; i
< heap
.usedBlocks
; ++i
)
469 heap
.blocks
[i
]->marked
.clear((HeapConstants
<heapType
>::cellsPerBlock
- 1) >> HeapConstants
<heapType
>::bitmapShift
);
// No ++i on free: freeBlock swaps the last block into slot i, which must be
// re-examined (increment branch lines missing from this extraction).
471 for (size_t i
= 0; i
!= heap
.usedBlocks
&& heap
.usedBlocks
!= neededBlocks
; ) {
472 if (heap
.blocks
[i
]->marked
.isEmpty()) {
473 freeBlock
<heapType
>(i
);
474 heap
.didShrink
= true;
479 // Reset the always-on last bit.
480 for (size_t i
= 0; i
< heap
.usedBlocks
; ++i
)
481 heap
.blocks
[i
]->marked
.set((HeapConstants
<heapType
>::cellsPerBlock
- 1) >> HeapConstants
<heapType
>::bitmapShift
);
// Public entry points: route allocations to the appropriate sub-heap.
484 void* Heap::allocate(size_t s
)
486 return heapAllocate
<PrimaryHeap
>(s
);
489 void* Heap::allocateNumber(size_t s
)
491 return heapAllocate
<NumberHeap
>(s
);
// NOTE(review): extraction-damaged region — many `#if PLATFORM(...)` guards,
// braces and returns are missing; verify against upstream Collector.cpp.
// Platform helpers to find the current thread's stack bounds, used by the
// conservative stack scanner. (WINCE path only, presumably — guards missing.)
495 void* g_stackBase
= 0;
// isPageWritable: true if `page` is committed and writable (guard/nocache bits
// stripped before comparing protection flags).
497 inline bool isPageWritable(void* page
)
499 MEMORY_BASIC_INFORMATION memoryInformation
;
500 DWORD result
= VirtualQuery(page
, &memoryInformation
, sizeof(memoryInformation
));
502 // return false on error, including ptr outside memory
503 if (result
!= sizeof(memoryInformation
))
506 DWORD protect
= memoryInformation
.Protect
& ~(PAGE_GUARD
| PAGE_NOCACHE
);
507 return protect
== PAGE_READWRITE
508 || protect
== PAGE_WRITECOPY
509 || protect
== PAGE_EXECUTE_READWRITE
510 || protect
== PAGE_EXECUTE_WRITECOPY
;
// getStackBase: probes page by page from the current frame toward the stack
// base, returning the boundary of the last writable page.
513 static void* getStackBase(void* previousFrame
)
515 // find the address of this stack frame by taking the address of a local variable
516 bool isGrowingDownward
;
517 void* thisFrame
= (void*)(&isGrowingDownward
);
519 isGrowingDownward
= previousFrame
< &thisFrame
;
520 static DWORD pageSize
= 0;
522 SYSTEM_INFO systemInfo
;
523 GetSystemInfo(&systemInfo
);
524 pageSize
= systemInfo
.dwPageSize
;
527 // scan all of memory starting from this frame, and return the last writeable page found
528 register char* currentPage
= (char*)((DWORD
)thisFrame
& ~(pageSize
- 1));
529 if (isGrowingDownward
) {
530 while (currentPage
> 0) {
531 // check for underflow
532 if (currentPage
>= (char*)pageSize
)
533 currentPage
-= pageSize
;
536 if (!isPageWritable(currentPage
))
537 return currentPage
+ pageSize
;
542 // guaranteed to complete because isPageWritable returns false at end of memory
543 currentPage
+= pageSize
;
544 if (!isPageWritable(currentPage
))
// QNX: query the kernel for the current thread's stack extent via devctl;
// results are cached per thread in function-local statics.
552 static inline void *currentThreadStackBaseQNX()
554 static void* stackBase
= 0;
555 static size_t stackSize
= 0;
556 static pthread_t stackThread
;
557 pthread_t thread
= pthread_self();
558 if (stackBase
== 0 || thread
!= stackThread
) {
559 struct _debug_thread_info threadInfo
;
560 memset(&threadInfo
, 0, sizeof(threadInfo
));
561 threadInfo
.tid
= pthread_self();
562 int fd
= open("/proc/self", O_RDONLY
);
564 LOG_ERROR("Unable to open /proc/self (errno: %d)", errno
);
567 devctl(fd
, DCMD_PROC_TIDSTATUS
, &threadInfo
, sizeof(threadInfo
), 0);
569 stackBase
= reinterpret_cast<void*>(threadInfo
.stkbase
);
570 stackSize
= threadInfo
.stksize
;
572 stackThread
= thread
;
574 return static_cast<char*>(stackBase
) + stackSize
;
// currentThreadStackBase: per-platform dispatch returning the highest address
// of the current thread's stack (stacks grow downward on all supported targets).
578 static inline void* currentThreadStackBase()
581 pthread_t thread
= pthread_self();
582 return pthread_get_stackaddr_np(thread
);
583 #elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
584 // offset 0x18 from the FS segment register gives a pointer to
585 // the thread information block for the current thread
591 return static_cast<void*>(pTib
->StackBase
);
592 #elif PLATFORM(WIN_OS) && PLATFORM(X86_64) && COMPILER(MSVC)
593 PNT_TIB64 pTib
= reinterpret_cast<PNT_TIB64
>(NtCurrentTeb());
594 return reinterpret_cast<void*>(pTib
->StackBase
);
595 #elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
596 // offset 0x18 from the FS segment register gives a pointer to
597 // the thread information block for the current thread
599 asm ( "movl %%fs:0x18, %0\n"
602 return static_cast<void*>(pTib
->StackBase
);
604 return currentThreadStackBaseQNX();
605 #elif PLATFORM(SOLARIS)
609 #elif PLATFORM(OPENBSD)
610 pthread_t thread
= pthread_self();
612 pthread_stackseg_np(thread
, &stack
);
614 #elif PLATFORM(SYMBIAN)
615 static void* stackBase
= 0;
616 if (stackBase
== 0) {
617 TThreadStackInfo info
;
619 thread
.StackInfo(info
);
620 stackBase
= (void*)info
.iBase
;
622 return (void*)stackBase
;
623 #elif PLATFORM(HAIKU)
624 thread_info threadInfo
;
625 get_thread_info(find_thread(NULL
), &threadInfo
);
626 return threadInfo
.stack_end
;
// Generic POSIX: read stack base/size from the pthread attributes, cached per
// thread in function-local statics.
628 static void* stackBase
= 0;
629 static size_t stackSize
= 0;
630 static pthread_t stackThread
;
631 pthread_t thread
= pthread_self();
632 if (stackBase
== 0 || thread
!= stackThread
) {
633 pthread_attr_t sattr
;
634 pthread_attr_init(&sattr
);
635 #if HAVE(PTHREAD_NP_H) || PLATFORM(NETBSD)
636 // e.g. on FreeBSD 5.4, neundorf@kde.org
637 pthread_attr_get_np(thread
, &sattr
);
639 // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
640 pthread_getattr_np(thread
, &sattr
);
642 int rc
= pthread_attr_getstack(&sattr
, &stackBase
, &stackSize
);
643 (void)rc
; // FIXME: Deal with error code somehow? Seems fatal.
645 pthread_attr_destroy(&sattr
);
646 stackThread
= thread
;
648 return static_cast<char*>(stackBase
) + stackSize
;
649 #elif PLATFORM(WINCE)
654 return getStackBase(&dummy
);
657 #error Need a way to get the stack base on this platform
// NOTE(review): extraction-damaged — braces and several statements (e.g. the
// delete of the found thread node) are missing; verify against upstream
// Collector.cpp.
661 #if ENABLE(JSC_MULTIPLE_THREADS)
// getCurrentPlatformThread: OS-native handle for the calling thread.
663 static inline PlatformThread
getCurrentPlatformThread()
666 return pthread_mach_thread_np(pthread_self());
667 #elif PLATFORM(WIN_OS)
668 return pthread_getw32threadhandle_np(pthread_self());
// makeUsableFromMultipleThreads: idempotently creates the TLS key whose
// destructor (unregisterThread) removes a dying thread from the GC's list.
672 void Heap::makeUsableFromMultipleThreads()
674 if (m_currentThreadRegistrar
)
677 int error
= pthread_key_create(&m_currentThreadRegistrar
, unregisterThread
);
// registerThread: adds the calling thread to the registered-thread list so its
// stack and registers are scanned during GC; no-op if already registered or if
// multi-thread support was never enabled.
682 void Heap::registerThread()
684 ASSERT(!m_globalData
->mainThreadOnly
|| isMainThread());
686 if (!m_currentThreadRegistrar
|| pthread_getspecific(m_currentThreadRegistrar
))
689 pthread_setspecific(m_currentThreadRegistrar
, this);
690 Heap::Thread
* thread
= new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());
692 MutexLocker
lock(m_registeredThreadsMutex
);
// Push onto the head of the singly-linked list under the mutex.
694 thread
->next
= m_registeredThreads
;
695 m_registeredThreads
= thread
;
// Static TLS-destructor trampoline: `p` is the Heap* stored in the TLS slot.
698 void Heap::unregisterThread(void* p
)
701 static_cast<Heap
*>(p
)->unregisterThread();
// unregisterThread: unlinks the calling thread's node from the list (head case
// first, then a linear scan keeping a trailing `last` pointer).
704 void Heap::unregisterThread()
706 pthread_t currentPosixThread
= pthread_self();
708 MutexLocker
lock(m_registeredThreadsMutex
);
710 if (pthread_equal(currentPosixThread
, m_registeredThreads
->posixThread
)) {
711 Thread
* t
= m_registeredThreads
;
712 m_registeredThreads
= m_registeredThreads
->next
;
715 Heap::Thread
* last
= m_registeredThreads
;
717 for (t
= m_registeredThreads
->next
; t
; t
= t
->next
) {
718 if (pthread_equal(t
->posixThread
, currentPosixThread
)) {
719 last
->next
= t
->next
;
724 ASSERT(t
); // If t is NULL, we never found ourselves in the list.
729 #else // ENABLE(JSC_MULTIPLE_THREADS)
// Single-threaded build: registration is a no-op (body missing here).
731 void Heap::registerThread()
// isPointerAligned: true when p is aligned to the machine word size.
737 inline bool isPointerAligned(void* p
)
739 return (((intptr_t)(p
) & (sizeof(char*) - 1)) == 0);
742 // Cell size needs to be a power of two for isPossibleCell to be valid.
743 COMPILE_ASSERT(sizeof(CollectorCell
) % 2 == 0, Collector_cell_size_is_power_of_two
);
// NOTE(review): extraction-damaged — the `#if`/`#else` guards selecting between
// the half-cell (JSValue32 presumably) and full-cell variants are missing;
// verify against upstream Collector.cpp.
// isHalfCellAligned: true when p is aligned to half a cell (relies on cell size
// being a power of two).
746 static bool isHalfCellAligned(void *p
)
748 return (((intptr_t)(p
) & (CELL_MASK
>> 1)) == 0);
// isPossibleCell: quick filter for the conservative scanner — non-null and
// plausibly cell-aligned.
751 static inline bool isPossibleCell(void* p
)
753 return isHalfCellAligned(p
) && p
;
// Full-cell-alignment variant of the same pair.
758 static inline bool isCellAligned(void *p
)
760 return (((intptr_t)(p
) & CELL_MASK
) == 0);
763 static inline bool isPossibleCell(void* p
)
765 return isCellAligned(p
) && p
;
// NOTE(review): extraction-damaged — the word-scanning loop header, `continue`
// statements and braces are missing; verify against upstream Collector.cpp.
// markConservatively: treats every pointer-sized word in [start, end) as a
// potential JSCell pointer; words that pass alignment/offset checks and point
// into a known collector block are marked (number heap) or pushed on the mark
// stack (primary heap).
769 void Heap::markConservatively(MarkStack
& markStack
, void* start
, void* end
)
777 ASSERT((static_cast<char*>(end
) - static_cast<char*>(start
)) < 0x1000000);
778 ASSERT(isPointerAligned(start
));
779 ASSERT(isPointerAligned(end
));
781 char** p
= static_cast<char**>(start
);
782 char** e
= static_cast<char**>(end
);
784 CollectorBlock
** primaryBlocks
= primaryHeap
.blocks
;
786 CollectorBlock
** numberBlocks
= numberHeap
.blocks
;
791 if (isPossibleCell(x
)) {
792 size_t usedPrimaryBlocks
;
793 uintptr_t xAsBits
= reinterpret_cast<uintptr_t>(x
);
794 xAsBits
&= CELL_ALIGN_MASK
;
// Reject candidates whose offset within a block cannot be a cell start.
796 uintptr_t offset
= xAsBits
& BLOCK_OFFSET_MASK
;
797 const size_t lastCellOffset
= sizeof(CollectorCell
) * (CELLS_PER_BLOCK
- 1);
798 if (offset
> lastCellOffset
)
801 CollectorBlock
* blockAddr
= reinterpret_cast<CollectorBlock
*>(xAsBits
- offset
);
803 // Mark the number heap; we can mark these Cells directly to avoid the virtual call cost
804 size_t usedNumberBlocks
= numberHeap
.usedBlocks
;
805 for (size_t block
= 0; block
< usedNumberBlocks
; block
++) {
806 if (numberBlocks
[block
] != blockAddr
)
808 Heap::markCell(reinterpret_cast<JSCell
*>(xAsBits
));
813 // Mark the primary heap
814 usedPrimaryBlocks
= primaryHeap
.usedBlocks
;
815 for (size_t block
= 0; block
< usedPrimaryBlocks
; block
++) {
816 if (primaryBlocks
[block
] != blockAddr
)
818 markStack
.append(reinterpret_cast<JSCell
*>(xAsBits
));
// Internal helper: conservatively scans from the current stack pointer (the
// address of a local) up to the thread's stack base. NEVER_INLINE so the
// caller's setjmp-spilled registers stay below the scanned range.
829 void NEVER_INLINE
Heap::markCurrentThreadConservativelyInternal(MarkStack
& markStack
)
832 void* stackPointer
= &dummy
;
833 void* stackBase
= currentThreadStackBase();
834 markConservatively(markStack
, stackPointer
, stackBase
);
838 #define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
840 #define REGISTER_BUFFER_ALIGNMENT
// markCurrentThreadConservatively: spills callee-saved registers onto the stack
// via setjmp so the stack scan also sees values held only in registers.
843 void Heap::markCurrentThreadConservatively(MarkStack
& markStack
)
845 // setjmp forces volatile registers onto the stack
846 jmp_buf registers REGISTER_BUFFER_ALIGNMENT
;
848 #pragma warning(push)
849 #pragma warning(disable: 4611)
856 markCurrentThreadConservativelyInternal(markStack
);
// NOTE(review): extraction-damaged region (embedded upstream line numbers,
// missing #if guards/braces) — kept byte-identical except where noted; verify
// against upstream WebKit Collector.cpp.
// BUG FIX in this edit: the two occurrences of "(R)s" mojibake below were the
// HTML entity &reg; — i.e. the text "&regs" was corrupted during extraction.
// Restored to `&regs` in the thread_get_state and GetThreadContext calls.
859 #if ENABLE(JSC_MULTIPLE_THREADS)
// suspendThread / resumeThread: stop and restart another thread so its
// registers and stack can be scanned safely.
861 static inline void suspendThread(const PlatformThread
& platformThread
)
864 thread_suspend(platformThread
);
865 #elif PLATFORM(WIN_OS)
866 SuspendThread(platformThread
);
868 #error Need a way to suspend threads on this platform
872 static inline void resumeThread(const PlatformThread
& platformThread
)
875 thread_resume(platformThread
);
876 #elif PLATFORM(WIN_OS)
877 ResumeThread(platformThread
);
879 #error Need a way to resume threads on this platform
883 typedef unsigned long usword_t
; // word size, assumed to be either 32 or 64 bit
// Per-architecture register-dump type used when scanning another thread.
888 typedef i386_thread_state_t PlatformThreadRegisters
;
889 #elif PLATFORM(X86_64)
890 typedef x86_thread_state64_t PlatformThreadRegisters
;
892 typedef ppc_thread_state_t PlatformThreadRegisters
;
893 #elif PLATFORM(PPC64)
894 typedef ppc_thread_state64_t PlatformThreadRegisters
;
896 typedef arm_thread_state_t PlatformThreadRegisters
;
898 #error Unknown Architecture
901 #elif PLATFORM(WIN_OS)&& PLATFORM(X86)
902 typedef CONTEXT PlatformThreadRegisters
;
904 #error Need a thread register struct for this platform
// getPlatformThreadRegisters: fills `regs` with the (suspended) thread's
// register state and returns the number of bytes of `regs` that are valid.
907 static size_t getPlatformThreadRegisters(const PlatformThread
& platformThread
, PlatformThreadRegisters
& regs
)
912 unsigned user_count
= sizeof(regs
)/sizeof(int);
913 thread_state_flavor_t flavor
= i386_THREAD_STATE
;
914 #elif PLATFORM(X86_64)
915 unsigned user_count
= x86_THREAD_STATE64_COUNT
;
916 thread_state_flavor_t flavor
= x86_THREAD_STATE64
;
918 unsigned user_count
= PPC_THREAD_STATE_COUNT
;
919 thread_state_flavor_t flavor
= PPC_THREAD_STATE
;
920 #elif PLATFORM(PPC64)
921 unsigned user_count
= PPC_THREAD_STATE64_COUNT
;
922 thread_state_flavor_t flavor
= PPC_THREAD_STATE64
;
924 unsigned user_count
= ARM_THREAD_STATE_COUNT
;
925 thread_state_flavor_t flavor
= ARM_THREAD_STATE
;
927 #error Unknown Architecture
930 kern_return_t result
= thread_get_state(platformThread
, flavor
, (thread_state_t
)&regs
, &user_count
);
931 if (result
!= KERN_SUCCESS
) {
932 WTFReportFatalError(__FILE__
, __LINE__
, WTF_PRETTY_FUNCTION
,
933 "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result
);
936 return user_count
* sizeof(usword_t
);
937 // end PLATFORM(DARWIN)
939 #elif PLATFORM(WIN_OS) && PLATFORM(X86)
940 regs
.ContextFlags
= CONTEXT_INTEGER
| CONTEXT_CONTROL
| CONTEXT_SEGMENTS
;
941 GetThreadContext(platformThread
, &regs
);
942 return sizeof(CONTEXT
);
944 #error Need a way to get thread registers on this platform
// otherThreadStackPointer: extracts the stack pointer from a captured register
// set (field names differ between __DARWIN_UNIX03 and legacy layouts).
948 static inline void* otherThreadStackPointer(const PlatformThreadRegisters
& regs
)
955 return reinterpret_cast<void*>(regs
.__esp
);
956 #elif PLATFORM(X86_64)
957 return reinterpret_cast<void*>(regs
.__rsp
);
958 #elif PLATFORM(PPC) || PLATFORM(PPC64)
959 return reinterpret_cast<void*>(regs
.__r1
);
961 return reinterpret_cast<void*>(regs
.__sp
);
963 #error Unknown Architecture
966 #else // !__DARWIN_UNIX03
969 return reinterpret_cast<void*>(regs
.esp
);
970 #elif PLATFORM(X86_64)
971 return reinterpret_cast<void*>(regs
.rsp
);
972 #elif (PLATFORM(PPC) || PLATFORM(PPC64))
973 return reinterpret_cast<void*>(regs
.r1
);
975 #error Unknown Architecture
978 #endif // __DARWIN_UNIX03
980 // end PLATFORM(DARWIN)
981 #elif PLATFORM(X86) && PLATFORM(WIN_OS)
982 return reinterpret_cast<void*>((uintptr_t) regs
.Esp
);
984 #error Need a way to get the stack pointer for another thread on this platform
// NOTE(review): extraction-damaged region (embedded upstream line numbers,
// missing guards/braces) — kept byte-identical except where noted; verify
// against upstream WebKit Collector.cpp.
// BUG FIX in this edit: two occurrences of "(R)s" mojibake (the HTML entity
// &reg; produced by corrupting "&regs") are restored to `&regs` in the
// register-buffer markConservatively call.
// markOtherThreadConservatively: suspends the target thread, conservatively
// scans its captured registers and its stack, then resumes it.
988 void Heap::markOtherThreadConservatively(MarkStack
& markStack
, Thread
* thread
)
990 suspendThread(thread
->platformThread
);
992 PlatformThreadRegisters regs
;
993 size_t regSize
= getPlatformThreadRegisters(thread
->platformThread
, regs
);
995 // mark the thread's registers
996 markConservatively(markStack
, static_cast<void*>(&regs
), static_cast<void*>(reinterpret_cast<char*>(&regs
) + regSize
));
998 void* stackPointer
= otherThreadStackPointer(regs
);
999 markConservatively(markStack
, stackPointer
, thread
->stackBase
);
1001 resumeThread(thread
->platformThread
);
// markStackObjectsConservatively: scans the current thread, then (when
// multi-threading is enabled) every other registered thread.
1006 void Heap::markStackObjectsConservatively(MarkStack
& markStack
)
1008 markCurrentThreadConservatively(markStack
);
1010 #if ENABLE(JSC_MULTIPLE_THREADS)
1012 if (m_currentThreadRegistrar
) {
1014 MutexLocker
lock(m_registeredThreadsMutex
);
1017 // Forbid malloc during the mark phase. Marking a thread suspends it, so
1018 // a malloc inside markChildren() would risk a deadlock with a thread that had been
1019 // suspended while holding the malloc lock.
1022 // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
1023 // and since this is a shared heap, they are real locks.
1024 for (Thread
* thread
= m_registeredThreads
; thread
; thread
= thread
->next
) {
1025 if (!pthread_equal(thread
->posixThread
, pthread_self()))
1026 markOtherThreadConservatively(markStack
, thread
);
// NOTE(review): extraction-damaged — early-return guards (presumably for
// non-cell JSValues) and braces are missing; verify against upstream
// Collector.cpp.
// protect: pins a cell as a GC root via the ref-counted protected-value set.
1035 void Heap::protect(JSValue k
)
1038 ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData
->isSharedInstance
);
1043 m_protectedValues
.add(k
.asCell());
// unprotect: drops one protection reference added by protect().
1046 void Heap::unprotect(JSValue k
)
1049 ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData
->isSharedInstance
);
1054 m_protectedValues
.remove(k
.asCell());
// markProtectedObjects: pushes every protected cell onto the mark stack.
1057 void Heap::markProtectedObjects(MarkStack
& markStack
)
1059 ProtectCountSet::iterator end
= m_protectedValues
.end();
1060 for (ProtectCountSet::iterator it
= m_protectedValues
.begin(); it
!= end
; ++it
) {
1061 markStack
.append(it
->first
);
// NOTE(review): extraction-damaged — braces, an early return and the result
// declaration are missing; verify against upstream Collector.cpp.
// clearMarkBits<heapType>(): resets the mark bitmap of every block in the heap.
1066 template <HeapType heapType
>
1067 void Heap::clearMarkBits()
1069 CollectorHeap
& heap
= heapType
== PrimaryHeap
? primaryHeap
: numberHeap
;
1070 for (size_t i
= 0; i
< heap
.usedBlocks
; ++i
)
1071 clearMarkBits
<heapType
>(heap
.blocks
[i
]);
// clearMarkBits(block): clears all bits, then re-sets the sentinel bit for the
// block's last cell (heapAllocate relies on that bit always being set).
1074 template <HeapType heapType
>
1075 void Heap::clearMarkBits(CollectorBlock
* block
)
1077 // heapAllocate assumes that the last cell in every block is marked.
1078 block
->marked
.clearAll();
1079 block
->marked
.set((HeapConstants
<heapType
>::cellsPerBlock
- 1) >> HeapConstants
<heapType
>::bitmapShift
);
// markedCells: counts marked cells from (startBlock, startCell) to the end of
// the heap; used by resizeBlocks and objectCount.
1082 template <HeapType heapType
>
1083 size_t Heap::markedCells(size_t startBlock
, size_t startCell
) const
1085 const CollectorHeap
& heap
= heapType
== PrimaryHeap
? primaryHeap
: numberHeap
;
1086 ASSERT(startBlock
<= heap
.usedBlocks
);
1087 ASSERT(startCell
< HeapConstants
<heapType
>::cellsPerBlock
);
1089 if (startBlock
>= heap
.usedBlocks
)
// Partial count for the first block, full bitmap counts for the rest.
1093 result
+= heap
.blocks
[startBlock
]->marked
.count(startCell
);
1094 for (size_t i
= startBlock
+ 1; i
< heap
.usedBlocks
; ++i
)
1095 result
+= heap
.blocks
[i
]->marked
.count();
// NOTE(review): extraction-damaged — the function signature line (`void
// Heap::sweep()`, presumably), the cell dereference, destructor call and braces
// are missing; verify against upstream Collector.cpp.
// sweep<heapType>: walks every dead (unmarked) cell, runs its destructor, and
// re-initializes it as a dummy markable JSCell (or a JSZombie in zombie builds)
// so later conservative marks on freed cells are harmless.
1100 template <HeapType heapType
>
1103 ASSERT(heapType
!= NumberHeap
); // The number heap does not contain meaningful destructors.
1105 CollectorHeap
& heap
= heapType
== PrimaryHeap
? primaryHeap
: numberHeap
;
1107 ASSERT(heap
.operationInProgress
== NoOperation
);
1108 if (heap
.operationInProgress
!= NoOperation
)
// Guard against reentrancy while destructors run.
1110 heap
.operationInProgress
= Collection
;
1112 #if !ENABLE(JSC_ZOMBIES)
1113 Structure
* dummyMarkableCellStructure
= m_globalData
->dummyMarkableCellStructure
.get();
1116 DeadObjectIterator
<heapType
> it(heap
, heap
.nextBlock
, heap
.nextCell
);
1117 DeadObjectIterator
<heapType
> end(heap
, heap
.usedBlocks
);
1118 for ( ; it
!= end
; ++it
) {
1120 #if ENABLE(JSC_ZOMBIES)
// Zombie builds: leave a diagnostic JSZombie in place and keep it marked so
// use-after-free is detectable rather than silent.
1121 if (!cell
->isZombie()) {
1122 const ClassInfo
* info
= cell
->classInfo();
1124 new (cell
) JSZombie(info
, JSZombie::leakedZombieStructure());
1125 Heap::markCell(cell
);
1129 // Callers of sweep assume it's safe to mark any cell in the heap.
1130 new (cell
) JSCell(dummyMarkableCellStructure
);
1134 heap
.operationInProgress
= NoOperation
;
// NOTE(review): extraction-damaged — braces, the markStack.drain() call
// (presumably, between appending roots and compact) and an early return are
// missing; verify against upstream Collector.cpp.
// markRoots: the mark phase. Clears all mark bits, then marks from every root
// set: conservative stacks/registers, the register file, protected values,
// argument lists, the pending exception, small strings, a code block mid-
// reparse, and active JSON stringifiers.
1137 void Heap::markRoots()
1140 if (m_globalData
->isSharedInstance
) {
1141 ASSERT(JSLock::lockCount() > 0);
1142 ASSERT(JSLock::currentThreadIsHoldingLock());
// Non-short-circuiting & is deliberate here: both flags are plain loads.
1146 ASSERT((primaryHeap
.operationInProgress
== NoOperation
) & (numberHeap
.operationInProgress
== NoOperation
));
1147 if (!((primaryHeap
.operationInProgress
== NoOperation
) & (numberHeap
.operationInProgress
== NoOperation
)))
1150 primaryHeap
.operationInProgress
= Collection
;
1151 numberHeap
.operationInProgress
= Collection
;
1153 MarkStack
& markStack
= m_globalData
->markStack
;
1156 clearMarkBits
<PrimaryHeap
>();
1157 clearMarkBits
<NumberHeap
>();
1159 // Mark stack roots.
1160 markStackObjectsConservatively(markStack
);
1161 m_globalData
->interpreter
->registerFile().markCallFrames(markStack
, this);
1163 // Mark explicitly registered roots.
1164 markProtectedObjects(markStack
);
1166 // Mark misc. other roots.
1167 if (m_markListSet
&& m_markListSet
->size())
1168 MarkedArgumentBuffer::markLists(markStack
, *m_markListSet
);
1169 if (m_globalData
->exception
)
1170 markStack
.append(m_globalData
->exception
);
1171 m_globalData
->smallStrings
.markChildren(markStack
);
1172 if (m_globalData
->functionCodeBlockBeingReparsed
)
1173 m_globalData
->functionCodeBlockBeingReparsed
->markAggregate(markStack
);
1174 if (m_globalData
->firstStringifierToMark
)
1175 JSONObject::markStringifiers(markStack
, m_globalData
->firstStringifierToMark
);
// Release the mark stack's scratch memory once marking is done.
1178 markStack
.compact();
1180 primaryHeap
.operationInProgress
= NoOperation
;
1181 numberHeap
.operationInProgress
= NoOperation
;
// NOTE(review): extraction-damaged — braces and the final `return statistics;`
// (presumably) are missing; verify against upstream Collector.cpp.
// objectCount(): total live objects across both heaps.
1184 size_t Heap::objectCount() const
1186 return objectCount
<PrimaryHeap
>() + objectCount
<NumberHeap
>();
// objectCount<heapType>: everything before (nextBlock, nextCell) is allocated;
// beyond that, only marked cells are live; each block's sentinel cell is
// subtracted back out.
1189 template <HeapType heapType
>
1190 size_t Heap::objectCount() const
1192 const CollectorHeap
& heap
= heapType
== PrimaryHeap
? primaryHeap
: numberHeap
;
1194 return heap
.nextBlock
* HeapConstants
<heapType
>::cellsPerBlock
// allocated full blocks
1195 + heap
.nextCell
// allocated cells in current block
1196 + markedCells
<heapType
>(heap
.nextBlock
, heap
.nextCell
) // marked cells in remainder of heap
1197 - heap
.usedBlocks
; // 1 cell per block is a dummy sentinel
// addToStatistics: accumulates this heap's total and free byte counts.
1200 template <HeapType heapType
>
1201 void Heap::addToStatistics(Heap::Statistics
& statistics
) const
1203 const CollectorHeap
& heap
= heapType
== PrimaryHeap
? primaryHeap
: numberHeap
;
1205 statistics
.size
+= heap
.usedBlocks
* BLOCK_SIZE
;
1206 statistics
.free
+= heap
.usedBlocks
* BLOCK_SIZE
- (objectCount
<heapType
>() * HeapConstants
<heapType
>::cellSize
);
// statistics(): size/free totals over both heaps.
1209 Heap::Statistics
Heap::statistics() const
1211 Statistics statistics
= { 0, 0 };
1212 addToStatistics
<PrimaryHeap
>(statistics
);
1213 addToStatistics
<NumberHeap
>(statistics
);
// NOTE(review): extraction-damaged — counters, loop bodies, several `return`
// statements and braces are missing; verify against upstream Collector.cpp.
// globalObjectCount: walks the circular list of global objects headed at
// m_globalData->head (count increment lines missing here).
1217 size_t Heap::globalObjectCount()
1220 if (JSGlobalObject
* head
= m_globalData
->head
) {
1221 JSGlobalObject
* o
= head
;
1225 } while (o
!= head
);
// protectedGlobalObjectCount: same walk, counting only GC-protected globals.
1230 size_t Heap::protectedGlobalObjectCount()
1233 if (JSGlobalObject
* head
= m_globalData
->head
) {
1234 JSGlobalObject
* o
= head
;
1236 if (m_protectedValues
.contains(o
))
1239 } while (o
!= head
);
// protectedObjectCount: size of the explicit-root set.
1245 size_t Heap::protectedObjectCount()
1247 return m_protectedValues
.size();
// typeName: human-readable class name for debugging dumps; the string/number
// return values are on lines missing from this extraction.
1250 static const char* typeName(JSCell
* cell
)
1252 if (cell
->isString())
1255 if (cell
->isNumber())
1258 if (cell
->isGetterSetter())
1259 return "gettersetter";
1260 if (cell
->isAPIValueWrapper())
1261 return "value wrapper";
1262 if (cell
->isPropertyNameIterator())
1263 return "for-in iterator";
1264 ASSERT(cell
->isObject());
1265 const ClassInfo
* info
= cell
->classInfo();
1266 return info
? info
->className
: "Object";
// protectedObjectTypeCounts: histogram of protected objects by type name.
// Caller owns the returned set (raw `new` — pre-smart-pointer WebKit style).
1269 HashCountedSet
<const char*>* Heap::protectedObjectTypeCounts()
1271 HashCountedSet
<const char*>* counts
= new HashCountedSet
<const char*>;
1273 ProtectCountSet::iterator end
= m_protectedValues
.end();
1274 for (ProtectCountSet::iterator it
= m_protectedValues
.begin(); it
!= end
; ++it
)
1275 counts
->add(typeName(it
->first
));
// NOTE(review): extraction-damaged — function headers for isBusy()/collect(),
// the markRoots() calls and braces are missing; verify against upstream
// Collector.cpp.
// Fragment of Heap::isBusy(): true while either heap has a GC/allocation
// operation in progress (non-short-circuiting | over two plain flag loads).
1282 return (primaryHeap
.operationInProgress
!= NoOperation
) | (numberHeap
.operationInProgress
!= NoOperation
);
// Fragment of Heap::collect(): mark, then lazily sweep — allocation pointers
// are rewound to the start so heapAllocate recycles unmarked cells in place.
1287 JAVASCRIPTCORE_GC_BEGIN();
1291 JAVASCRIPTCORE_GC_MARKED();
1293 primaryHeap
.nextCell
= 0;
1294 primaryHeap
.nextBlock
= 0;
1295 primaryHeap
.extraCost
= 0;
1296 #if ENABLE(JSC_ZOMBIES)
1297 sweep
<PrimaryHeap
>();
1299 resizeBlocks
<PrimaryHeap
>();
1302 numberHeap
.nextCell
= 0;
1303 numberHeap
.nextBlock
= 0;
1304 resizeBlocks
<NumberHeap
>();
1307 JAVASCRIPTCORE_GC_END();
// collectAllGarbage: like collect(), but sweeps the primary heap eagerly so
// every dead object's destructor runs now rather than on later allocation.
1310 void Heap::collectAllGarbage()
1312 JAVASCRIPTCORE_GC_BEGIN();
1314 // If the last iteration through the heap deallocated blocks, we need
1315 // to clean up remaining garbage before marking. Otherwise, the conservative
1316 // marking mechanism might follow a pointer to unmapped memory.
1317 if (primaryHeap
.didShrink
)
1318 sweep
<PrimaryHeap
>();
1322 JAVASCRIPTCORE_GC_MARKED();
1324 primaryHeap
.nextCell
= 0;
1325 primaryHeap
.nextBlock
= 0;
1326 primaryHeap
.extraCost
= 0;
1327 sweep
<PrimaryHeap
>();
1328 resizeBlocks
<PrimaryHeap
>();
1331 numberHeap
.nextCell
= 0;
1332 numberHeap
.nextBlock
= 0;
1333 resizeBlocks
<NumberHeap
>();
1336 JAVASCRIPTCORE_GC_END();
// Iterators over live primary-heap objects, for heap introspection.
1339 LiveObjectIterator
<PrimaryHeap
> Heap::primaryHeapBegin()
1341 return LiveObjectIterator
<PrimaryHeap
>(primaryHeap
, 0);
1344 LiveObjectIterator
<PrimaryHeap
> Heap::primaryHeapEnd()
1346 return LiveObjectIterator
<PrimaryHeap
>(primaryHeap
, primaryHeap
.usedBlocks
);