/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "config.h"
#include "Collector.h"

#include "ArgList.h"
#include "CallFrame.h"
#include "CodeBlock.h"
#include "CollectorHeapIterator.h"
#include "Interpreter.h"
#include "JSArray.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "JSString.h"
#include "JSValue.h"
#include "JSZombie.h"
#include "MarkStack.h"
#include "Nodes.h"
#include "Tracing.h"
#include <algorithm>
#include <limits.h>
#include <setjmp.h>
#include <stdlib.h>
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>
#include <wtf/VMTags.h>
#if PLATFORM(DARWIN)

#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif PLATFORM(SYMBIAN)
#include <e32std.h>
#include <e32cmn.h>
#include <unistd.h>

#elif PLATFORM(WIN_OS)

#include <windows.h>
#include <malloc.h>

#elif PLATFORM(HAIKU)

#include <OS.h>

#elif PLATFORM(UNIX)

#include <stdlib.h>
#if !PLATFORM(HAIKU)
#include <sys/mman.h>
#endif
#include <unistd.h>

#if PLATFORM(SOLARIS)
#include <thread.h>
#else
#include <pthread.h>
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

#if PLATFORM(QNX)
#include <fcntl.h>
#include <sys/procfs.h>
#include <stdio.h>
#include <errno.h>
#endif

#endif
#define COLLECT_ON_EVERY_ALLOCATION 0

using std::max;

namespace JSC {
// tunable parameters
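// GROWTH_FACTOR doubles the block-pointer array when it fills; LOW_WATER_FACTOR
// shrinks it once fewer than 1/4 of its slots are in use; ALLOCATIONS_PER_COLLECTION
// is the minimum headroom (in cells) that resizeBlocks() keeps between collections.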
const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 3600;
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))

#if PLATFORM(SYMBIAN)
const size_t MAX_NUM_BLOCKS = 256; // Max size of collector heap set to 16 MB
static RHeap* userChunk = 0;
#endif
#if ENABLE(JSC_MULTIPLE_THREADS)

#if PLATFORM(DARWIN)
typedef mach_port_t PlatformThread;
#elif PLATFORM(WIN_OS)
typedef HANDLE PlatformThread;
#endif

class Heap::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

#endif
Heap::Heap(JSGlobalData* globalData)
    : m_markListSet(0)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_currentThreadRegistrar(0)
#endif
    , m_globalData(globalData)
{
    ASSERT(globalData);

#if PLATFORM(SYMBIAN)
    // Symbian OpenC supports mmap but currently not the MAP_ANON flag.
    // Using fastMalloc() does not properly align blocks on 64k boundaries
    // and the previous implementation was flawed/incomplete.
    // UserHeap::ChunkHeap allows allocation of continuous memory and specification
    // of an alignment value for (Symbian) cells within that heap.
    //
    // Clarification and mapping of terminology:
    // RHeap (created by UserHeap::ChunkHeap below) is a continuous memory chunk,
    // which can dynamically grow up to 8 MB,
    // that holds all CollectorBlocks of this session (static).
    // Each Symbian cell within the RHeap maps to a 64kb aligned CollectorBlock.
    // JSCell objects are maintained as usual within CollectorBlocks.
    if (!userChunk) {
        userChunk = UserHeap::ChunkHeap(0, 0, MAX_NUM_BLOCKS * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
        if (!userChunk)
            CRASH();
    }
#endif // PLATFORM(SYMBIAN)

    memset(&primaryHeap, 0, sizeof(CollectorHeap));
    allocateBlock<PrimaryHeap>();

    memset(&numberHeap, 0, sizeof(CollectorHeap));
#if USE(JSVALUE32)
    allocateBlock<NumberHeap>();
#endif
}
Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}
void Heap::destroy()
{
    JSLock lock(SilenceAssertionsOnly);

    if (!m_globalData)
        return;

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;
    m_markListSet = 0;

    freeBlocks<PrimaryHeap>();
    freeBlocks<NumberHeap>();

#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        int error = pthread_key_delete(m_currentThreadRegistrar);
        ASSERT_UNUSED(error, !error);
    }

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;
        delete t;
        t = next;
    }
#endif

    m_globalData = 0;
}
template <HeapType heapType>
NEVER_INLINE CollectorBlock* Heap::allocateBlock()
{
#if PLATFORM(DARWIN)
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(SYMBIAN)
    // Allocate a 64 kb aligned CollectorBlock
    unsigned char* mask = reinterpret_cast<unsigned char*>(userChunk->Alloc(BLOCK_SIZE));
    if (!mask)
        CRASH();
    uintptr_t address = reinterpret_cast<uintptr_t>(mask);
#elif PLATFORM(WINCE)
    void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif PLATFORM(WIN_OS)
#if COMPILER(MINGW)
    void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#else
    void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#endif
    memset(address, 0, BLOCK_SIZE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
#else

#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
#endif
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;
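    // mmap cannot be asked for aligned memory directly, so over-allocate by
    // (BLOCK_SIZE - pagesize), then munmap the misaligned head and the unused
    // tail; what remains is a BLOCK_SIZE region aligned on a BLOCK_SIZE boundary.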
    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<char*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
#endif

    // Initialize block.

    CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address);
    block->heap = this;
    block->type = heapType;
    clearMarkBits<heapType>(block);

    // heapAllocate assumes that it's safe to call a destructor on any cell in the primary heap.
    if (heapType != NumberHeap) {
        Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
        for (size_t i = 0; i < HeapConstants<heapType>::cellsPerBlock; ++i)
            new (block->cells + i) JSCell(dummyMarkableCellStructure);
    }

    // Add block to blocks vector.

    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    size_t numBlocks = heap.numBlocks;
    if (heap.usedBlocks == numBlocks) {
        static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
        if (numBlocks > maxNumBlocks)
            CRASH();
        numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
        heap.numBlocks = numBlocks;
        heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock*)));
    }
    heap.blocks[heap.usedBlocks++] = block;

    return block;
}
template <HeapType heapType>
NEVER_INLINE void Heap::freeBlock(size_t block)
{
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    if (heapType != NumberHeap) {
        ObjectIterator<heapType> it(heap, block);
        ObjectIterator<heapType> end(heap, block + 1);
        for ( ; it != end; ++it)
            (*it)->~JSCell();
    }
    freeBlock(heap.blocks[block]);

    // swap with the last block so we compact as we go
    heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
    heap.usedBlocks--;

    if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
        heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
        heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock*)));
    }
}
NEVER_INLINE void Heap::freeBlock(CollectorBlock* block)
{
#if PLATFORM(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(SYMBIAN)
    userChunk->Free(reinterpret_cast<TAny*>(block));
#elif PLATFORM(WINCE)
    VirtualFree(block, 0, MEM_RELEASE);
#elif PLATFORM(WIN_OS)
#if COMPILER(MINGW)
    __mingw_aligned_free(block);
#else
    _aligned_free(block);
#endif
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
#endif
}
template <HeapType heapType>
void Heap::freeBlocks()
{
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    while (heap.usedBlocks)
        freeBlock<heapType>(0);
    fastFree(heap.blocks);
    memset(&heap, 0, sizeof(CollectorHeap));
}
void Heap::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.
    // NOTE: we target the primaryHeap unconditionally as JSNumber doesn't modify cost

    if (primaryHeap.extraCost > maxExtraCost && primaryHeap.extraCost > primaryHeap.usedBlocks * BLOCK_SIZE / 2) {
        // If the last iteration through the heap deallocated blocks, we need
        // to clean up remaining garbage before marking. Otherwise, the conservative
        // marking mechanism might follow a pointer to unmapped memory.
        if (primaryHeap.didShrink)
            sweep<PrimaryHeap>();
        reset();
    }
    primaryHeap.extraCost += cost;
}
template <HeapType heapType> ALWAYS_INLINE void* Heap::heapAllocate(size_t s)
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT_UNUSED(s, s <= HeapConstants<heapType>::cellSize);

    ASSERT(heap.operationInProgress == NoOperation);
    ASSERT(heapType == PrimaryHeap || heap.extraCost == 0);

#if COLLECT_ON_EVERY_ALLOCATION
    collectAllGarbage();
    ASSERT(heap.operationInProgress == NoOperation);
#endif

allocate:

    // Fast case: find the next garbage cell and recycle it.

    do {
        ASSERT(heap.nextBlock < heap.usedBlocks);
        Block* block = reinterpret_cast<Block*>(heap.blocks[heap.nextBlock]);
        do {
            ASSERT(heap.nextCell < HeapConstants<heapType>::cellsPerBlock);
            if (!block->marked.get(heap.nextCell >> HeapConstants<heapType>::bitmapShift)) { // Always false for the last cell in the block
                Cell* cell = block->cells + heap.nextCell;
                if (heapType != NumberHeap) {
                    heap.operationInProgress = Allocation;
                    JSCell* imp = reinterpret_cast<JSCell*>(cell);
                    imp->~JSCell();
                    heap.operationInProgress = NoOperation;
                }
                ++heap.nextCell;
                return cell;
            }
        } while (++heap.nextCell != HeapConstants<heapType>::cellsPerBlock);
        heap.nextCell = 0;
    } while (++heap.nextBlock != heap.usedBlocks);

    // Slow case: reached the end of the heap. Mark live objects and start over.

    reset();
    goto allocate;
}
template <HeapType heapType>
void Heap::resizeBlocks()
{
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    heap.didShrink = false;
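    // Size the heap for the live set plus headroom: at least ALLOCATIONS_PER_COLLECTION
    // free cells, or room to double a large live set, with 25% slack before shrinking
    // so the block count doesn't oscillate between collections.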
    size_t usedCellCount = markedCells<heapType>();
    size_t minCellCount = usedCellCount + max(ALLOCATIONS_PER_COLLECTION, usedCellCount);
    size_t minBlockCount = (minCellCount + HeapConstants<heapType>::cellsPerBlock - 1) / HeapConstants<heapType>::cellsPerBlock;

    size_t maxCellCount = 1.25f * minCellCount;
    size_t maxBlockCount = (maxCellCount + HeapConstants<heapType>::cellsPerBlock - 1) / HeapConstants<heapType>::cellsPerBlock;

    if (heap.usedBlocks < minBlockCount)
        growBlocks<heapType>(minBlockCount);
    else if (heap.usedBlocks > maxBlockCount)
        shrinkBlocks<heapType>(maxBlockCount);
}
template <HeapType heapType>
void Heap::growBlocks(size_t neededBlocks)
{
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    ASSERT(heap.usedBlocks < neededBlocks);

    while (heap.usedBlocks < neededBlocks)
        allocateBlock<heapType>();
}
template <HeapType heapType>
void Heap::shrinkBlocks(size_t neededBlocks)
{
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    ASSERT(heap.usedBlocks > neededBlocks);

    // Clear the always-on last bit, so isEmpty() isn't fooled by it.
    for (size_t i = 0; i < heap.usedBlocks; ++i)
        heap.blocks[i]->marked.clear((HeapConstants<heapType>::cellsPerBlock - 1) >> HeapConstants<heapType>::bitmapShift);

    for (size_t i = 0; i != heap.usedBlocks && heap.usedBlocks != neededBlocks; ) {
        if (heap.blocks[i]->marked.isEmpty()) {
            freeBlock<heapType>(i);
            heap.didShrink = true;
        } else
            ++i;
    }

    // Reset the always-on last bit.
    for (size_t i = 0; i < heap.usedBlocks; ++i)
        heap.blocks[i]->marked.set((HeapConstants<heapType>::cellsPerBlock - 1) >> HeapConstants<heapType>::bitmapShift);
}
void* Heap::allocate(size_t s)
{
    return heapAllocate<PrimaryHeap>(s);
}

void* Heap::allocateNumber(size_t s)
{
    return heapAllocate<NumberHeap>(s);
}
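// Illustrative call site (not part of this file): cell allocation typically funnels
// through Heap::allocate() via an operator new on the cell type, along the lines of
//     void* ptr = globalData->heap.allocate(sizeof(JSString));
// heapAllocate() asserts that the requested size fits in a single collector cell.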
#if PLATFORM(WINCE)
void* g_stackBase = 0;

inline bool isPageWritable(void* page)
{
    MEMORY_BASIC_INFORMATION memoryInformation;
    DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));

    // return false on error, including ptr outside memory
    if (result != sizeof(memoryInformation))
        return false;

    DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
    return protect == PAGE_READWRITE
        || protect == PAGE_WRITECOPY
        || protect == PAGE_EXECUTE_READWRITE
        || protect == PAGE_EXECUTE_WRITECOPY;
}
static void* getStackBase(void* previousFrame)
{
    // find the address of this stack frame by taking the address of a local variable
    bool isGrowingDownward;
    void* thisFrame = (void*)(&isGrowingDownward);

    isGrowingDownward = previousFrame < &thisFrame;
    static DWORD pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO systemInfo;
        GetSystemInfo(&systemInfo);
        pageSize = systemInfo.dwPageSize;
    }

    // scan all of memory starting from this frame, and return the last writeable page found
    register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
    if (isGrowingDownward) {
        while (currentPage > 0) {
            // check for underflow
            if (currentPage >= (char*)pageSize)
                currentPage -= pageSize;
            else
                currentPage = 0;
            if (!isPageWritable(currentPage))
                return currentPage + pageSize;
        }
        return 0;
    } else {
        while (true) {
            // guaranteed to complete because isPageWritable returns false at end of memory
            currentPage += pageSize;
            if (!isPageWritable(currentPage))
                return currentPage;
        }
    }
}
#endif
#if PLATFORM(QNX)
static inline void* currentThreadStackBaseQNX()
{
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        struct _debug_thread_info threadInfo;
        memset(&threadInfo, 0, sizeof(threadInfo));
        threadInfo.tid = pthread_self();
        int fd = open("/proc/self", O_RDONLY);
        if (fd == -1) {
            LOG_ERROR("Unable to open /proc/self (errno: %d)", errno);
            return 0;
        }
        devctl(fd, DCMD_PROC_TIDSTATUS, &threadInfo, sizeof(threadInfo), 0);
        close(fd);
        stackBase = reinterpret_cast<void*>(threadInfo.stkbase);
        stackSize = threadInfo.stksize;
        ASSERT(stackBase);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
}
#endif
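// Each branch below returns the base of the current thread's stack (for a
// downward-growing stack, its highest address). The conservative scanner pairs
// this with a current stack-pointer approximation (the address of a local) to
// bound the region it walks for potential cell pointers.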
static inline void* currentThreadStackBase()
{
#if PLATFORM(DARWIN)
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86_64) && COMPILER(MSVC)
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    asm ( "movl %%fs:0x18, %0\n"
          : "=r" (pTib)
        );
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(QNX)
    return currentThreadStackBaseQNX();
#elif PLATFORM(SOLARIS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
#elif PLATFORM(OPENBSD)
    pthread_t thread = pthread_self();
    stack_t stack;
    pthread_stackseg_np(thread, &stack);
    return stack.ss_sp;
#elif PLATFORM(SYMBIAN)
    static void* stackBase = 0;
    if (stackBase == 0) {
        TThreadStackInfo info;
        RThread thread;
        thread.StackInfo(info);
        stackBase = (void*)info.iBase;
    }
    return (void*)stackBase;
#elif PLATFORM(HAIKU)
    thread_info threadInfo;
    get_thread_info(find_thread(NULL), &threadInfo);
    return threadInfo.stack_end;
#elif PLATFORM(UNIX)
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || PLATFORM(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        ASSERT(stackBase);
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
#elif PLATFORM(WINCE)
    if (g_stackBase)
        return g_stackBase;
    else {
        int dummy;
        return getStackBase(&dummy);
    }
#else
#error Need a way to get the stack base on this platform
#endif
}
#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
{
#if PLATFORM(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif PLATFORM(WIN_OS)
    return pthread_getw32threadhandle_np(pthread_self());
#endif
}

void Heap::makeUsableFromMultipleThreads()
{
    if (m_currentThreadRegistrar)
        return;

    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
    if (error)
        CRASH();
}
void Heap::registerThread()
{
    ASSERT(!m_globalData->mainThreadOnly || isMainThread());

    if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
        return;

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}
void Heap::unregisterThread(void* p)
{
    if (p)
        static_cast<Heap*>(p)->unregisterThread();
}

void Heap::unregisterThread()
{
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;
        delete t;
    } else {
        Heap::Thread* last = m_registeredThreads;
        Heap::Thread* t;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
        delete t;
    }
}

#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()
{
}

#endif
inline bool isPointerAligned(void* p)
{
    return (((intptr_t)(p) & (sizeof(char*) - 1)) == 0);
}

// Cell size needs to be a power of two for isPossibleCell to be valid.
COMPILE_ASSERT(!(sizeof(CollectorCell) & (sizeof(CollectorCell) - 1)), Collector_cell_size_is_power_of_two);
#if USE(JSVALUE32)
static bool isHalfCellAligned(void* p)
{
    return (((intptr_t)(p) & (CELL_MASK >> 1)) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isHalfCellAligned(p) && p;
}

#else

static inline bool isCellAligned(void* p)
{
    return (((intptr_t)(p) & CELL_MASK) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isCellAligned(p) && p;
}
#endif
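// Conservative marking: every pointer-aligned word in [start, end) is treated as a
// potential cell pointer. A word survives the cheap filters (cell alignment, non-null,
// offset within a block's cell array) only if it could point into a CollectorBlock;
// it is then checked against the actual block lists before being marked.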
void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(isPointerAligned(start));
    ASSERT(isPointerAligned(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);

    CollectorBlock** primaryBlocks = primaryHeap.blocks;
#if USE(JSVALUE32)
    CollectorBlock** numberBlocks = numberHeap.blocks;
#endif

    while (p != e) {
        char* x = *p++;
        if (isPossibleCell(x)) {
            size_t usedPrimaryBlocks;
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;

            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
            if (offset > lastCellOffset)
                continue;

            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
#if USE(JSVALUE32)
            // Mark the number heap; we can mark these cells directly to avoid the virtual call cost
            size_t usedNumberBlocks = numberHeap.usedBlocks;
            for (size_t block = 0; block < usedNumberBlocks; block++) {
                if (numberBlocks[block] != blockAddr)
                    continue;
                Heap::markCell(reinterpret_cast<JSCell*>(xAsBits));
                goto endMarkLoop;
            }
#endif

            // Mark the primary heap
            usedPrimaryBlocks = primaryHeap.usedBlocks;
            for (size_t block = 0; block < usedPrimaryBlocks; block++) {
                if (primaryBlocks[block] != blockAddr)
                    continue;
                markStack.append(reinterpret_cast<JSCell*>(xAsBits));
                markStack.drain();
            }
#if USE(JSVALUE32)
        endMarkLoop:
            ;
#endif
        }
    }
}
void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack)
{
    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(markStack, stackPointer, stackBase);
}
#if COMPILER(GCC)
#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
#else
#define REGISTER_BUFFER_ALIGNMENT
#endif

void Heap::markCurrentThreadConservatively(MarkStack& markStack)
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
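    // MSVC warning C4611 ("interaction between '_setjmp' and C++ object destruction
    // is non-portable") is safe to suppress here: nothing with a destructor is live
    // across the setjmp, which is used only to spill registers into 'registers'.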
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    markCurrentThreadConservativelyInternal(markStack);
}
#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_suspend(platformThread);
#elif PLATFORM(WIN_OS)
    SuspendThread(platformThread);
#else
#error Need a way to suspend threads on this platform
#endif
}

static inline void resumeThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_resume(platformThread);
#elif PLATFORM(WIN_OS)
    ResumeThread(platformThread);
#else
#error Need a way to resume threads on this platform
#endif
}
typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

#if PLATFORM(DARWIN)

#if PLATFORM(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
#endif
static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if PLATFORM(X86)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif PLATFORM(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);
// end PLATFORM(DARWIN)

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform
#endif
}
static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if __DARWIN_UNIX03

#if PLATFORM(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif PLATFORM(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if PLATFORM(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif (PLATFORM(PPC) || PLATFORM(PPC64))
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end PLATFORM(DARWIN)
#elif PLATFORM(X86) && PLATFORM(WIN_OS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}
void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(markStack, static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(markStack, stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

#endif
void Heap::markStackObjectsConservatively(MarkStack& markStack)
{
    markCurrentThreadConservatively(markStack);

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

#ifndef NDEBUG
        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside markChildren() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.
        fastMallocForbid();
#endif
        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(markStack, thread);
        }
#ifndef NDEBUG
        fastMallocAllow();
#endif
    }
#endif
}
void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

void Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    m_protectedValues.remove(k.asCell());
}
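// Illustrative use (not part of this file): protect() pins a cell-based value for
// the duration of some native-side work, independent of any references from the JS
// heap. Calls balance like a reference count, since m_protectedValues is a counted set:
//     heap.protect(value);
//     // ... value is treated as a GC root until the matching unprotect ...
//     heap.unprotect(value);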
void Heap::markProtectedObjects(MarkStack& markStack)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        markStack.append(it->first);
        markStack.drain();
    }
}
template <HeapType heapType>
void Heap::clearMarkBits()
{
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    for (size_t i = 0; i < heap.usedBlocks; ++i)
        clearMarkBits<heapType>(heap.blocks[i]);
}

template <HeapType heapType>
void Heap::clearMarkBits(CollectorBlock* block)
{
    // heapAllocate assumes that the last cell in every block is marked.
    block->marked.clearAll();
    block->marked.set((HeapConstants<heapType>::cellsPerBlock - 1) >> HeapConstants<heapType>::bitmapShift);
}
template <HeapType heapType>
size_t Heap::markedCells(size_t startBlock, size_t startCell) const
{
    const CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    ASSERT(startBlock <= heap.usedBlocks);
    ASSERT(startCell < HeapConstants<heapType>::cellsPerBlock);

    if (startBlock >= heap.usedBlocks)
        return 0;

    size_t result = 0;
    result += heap.blocks[startBlock]->marked.count(startCell);
    for (size_t i = startBlock + 1; i < heap.usedBlocks; ++i)
        result += heap.blocks[i]->marked.count();

    return result;
}
template <HeapType heapType>
void Heap::sweep()
{
    ASSERT(heapType != NumberHeap); // The number heap does not contain meaningful destructors.

    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    ASSERT(heap.operationInProgress == NoOperation);
    if (heap.operationInProgress != NoOperation)
        CRASH();
    heap.operationInProgress = Collection;

#if !ENABLE(JSC_ZOMBIES)
    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
#endif

    DeadObjectIterator<heapType> it(heap, heap.nextBlock, heap.nextCell);
    DeadObjectIterator<heapType> end(heap, heap.usedBlocks);
    for ( ; it != end; ++it) {
        JSCell* cell = *it;
#if ENABLE(JSC_ZOMBIES)
        if (!cell->isZombie()) {
            const ClassInfo* info = cell->classInfo();
            cell->~JSCell();
            new (cell) JSZombie(info, JSZombie::leakedZombieStructure());
            Heap::markCell(cell);
        }
#else
        cell->~JSCell();
        // Callers of sweep assume it's safe to mark any cell in the heap.
        new (cell) JSCell(dummyMarkableCellStructure);
#endif
    }

    heap.operationInProgress = NoOperation;
}
void Heap::markRoots()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    ASSERT((primaryHeap.operationInProgress == NoOperation) & (numberHeap.operationInProgress == NoOperation));
    if (!((primaryHeap.operationInProgress == NoOperation) & (numberHeap.operationInProgress == NoOperation)))
        CRASH();

    primaryHeap.operationInProgress = Collection;
    numberHeap.operationInProgress = Collection;

    MarkStack& markStack = m_globalData->markStack;

    // Reset mark bits.
    clearMarkBits<PrimaryHeap>();
    clearMarkBits<NumberHeap>();

    // Mark stack roots.
    markStackObjectsConservatively(markStack);
    m_globalData->interpreter->registerFile().markCallFrames(markStack, this);

    // Mark explicitly registered roots.
    markProtectedObjects(markStack);

    // Mark misc. other roots.
    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(markStack, *m_markListSet);
    if (m_globalData->exception)
        markStack.append(m_globalData->exception);
    m_globalData->smallStrings.markChildren(markStack);
    if (m_globalData->functionCodeBlockBeingReparsed)
        m_globalData->functionCodeBlockBeingReparsed->markAggregate(markStack);
    if (m_globalData->firstStringifierToMark)
        JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark);

    markStack.drain();
    markStack.compact();

    primaryHeap.operationInProgress = NoOperation;
    numberHeap.operationInProgress = NoOperation;
}
size_t Heap::objectCount() const
{
    return objectCount<PrimaryHeap>() + objectCount<NumberHeap>();
}

template <HeapType heapType>
size_t Heap::objectCount() const
{
    const CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    return heap.nextBlock * HeapConstants<heapType>::cellsPerBlock // allocated full blocks
           + heap.nextCell // allocated cells in current block
           + markedCells<heapType>(heap.nextBlock, heap.nextCell) // marked cells in remainder of heap
           - heap.usedBlocks; // 1 cell per block is a dummy sentinel
}
template <HeapType heapType>
void Heap::addToStatistics(Heap::Statistics& statistics) const
{
    const CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    statistics.size += heap.usedBlocks * BLOCK_SIZE;
    statistics.free += heap.usedBlocks * BLOCK_SIZE - (objectCount<heapType>() * HeapConstants<heapType>::cellSize);
}

Heap::Statistics Heap::statistics() const
{
    Statistics statistics = { 0, 0 };
    addToStatistics<PrimaryHeap>(statistics);
    addToStatistics<NumberHeap>(statistics);
    return statistics;
}
size_t Heap::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}
size_t Heap::protectedGlobalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            if (m_protectedValues.contains(o))
                ++count;
            o = o->next();
        } while (o != head);
    }

    return count;
}
size_t Heap::protectedObjectCount()
{
    return m_protectedValues.size();
}

static const char* typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
#if USE(JSVALUE32)
    if (cell->isNumber())
        return "number";
#endif
    if (cell->isGetterSetter())
        return "gettersetter";
    if (cell->isAPIValueWrapper())
        return "value wrapper";
    if (cell->isPropertyNameIterator())
        return "for-in iterator";
    ASSERT(cell->isObject());
    const ClassInfo* info = cell->classInfo();
    return info ? info->className : "Object";
}

HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    return counts;
}
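// Note: protectedObjectTypeCounts() heap-allocates the returned set; the caller is
// responsible for deleting it.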
bool Heap::isBusy()
{
    return (primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation);
}
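// reset() below marks and then relies on lazy sweeping: dead cells are reclaimed one
// by one as heapAllocate() revisits them. collectAllGarbage() instead sweeps the
// primary heap eagerly, so destructors run and memory shrinks right away.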
void Heap::reset()
{
    JAVASCRIPTCORE_GC_BEGIN();

    markRoots();

    JAVASCRIPTCORE_GC_MARKED();

    primaryHeap.nextCell = 0;
    primaryHeap.nextBlock = 0;
    primaryHeap.extraCost = 0;
#if ENABLE(JSC_ZOMBIES)
    sweep<PrimaryHeap>();
#endif
    resizeBlocks<PrimaryHeap>();

#if USE(JSVALUE32)
    numberHeap.nextCell = 0;
    numberHeap.nextBlock = 0;
    resizeBlocks<NumberHeap>();
#endif

    JAVASCRIPTCORE_GC_END();
}
void Heap::collectAllGarbage()
{
    JAVASCRIPTCORE_GC_BEGIN();

    // If the last iteration through the heap deallocated blocks, we need
    // to clean up remaining garbage before marking. Otherwise, the conservative
    // marking mechanism might follow a pointer to unmapped memory.
    if (primaryHeap.didShrink)
        sweep<PrimaryHeap>();

    markRoots();

    JAVASCRIPTCORE_GC_MARKED();

    primaryHeap.nextCell = 0;
    primaryHeap.nextBlock = 0;
    primaryHeap.extraCost = 0;
    sweep<PrimaryHeap>();
    resizeBlocks<PrimaryHeap>();

#if USE(JSVALUE32)
    numberHeap.nextCell = 0;
    numberHeap.nextBlock = 0;
    resizeBlocks<NumberHeap>();
#endif

    JAVASCRIPTCORE_GC_END();
}
LiveObjectIterator<PrimaryHeap> Heap::primaryHeapBegin()
{
    return LiveObjectIterator<PrimaryHeap>(primaryHeap, 0);
}

LiveObjectIterator<PrimaryHeap> Heap::primaryHeapEnd()
{
    return LiveObjectIterator<PrimaryHeap>(primaryHeap, primaryHeap.usedBlocks);
}

} // namespace JSC