Diffstat (limited to 'Source/JavaScriptCore/heap')
-rw-r--r--  Source/JavaScriptCore/heap/AllocationSpace.cpp     | 202
-rw-r--r--  Source/JavaScriptCore/heap/AllocationSpace.h       | 132
-rw-r--r--  Source/JavaScriptCore/heap/CardSet.h               |  91
-rw-r--r--  Source/JavaScriptCore/heap/ConservativeRoots.cpp   | 122
-rw-r--r--  Source/JavaScriptCore/heap/ConservativeRoots.h     |  81
-rw-r--r--  Source/JavaScriptCore/heap/DFGCodeBlocks.cpp       | 104
-rw-r--r--  Source/JavaScriptCore/heap/DFGCodeBlocks.h         |  99
-rw-r--r--  Source/JavaScriptCore/heap/Handle.h                | 190
-rw-r--r--  Source/JavaScriptCore/heap/HandleHeap.cpp          | 211
-rw-r--r--  Source/JavaScriptCore/heap/HandleHeap.h            | 309
-rw-r--r--  Source/JavaScriptCore/heap/HandleStack.cpp         |  64
-rw-r--r--  Source/JavaScriptCore/heap/HandleStack.h           | 129
-rw-r--r--  Source/JavaScriptCore/heap/HandleTypes.h           |  52
-rw-r--r--  Source/JavaScriptCore/heap/Heap.cpp                | 916
-rw-r--r--  Source/JavaScriptCore/heap/Heap.h                  | 314
-rw-r--r--  Source/JavaScriptCore/heap/HeapRootVisitor.h       |  85
-rw-r--r--  Source/JavaScriptCore/heap/ListableHandler.h       | 118
-rw-r--r--  Source/JavaScriptCore/heap/Local.h                 | 152
-rw-r--r--  Source/JavaScriptCore/heap/LocalScope.h            |  78
-rw-r--r--  Source/JavaScriptCore/heap/MachineStackMarker.cpp  | 497
-rw-r--r--  Source/JavaScriptCore/heap/MachineStackMarker.h    |  63
-rw-r--r--  Source/JavaScriptCore/heap/MarkStack.cpp           | 480
-rw-r--r--  Source/JavaScriptCore/heap/MarkStack.h             | 451
-rw-r--r--  Source/JavaScriptCore/heap/MarkedBlock.cpp         | 186
-rw-r--r--  Source/JavaScriptCore/heap/MarkedBlock.h           | 432
-rw-r--r--  Source/JavaScriptCore/heap/MarkedBlockSet.h        |  86
-rw-r--r--  Source/JavaScriptCore/heap/MarkedSpace.cpp         |  85
-rw-r--r--  Source/JavaScriptCore/heap/MarkedSpace.h           | 205
-rw-r--r--  Source/JavaScriptCore/heap/SlotVisitor.h           |  81
-rw-r--r--  Source/JavaScriptCore/heap/Strong.h                | 150
-rw-r--r--  Source/JavaScriptCore/heap/StrongInlines.h         |  55
-rw-r--r--  Source/JavaScriptCore/heap/TinyBloomFilter.h       |  67
-rw-r--r--  Source/JavaScriptCore/heap/UnconditionalFinalizer.h |  47
-rw-r--r--  Source/JavaScriptCore/heap/VTableSpectrum.cpp      |  90
-rw-r--r--  Source/JavaScriptCore/heap/VTableSpectrum.h        |  49
-rw-r--r--  Source/JavaScriptCore/heap/Weak.h                  | 169
-rw-r--r--  Source/JavaScriptCore/heap/WeakReferenceHarvester.h |  45
-rw-r--r--  Source/JavaScriptCore/heap/WriteBarrierSupport.cpp |  54
-rw-r--r--  Source/JavaScriptCore/heap/WriteBarrierSupport.h   |  99
39 files changed, 6840 insertions(+), 0 deletions(-)
diff --git a/Source/JavaScriptCore/heap/AllocationSpace.cpp b/Source/JavaScriptCore/heap/AllocationSpace.cpp
new file mode 100644
index 000000000..e363de274
--- /dev/null
+++ b/Source/JavaScriptCore/heap/AllocationSpace.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "AllocationSpace.h"
+
+#include "Heap.h"
+
+#define COLLECT_ON_EVERY_ALLOCATION 0
+
+namespace JSC {
+
+inline void* AllocationSpace::tryAllocate(MarkedSpace::SizeClass& sizeClass)
+{
+ m_heap->m_operationInProgress = Allocation;
+ void* result = m_markedSpace.allocate(sizeClass);
+ m_heap->m_operationInProgress = NoOperation;
+ return result;
+}
+
+void* AllocationSpace::allocateSlowCase(MarkedSpace::SizeClass& sizeClass)
+{
+#if COLLECT_ON_EVERY_ALLOCATION
+ m_heap->collectAllGarbage();
+ ASSERT(m_heap->m_operationInProgress == NoOperation);
+#endif
+
+ void* result = tryAllocate(sizeClass);
+
+ if (LIKELY(result != 0))
+ return result;
+
+ AllocationEffort allocationEffort;
+
+ if ((
+#if ENABLE(GGC)
+ m_markedSpace.nurseryWaterMark() < m_heap->m_minBytesPerCycle
+#else
+ m_markedSpace.waterMark() < m_markedSpace.highWaterMark()
+#endif
+ ) || !m_heap->m_isSafeToCollect)
+ allocationEffort = AllocationMustSucceed;
+ else
+ allocationEffort = AllocationCanFail;
+
+ MarkedBlock* block = allocateBlock(sizeClass.cellSize, allocationEffort);
+ if (block) {
+ m_markedSpace.addBlock(sizeClass, block);
+ void* result = tryAllocate(sizeClass);
+ ASSERT(result);
+ return result;
+ }
+
+ m_heap->collect(Heap::DoNotSweep);
+
+ result = tryAllocate(sizeClass);
+
+ if (result)
+ return result;
+
+ ASSERT(m_markedSpace.waterMark() < m_markedSpace.highWaterMark());
+
+ m_markedSpace.addBlock(sizeClass, allocateBlock(sizeClass.cellSize, AllocationMustSucceed));
+
+ result = tryAllocate(sizeClass);
+ ASSERT(result);
+ return result;
+}
+
+MarkedBlock* AllocationSpace::allocateBlock(size_t cellSize, AllocationSpace::AllocationEffort allocationEffort)
+{
+ MarkedBlock* block;
+
+ {
+ MutexLocker locker(m_heap->m_freeBlockLock);
+ if (m_heap->m_numberOfFreeBlocks) {
+ block = m_heap->m_freeBlocks.removeHead();
+ ASSERT(block);
+ m_heap->m_numberOfFreeBlocks--;
+ } else
+ block = 0;
+ }
+ if (block)
+ block = MarkedBlock::recycle(block, cellSize);
+ else if (allocationEffort == AllocationCanFail)
+ return 0;
+ else
+ block = MarkedBlock::create(m_heap, cellSize);
+
+ m_blocks.add(block);
+
+ return block;
+}
+
+void AllocationSpace::freeBlocks(MarkedBlock* head)
+{
+ MarkedBlock* next;
+ for (MarkedBlock* block = head; block; block = next) {
+ next = block->next();
+
+ m_blocks.remove(block);
+ block->sweep();
+ MutexLocker locker(m_heap->m_freeBlockLock);
+ m_heap->m_freeBlocks.append(block);
+ m_heap->m_numberOfFreeBlocks++;
+ }
+}
+
+class TakeIfUnmarked {
+public:
+ typedef MarkedBlock* ReturnType;
+
+ TakeIfUnmarked(MarkedSpace*);
+ void operator()(MarkedBlock*);
+ ReturnType returnValue();
+
+private:
+ MarkedSpace* m_markedSpace;
+ DoublyLinkedList<MarkedBlock> m_empties;
+};
+
+inline TakeIfUnmarked::TakeIfUnmarked(MarkedSpace* newSpace)
+ : m_markedSpace(newSpace)
+{
+}
+
+inline void TakeIfUnmarked::operator()(MarkedBlock* block)
+{
+ if (!block->markCountIsZero())
+ return;
+
+ m_markedSpace->removeBlock(block);
+ m_empties.append(block);
+}
+
+inline TakeIfUnmarked::ReturnType TakeIfUnmarked::returnValue()
+{
+ return m_empties.head();
+}
+
+void AllocationSpace::shrink()
+{
+ // We record a temporary list of empties to avoid modifying m_blocks while iterating it.
+ TakeIfUnmarked takeIfUnmarked(&m_markedSpace);
+ freeBlocks(forEachBlock(takeIfUnmarked));
+}
+
+#if ENABLE(GGC)
+class GatherDirtyCells {
+ WTF_MAKE_NONCOPYABLE(GatherDirtyCells);
+public:
+ typedef void* ReturnType;
+
+ explicit GatherDirtyCells(MarkedBlock::DirtyCellVector*);
+ void operator()(MarkedBlock*);
+ ReturnType returnValue() { return 0; }
+
+private:
+ MarkedBlock::DirtyCellVector* m_dirtyCells;
+};
+
+inline GatherDirtyCells::GatherDirtyCells(MarkedBlock::DirtyCellVector* dirtyCells)
+ : m_dirtyCells(dirtyCells)
+{
+}
+
+inline void GatherDirtyCells::operator()(MarkedBlock* block)
+{
+ block->gatherDirtyCells(*m_dirtyCells);
+}
+
+void AllocationSpace::gatherDirtyCells(MarkedBlock::DirtyCellVector& dirtyCells)
+{
+ GatherDirtyCells gatherDirtyCells(&dirtyCells);
+ forEachBlock(gatherDirtyCells);
+}
+#endif
+
+}
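
Note: TakeIfUnmarked above doubles as documentation of the functor protocol that forEachBlock expects: a ReturnType typedef, operator() invoked once per block, and returnValue() at the end. As a minimal sketch of another conforming functor, using only API visible in this diff (the class name is hypothetical):

    // Hypothetical functor: counts blocks that still hold marked cells.
    class CountNonEmptyBlocks {
    public:
        typedef size_t ReturnType;

        CountNonEmptyBlocks() : m_count(0) { }

        void operator()(MarkedBlock* block)
        {
            if (!block->markCountIsZero())
                ++m_count;
        }

        ReturnType returnValue() { return m_count; }

    private:
        size_t m_count;
    };

    // Usage, given an AllocationSpace& space:
    //     CountNonEmptyBlocks counter;
    //     size_t nonEmpty = space.forEachBlock(counter);
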
diff --git a/Source/JavaScriptCore/heap/AllocationSpace.h b/Source/JavaScriptCore/heap/AllocationSpace.h
new file mode 100644
index 000000000..550cb9aa3
--- /dev/null
+++ b/Source/JavaScriptCore/heap/AllocationSpace.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AllocationSpace_h
+#define AllocationSpace_h
+
+#include "MarkedBlockSet.h"
+#include "MarkedSpace.h"
+
+#include <wtf/HashSet.h>
+
+namespace JSC {
+
+class Heap;
+class MarkedBlock;
+
+class AllocationSpace {
+public:
+ AllocationSpace(Heap* heap)
+ : m_heap(heap)
+ , m_markedSpace(heap)
+ {
+ }
+
+ typedef HashSet<MarkedBlock*>::iterator BlockIterator;
+
+ MarkedBlockSet& blocks() { return m_blocks; }
+ MarkedSpace::SizeClass& sizeClassFor(size_t bytes) { return m_markedSpace.sizeClassFor(bytes); }
+ void setHighWaterMark(size_t bytes) { m_markedSpace.setHighWaterMark(bytes); }
+ size_t highWaterMark() { return m_markedSpace.highWaterMark(); }
+
+#if ENABLE(GGC)
+ void gatherDirtyCells(MarkedBlock::DirtyCellVector&);
+#endif
+
+ template<typename Functor> typename Functor::ReturnType forEachCell(Functor&);
+ template<typename Functor> typename Functor::ReturnType forEachCell();
+ template<typename Functor> typename Functor::ReturnType forEachBlock(Functor&);
+ template<typename Functor> typename Functor::ReturnType forEachBlock();
+
+ void canonicalizeCellLivenessData() { m_markedSpace.canonicalizeCellLivenessData(); }
+ void resetAllocator() { m_markedSpace.resetAllocator(); }
+
+ void* allocate(size_t);
+ void freeBlocks(MarkedBlock*);
+ void shrink();
+
+private:
+ enum AllocationEffort { AllocationMustSucceed, AllocationCanFail };
+
+ void* allocate(MarkedSpace::SizeClass&);
+ void* tryAllocate(MarkedSpace::SizeClass&);
+ void* allocateSlowCase(MarkedSpace::SizeClass&);
+ MarkedBlock* allocateBlock(size_t cellSize, AllocationEffort);
+
+ Heap* m_heap;
+ MarkedSpace m_markedSpace;
+ MarkedBlockSet m_blocks;
+};
+
+template<typename Functor> inline typename Functor::ReturnType AllocationSpace::forEachCell(Functor& functor)
+{
+ canonicalizeCellLivenessData();
+
+ BlockIterator end = m_blocks.set().end();
+ for (BlockIterator it = m_blocks.set().begin(); it != end; ++it)
+ (*it)->forEachCell(functor);
+ return functor.returnValue();
+}
+
+template<typename Functor> inline typename Functor::ReturnType AllocationSpace::forEachCell()
+{
+ Functor functor;
+ return forEachCell(functor);
+}
+
+template<typename Functor> inline typename Functor::ReturnType AllocationSpace::forEachBlock(Functor& functor)
+{
+ BlockIterator end = m_blocks.set().end();
+ for (BlockIterator it = m_blocks.set().begin(); it != end; ++it)
+ functor(*it);
+ return functor.returnValue();
+}
+
+template<typename Functor> inline typename Functor::ReturnType AllocationSpace::forEachBlock()
+{
+ Functor functor;
+ return forEachBlock(functor);
+}
+
+inline void* AllocationSpace::allocate(MarkedSpace::SizeClass& sizeClass)
+{
+ // This is a light-weight fast path to cover the most common case.
+ MarkedBlock::FreeCell* firstFreeCell = sizeClass.firstFreeCell;
+ if (UNLIKELY(!firstFreeCell))
+ return allocateSlowCase(sizeClass);
+
+ sizeClass.firstFreeCell = firstFreeCell->next;
+ return firstFreeCell;
+}
+
+inline void* AllocationSpace::allocate(size_t bytes)
+{
+ MarkedSpace::SizeClass& sizeClass = sizeClassFor(bytes);
+ return allocate(sizeClass);
+}
+
+}
+
+#endif
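
Note: the allocate() fast path above is a free-list pop: one load, one null check, one store. A self-contained sketch of the same shape outside JSC (all names below are illustrative, not JSC API):

    #include <cstddef>

    struct FreeCell { FreeCell* next; };

    struct SizeClass {
        FreeCell* firstFreeCell; // head of an intrusive singly linked free list
        size_t cellSize;
    };

    void* allocateSlowCase(SizeClass&); // refill the list, maybe collect

    inline void* allocate(SizeClass& sizeClass)
    {
        FreeCell* cell = sizeClass.firstFreeCell;
        if (!cell)
            return allocateSlowCase(sizeClass); // rare path
        sizeClass.firstFreeCell = cell->next;   // common path: pop the head
        return cell;
    }
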
diff --git a/Source/JavaScriptCore/heap/CardSet.h b/Source/JavaScriptCore/heap/CardSet.h
new file mode 100644
index 000000000..dc44c024d
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CardSet.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CardSet_h
+#define CardSet_h
+
+#include <stdint.h>
+#include <string.h>
+#include <wtf/Assertions.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+template <size_t cardSize, size_t blockSize> class CardSet {
+ WTF_MAKE_NONCOPYABLE(CardSet);
+
+public:
+ static const size_t cardCount = (blockSize + cardSize - 1) / cardSize;
+
+ CardSet()
+ {
+ memset(m_cards, 0, cardCount);
+ }
+
+ bool isCardMarkedForAtom(const void*);
+ void markCardForAtom(const void*);
+ uint8_t& cardForAtom(const void*);
+ bool isCardMarked(size_t);
+ bool testAndClear(size_t);
+
+private:
+ uint8_t m_cards[cardCount];
+ COMPILE_ASSERT(!(cardSize & (cardSize - 1)), cardSet_cardSize_is_power_of_two);
+ COMPILE_ASSERT(!(cardCount & (cardCount - 1)), cardSet_cardCount_is_power_of_two);
+};
+
+template <size_t cardSize, size_t blockSize> uint8_t& CardSet<cardSize, blockSize>::cardForAtom(const void* ptr)
+{
+ ASSERT(ptr > this && ptr < (reinterpret_cast<char*>(this) + cardCount * cardSize));
+ uintptr_t card = (reinterpret_cast<uintptr_t>(ptr) / cardSize) % cardCount;
+ return m_cards[card];
+}
+
+template <size_t cardSize, size_t blockSize> bool CardSet<cardSize, blockSize>::isCardMarkedForAtom(const void* ptr)
+{
+ return cardForAtom(ptr);
+}
+
+template <size_t cardSize, size_t blockSize> void CardSet<cardSize, blockSize>::markCardForAtom(const void* ptr)
+{
+ cardForAtom(ptr) = 1;
+}
+
+template <size_t cardSize, size_t blockSize> bool CardSet<cardSize, blockSize>::isCardMarked(size_t i)
+{
+ ASSERT(i < cardCount);
+ return m_cards[i];
+}
+
+template <size_t cardSize, size_t blockSize> bool CardSet<cardSize, blockSize>::testAndClear(size_t i)
+{
+ ASSERT(i < cardCount);
+ bool result = m_cards[i];
+ m_cards[i] = 0;
+ return result;
+}
+
+}
+
+#endif
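
Note: a hedged usage sketch of the card table. A write barrier dirties the card covering a mutated object, and the collector later sweeps and clears dirty cards. The sizes are illustrative, not JSC's actual constants, and per the ASSERT in cardForAtom the CardSet must be embedded in the block it covers:

    // Illustrative only: 512-byte cards covering a 16KB block.
    typedef JSC::CardSet<512, 16384> Cards;

    void writeBarrier(Cards& cards, const void* object)
    {
        cards.markCardForAtom(object); // O(1): a single byte store
    }

    void scanDirtyCards(Cards& cards)
    {
        for (size_t i = 0; i < Cards::cardCount; ++i) {
            if (cards.testAndClear(i)) {
                // ... re-scan the cells covered by card i ...
            }
        }
    }
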
diff --git a/Source/JavaScriptCore/heap/ConservativeRoots.cpp b/Source/JavaScriptCore/heap/ConservativeRoots.cpp
new file mode 100644
index 000000000..05c668c35
--- /dev/null
+++ b/Source/JavaScriptCore/heap/ConservativeRoots.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ConservativeRoots.h"
+
+#include "CodeBlock.h"
+#include "DFGCodeBlocks.h"
+#include "JSCell.h"
+#include "JSObject.h"
+#include "Structure.h"
+
+namespace JSC {
+
+inline bool isPointerAligned(void* p)
+{
+ return !((intptr_t)(p) & (sizeof(char*) - 1));
+}
+
+ConservativeRoots::ConservativeRoots(const MarkedBlockSet* blocks)
+ : m_roots(m_inlineRoots)
+ , m_size(0)
+ , m_capacity(inlineCapacity)
+ , m_blocks(blocks)
+{
+}
+
+ConservativeRoots::~ConservativeRoots()
+{
+ if (m_roots != m_inlineRoots)
+ OSAllocator::decommitAndRelease(m_roots, m_capacity * sizeof(JSCell*));
+}
+
+void ConservativeRoots::grow()
+{
+ size_t newCapacity = m_capacity == inlineCapacity ? nonInlineCapacity : m_capacity * 2;
+ JSCell** newRoots = static_cast<JSCell**>(OSAllocator::reserveAndCommit(newCapacity * sizeof(JSCell*)));
+ memcpy(newRoots, m_roots, m_size * sizeof(JSCell*));
+ if (m_roots != m_inlineRoots)
+ OSAllocator::decommitAndRelease(m_roots, m_capacity * sizeof(JSCell*));
+ m_capacity = newCapacity;
+ m_roots = newRoots;
+}
+
+class DummyMarkHook {
+public:
+ void mark(void*) { }
+};
+
+template<typename MarkHook>
+inline void ConservativeRoots::genericAddPointer(void* p, TinyBloomFilter filter, MarkHook& markHook)
+{
+ markHook.mark(p);
+
+ MarkedBlock* candidate = MarkedBlock::blockFor(p);
+ if (filter.ruleOut(reinterpret_cast<Bits>(candidate))) {
+ ASSERT(!candidate || !m_blocks->set().contains(candidate));
+ return;
+ }
+
+ if (!MarkedBlock::isAtomAligned(p))
+ return;
+
+ if (!m_blocks->set().contains(candidate))
+ return;
+
+ if (!candidate->isLiveCell(p))
+ return;
+
+ if (m_size == m_capacity)
+ grow();
+
+ m_roots[m_size++] = static_cast<JSCell*>(p);
+}
+
+template<typename MarkHook>
+void ConservativeRoots::genericAddSpan(void* begin, void* end, MarkHook& markHook)
+{
+ ASSERT(begin <= end);
+ ASSERT((static_cast<char*>(end) - static_cast<char*>(begin)) < 0x1000000);
+ ASSERT(isPointerAligned(begin));
+ ASSERT(isPointerAligned(end));
+
+ TinyBloomFilter filter = m_blocks->filter(); // Make a local copy of filter to show the compiler it won't alias, and can be register-allocated.
+ for (char** it = static_cast<char**>(begin); it != static_cast<char**>(end); ++it)
+ genericAddPointer(*it, filter, markHook);
+}
+
+void ConservativeRoots::add(void* begin, void* end)
+{
+ DummyMarkHook dummyMarkHook;
+ genericAddSpan(begin, end, dummyMarkHook);
+}
+
+void ConservativeRoots::add(void* begin, void* end, DFGCodeBlocks& dfgCodeBlocks)
+{
+ genericAddSpan(begin, end, dfgCodeBlocks);
+}
+
+} // namespace JSC
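
Note: a sketch of how a caller feeds a span to this class; in JSC proper the stack bounds and register capture come from MachineStackMarker, so everything below is a stand-in:

    void gatherStackRoots(JSC::ConservativeRoots& roots, void* a, void* b)
    {
        // add() requires begin <= end and pointer-aligned bounds; every
        // aligned word in the span is then treated as a candidate JSCell*.
        void* begin = a < b ? a : b;
        void* end = a < b ? b : a;
        roots.add(begin, end);
    }
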
diff --git a/Source/JavaScriptCore/heap/ConservativeRoots.h b/Source/JavaScriptCore/heap/ConservativeRoots.h
new file mode 100644
index 000000000..86dfc5886
--- /dev/null
+++ b/Source/JavaScriptCore/heap/ConservativeRoots.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ConservativeRoots_h
+#define ConservativeRoots_h
+
+#include "Heap.h"
+#include <wtf/OSAllocator.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class JSCell;
+class DFGCodeBlocks;
+class Heap;
+
+class ConservativeRoots {
+public:
+ ConservativeRoots(const MarkedBlockSet*);
+ ~ConservativeRoots();
+
+ void add(void* begin, void* end);
+ void add(void* begin, void* end, DFGCodeBlocks&);
+
+ size_t size();
+ JSCell** roots();
+
+private:
+ static const size_t inlineCapacity = 128;
+ static const size_t nonInlineCapacity = 8192 / sizeof(JSCell*);
+
+ template<typename MarkHook>
+ void genericAddPointer(void*, TinyBloomFilter, MarkHook&);
+
+ template<typename MarkHook>
+ void genericAddSpan(void*, void* end, MarkHook&);
+
+ void grow();
+
+ JSCell** m_roots;
+ size_t m_size;
+ size_t m_capacity;
+ const MarkedBlockSet* m_blocks;
+ JSCell* m_inlineRoots[inlineCapacity];
+};
+
+inline size_t ConservativeRoots::size()
+{
+ return m_size;
+}
+
+inline JSCell** ConservativeRoots::roots()
+{
+ return m_roots;
+}
+
+} // namespace JSC
+
+#endif // ConservativeRoots_h
diff --git a/Source/JavaScriptCore/heap/DFGCodeBlocks.cpp b/Source/JavaScriptCore/heap/DFGCodeBlocks.cpp
new file mode 100644
index 000000000..03c8df2ff
--- /dev/null
+++ b/Source/JavaScriptCore/heap/DFGCodeBlocks.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGCodeBlocks.h"
+
+#include "CodeBlock.h"
+#include "SlotVisitor.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+#if ENABLE(DFG_JIT)
+
+DFGCodeBlocks::DFGCodeBlocks() { }
+
+DFGCodeBlocks::~DFGCodeBlocks()
+{
+ Vector<CodeBlock*, 16> toRemove;
+
+ for (HashSet<CodeBlock*>::iterator iter = m_set.begin(); iter != m_set.end(); ++iter) {
+ if ((*iter)->m_dfgData->isJettisoned)
+ toRemove.append(*iter);
+ }
+
+ WTF::deleteAllValues(toRemove);
+}
+
+void DFGCodeBlocks::jettison(PassOwnPtr<CodeBlock> codeBlockPtr)
+{
+ // We don't want to delete it now; we just want its pointer.
+ CodeBlock* codeBlock = codeBlockPtr.leakPtr();
+
+ ASSERT(codeBlock);
+ ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);
+
+ // It should not have already been jettisoned.
+ ASSERT(!codeBlock->m_dfgData->isJettisoned);
+
+ // We should have this block already.
+ ASSERT(m_set.find(codeBlock) != m_set.end());
+
+ codeBlock->m_dfgData->isJettisoned = true;
+}
+
+void DFGCodeBlocks::clearMarks()
+{
+ for (HashSet<CodeBlock*>::iterator iter = m_set.begin(); iter != m_set.end(); ++iter)
+ (*iter)->m_dfgData->mayBeExecuting = false;
+}
+
+void DFGCodeBlocks::deleteUnmarkedJettisonedCodeBlocks()
+{
+ Vector<CodeBlock*, 16> toRemove;
+
+ for (HashSet<CodeBlock*>::iterator iter = m_set.begin(); iter != m_set.end(); ++iter) {
+ if ((*iter)->m_dfgData->isJettisoned && !(*iter)->m_dfgData->mayBeExecuting)
+ toRemove.append(*iter);
+ }
+
+ WTF::deleteAllValues(toRemove);
+}
+
+void DFGCodeBlocks::traceMarkedCodeBlocks(SlotVisitor& visitor)
+{
+ for (HashSet<CodeBlock*>::iterator iter = m_set.begin(); iter != m_set.end(); ++iter) {
+ if ((*iter)->m_dfgData->mayBeExecuting)
+ (*iter)->visitAggregate(visitor);
+ }
+}
+
+#else // ENABLE(DFG_JIT)
+
+void DFGCodeBlocks::jettison(PassOwnPtr<CodeBlock>)
+{
+}
+
+#endif // ENABLE(DFG_JIT)
+
+} // namespace JSC
+
+
diff --git a/Source/JavaScriptCore/heap/DFGCodeBlocks.h b/Source/JavaScriptCore/heap/DFGCodeBlocks.h
new file mode 100644
index 000000000..06fd5ed3d
--- /dev/null
+++ b/Source/JavaScriptCore/heap/DFGCodeBlocks.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGCodeBlocks_h
+#define DFGCodeBlocks_h
+
+#include <wtf/FastAllocBase.h>
+#include <wtf/HashSet.h>
+#include <wtf/PassOwnPtr.h>
+
+namespace JSC {
+
+class CodeBlock;
+class SlotVisitor;
+
+// DFGCodeBlocks notifies the garbage collector about optimized code blocks that
+// have different marking behavior depending on whether or not they are on the
+// stack, and that may be jettisoned. Jettisoning is the process of discarding
+// a code block after all calls to it have been unlinked. This class takes special
+// care to ensure that if there are still call frames that are using the code
+// block, then it should not be immediately deleted, but rather, it should be
+// deleted once we know that there are no longer any references to it from any
+// call frames. This class takes its name from the DFG compiler; only code blocks
+// compiled by the DFG need special marking behavior if they are on the stack, and
+// only those code blocks may be jettisoned.
+
+#if ENABLE(DFG_JIT)
+class DFGCodeBlocks {
+ WTF_MAKE_FAST_ALLOCATED;
+
+public:
+ DFGCodeBlocks();
+ ~DFGCodeBlocks();
+
+ // Inform the collector that a code block has been jettisoned from its
+ // executable and should only be kept alive if there are call frames that use
+ // it. This is typically called either from a recompilation trigger, or from
+ // an unconditional finalizer associated with a CodeBlock that had weak
+ // references, where some subset of those references were dead.
+ void jettison(PassOwnPtr<CodeBlock>);
+
+ // Clear all mark bits associated with DFG code blocks.
+ void clearMarks();
+
+ // Mark a pointer that may be a CodeBlock that belongs to the set of DFG code
+ // blocks. This is defined inline in CodeBlock.h.
+ void mark(void* candidateCodeBlock);
+
+ // Delete all jettisoned code blocks that have not been marked (i.e. are not referenced
+ // from call frames).
+ void deleteUnmarkedJettisonedCodeBlocks();
+
+ // Trace all marked code blocks (i.e. those referenced from call frames). The CodeBlock
+ // is free to make use of m_dfgData->mayBeExecuting and m_dfgData->isJettisoned.
+ void traceMarkedCodeBlocks(SlotVisitor&);
+
+private:
+ friend class CodeBlock;
+
+ HashSet<CodeBlock*> m_set;
+};
+#else
+class DFGCodeBlocks {
+ WTF_MAKE_FAST_ALLOCATED;
+
+public:
+ void jettison(PassOwnPtr<CodeBlock>);
+ void clearMarks() { }
+ void mark(void*) { }
+ void deleteUnmarkedJettisonedCodeBlocks() { }
+ void traceMarkedCodeBlocks(SlotVisitor&) { }
+};
+#endif
+
+} // namespace JSC
+
+#endif
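
Note: read together, these methods imply a per-collection protocol. A sketch (the stack-scanning step is elided; in practice ConservativeRoots calls mark() for each candidate pointer it finds in call frames):

    void collectDFGCodeBlocks(JSC::DFGCodeBlocks& dfgCodeBlocks, JSC::SlotVisitor& visitor)
    {
        dfgCodeBlocks.clearMarks();                 // 1. assume nothing is executing
        // 2. conservative stack scan: dfgCodeBlocks.mark(candidate) per pointer
        dfgCodeBlocks.traceMarkedCodeBlocks(visitor);       // 3. keep live blocks alive
        dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks(); // 4. free dead jettisoned ones
    }
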
diff --git a/Source/JavaScriptCore/heap/Handle.h b/Source/JavaScriptCore/heap/Handle.h
new file mode 100644
index 000000000..6f467743c
--- /dev/null
+++ b/Source/JavaScriptCore/heap/Handle.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Handle_h
+#define Handle_h
+
+#include "HandleTypes.h"
+
+namespace JSC {
+
+/*
+ A Handle is a smart pointer that updates automatically when the garbage
+ collector moves the object to which it points.
+
+ The base Handle class represents a temporary reference to a pointer whose
+ lifetime is guaranteed by something else.
+*/
+
+template <class T> class Handle;
+
+// Creating a JSValue Handle is invalid
+template <> class Handle<JSValue>;
+
+// Forward declare WeakGCMap
+template<typename KeyType, typename MappedType, typename FinalizerCallback, typename HashArg, typename KeyTraitsArg> class WeakGCMap;
+
+class HandleBase {
+ template <typename T> friend class Weak;
+ friend class HandleHeap;
+ friend struct JSCallbackObjectData;
+ template <typename KeyType, typename MappedType, typename FinalizerCallback, typename HashArg, typename KeyTraitsArg> friend class WeakGCMap;
+
+public:
+ bool operator!() const { return !m_slot || !*m_slot; }
+
+ // This conversion operator allows implicit conversion to bool but not to other integer types.
+ typedef JSValue (HandleBase::*UnspecifiedBoolType);
+ operator UnspecifiedBoolType*() const { return (m_slot && *m_slot) ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0; }
+
+protected:
+ HandleBase(HandleSlot slot)
+ : m_slot(slot)
+ {
+ }
+
+ void swap(HandleBase& other) { std::swap(m_slot, other.m_slot); }
+
+ HandleSlot slot() const { return m_slot; }
+ void setSlot(HandleSlot slot)
+ {
+ m_slot = slot;
+ }
+
+private:
+ HandleSlot m_slot;
+};
+
+template <typename Base, typename T> struct HandleConverter {
+ T* operator->()
+ {
+ return static_cast<Base*>(this)->get();
+ }
+ const T* operator->() const
+ {
+ return static_cast<const Base*>(this)->get();
+ }
+
+ T* operator*()
+ {
+ return static_cast<Base*>(this)->get();
+ }
+ const T* operator*() const
+ {
+ return static_cast<const Base*>(this)->get();
+ }
+};
+
+template <typename Base> struct HandleConverter<Base, Unknown> {
+ Handle<JSObject> asObject() const;
+ bool isObject() const { return jsValue().isObject(); }
+ bool getNumber(double& number) const { return jsValue().getNumber(number); }
+ UString getString(ExecState*) const;
+ bool isUndefinedOrNull() const { return jsValue().isUndefinedOrNull(); }
+
+private:
+ JSValue jsValue() const
+ {
+ return static_cast<const Base*>(this)->get();
+ }
+};
+
+template <typename T> class Handle : public HandleBase, public HandleConverter<Handle<T>, T> {
+public:
+ template <typename A, typename B> friend class HandleConverter;
+ typedef typename HandleTypes<T>::ExternalType ExternalType;
+ template <typename U> Handle(Handle<U> o)
+ {
+ typename HandleTypes<T>::template validateUpcast<U>();
+ setSlot(o.slot());
+ }
+
+ void swap(Handle& other) { HandleBase::swap(other); }
+
+ ExternalType get() const { return HandleTypes<T>::getFromSlot(this->slot()); }
+
+protected:
+ Handle(HandleSlot slot = 0)
+ : HandleBase(slot)
+ {
+ }
+
+private:
+ friend class HandleHeap;
+
+ static Handle<T> wrapSlot(HandleSlot slot)
+ {
+ return Handle<T>(slot);
+ }
+};
+
+template <typename Base> Handle<JSObject> HandleConverter<Base, Unknown>::asObject() const
+{
+ return Handle<JSObject>::wrapSlot(static_cast<const Base*>(this)->slot());
+}
+
+template <typename T, typename U> inline bool operator==(const Handle<T>& a, const Handle<U>& b)
+{
+ return a.get() == b.get();
+}
+
+template <typename T, typename U> inline bool operator==(const Handle<T>& a, U* b)
+{
+ return a.get() == b;
+}
+
+template <typename T, typename U> inline bool operator==(T* a, const Handle<U>& b)
+{
+ return a == b.get();
+}
+
+template <typename T, typename U> inline bool operator!=(const Handle<T>& a, const Handle<U>& b)
+{
+ return a.get() != b.get();
+}
+
+template <typename T, typename U> inline bool operator!=(const Handle<T>& a, U* b)
+{
+ return a.get() != b;
+}
+
+template <typename T, typename U> inline bool operator!=(T* a, const Handle<U>& b)
+{
+ return a != b.get();
+}
+
+template <typename T, typename U> inline bool operator!=(const Handle<T>& a, JSValue b)
+{
+ return a.get() != b;
+}
+
+template <typename T, typename U> inline bool operator!=(JSValue a, const Handle<U>& b)
+{
+ return a != b.get();
+}
+
+}
+
+#endif
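
Note: the UnspecifiedBoolType member in HandleBase is the pre-C++11 "safe bool" idiom. Converting to an obscure member-pointer type allows `if (handle)` while rejecting accidental conversions to int. A stripped-down variant of the same trick:

    class SafeBoolExample {
        typedef void (SafeBoolExample::*UnspecifiedBoolType)() const;
        void trueValue() const { }
    public:
        explicit SafeBoolExample(bool ok) : m_ok(ok) { }
        operator UnspecifiedBoolType() const { return m_ok ? &SafeBoolExample::trueValue : 0; }
    private:
        bool m_ok;
    };

    // if (SafeBoolExample(true)) { }  // compiles
    // int i = SafeBoolExample(true);  // rejected by the compiler
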
diff --git a/Source/JavaScriptCore/heap/HandleHeap.cpp b/Source/JavaScriptCore/heap/HandleHeap.cpp
new file mode 100644
index 000000000..2402f7efb
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HandleHeap.cpp
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "HandleHeap.h"
+
+#include "HeapRootVisitor.h"
+#include "JSObject.h"
+
+namespace JSC {
+
+WeakHandleOwner::~WeakHandleOwner()
+{
+}
+
+bool WeakHandleOwner::isReachableFromOpaqueRoots(Handle<Unknown>, void*, SlotVisitor&)
+{
+ return false;
+}
+
+void WeakHandleOwner::finalize(Handle<Unknown>, void*)
+{
+}
+
+HandleHeap::HandleHeap(JSGlobalData* globalData)
+ : m_globalData(globalData)
+ , m_nextToFinalize(0)
+{
+ grow();
+}
+
+void HandleHeap::grow()
+{
+ Node* block = m_blockStack.grow();
+ for (int i = m_blockStack.blockLength - 1; i >= 0; --i) {
+ Node* node = &block[i];
+ new (NotNull, node) Node(this);
+ m_freeList.push(node);
+ }
+}
+
+void HandleHeap::visitStrongHandles(HeapRootVisitor& heapRootVisitor)
+{
+ Node* end = m_strongList.end();
+ for (Node* node = m_strongList.begin(); node != end; node = node->next()) {
+#if ENABLE(GC_VALIDATION)
+ if (!isLiveNode(node))
+ CRASH();
+#endif
+ heapRootVisitor.visit(node->slot());
+ }
+}
+
+void HandleHeap::visitWeakHandles(HeapRootVisitor& heapRootVisitor)
+{
+ SlotVisitor& visitor = heapRootVisitor.visitor();
+
+ Node* end = m_weakList.end();
+ for (Node* node = m_weakList.begin(); node != end; node = node->next()) {
+#if ENABLE(GC_VALIDATION)
+ if (!isValidWeakNode(node))
+ CRASH();
+#endif
+ JSCell* cell = node->slot()->asCell();
+ if (Heap::isMarked(cell))
+ continue;
+
+ WeakHandleOwner* weakOwner = node->weakOwner();
+ if (!weakOwner)
+ continue;
+
+ if (!weakOwner->isReachableFromOpaqueRoots(Handle<Unknown>::wrapSlot(node->slot()), node->weakOwnerContext(), visitor))
+ continue;
+
+ heapRootVisitor.visit(node->slot());
+ }
+}
+
+void HandleHeap::finalizeWeakHandles()
+{
+ Node* end = m_weakList.end();
+ for (Node* node = m_weakList.begin(); node != end; node = m_nextToFinalize) {
+ m_nextToFinalize = node->next();
+#if ENABLE(GC_VALIDATION)
+ if (!isValidWeakNode(node))
+ CRASH();
+#endif
+
+ JSCell* cell = node->slot()->asCell();
+ if (Heap::isMarked(cell))
+ continue;
+
+ if (WeakHandleOwner* weakOwner = node->weakOwner()) {
+ weakOwner->finalize(Handle<Unknown>::wrapSlot(node->slot()), node->weakOwnerContext());
+ if (m_nextToFinalize != node->next()) // Owner deallocated node.
+ continue;
+ }
+#if ENABLE(GC_VALIDATION)
+ if (!isLiveNode(node))
+ CRASH();
+#endif
+ *node->slot() = JSValue();
+ SentinelLinkedList<Node>::remove(node);
+ m_immediateList.push(node);
+ }
+
+ m_nextToFinalize = 0;
+}
+
+void HandleHeap::writeBarrier(HandleSlot slot, const JSValue& value)
+{
+ // Forbid assignment to handles during the finalization phase, since it would violate many GC invariants.
+ // File a bug with stack trace if you hit this.
+ if (m_nextToFinalize)
+ CRASH();
+
+ if (!value == !*slot && slot->isCell() == value.isCell())
+ return;
+
+ Node* node = toNode(slot);
+#if ENABLE(GC_VALIDATION)
+ if (!isLiveNode(node))
+ CRASH();
+#endif
+ SentinelLinkedList<Node>::remove(node);
+ if (!value || !value.isCell()) {
+ m_immediateList.push(node);
+ return;
+ }
+
+ if (node->isWeak()) {
+ m_weakList.push(node);
+#if ENABLE(GC_VALIDATION)
+ if (!isLiveNode(node))
+ CRASH();
+#endif
+ return;
+ }
+
+ m_strongList.push(node);
+#if ENABLE(GC_VALIDATION)
+ if (!isLiveNode(node))
+ CRASH();
+#endif
+}
+
+unsigned HandleHeap::protectedGlobalObjectCount()
+{
+ unsigned count = 0;
+ Node* end = m_strongList.end();
+ for (Node* node = m_strongList.begin(); node != end; node = node->next()) {
+ JSValue value = *node->slot();
+ if (value.isObject() && asObject(value.asCell())->isGlobalObject())
+ count++;
+ }
+ return count;
+}
+
+#if ENABLE(GC_VALIDATION) || !ASSERT_DISABLED
+bool HandleHeap::isLiveNode(Node* node)
+{
+ if (node->prev()->next() != node)
+ return false;
+ if (node->next()->prev() != node)
+ return false;
+
+ return true;
+}
+
+bool HandleHeap::isValidWeakNode(Node* node)
+{
+ if (!isLiveNode(node))
+ return false;
+ if (!node->isWeak())
+ return false;
+
+ JSValue value = *node->slot();
+ if (!value || !value.isCell())
+ return false;
+
+ JSCell* cell = value.asCell();
+ if (!cell || !cell->structure())
+ return false;
+
+ return true;
+}
+#endif
+
+} // namespace JSC
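
Note: a hedged sketch of a WeakHandleOwner client (the class and its cache are hypothetical). isReachableFromOpaqueRoots can veto the death of a weakly-held cell, and finalize runs once the cell is actually going away:

    class CacheEntryOwner : public JSC::WeakHandleOwner {
    public:
        virtual bool isReachableFromOpaqueRoots(JSC::Handle<JSC::Unknown>, void* context, JSC::SlotVisitor&)
        {
            // Return true here to keep the cell alive, e.g. by consulting an
            // opaque-root set keyed by `context` (assumption: the embedder
            // tracks such roots during marking).
            return false;
        }
        virtual void finalize(JSC::Handle<JSC::Unknown>, void* context)
        {
            // The cell is dead: drop the cache entry identified by `context`.
        }
    };
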
diff --git a/Source/JavaScriptCore/heap/HandleHeap.h b/Source/JavaScriptCore/heap/HandleHeap.h
new file mode 100644
index 000000000..c577791d8
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HandleHeap.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HandleHeap_h
+#define HandleHeap_h
+
+#include "BlockStack.h"
+#include "Handle.h"
+#include "HashCountedSet.h"
+#include "SentinelLinkedList.h"
+#include "SinglyLinkedList.h"
+
+namespace JSC {
+
+class HandleHeap;
+class HeapRootVisitor;
+class JSGlobalData;
+class JSValue;
+class SlotVisitor;
+
+class WeakHandleOwner {
+public:
+ virtual ~WeakHandleOwner();
+ virtual bool isReachableFromOpaqueRoots(Handle<Unknown>, void* context, SlotVisitor&);
+ virtual void finalize(Handle<Unknown>, void* context);
+};
+
+class HandleHeap {
+public:
+ static HandleHeap* heapFor(HandleSlot);
+
+ HandleHeap(JSGlobalData*);
+
+ JSGlobalData* globalData();
+
+ HandleSlot allocate();
+ void deallocate(HandleSlot);
+
+ void makeWeak(HandleSlot, WeakHandleOwner* = 0, void* context = 0);
+ HandleSlot copyWeak(HandleSlot);
+
+ void visitStrongHandles(HeapRootVisitor&);
+ void visitWeakHandles(HeapRootVisitor&);
+ void finalizeWeakHandles();
+
+ void writeBarrier(HandleSlot, const JSValue&);
+
+#if !ASSERT_DISABLED
+ bool hasWeakOwner(HandleSlot, WeakHandleOwner*);
+ bool hasFinalizer(HandleSlot);
+#endif
+
+ unsigned protectedGlobalObjectCount();
+
+ template<typename Functor> void forEachStrongHandle(Functor&, const HashCountedSet<JSCell*>& skipSet);
+
+private:
+ class Node {
+ public:
+ Node(WTF::SentinelTag);
+ Node(HandleHeap*);
+
+ HandleSlot slot();
+ HandleHeap* handleHeap();
+
+ void makeWeak(WeakHandleOwner*, void* context);
+ bool isWeak();
+
+ WeakHandleOwner* weakOwner();
+ void* weakOwnerContext();
+
+ void setPrev(Node*);
+ Node* prev();
+
+ void setNext(Node*);
+ Node* next();
+
+ private:
+ WeakHandleOwner* emptyWeakOwner();
+
+ JSValue m_value;
+ HandleHeap* m_handleHeap;
+ WeakHandleOwner* m_weakOwner;
+ void* m_weakOwnerContext;
+ Node* m_prev;
+ Node* m_next;
+ };
+
+ static HandleSlot toHandle(Node*);
+ static Node* toNode(HandleSlot);
+
+ void grow();
+
+#if ENABLE(GC_VALIDATION) || !ASSERT_DISABLED
+ bool isValidWeakNode(Node*);
+ bool isLiveNode(Node*);
+#endif
+
+ JSGlobalData* m_globalData;
+ BlockStack<Node> m_blockStack;
+
+ SentinelLinkedList<Node> m_strongList;
+ SentinelLinkedList<Node> m_weakList;
+ SentinelLinkedList<Node> m_immediateList;
+ SinglyLinkedList<Node> m_freeList;
+ Node* m_nextToFinalize;
+};
+
+inline HandleHeap* HandleHeap::heapFor(HandleSlot handle)
+{
+ return toNode(handle)->handleHeap();
+}
+
+inline JSGlobalData* HandleHeap::globalData()
+{
+ return m_globalData;
+}
+
+inline HandleSlot HandleHeap::toHandle(Node* node)
+{
+ return reinterpret_cast<HandleSlot>(node);
+}
+
+inline HandleHeap::Node* HandleHeap::toNode(HandleSlot handle)
+{
+ return reinterpret_cast<Node*>(handle);
+}
+
+inline HandleSlot HandleHeap::allocate()
+{
+ // Forbid assignment to handles during the finalization phase, since it would violate many GC invariants.
+ // File a bug with stack trace if you hit this.
+ if (m_nextToFinalize)
+ CRASH();
+ if (m_freeList.isEmpty())
+ grow();
+
+ Node* node = m_freeList.pop();
+ new (NotNull, node) Node(this);
+ m_immediateList.push(node);
+ return toHandle(node);
+}
+
+inline void HandleHeap::deallocate(HandleSlot handle)
+{
+ Node* node = toNode(handle);
+ if (node == m_nextToFinalize) {
+ ASSERT(m_nextToFinalize->next());
+ m_nextToFinalize = m_nextToFinalize->next();
+ }
+
+ SentinelLinkedList<Node>::remove(node);
+ m_freeList.push(node);
+}
+
+inline HandleSlot HandleHeap::copyWeak(HandleSlot other)
+{
+ Node* node = toNode(allocate());
+ node->makeWeak(toNode(other)->weakOwner(), toNode(other)->weakOwnerContext());
+ writeBarrier(node->slot(), *other);
+ *node->slot() = *other;
+ return toHandle(node);
+}
+
+inline void HandleHeap::makeWeak(HandleSlot handle, WeakHandleOwner* weakOwner, void* context)
+{
+ // Forbid assignment to handles during the finalization phase, since it would violate many GC invariants.
+ // File a bug with stack trace if you hit this.
+ if (m_nextToFinalize)
+ CRASH();
+ Node* node = toNode(handle);
+ node->makeWeak(weakOwner, context);
+
+ SentinelLinkedList<Node>::remove(node);
+ if (!*handle || !handle->isCell()) {
+ m_immediateList.push(node);
+ return;
+ }
+
+ m_weakList.push(node);
+}
+
+#if !ASSERT_DISABLED
+inline bool HandleHeap::hasWeakOwner(HandleSlot handle, WeakHandleOwner* weakOwner)
+{
+ return toNode(handle)->weakOwner() == weakOwner;
+}
+
+inline bool HandleHeap::hasFinalizer(HandleSlot handle)
+{
+ return toNode(handle)->weakOwner();
+}
+#endif
+
+inline HandleHeap::Node::Node(HandleHeap* handleHeap)
+ : m_handleHeap(handleHeap)
+ , m_weakOwner(0)
+ , m_weakOwnerContext(0)
+ , m_prev(0)
+ , m_next(0)
+{
+}
+
+inline HandleHeap::Node::Node(WTF::SentinelTag)
+ : m_handleHeap(0)
+ , m_weakOwner(0)
+ , m_weakOwnerContext(0)
+ , m_prev(0)
+ , m_next(0)
+{
+}
+
+inline HandleSlot HandleHeap::Node::slot()
+{
+ return &m_value;
+}
+
+inline HandleHeap* HandleHeap::Node::handleHeap()
+{
+ return m_handleHeap;
+}
+
+inline void HandleHeap::Node::makeWeak(WeakHandleOwner* weakOwner, void* context)
+{
+ m_weakOwner = weakOwner ? weakOwner : emptyWeakOwner();
+ m_weakOwnerContext = context;
+}
+
+inline bool HandleHeap::Node::isWeak()
+{
+ return m_weakOwner; // True for emptyWeakOwner().
+}
+
+inline WeakHandleOwner* HandleHeap::Node::weakOwner()
+{
+ return m_weakOwner == emptyWeakOwner() ? 0 : m_weakOwner; // 0 for emptyWeakOwner().
+}
+
+inline void* HandleHeap::Node::weakOwnerContext()
+{
+ ASSERT(weakOwner());
+ return m_weakOwnerContext;
+}
+
+inline void HandleHeap::Node::setPrev(Node* prev)
+{
+ m_prev = prev;
+}
+
+inline HandleHeap::Node* HandleHeap::Node::prev()
+{
+ return m_prev;
+}
+
+inline void HandleHeap::Node::setNext(Node* next)
+{
+ m_next = next;
+}
+
+inline HandleHeap::Node* HandleHeap::Node::next()
+{
+ return m_next;
+}
+
+// Sentinel to indicate that a node is weak, but its owner has no meaningful
+// callbacks. This allows us to optimize by skipping such nodes.
+inline WeakHandleOwner* HandleHeap::Node::emptyWeakOwner()
+{
+ return reinterpret_cast<WeakHandleOwner*>(-1);
+}
+
+template<typename Functor> void HandleHeap::forEachStrongHandle(Functor& functor, const HashCountedSet<JSCell*>& skipSet)
+{
+ Node* end = m_strongList.end();
+ for (Node* node = m_strongList.begin(); node != end; node = node->next()) {
+ JSValue value = *node->slot();
+ if (!value || !value.isCell())
+ continue;
+ if (skipSet.contains(value.asCell()))
+ continue;
+ functor(value.asCell());
+ }
+}
+
+}
+
+#endif
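
Note: forEachStrongHandle takes any callable with operator()(JSCell*); a sketch of a conforming functor (the name is hypothetical):

    // Counts strong handles that point at one particular cell.
    class CountHandlesTo {
    public:
        explicit CountHandlesTo(JSC::JSCell* target) : m_target(target), m_count(0) { }
        void operator()(JSC::JSCell* cell)
        {
            if (cell == m_target)
                ++m_count;
        }
        unsigned count() const { return m_count; }
    private:
        JSC::JSCell* m_target;
        unsigned m_count;
    };
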
diff --git a/Source/JavaScriptCore/heap/HandleStack.cpp b/Source/JavaScriptCore/heap/HandleStack.cpp
new file mode 100644
index 000000000..42eb326a5
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HandleStack.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "HandleStack.h"
+
+#include "HeapRootVisitor.h"
+#include "JSValueInlineMethods.h"
+#include "JSObject.h"
+
+namespace JSC {
+
+HandleStack::HandleStack()
+#ifndef NDEBUG
+ : m_scopeDepth(0)
+#endif
+{
+ grow();
+}
+
+void HandleStack::visit(HeapRootVisitor& heapRootVisitor)
+{
+ const Vector<HandleSlot>& blocks = m_blockStack.blocks();
+ size_t blockLength = m_blockStack.blockLength;
+
+ int end = blocks.size() - 1;
+ for (int i = 0; i < end; ++i) {
+ HandleSlot block = blocks[i];
+ heapRootVisitor.visit(block, blockLength);
+ }
+ HandleSlot block = blocks[end];
+ heapRootVisitor.visit(block, m_frame.m_next - block);
+}
+
+void HandleStack::grow()
+{
+ HandleSlot block = m_blockStack.grow();
+ m_frame.m_next = block;
+ m_frame.m_end = block + m_blockStack.blockLength;
+}
+
+}
diff --git a/Source/JavaScriptCore/heap/HandleStack.h b/Source/JavaScriptCore/heap/HandleStack.h
new file mode 100644
index 000000000..a1e8a0b2a
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HandleStack.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HandleStack_h
+#define HandleStack_h
+
+#include "Assertions.h"
+#include "BlockStack.h"
+#include "Handle.h"
+
+#include <wtf/UnusedParam.h>
+
+namespace JSC {
+
+class LocalScope;
+class HeapRootVisitor;
+
+class HandleStack {
+public:
+ class Frame {
+ public:
+ HandleSlot m_next;
+ HandleSlot m_end;
+ };
+
+ HandleStack();
+
+ void enterScope(Frame&);
+ void leaveScope(Frame&);
+
+ HandleSlot push();
+
+ void visit(HeapRootVisitor&);
+
+private:
+ void grow();
+ void zapTo(Frame&);
+ HandleSlot findFirstAfter(HandleSlot);
+
+#ifndef NDEBUG
+ size_t m_scopeDepth;
+#endif
+ BlockStack<JSValue> m_blockStack;
+ Frame m_frame;
+};
+
+inline void HandleStack::enterScope(Frame& lastFrame)
+{
+#ifndef NDEBUG
+ ++m_scopeDepth;
+#endif
+
+ lastFrame = m_frame;
+}
+
+
+
+inline void HandleStack::zapTo(Frame& lastFrame)
+{
+#ifdef NDEBUG
+ UNUSED_PARAM(lastFrame);
+#else
+ const Vector<HandleSlot>& blocks = m_blockStack.blocks();
+
+ if (lastFrame.m_end != m_frame.m_end) { // Zapping to a frame in a different block.
+ int i = blocks.size() - 1;
+ for ( ; blocks[i] + m_blockStack.blockLength != lastFrame.m_end; --i) {
+ for (int j = m_blockStack.blockLength - 1; j >= 0; --j)
+ blocks[i][j] = JSValue();
+ }
+
+ for (HandleSlot it = blocks[i] + m_blockStack.blockLength - 1; it != lastFrame.m_next - 1; --it)
+ *it = JSValue();
+
+ return;
+ }
+
+ for (HandleSlot it = m_frame.m_next - 1; it != lastFrame.m_next - 1; --it)
+ *it = JSValue();
+#endif
+}
+
+inline void HandleStack::leaveScope(Frame& lastFrame)
+{
+#ifndef NDEBUG
+ --m_scopeDepth;
+#endif
+
+ zapTo(lastFrame);
+
+ if (lastFrame.m_end != m_frame.m_end) // Popping to a frame in a different block.
+ m_blockStack.shrink(lastFrame.m_end);
+
+ m_frame = lastFrame;
+}
+
+inline HandleSlot HandleStack::push()
+{
+ ASSERT(m_scopeDepth); // Creating a Local outside of a LocalScope is a memory leak.
+ if (m_frame.m_next == m_frame.m_end)
+ grow();
+ return m_frame.m_next++;
+}
+
+}
+
+#endif
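
Note: enterScope/leaveScope are meant to be driven by an RAII wrapper (LocalScope, declared in the sibling LocalScope.h listed in the diffstat); a sketch of that shape:

    class ScopeSketch {
    public:
        explicit ScopeSketch(JSC::HandleStack& stack)
            : m_stack(stack)
        {
            m_stack.enterScope(m_frame); // snapshot the current frame
        }
        ~ScopeSketch()
        {
            m_stack.leaveScope(m_frame); // zap and pop everything pushed since
        }
    private:
        JSC::HandleStack& m_stack;
        JSC::HandleStack::Frame m_frame;
    };
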
diff --git a/Source/JavaScriptCore/heap/HandleTypes.h b/Source/JavaScriptCore/heap/HandleTypes.h
new file mode 100644
index 000000000..780ab85cd
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HandleTypes.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HandleTypes_h
+#define HandleTypes_h
+
+#include "JSValue.h"
+
+namespace JSC {
+
+typedef enum { } Unknown;
+typedef JSValue* HandleSlot;
+
+template<typename T> struct HandleTypes {
+ typedef T* ExternalType;
+ static ExternalType getFromSlot(HandleSlot slot) { return (slot && *slot) ? reinterpret_cast<ExternalType>(slot->asCell()) : 0; }
+ static JSValue toJSValue(T* cell) { return reinterpret_cast<JSCell*>(cell); }
+ template<typename U> static void validateUpcast() { T* temp; temp = (U*)0; }
+};
+
+template<> struct HandleTypes<Unknown> {
+ typedef JSValue ExternalType;
+ static ExternalType getFromSlot(HandleSlot slot) { return slot ? *slot : JSValue(); }
+ static JSValue toJSValue(const JSValue& v) { return v; }
+ template<typename U> static void validateUpcast() { }
+};
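+
+// Sketch of what these traits provide (hypothetical uses; Handle<T> in
+// Handle.h delegates to them): a typed handle hands out cell pointers, the
+// Unknown specialization hands out full JSValues, and validateUpcast()
+// rejects invalid conversions at compile time.
+//
+//     HandleTypes<JSObject>::ExternalType object; // JSObject*
+//     HandleTypes<Unknown>::ExternalType value;   // JSValue
+//     HandleTypes<JSCell>::validateUpcast<JSObject>(); // ok: JSObject* converts to JSCell*
+//     HandleTypes<JSObject>::validateUpcast<JSCell>(); // would fail to compile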
+
+} // namespace JSC
+
+#endif // HandleTypes_h
diff --git a/Source/JavaScriptCore/heap/Heap.cpp b/Source/JavaScriptCore/heap/Heap.cpp
new file mode 100644
index 000000000..61eba08a4
--- /dev/null
+++ b/Source/JavaScriptCore/heap/Heap.cpp
@@ -0,0 +1,916 @@
+/*
+ * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include "config.h"
+#include "Heap.h"
+
+#include "CodeBlock.h"
+#include "ConservativeRoots.h"
+#include "GCActivityCallback.h"
+#include "HeapRootVisitor.h"
+#include "Interpreter.h"
+#include "JSGlobalData.h"
+#include "JSGlobalObject.h"
+#include "JSLock.h"
+#include "JSONObject.h"
+#include "Tracing.h"
+#include <algorithm>
+#include <wtf/CurrentTime.h>
+
+
+using namespace std;
+using namespace JSC;
+
+namespace JSC {
+
+namespace {
+
+#if CPU(X86) || CPU(X86_64)
+static const size_t largeHeapSize = 16 * 1024 * 1024;
+#elif PLATFORM(IOS)
+static const size_t largeHeapSize = 8 * 1024 * 1024;
+#else
+static const size_t largeHeapSize = 512 * 1024;
+#endif
+static const size_t smallHeapSize = 512 * 1024;
+
+#if ENABLE(GC_LOGGING)
+#if COMPILER(CLANG)
+#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
+_Pragma("clang diagnostic push") \
+_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
+_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
+static type name arguments; \
+_Pragma("clang diagnostic pop")
+#else
+#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
+static type name arguments;
+#endif // COMPILER(CLANG)
+
+struct GCTimer {
+ GCTimer(const char* name)
+ : m_time(0)
+ , m_min(100000000)
+ , m_max(0)
+ , m_count(0)
+ , m_name(name)
+ {
+ }
+ ~GCTimer()
+ {
+ printf("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min * 1000, m_max * 1000);
+ }
+ double m_time;
+ double m_min;
+ double m_max;
+ size_t m_count;
+ const char* m_name;
+};
+
+struct GCTimerScope {
+ GCTimerScope(GCTimer* timer)
+ : m_timer(timer)
+ , m_start(WTF::currentTime())
+ {
+ }
+ ~GCTimerScope()
+ {
+ double delta = WTF::currentTime() - m_start;
+ if (delta < m_timer->m_min)
+ m_timer->m_min = delta;
+ if (delta > m_timer->m_max)
+ m_timer->m_max = delta;
+ m_timer->m_count++;
+ m_timer->m_time += delta;
+ }
+ GCTimer* m_timer;
+ double m_start;
+};
+
+struct GCCounter {
+ GCCounter(const char* name)
+ : m_name(name)
+ , m_count(0)
+ , m_total(0)
+ , m_min(10000000)
+ , m_max(0)
+ {
+ }
+
+ void count(size_t amount)
+ {
+ m_count++;
+ m_total += amount;
+ if (amount < m_min)
+ m_min = amount;
+ if (amount > m_max)
+ m_max = amount;
+ }
+ ~GCCounter()
+ {
+ printf("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max);
+ }
+ const char* m_name;
+ size_t m_count;
+ size_t m_total;
+ size_t m_min;
+ size_t m_max;
+};
+
+#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer)
+#define COND_GCPHASE(cond, name1, name2) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name1##Timer, (#name1)); DEFINE_GC_LOGGING_GLOBAL(GCTimer, name2##Timer, (#name2)); GCTimerScope name1##CondTimerScope(cond ? &name1##Timer : &name2##Timer)
+#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)
+
+#else
+
+#define GCPHASE(name) do { } while (false)
+#define COND_GCPHASE(cond, name1, name2) do { } while (false)
+#define GCCOUNTER(name, value) do { } while (false)
+#endif
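+
+// Usage sketch: with GC_LOGGING enabled, GCPHASE(Foo) declares a function-
+// local static GCTimer named FooTimer plus a GCTimerScope that times the
+// enclosing block; the timer's destructor prints cumulative stats at exit.
+//
+//     {
+//         GCPHASE(VisitSomething);
+//         ... the work being timed ...
+//     } // GCTimerScope's destructor records the elapsed time
+//
+// With GC_LOGGING disabled, the macros expand to empty do/while statements.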
+
+static size_t heapSizeForHint(HeapSize heapSize)
+{
+ if (heapSize == LargeHeap)
+ return largeHeapSize;
+ ASSERT(heapSize == SmallHeap);
+ return smallHeapSize;
+}
+
+static inline bool isValidSharedInstanceThreadState()
+{
+ if (!JSLock::lockCount())
+ return false;
+
+ if (!JSLock::currentThreadIsHoldingLock())
+ return false;
+
+ return true;
+}
+
+static inline bool isValidThreadState(JSGlobalData* globalData)
+{
+ if (globalData->identifierTable != wtfThreadData().currentIdentifierTable())
+ return false;
+
+ if (globalData->isSharedInstance() && !isValidSharedInstanceThreadState())
+ return false;
+
+ return true;
+}
+
+class CountFunctor {
+public:
+ typedef size_t ReturnType;
+
+ CountFunctor();
+ void count(size_t);
+ ReturnType returnValue();
+
+private:
+ ReturnType m_count;
+};
+
+inline CountFunctor::CountFunctor()
+ : m_count(0)
+{
+}
+
+inline void CountFunctor::count(size_t count)
+{
+ m_count += count;
+}
+
+inline CountFunctor::ReturnType CountFunctor::returnValue()
+{
+ return m_count;
+}
+
+struct ClearMarks : MarkedBlock::VoidFunctor {
+ void operator()(MarkedBlock*);
+};
+
+inline void ClearMarks::operator()(MarkedBlock* block)
+{
+ block->clearMarks();
+}
+
+struct Sweep : MarkedBlock::VoidFunctor {
+ void operator()(MarkedBlock*);
+};
+
+inline void Sweep::operator()(MarkedBlock* block)
+{
+ block->sweep();
+}
+
+struct MarkCount : CountFunctor {
+ void operator()(MarkedBlock*);
+};
+
+inline void MarkCount::operator()(MarkedBlock* block)
+{
+ count(block->markCount());
+}
+
+struct Size : CountFunctor {
+ void operator()(MarkedBlock*);
+};
+
+inline void Size::operator()(MarkedBlock* block)
+{
+ count(block->markCount() * block->cellSize());
+}
+
+struct Capacity : CountFunctor {
+ void operator()(MarkedBlock*);
+};
+
+inline void Capacity::operator()(MarkedBlock* block)
+{
+ count(block->capacity());
+}
+
+struct Count : public CountFunctor {
+ void operator()(JSCell*);
+};
+
+inline void Count::operator()(JSCell*)
+{
+ count(1);
+}
+
+struct CountIfGlobalObject : CountFunctor {
+ void operator()(JSCell*);
+};
+
+inline void CountIfGlobalObject::operator()(JSCell* cell)
+{
+ if (!cell->isObject())
+ return;
+ if (!asObject(cell)->isGlobalObject())
+ return;
+ count(1);
+}
+
+class RecordType {
+public:
+ typedef PassOwnPtr<TypeCountSet> ReturnType;
+
+ RecordType();
+ void operator()(JSCell*);
+ ReturnType returnValue();
+
+private:
+ const char* typeName(JSCell*);
+ OwnPtr<TypeCountSet> m_typeCountSet;
+};
+
+inline RecordType::RecordType()
+ : m_typeCountSet(adoptPtr(new TypeCountSet))
+{
+}
+
+inline const char* RecordType::typeName(JSCell* cell)
+{
+ const ClassInfo* info = cell->classInfo();
+ if (!info || !info->className)
+ return "[unknown]";
+ return info->className;
+}
+
+inline void RecordType::operator()(JSCell* cell)
+{
+ m_typeCountSet->add(typeName(cell));
+}
+
+inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
+{
+ return m_typeCountSet.release();
+}
+
+} // anonymous namespace
+
+Heap::Heap(JSGlobalData* globalData, HeapSize heapSize)
+ : m_heapSize(heapSize)
+ , m_minBytesPerCycle(heapSizeForHint(heapSize))
+ , m_lastFullGCSize(0)
+ , m_operationInProgress(NoOperation)
+ , m_objectSpace(this)
+ , m_blockFreeingThreadShouldQuit(false)
+ , m_extraCost(0)
+ , m_markListSet(0)
+ , m_activityCallback(DefaultGCActivityCallback::create(this))
+ , m_machineThreads(this)
+ , m_sharedData(globalData)
+ , m_slotVisitor(m_sharedData)
+ , m_handleHeap(globalData)
+ , m_isSafeToCollect(false)
+ , m_globalData(globalData)
+{
+ m_objectSpace.setHighWaterMark(m_minBytesPerCycle);
+ (*m_activityCallback)();
+ m_numberOfFreeBlocks = 0;
+ m_blockFreeingThread = createThread(blockFreeingThreadStartFunc, this, "JavaScriptCore::BlockFree");
+
+ ASSERT(m_blockFreeingThread);
+}
+
+Heap::~Heap()
+{
+ // Destroy our block freeing thread.
+ {
+ MutexLocker locker(m_freeBlockLock);
+ m_blockFreeingThreadShouldQuit = true;
+ m_freeBlockCondition.broadcast();
+ }
+ waitForThreadCompletion(m_blockFreeingThread, 0);
+
+ // The destroy function must already have been called, so assert this.
+ ASSERT(!m_globalData);
+}
+
+void Heap::destroy()
+{
+ JSLock lock(SilenceAssertionsOnly);
+
+ if (!m_globalData)
+ return;
+
+ ASSERT(!m_globalData->dynamicGlobalObject);
+ ASSERT(m_operationInProgress == NoOperation);
+
+ // The global object is not GC protected at this point, so sweeping may delete it
+ // (and thus the global data) before other objects that may use the global data.
+ RefPtr<JSGlobalData> protect(m_globalData);
+
+#if ENABLE(JIT)
+ m_globalData->jitStubs->clearHostFunctionStubs();
+#endif
+
+ delete m_markListSet;
+ m_markListSet = 0;
+
+ canonicalizeCellLivenessData();
+ clearMarks();
+
+ m_handleHeap.finalizeWeakHandles();
+ m_globalData->smallStrings.finalizeSmallStrings();
+ shrink();
+ ASSERT(!size());
+
+#if ENABLE(SIMPLE_HEAP_PROFILING)
+ m_slotVisitor.m_visitedTypeCounts.dump(stderr, "Visited Type Counts");
+ m_destroyedTypeCounts.dump(stderr, "Destroyed Type Counts");
+#endif
+
+ releaseFreeBlocks();
+
+ m_globalData = 0;
+}
+
+void Heap::waitForRelativeTimeWhileHoldingLock(double relative)
+{
+ if (m_blockFreeingThreadShouldQuit)
+ return;
+ m_freeBlockCondition.timedWait(m_freeBlockLock, currentTime() + relative);
+}
+
+void Heap::waitForRelativeTime(double relative)
+{
+ // If this returns early, that's fine, so long as it doesn't do it too
+ // frequently. It would only be a bug if this function failed to return
+ // when it was asked to do so.
+
+ MutexLocker locker(m_freeBlockLock);
+ waitForRelativeTimeWhileHoldingLock(relative);
+}
+
+void* Heap::blockFreeingThreadStartFunc(void* heap)
+{
+ static_cast<Heap*>(heap)->blockFreeingThreadMain();
+ return 0;
+}
+
+void Heap::blockFreeingThreadMain()
+{
+ while (!m_blockFreeingThreadShouldQuit) {
+ // Generally wait for one second before scavenging free blocks. This
+ // may return early, particularly when we're being asked to quit.
+ waitForRelativeTime(1.0);
+ if (m_blockFreeingThreadShouldQuit)
+ break;
+
+ // Now process the list of free blocks. Keep freeing until half of the
+ // blocks that are currently on the list are gone. Assume that a size_t
+ // field can be accessed atomically.
+ size_t currentNumberOfFreeBlocks = m_numberOfFreeBlocks;
+ if (!currentNumberOfFreeBlocks)
+ continue;
+
+ size_t desiredNumberOfFreeBlocks = currentNumberOfFreeBlocks / 2;
+
+ while (!m_blockFreeingThreadShouldQuit) {
+ MarkedBlock* block;
+ {
+ MutexLocker locker(m_freeBlockLock);
+ if (m_numberOfFreeBlocks <= desiredNumberOfFreeBlocks)
+ block = 0;
+ else {
+ block = m_freeBlocks.removeHead();
+ ASSERT(block);
+ m_numberOfFreeBlocks--;
+ }
+ }
+
+ if (!block)
+ break;
+
+ MarkedBlock::destroy(block);
+ }
+ }
+}
+
+void Heap::reportExtraMemoryCostSlowCase(size_t cost)
+{
+ // Our frequency of garbage collection tries to balance memory use against speed
+ // by collecting based on the number of newly created values. However, for values
+ // that hold on to a great deal of memory that's not in the form of other JS values,
+ // that is not good enough - in some cases a lot of those objects can pile up and
+ // use crazy amounts of memory without a GC happening. So we track these extra
+ // memory costs. Only unusually large objects are noted, and we only keep track
+ // of this extra cost until the next GC. In garbage collected languages, most values
+ // are either very short lived temporaries, or have extremely long lifetimes. So
+ // if a large value survives one garbage collection, there is not much point to
+ // collecting more frequently as long as it stays alive.
+
+ if (m_extraCost > maxExtraCost && m_extraCost > m_objectSpace.highWaterMark() / 2)
+ collectAllGarbage();
+ m_extraCost += cost;
+}
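+
+// Worked example of the heuristic above: with an 8 MB high water mark the
+// trigger threshold is effectively max(maxExtraCost, highWaterMark / 2),
+// i.e. 4 MB, and the check runs before the new cost is added:
+//
+//     heap->reportExtraMemoryCost(2 * 1024 * 1024); // m_extraCost: 2 MB
+//     heap->reportExtraMemoryCost(3 * 1024 * 1024); // m_extraCost: 5 MB
+//     heap->reportExtraMemoryCost(1024 * 1024);     // 5 MB > 4 MB: collects first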
+
+void Heap::protect(JSValue k)
+{
+ ASSERT(k);
+ ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());
+
+ if (!k.isCell())
+ return;
+
+ m_protectedValues.add(k.asCell());
+}
+
+bool Heap::unprotect(JSValue k)
+{
+ ASSERT(k);
+ ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());
+
+ if (!k.isCell())
+ return false;
+
+ return m_protectedValues.remove(k.asCell());
+}
+
+void Heap::jettisonDFGCodeBlock(PassOwnPtr<CodeBlock> codeBlock)
+{
+ m_dfgCodeBlocks.jettison(codeBlock);
+}
+
+void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
+{
+ ProtectCountSet::iterator end = m_protectedValues.end();
+ for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
+ heapRootVisitor.visit(&it->first);
+}
+
+void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
+{
+ m_tempSortingVectors.append(tempVector);
+}
+
+void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
+{
+ ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
+ m_tempSortingVectors.removeLast();
+}
+
+void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
+{
+ typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors;
+
+ VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
+ for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
+ Vector<ValueStringPair>* tempSortingVector = *it;
+
+ Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
+ for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
+ if (vectorIt->first)
+ heapRootVisitor.visit(&vectorIt->first);
+ }
+ }
+}
+
+void Heap::harvestWeakReferences()
+{
+ m_slotVisitor.harvestWeakReferences();
+}
+
+void Heap::finalizeUnconditionalFinalizers()
+{
+ m_slotVisitor.finalizeUnconditionalFinalizers();
+}
+
+inline RegisterFile& Heap::registerFile()
+{
+ return m_globalData->interpreter->registerFile();
+}
+
+void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
+{
+ ASSERT(isValidThreadState(m_globalData));
+ if (m_operationInProgress != NoOperation)
+ CRASH();
+ m_operationInProgress = Collection;
+ ConservativeRoots registerFileRoots(&m_objectSpace.blocks());
+ registerFile().gatherConservativeRoots(registerFileRoots);
+ size_t registerFileRootCount = registerFileRoots.size();
+ JSCell** registerRoots = registerFileRoots.roots();
+ for (size_t i = 0; i < registerFileRootCount; i++) {
+ setMarked(registerRoots[i]);
+ roots.add(registerRoots[i]);
+ }
+ m_operationInProgress = NoOperation;
+}
+
+void Heap::markRoots(bool fullGC)
+{
+ SamplingRegion samplingRegion("Garbage Collection: Tracing");
+
+ COND_GCPHASE(fullGC, MarkFullRoots, MarkYoungRoots);
+ UNUSED_PARAM(fullGC);
+ ASSERT(isValidThreadState(m_globalData));
+ if (m_operationInProgress != NoOperation)
+ CRASH();
+ m_operationInProgress = Collection;
+
+ void* dummy;
+
+ // We gather conservative roots before clearing mark bits because conservative
+ // gathering uses the mark bits to determine whether a reference is valid.
+ ConservativeRoots machineThreadRoots(&m_objectSpace.blocks());
+ {
+ GCPHASE(GatherConservativeRoots);
+ m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
+ }
+
+ ConservativeRoots registerFileRoots(&m_objectSpace.blocks());
+ m_dfgCodeBlocks.clearMarks();
+ {
+ GCPHASE(GatherRegisterFileRoots);
+ registerFile().gatherConservativeRoots(registerFileRoots, m_dfgCodeBlocks);
+ }
+#if ENABLE(GGC)
+ MarkedBlock::DirtyCellVector dirtyCells;
+ if (!fullGC) {
+ GCPHASE(GatheringDirtyCells);
+ m_objectSpace.gatherDirtyCells(dirtyCells);
+ } else
+#endif
+ {
+ GCPHASE(clearMarks);
+ clearMarks();
+ }
+
+ SlotVisitor& visitor = m_slotVisitor;
+ HeapRootVisitor heapRootVisitor(visitor);
+
+ {
+ ParallelModeEnabler enabler(visitor);
+#if ENABLE(GGC)
+ {
+ size_t dirtyCellCount = dirtyCells.size();
+ GCPHASE(VisitDirtyCells);
+ GCCOUNTER(DirtyCellCount, dirtyCellCount);
+ for (size_t i = 0; i < dirtyCellCount; i++) {
+ heapRootVisitor.visitChildren(dirtyCells[i]);
+ visitor.donateAndDrain();
+ }
+ }
+#endif
+
+ if (m_globalData->codeBlocksBeingCompiled.size()) {
+ GCPHASE(VisitActiveCodeBlock);
+ for (size_t i = 0; i < m_globalData->codeBlocksBeingCompiled.size(); i++)
+ m_globalData->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
+ }
+
+ {
+ GCPHASE(VisitMachineRoots);
+ visitor.append(machineThreadRoots);
+ visitor.donateAndDrain();
+ }
+ {
+ GCPHASE(VisitRegisterFileRoots);
+ visitor.append(registerFileRoots);
+ visitor.donateAndDrain();
+ }
+ {
+ GCPHASE(VisitProtectedObjects);
+ markProtectedObjects(heapRootVisitor);
+ visitor.donateAndDrain();
+ }
+ {
+ GCPHASE(VisitTempSortVectors);
+ markTempSortVectors(heapRootVisitor);
+ visitor.donateAndDrain();
+ }
+
+ {
+ GCPHASE(MarkingArgumentBuffers);
+ if (m_markListSet && m_markListSet->size()) {
+ MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
+ visitor.donateAndDrain();
+ }
+ }
+ if (m_globalData->exception) {
+ GCPHASE(MarkingException);
+ heapRootVisitor.visit(&m_globalData->exception);
+ visitor.donateAndDrain();
+ }
+
+ {
+ GCPHASE(VisitStrongHandles);
+ m_handleHeap.visitStrongHandles(heapRootVisitor);
+ visitor.donateAndDrain();
+ }
+
+ {
+ GCPHASE(HandleStack);
+ m_handleStack.visit(heapRootVisitor);
+ visitor.donateAndDrain();
+ }
+
+ {
+ GCPHASE(TraceCodeBlocks);
+ m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
+ visitor.donateAndDrain();
+ }
+
+#if ENABLE(PARALLEL_GC)
+ {
+ GCPHASE(Convergence);
+ visitor.drainFromShared(SlotVisitor::MasterDrain);
+ }
+#endif
+ }
+
+ // Weak handles must be marked last, because their owners use the set of
+ // opaque roots to determine reachability.
+ {
+ GCPHASE(VisitingWeakHandles);
+ while (true) {
+ m_handleHeap.visitWeakHandles(heapRootVisitor);
+ harvestWeakReferences();
+ if (visitor.isEmpty())
+ break;
+ {
+ ParallelModeEnabler enabler(visitor);
+ visitor.donateAndDrain();
+#if ENABLE(PARALLEL_GC)
+ visitor.drainFromShared(SlotVisitor::MasterDrain);
+#endif
+ }
+ }
+ }
+ GCCOUNTER(VisitedValueCount, visitor.visitCount());
+
+ visitor.reset();
+ m_sharedData.reset();
+
+ m_operationInProgress = NoOperation;
+}
+
+void Heap::clearMarks()
+{
+ m_objectSpace.forEachBlock<ClearMarks>();
+}
+
+void Heap::sweep()
+{
+ m_objectSpace.forEachBlock<Sweep>();
+}
+
+size_t Heap::objectCount()
+{
+ return m_objectSpace.forEachBlock<MarkCount>();
+}
+
+size_t Heap::size()
+{
+ return m_objectSpace.forEachBlock<Size>();
+}
+
+size_t Heap::capacity()
+{
+ return m_objectSpace.forEachBlock<Capacity>();
+}
+
+size_t Heap::protectedGlobalObjectCount()
+{
+ return forEachProtectedCell<CountIfGlobalObject>();
+}
+
+size_t Heap::globalObjectCount()
+{
+ return m_objectSpace.forEachCell<CountIfGlobalObject>();
+}
+
+size_t Heap::protectedObjectCount()
+{
+ return forEachProtectedCell<Count>();
+}
+
+PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
+{
+ return forEachProtectedCell<RecordType>();
+}
+
+PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
+{
+ return m_objectSpace.forEachCell<RecordType>();
+}
+
+void Heap::collectAllGarbage()
+{
+ if (!m_isSafeToCollect)
+ return;
+ if (!m_globalData->dynamicGlobalObject)
+ m_globalData->recompileAllJSFunctions();
+
+ collect(DoSweep);
+}
+
+void Heap::collect(SweepToggle sweepToggle)
+{
+ SamplingRegion samplingRegion("Garbage Collection");
+
+ GCPHASE(Collect);
+ ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
+ ASSERT(m_isSafeToCollect);
+ JAVASCRIPTCORE_GC_BEGIN();
+#if ENABLE(GGC)
+ bool fullGC = sweepToggle == DoSweep;
+ if (!fullGC)
+ fullGC = (capacity() > 4 * m_lastFullGCSize);
+#else
+ bool fullGC = true;
+#endif
+ {
+ GCPHASE(Canonicalize);
+ canonicalizeCellLivenessData();
+ }
+
+ markRoots(fullGC);
+
+ {
+ GCPHASE(FinalizeUnconditionalFinalizers);
+ finalizeUnconditionalFinalizers();
+ }
+
+ {
+ GCPHASE(FinalizeWeakHandles);
+ m_handleHeap.finalizeWeakHandles();
+ m_globalData->smallStrings.finalizeSmallStrings();
+ }
+
+ JAVASCRIPTCORE_GC_MARKED();
+
+ {
+ GCPHASE(ResetAllocator);
+ resetAllocator();
+ }
+
+ {
+ GCPHASE(DeleteCodeBlocks);
+ m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
+ }
+
+ if (sweepToggle == DoSweep) {
+ SamplingRegion samplingRegion("Garbage Collection: Sweeping");
+ GCPHASE(Sweeping);
+ sweep();
+ shrink();
+ }
+
+ // To avoid pathological GC churn in large heaps, we set the allocation high
+ // water mark to be proportional to the current size of the heap. The exact
+ // proportion is a bit arbitrary. A 2X multiplier gives a 1:1 (heap size :
+ // new bytes allocated) proportion, and seems to work well in benchmarks.
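+    // For example, if 10 MB of objects survive a full collection, the high
+    // water mark becomes max(20 MB, m_minBytesPerCycle), so roughly another
+    // 10 MB can be allocated before the next collection is forced.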
+ size_t newSize = size();
+ size_t proportionalBytes = 2 * newSize;
+ if (fullGC) {
+ m_lastFullGCSize = newSize;
+ m_objectSpace.setHighWaterMark(max(proportionalBytes, m_minBytesPerCycle));
+ }
+ JAVASCRIPTCORE_GC_END();
+
+ (*m_activityCallback)();
+}
+
+void Heap::canonicalizeCellLivenessData()
+{
+ m_objectSpace.canonicalizeCellLivenessData();
+}
+
+void Heap::resetAllocator()
+{
+ m_extraCost = 0;
+ m_objectSpace.resetAllocator();
+}
+
+void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
+{
+ m_activityCallback = activityCallback;
+}
+
+GCActivityCallback* Heap::activityCallback()
+{
+ return m_activityCallback.get();
+}
+
+bool Heap::isValidAllocation(size_t bytes)
+{
+ if (!isValidThreadState(m_globalData))
+ return false;
+
+ if (bytes > MarkedSpace::maxCellSize)
+ return false;
+
+ if (m_operationInProgress != NoOperation)
+ return false;
+
+ return true;
+}
+
+void Heap::freeBlocks(MarkedBlock* head)
+{
+ m_objectSpace.freeBlocks(head);
+}
+
+void Heap::shrink()
+{
+ m_objectSpace.shrink();
+}
+
+void Heap::releaseFreeBlocks()
+{
+ while (true) {
+ MarkedBlock* block;
+ {
+ MutexLocker locker(m_freeBlockLock);
+ if (!m_numberOfFreeBlocks)
+ block = 0;
+ else {
+ block = m_freeBlocks.removeHead();
+ ASSERT(block);
+ m_numberOfFreeBlocks--;
+ }
+ }
+
+ if (!block)
+ break;
+
+ MarkedBlock::destroy(block);
+ }
+}
+
+void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
+{
+ Weak<JSCell> weak(*globalData(), cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer));
+ weak.leakHandle(); // Balanced by FinalizerOwner::finalize().
+}
+
+void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
+{
+ Weak<JSCell> weak(Weak<JSCell>::Adopt, handle);
+ Finalizer finalizer = reinterpret_cast<Finalizer>(context);
+ finalizer(weak.get());
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/Heap.h b/Source/JavaScriptCore/heap/Heap.h
new file mode 100644
index 000000000..1b228253b
--- /dev/null
+++ b/Source/JavaScriptCore/heap/Heap.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
+ * Copyright (C) 2001 Peter Kelly (pmk@post.com)
+ * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef Heap_h
+#define Heap_h
+
+#include "AllocationSpace.h"
+#include "DFGCodeBlocks.h"
+#include "HandleHeap.h"
+#include "HandleStack.h"
+#include "MarkedBlock.h"
+#include "MarkedBlockSet.h"
+#include "MarkedSpace.h"
+#include "SlotVisitor.h"
+#include "WriteBarrierSupport.h"
+#include <wtf/Forward.h>
+#include <wtf/HashCountedSet.h>
+#include <wtf/HashSet.h>
+
+namespace JSC {
+
+ class CodeBlock;
+ class GCActivityCallback;
+ class GlobalCodeBlock;
+ class Heap;
+ class HeapRootVisitor;
+ class JSCell;
+ class JSGlobalData;
+ class JSValue;
+ class LiveObjectIterator;
+ class MarkedArgumentBuffer;
+ class RegisterFile;
+ class UString;
+ class WeakGCHandlePool;
+ class SlotVisitor;
+
+ typedef std::pair<JSValue, UString> ValueStringPair;
+ typedef HashCountedSet<JSCell*> ProtectCountSet;
+ typedef HashCountedSet<const char*> TypeCountSet;
+
+ enum OperationInProgress { NoOperation, Allocation, Collection };
+
+ // Heap size hint.
+ enum HeapSize { SmallHeap, LargeHeap };
+
+ class Heap {
+ WTF_MAKE_NONCOPYABLE(Heap);
+ public:
+ friend class JIT;
+ static Heap* heap(JSValue); // 0 for immediate values
+ static Heap* heap(JSCell*);
+
+ static bool isMarked(const void*);
+ static bool testAndSetMarked(const void*);
+ static void setMarked(const void*);
+
+ static void writeBarrier(const JSCell*, JSValue);
+ static void writeBarrier(const JSCell*, JSCell*);
+ static uint8_t* addressOfCardFor(JSCell*);
+
+ Heap(JSGlobalData*, HeapSize);
+ ~Heap();
+ void destroy(); // JSGlobalData must call destroy() before ~Heap().
+
+ JSGlobalData* globalData() const { return m_globalData; }
+ AllocationSpace& objectSpace() { return m_objectSpace; }
+ MachineThreads& machineThreads() { return m_machineThreads; }
+
+ GCActivityCallback* activityCallback();
+ void setActivityCallback(PassOwnPtr<GCActivityCallback>);
+
+ // true if an allocation or collection is in progress
+ inline bool isBusy();
+
+ MarkedSpace::SizeClass& sizeClassForObject(size_t bytes) { return m_objectSpace.sizeClassFor(bytes); }
+ void* allocate(size_t);
+
+ typedef void (*Finalizer)(JSCell*);
+ void addFinalizer(JSCell*, Finalizer);
+
+ void notifyIsSafeToCollect() { m_isSafeToCollect = true; }
+ void collectAllGarbage();
+
+ void reportExtraMemoryCost(size_t cost);
+
+ void protect(JSValue);
+ bool unprotect(JSValue); // True when the protect count drops to 0.
+
+ void jettisonDFGCodeBlock(PassOwnPtr<CodeBlock>);
+
+ size_t size();
+ size_t capacity();
+ size_t objectCount();
+ size_t globalObjectCount();
+ size_t protectedObjectCount();
+ size_t protectedGlobalObjectCount();
+ PassOwnPtr<TypeCountSet> protectedObjectTypeCounts();
+ PassOwnPtr<TypeCountSet> objectTypeCounts();
+
+ void pushTempSortVector(Vector<ValueStringPair>*);
+ void popTempSortVector(Vector<ValueStringPair>*);
+
+ HashSet<MarkedArgumentBuffer*>& markListSet() { if (!m_markListSet) m_markListSet = new HashSet<MarkedArgumentBuffer*>; return *m_markListSet; }
+
+ template<typename Functor> typename Functor::ReturnType forEachProtectedCell(Functor&);
+ template<typename Functor> typename Functor::ReturnType forEachProtectedCell();
+
+ HandleHeap* handleHeap() { return &m_handleHeap; }
+ HandleStack* handleStack() { return &m_handleStack; }
+
+ void getConservativeRegisterRoots(HashSet<JSCell*>& roots);
+
+ private:
+ friend class MarkedBlock;
+ friend class AllocationSpace;
+ friend class SlotVisitor;
+ friend class CodeBlock;
+
+ static const size_t minExtraCost = 256;
+ static const size_t maxExtraCost = 1024 * 1024;
+
+ class FinalizerOwner : public WeakHandleOwner {
+ virtual void finalize(Handle<Unknown>, void* context);
+ };
+
+ bool isValidAllocation(size_t);
+ void reportExtraMemoryCostSlowCase(size_t);
+
+ // Call this function before any operation that needs to know which cells
+ // in the heap are live. (For example, call this function before
+ // conservative marking, eager sweeping, or iterating the cells in a MarkedBlock.)
+ void canonicalizeCellLivenessData();
+
+ void resetAllocator();
+ void freeBlocks(MarkedBlock*);
+
+ void clearMarks();
+ void markRoots(bool fullGC);
+ void markProtectedObjects(HeapRootVisitor&);
+ void markTempSortVectors(HeapRootVisitor&);
+ void harvestWeakReferences();
+ void finalizeUnconditionalFinalizers();
+
+ enum SweepToggle { DoNotSweep, DoSweep };
+ void collect(SweepToggle);
+ void shrink();
+ void releaseFreeBlocks();
+ void sweep();
+
+ RegisterFile& registerFile();
+
+ void waitForRelativeTimeWhileHoldingLock(double relative);
+ void waitForRelativeTime(double relative);
+ void blockFreeingThreadMain();
+ static void* blockFreeingThreadStartFunc(void* heap);
+
+ const HeapSize m_heapSize;
+ const size_t m_minBytesPerCycle;
+ size_t m_lastFullGCSize;
+
+ OperationInProgress m_operationInProgress;
+ AllocationSpace m_objectSpace;
+
+ DoublyLinkedList<MarkedBlock> m_freeBlocks;
+ size_t m_numberOfFreeBlocks;
+
+ ThreadIdentifier m_blockFreeingThread;
+ Mutex m_freeBlockLock;
+ ThreadCondition m_freeBlockCondition;
+ bool m_blockFreeingThreadShouldQuit;
+
+#if ENABLE(SIMPLE_HEAP_PROFILING)
+ VTableSpectrum m_destroyedTypeCounts;
+#endif
+
+ size_t m_extraCost;
+
+ ProtectCountSet m_protectedValues;
+ Vector<Vector<ValueStringPair>* > m_tempSortingVectors;
+ HashSet<MarkedArgumentBuffer*>* m_markListSet;
+
+ OwnPtr<GCActivityCallback> m_activityCallback;
+
+ MachineThreads m_machineThreads;
+
+ MarkStackThreadSharedData m_sharedData;
+ SlotVisitor m_slotVisitor;
+
+ HandleHeap m_handleHeap;
+ HandleStack m_handleStack;
+ DFGCodeBlocks m_dfgCodeBlocks;
+ FinalizerOwner m_finalizerOwner;
+
+ bool m_isSafeToCollect;
+
+ JSGlobalData* m_globalData;
+ };
+
+ inline bool Heap::isBusy()
+ {
+ return m_operationInProgress != NoOperation;
+ }
+
+ inline Heap* Heap::heap(JSCell* cell)
+ {
+ return MarkedBlock::blockFor(cell)->heap();
+ }
+
+ inline Heap* Heap::heap(JSValue v)
+ {
+ if (!v.isCell())
+ return 0;
+ return heap(v.asCell());
+ }
+
+ inline bool Heap::isMarked(const void* cell)
+ {
+ return MarkedBlock::blockFor(cell)->isMarked(cell);
+ }
+
+ inline bool Heap::testAndSetMarked(const void* cell)
+ {
+ return MarkedBlock::blockFor(cell)->testAndSetMarked(cell);
+ }
+
+ inline void Heap::setMarked(const void* cell)
+ {
+ MarkedBlock::blockFor(cell)->setMarked(cell);
+ }
+
+#if ENABLE(GGC)
+ inline uint8_t* Heap::addressOfCardFor(JSCell* cell)
+ {
+ return MarkedBlock::blockFor(cell)->addressOfCardFor(cell);
+ }
+
+ inline void Heap::writeBarrier(const JSCell* owner, JSCell*)
+ {
+ WriteBarrierCounters::countWriteBarrier();
+ MarkedBlock* block = MarkedBlock::blockFor(owner);
+ if (block->isMarked(owner))
+ block->setDirtyObject(owner);
+ }
+
+ inline void Heap::writeBarrier(const JSCell* owner, JSValue value)
+ {
+ if (!value)
+ return;
+ if (!value.isCell())
+ return;
+ writeBarrier(owner, value.asCell());
+ }
+#else
+
+ inline void Heap::writeBarrier(const JSCell*, JSCell*)
+ {
+ WriteBarrierCounters::countWriteBarrier();
+ }
+
+ inline void Heap::writeBarrier(const JSCell*, JSValue)
+ {
+ WriteBarrierCounters::countWriteBarrier();
+ }
+#endif
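+
+    // In the GGC configuration the barrier above records stores into already-
+    // marked (i.e. old) objects by dirtying them in their block's card table,
+    // so a young collection re-scans only those objects; without GGC the
+    // barrier only feeds the optional WriteBarrierCounters instrumentation.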
+
+ inline void Heap::reportExtraMemoryCost(size_t cost)
+ {
+ if (cost > minExtraCost)
+ reportExtraMemoryCostSlowCase(cost);
+ }
+
+ template<typename Functor> inline typename Functor::ReturnType Heap::forEachProtectedCell(Functor& functor)
+ {
+ ProtectCountSet::iterator end = m_protectedValues.end();
+ for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
+ functor(it->first);
+ m_handleHeap.forEachStrongHandle(functor, m_protectedValues);
+
+ return functor.returnValue();
+ }
+
+ template<typename Functor> inline typename Functor::ReturnType Heap::forEachProtectedCell()
+ {
+ Functor functor;
+ return forEachProtectedCell(functor);
+ }
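+
+    // Usage sketch: any functor exposing ReturnType, operator()(JSCell*), and
+    // returnValue() works here, e.g. the counting functors in Heap.cpp:
+    //
+    //     size_t count = heap.forEachProtectedCell<CountIfGlobalObject>();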
+
+ inline void* Heap::allocate(size_t bytes)
+ {
+ ASSERT(isValidAllocation(bytes));
+ return m_objectSpace.allocate(bytes);
+ }
+
+} // namespace JSC
+
+#endif // Heap_h
diff --git a/Source/JavaScriptCore/heap/HeapRootVisitor.h b/Source/JavaScriptCore/heap/HeapRootVisitor.h
new file mode 100644
index 000000000..76c97290a
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapRootVisitor.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HeapRootVisitor_h
+#define HeapRootVisitor_h
+
+#include "SlotVisitor.h"
+
+namespace JSC {
+
+ // Privileged class for marking JSValues directly. It is only safe to use
+ // this class to mark direct heap roots that are marked during every GC pass.
+ // All other references should be wrapped in WriteBarriers.
+ class HeapRootVisitor {
+ private:
+ friend class Heap;
+ HeapRootVisitor(SlotVisitor&);
+
+ public:
+ void visit(JSValue*);
+ void visit(JSValue*, size_t);
+ void visit(JSString**);
+ void visit(JSCell**);
+
+ SlotVisitor& visitor();
+
+ private:
+ SlotVisitor& m_visitor;
+ };
+
+ inline HeapRootVisitor::HeapRootVisitor(SlotVisitor& visitor)
+ : m_visitor(visitor)
+ {
+ }
+
+ inline void HeapRootVisitor::visit(JSValue* slot)
+ {
+ m_visitor.append(slot);
+ }
+
+ inline void HeapRootVisitor::visit(JSValue* slot, size_t count)
+ {
+ m_visitor.append(slot, count);
+ }
+
+ inline void HeapRootVisitor::visit(JSString** slot)
+ {
+ m_visitor.append(reinterpret_cast<JSCell**>(slot));
+ }
+
+ inline void HeapRootVisitor::visit(JSCell** slot)
+ {
+ m_visitor.append(slot);
+ }
+
+ inline SlotVisitor& HeapRootVisitor::visitor()
+ {
+ return m_visitor;
+ }
+
+} // namespace JSC
+
+#endif // HeapRootVisitor_h
diff --git a/Source/JavaScriptCore/heap/ListableHandler.h b/Source/JavaScriptCore/heap/ListableHandler.h
new file mode 100644
index 000000000..41f18fbce
--- /dev/null
+++ b/Source/JavaScriptCore/heap/ListableHandler.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef ListableHandler_h
+#define ListableHandler_h
+
+#include <stdint.h>
+#include <wtf/Locker.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/ThreadingPrimitives.h>
+
+namespace JSC {
+
+class MarkStack;
+class MarkStackThreadSharedData;
+class SlotVisitor;
+
+template<typename T>
+class ListableHandler {
+ WTF_MAKE_NONCOPYABLE(ListableHandler);
+
+protected:
+ ListableHandler()
+ : m_nextAndFlag(0)
+ {
+ }
+
+ virtual ~ListableHandler() { }
+
+ T* next() const
+ {
+ return reinterpret_cast<T*>(m_nextAndFlag & ~1);
+ }
+
+private:
+ // Allow these classes to use ListableHandler::List.
+ friend class MarkStack;
+ friend class MarkStackThreadSharedData;
+ friend class SlotVisitor;
+
+ class List {
+ WTF_MAKE_NONCOPYABLE(List);
+ public:
+ List()
+ : m_first(0)
+ {
+ }
+
+ void addThreadSafe(T* handler)
+ {
+ // NOTE: If we ever want this to be faster, we could turn it into
+ // a CAS loop, since this is a singly-linked-list that, in parallel
+ // tracing mode, can only grow. I.e. we don't have to worry about
+ // any ABA problems.
+ MutexLocker locker(m_lock);
+ addNotThreadSafe(handler);
+ }
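+
+        // A sketch of the CAS loop mentioned above (hypothetical; 'cas' stands
+        // in for an atomic compare-and-swap over T*):
+        //
+        //     if (handler->m_nextAndFlag & 1)
+        //         return; // already listed; safe because the list only grows
+        //     T* head;
+        //     do {
+        //         head = m_first;
+        //         handler->m_nextAndFlag = reinterpret_cast<uintptr_t>(head) | 1;
+        //     } while (!cas(&m_first, head, handler));
+        //
+        // The grow-only property during parallel tracing is what rules out ABA.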
+
+ bool hasNext()
+ {
+ return !!m_first;
+ }
+
+ T* head()
+ {
+ return m_first;
+ }
+
+ T* removeNext()
+ {
+ T* current = m_first;
+ T* next = current->next();
+ current->m_nextAndFlag = 0;
+ m_first = next;
+ return current;
+ }
+
+ void removeAll()
+ {
+ while (hasNext())
+ removeNext();
+ }
+
+ private:
+ void addNotThreadSafe(T* handler)
+ {
+ if (handler->m_nextAndFlag & 1)
+ return;
+ handler->m_nextAndFlag = reinterpret_cast<uintptr_t>(m_first) | 1;
+ m_first = handler;
+ }
+
+ Mutex m_lock;
+ T* m_first;
+ };
+
+ uintptr_t m_nextAndFlag;
+};
+
+} // namespace JSC
+
+#endif // ListableHandler_h
diff --git a/Source/JavaScriptCore/heap/Local.h b/Source/JavaScriptCore/heap/Local.h
new file mode 100644
index 000000000..afcfe42b8
--- /dev/null
+++ b/Source/JavaScriptCore/heap/Local.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Local_h
+#define Local_h
+
+#include "Handle.h"
+#include "JSGlobalData.h"
+
+/*
+ A strongly referenced handle whose lifetime is temporary, limited to a given
+ LocalScope. Use Locals for local values on the stack. It is an error to
+ create a Local outside of any LocalScope.
+*/
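+
+/*
+    A minimal usage sketch (hypothetical caller; newObject is a placeholder):
+
+        void example(JSGlobalData& globalData)
+        {
+            LocalScope scope(globalData);
+            Local<JSObject> object(globalData, newObject);
+            // object's slot lives on the handle stack and is visited as a
+            // GC root until the scope exits.
+        }
+*/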
+
+namespace JSC {
+
+template <typename T> class Local : public Handle<T> {
+ friend class LocalScope;
+ using Handle<T>::slot;
+
+public:
+ typedef typename Handle<T>::ExternalType ExternalType;
+
+ Local(JSGlobalData&, ExternalType = ExternalType());
+ Local(JSGlobalData&, Handle<T>);
+ Local(const Local<T>&); // Adopting constructor. Used to return a Local to a calling function.
+
+ Local& operator=(ExternalType);
+ Local& operator=(Handle<T>);
+
+private:
+ Local(HandleSlot, ExternalType); // Used by LocalScope::release() to move a Local to a containing scope.
+ void set(ExternalType);
+};
+
+template <typename T> inline Local<T>::Local(JSGlobalData& globalData, ExternalType value)
+ : Handle<T>(globalData.heap.handleStack()->push())
+{
+ set(value);
+}
+
+template <typename T> inline Local<T>::Local(JSGlobalData& globalData, Handle<T> other)
+ : Handle<T>(globalData.heap.handleStack()->push())
+{
+ set(other.get());
+}
+
+template <typename T> inline Local<T>::Local(const Local<T>& other)
+ : Handle<T>(other.slot())
+{
+ const_cast<Local<T>&>(other).setSlot(0); // Prevent accidental sharing.
+}
+
+template <typename T> inline Local<T>::Local(HandleSlot slot, ExternalType value)
+ : Handle<T>(slot, value)
+{
+}
+
+template <typename T> inline Local<T>& Local<T>::operator=(ExternalType value)
+{
+ set(value);
+ return *this;
+}
+
+template <typename T> inline Local<T>& Local<T>::operator=(Handle<T> other)
+{
+ set(other.get());
+ return *this;
+}
+
+template <typename T> inline void Local<T>::set(ExternalType externalType)
+{
+ ASSERT(slot());
+ *slot() = externalType;
+}
+
+
+template <typename T, unsigned inlineCapacity = 0> class LocalStack {
+ typedef typename Handle<T>::ExternalType ExternalType;
+public:
+ LocalStack(JSGlobalData& globalData)
+ : m_globalData(&globalData)
+ , m_count(0)
+ {
+ }
+
+ ExternalType peek() const
+ {
+ ASSERT(m_count > 0);
+ return m_stack[m_count - 1].get();
+ }
+
+ ExternalType pop()
+ {
+ ASSERT(m_count > 0);
+ return m_stack[--m_count].get();
+ }
+
+ void push(ExternalType value)
+ {
+ if (m_count == m_stack.size())
+ m_stack.append(Local<T>(*m_globalData, value));
+ else
+ m_stack[m_count] = value;
+ m_count++;
+ }
+
+ bool isEmpty() const { return !m_count; }
+ unsigned size() const { return m_count; }
+
+private:
+ RefPtr<JSGlobalData> m_globalData;
+ Vector<Local<T>, inlineCapacity> m_stack;
+ unsigned m_count;
+};
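+
+// Usage sketch (hypothetical; must run inside a LocalScope, since each slot
+// is a Local): a strongly-referenced work list of objects still to process.
+//
+//     LocalStack<JSObject, 16> workList(globalData);
+//     workList.push(root);
+//     while (!workList.isEmpty())
+//         process(workList.pop());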
+
+}
+
+namespace WTF {
+
+template<typename T> struct VectorTraits<JSC::Local<T> > : SimpleClassVectorTraits {
+ static const bool needsDestruction = false;
+ static const bool canInitializeWithMemset = false;
+ static const bool canCompareWithMemcmp = false;
+};
+
+}
+
+#endif
diff --git a/Source/JavaScriptCore/heap/LocalScope.h b/Source/JavaScriptCore/heap/LocalScope.h
new file mode 100644
index 000000000..cd27b32ef
--- /dev/null
+++ b/Source/JavaScriptCore/heap/LocalScope.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LocalScope_h
+#define LocalScope_h
+
+#include "HandleStack.h"
+#include "Local.h"
+
+namespace JSC {
+/*
+ A LocalScope is a temporary scope in which Locals are allocated. When a
+ LocalScope goes out of scope, all the Locals created in it are destroyed.
+
+ LocalScope is similar in concept to NSAutoreleasePool.
+*/
+
+class JSGlobalData;
+
+class LocalScope {
+public:
+ explicit LocalScope(JSGlobalData&);
+ ~LocalScope();
+
+ template <typename T> Local<T> release(Local<T>); // Destroys all other locals in the scope.
+
+private:
+ HandleStack* m_handleStack;
+ HandleStack::Frame m_lastFrame;
+};
+
+inline LocalScope::LocalScope(JSGlobalData& globalData)
+ : m_handleStack(globalData.heap.handleStack())
+{
+ m_handleStack->enterScope(m_lastFrame);
+}
+
+inline LocalScope::~LocalScope()
+{
+ m_handleStack->leaveScope(m_lastFrame);
+}
+
+template <typename T> Local<T> LocalScope::release(Local<T> local)
+{
+ typename Local<T>::ExternalType ptr = local.get();
+
+ m_handleStack->leaveScope(m_lastFrame);
+ HandleSlot slot = m_handleStack->push();
+ m_handleStack->enterScope(m_lastFrame);
+
+ return Local<T>(slot, ptr);
+}
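+
+// How release() works: leaveScope() zaps and pops every slot this scope
+// allocated, push() then grabs a fresh slot that now belongs to the parent
+// scope's region, and enterScope() re-saves the frame so this scope's own
+// destructor leaves the surviving slot alone. A hypothetical caller:
+//
+//     Local<JSObject> makeObject(JSGlobalData& globalData)
+//     {
+//         LocalScope scope(globalData);
+//         Local<JSObject> temp(globalData, ...);
+//         return scope.release(temp); // temp now belongs to the caller's scope
+//     }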
+
+}
+
+#endif
diff --git a/Source/JavaScriptCore/heap/MachineStackMarker.cpp b/Source/JavaScriptCore/heap/MachineStackMarker.cpp
new file mode 100644
index 000000000..f62ee066f
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MachineStackMarker.cpp
@@ -0,0 +1,497 @@
+/*
+ * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
+ * Copyright (C) 2009 Acision BV. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include "config.h"
+#include "MachineStackMarker.h"
+
+#include "ConservativeRoots.h"
+#include "Heap.h"
+#include "JSArray.h"
+#include "JSGlobalData.h"
+#include <setjmp.h>
+#include <stdlib.h>
+#include <wtf/StdLibExtras.h>
+
+#if OS(DARWIN)
+
+#include <mach/mach_init.h>
+#include <mach/mach_port.h>
+#include <mach/task.h>
+#include <mach/thread_act.h>
+#include <mach/vm_map.h>
+
+#elif OS(WINDOWS)
+
+#include <windows.h>
+#include <malloc.h>
+
+#elif OS(UNIX)
+
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#if OS(SOLARIS)
+#include <thread.h>
+#else
+#include <pthread.h>
+#endif
+
+#if HAVE(PTHREAD_NP_H)
+#include <pthread_np.h>
+#endif
+
+#if OS(QNX)
+#include <fcntl.h>
+#include <sys/procfs.h>
+#include <stdio.h>
+#include <errno.h>
+#endif
+
+#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
+#include <signal.h>
+#endif
+
+#endif
+
+using namespace WTF;
+
+namespace JSC {
+
+static inline void swapIfBackwards(void*& begin, void*& end)
+{
+#if OS(WINCE)
+ if (begin <= end)
+ return;
+ std::swap(begin, end);
+#else
+    UNUSED_PARAM(begin);
+    UNUSED_PARAM(end);
+#endif
+}
+
+#if OS(DARWIN)
+typedef mach_port_t PlatformThread;
+#elif OS(WINDOWS)
+typedef HANDLE PlatformThread;
+#elif USE(PTHREADS)
+typedef pthread_t PlatformThread;
+static const int SigThreadSuspendResume = SIGUSR2;
+
+static void pthreadSignalHandlerSuspendResume(int signo)
+{
+ sigset_t signalSet;
+ sigemptyset(&signalSet);
+ sigaddset(&signalSet, SigThreadSuspendResume);
+ sigsuspend(&signalSet);
+}
+#endif
+
+class MachineThreads::Thread {
+public:
+ Thread(const PlatformThread& platThread, void* base)
+ : platformThread(platThread)
+ , stackBase(base)
+ {
+#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN) && defined(SA_RESTART)
+ // If we have SA_RESTART, enable the SIGUSR2-based thread suspend/resume mechanism.
+ struct sigaction action;
+ action.sa_handler = pthreadSignalHandlerSuspendResume;
+ sigemptyset(&action.sa_mask);
+ action.sa_flags = SA_RESTART;
+ sigaction(SigThreadSuspendResume, &action, 0);
+
+ sigset_t mask;
+ sigemptyset(&mask);
+ sigaddset(&mask, SigThreadSuspendResume);
+ pthread_sigmask(SIG_UNBLOCK, &mask, 0);
+#endif
+ }
+
+ Thread* next;
+ PlatformThread platformThread;
+ void* stackBase;
+};
+
+MachineThreads::MachineThreads(Heap* heap)
+ : m_heap(heap)
+ , m_registeredThreads(0)
+ , m_threadSpecific(0)
+{
+}
+
+MachineThreads::~MachineThreads()
+{
+ if (m_threadSpecific) {
+ int error = pthread_key_delete(m_threadSpecific);
+ ASSERT_UNUSED(error, !error);
+ }
+
+ MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
+ for (Thread* t = m_registeredThreads; t;) {
+ Thread* next = t->next;
+ delete t;
+ t = next;
+ }
+}
+
+static inline PlatformThread getCurrentPlatformThread()
+{
+#if OS(DARWIN)
+ return pthread_mach_thread_np(pthread_self());
+#elif OS(WINDOWS)
+ return GetCurrentThread();
+#elif USE(PTHREADS)
+ return pthread_self();
+#endif
+}
+
+static inline bool equalThread(const PlatformThread& first, const PlatformThread& second)
+{
+#if OS(DARWIN) || OS(WINDOWS)
+ return first == second;
+#elif USE(PTHREADS)
+ return !!pthread_equal(first, second);
+#else
+#error Need a way to compare threads on this platform
+#endif
+}
+
+void MachineThreads::makeUsableFromMultipleThreads()
+{
+ if (m_threadSpecific)
+ return;
+
+ int error = pthread_key_create(&m_threadSpecific, removeThread);
+ if (error)
+ CRASH();
+}
+
+void MachineThreads::addCurrentThread()
+{
+ ASSERT(!m_heap->globalData()->exclusiveThread || m_heap->globalData()->exclusiveThread == currentThread());
+
+ if (!m_threadSpecific || pthread_getspecific(m_threadSpecific))
+ return;
+
+ pthread_setspecific(m_threadSpecific, this);
+ Thread* thread = new Thread(getCurrentPlatformThread(), m_heap->globalData()->stack().origin());
+
+ MutexLocker lock(m_registeredThreadsMutex);
+
+ thread->next = m_registeredThreads;
+ m_registeredThreads = thread;
+}
+
+void MachineThreads::removeThread(void* p)
+{
+ if (p)
+ static_cast<MachineThreads*>(p)->removeCurrentThread();
+}
+
+void MachineThreads::removeCurrentThread()
+{
+ PlatformThread currentPlatformThread = getCurrentPlatformThread();
+
+ MutexLocker lock(m_registeredThreadsMutex);
+
+ if (equalThread(currentPlatformThread, m_registeredThreads->platformThread)) {
+ Thread* t = m_registeredThreads;
+ m_registeredThreads = m_registeredThreads->next;
+ delete t;
+ } else {
+ Thread* last = m_registeredThreads;
+ Thread* t;
+ for (t = m_registeredThreads->next; t; t = t->next) {
+ if (equalThread(t->platformThread, currentPlatformThread)) {
+ last->next = t->next;
+ break;
+ }
+ last = t;
+ }
+ ASSERT(t); // If t is NULL, we never found ourselves in the list.
+ delete t;
+ }
+}
+
+#if COMPILER(GCC)
+#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
+#else
+#define REGISTER_BUFFER_ALIGNMENT
+#endif
+
+void MachineThreads::gatherFromCurrentThread(ConservativeRoots& conservativeRoots, void* stackCurrent)
+{
+ // setjmp forces volatile registers onto the stack
+ jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
+#if COMPILER(MSVC)
+#pragma warning(push)
+#pragma warning(disable: 4611)
+#endif
+ setjmp(registers);
+#if COMPILER(MSVC)
+#pragma warning(pop)
+#endif
+
+ void* registersBegin = &registers;
+ void* registersEnd = reinterpret_cast<void*>(roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(&registers + 1)));
+ swapIfBackwards(registersBegin, registersEnd);
+ conservativeRoots.add(registersBegin, registersEnd);
+
+ void* stackBegin = stackCurrent;
+ void* stackEnd = m_heap->globalData()->stack().origin();
+ swapIfBackwards(stackBegin, stackEnd);
+ conservativeRoots.add(stackBegin, stackEnd);
+}
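+
+// The two ranges added above are scanned conservatively: every pointer-sized
+// word in [begin, end) that looks like a pointer into one of the heap's
+// MarkedBlocks is treated as a root. Heap::markRoots() drives this with the
+// address of a local as the stack extreme:
+//
+//     void* dummy;
+//     m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);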
+
+static inline void suspendThread(const PlatformThread& platformThread)
+{
+#if OS(DARWIN)
+ thread_suspend(platformThread);
+#elif OS(WINDOWS)
+ SuspendThread(platformThread);
+#elif USE(PTHREADS)
+ pthread_kill(platformThread, SigThreadSuspendResume);
+#else
+#error Need a way to suspend threads on this platform
+#endif
+}
+
+static inline void resumeThread(const PlatformThread& platformThread)
+{
+#if OS(DARWIN)
+ thread_resume(platformThread);
+#elif OS(WINDOWS)
+ ResumeThread(platformThread);
+#elif USE(PTHREADS)
+ pthread_kill(platformThread, SigThreadSuspendResume);
+#else
+#error Need a way to resume threads on this platform
+#endif
+}
+
+typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit
+
+#if OS(DARWIN)
+
+#if CPU(X86)
+typedef i386_thread_state_t PlatformThreadRegisters;
+#elif CPU(X86_64)
+typedef x86_thread_state64_t PlatformThreadRegisters;
+#elif CPU(PPC)
+typedef ppc_thread_state_t PlatformThreadRegisters;
+#elif CPU(PPC64)
+typedef ppc_thread_state64_t PlatformThreadRegisters;
+#elif CPU(ARM)
+typedef arm_thread_state_t PlatformThreadRegisters;
+#else
+#error Unknown Architecture
+#endif
+
+#elif OS(WINDOWS)
+typedef CONTEXT PlatformThreadRegisters;
+#elif OS(QNX)
+typedef struct _debug_thread_info PlatformThreadRegisters;
+#elif USE(PTHREADS)
+typedef pthread_attr_t PlatformThreadRegisters;
+#else
+#error Need a thread register struct for this platform
+#endif
+
+static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
+{
+#if OS(DARWIN)
+
+#if CPU(X86)
+ unsigned user_count = sizeof(regs)/sizeof(int);
+ thread_state_flavor_t flavor = i386_THREAD_STATE;
+#elif CPU(X86_64)
+ unsigned user_count = x86_THREAD_STATE64_COUNT;
+ thread_state_flavor_t flavor = x86_THREAD_STATE64;
+#elif CPU(PPC)
+ unsigned user_count = PPC_THREAD_STATE_COUNT;
+ thread_state_flavor_t flavor = PPC_THREAD_STATE;
+#elif CPU(PPC64)
+ unsigned user_count = PPC_THREAD_STATE64_COUNT;
+ thread_state_flavor_t flavor = PPC_THREAD_STATE64;
+#elif CPU(ARM)
+ unsigned user_count = ARM_THREAD_STATE_COUNT;
+ thread_state_flavor_t flavor = ARM_THREAD_STATE;
+#else
+#error Unknown Architecture
+#endif
+
+ kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
+ if (result != KERN_SUCCESS) {
+ WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
+ "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
+ CRASH();
+ }
+ return user_count * sizeof(usword_t);
+// end OS(DARWIN)
+
+#elif OS(WINDOWS)
+ regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
+ GetThreadContext(platformThread, &regs);
+ return sizeof(CONTEXT);
+#elif OS(QNX)
+ memset(&regs, 0, sizeof(regs));
+ regs.tid = pthread_self();
+ int fd = open("/proc/self", O_RDONLY);
+ if (fd == -1) {
+ LOG_ERROR("Unable to open /proc/self (errno: %d)", errno);
+ CRASH();
+ }
+ devctl(fd, DCMD_PROC_TIDSTATUS, &regs, sizeof(regs), 0);
+ close(fd);
+ return sizeof(regs); // report how many bytes of regs are valid
+#elif USE(PTHREADS)
+ pthread_attr_init(&regs);
+#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
+ // e.g. on FreeBSD 5.4, neundorf@kde.org
+ pthread_attr_get_np(platformThread, &regs);
+#else
+ // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
+ pthread_getattr_np(platformThread, &regs);
+#endif
+ return 0;
+#else
+#error Need a way to get thread registers on this platform
+#endif
+}
+
+static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
+{
+#if OS(DARWIN)
+
+#if __DARWIN_UNIX03
+
+#if CPU(X86)
+ return reinterpret_cast<void*>(regs.__esp);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>(regs.__rsp);
+#elif CPU(PPC) || CPU(PPC64)
+ return reinterpret_cast<void*>(regs.__r1);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>(regs.__sp);
+#else
+#error Unknown Architecture
+#endif
+
+#else // !__DARWIN_UNIX03
+
+#if CPU(X86)
+ return reinterpret_cast<void*>(regs.esp);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>(regs.rsp);
+#elif CPU(PPC) || CPU(PPC64)
+ return reinterpret_cast<void*>(regs.r1);
+#else
+#error Unknown Architecture
+#endif
+
+#endif // __DARWIN_UNIX03
+
+// end OS(DARWIN)
+#elif OS(WINDOWS)
+
+#if CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.Sp);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.IntSp);
+#elif CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.Esp);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.Rsp);
+#else
+#error Unknown Architecture
+#endif
+
+#elif OS(QNX)
+ return reinterpret_cast<void*>((uintptr_t) regs.sp);
+
+#elif USE(PTHREADS)
+ void* stackBase = 0;
+ size_t stackSize = 0;
+    int rc = pthread_attr_getstack(&regs, &stackBase, &stackSize);
+    ASSERT_UNUSED(rc, !rc); // FIXME: Deal with the error code in release builds, too? A failure here seems fatal.
+ ASSERT(stackBase);
+ return static_cast<char*>(stackBase) + stackSize;
+#else
+#error Need a way to get the stack pointer for another thread on this platform
+#endif
+}
+
+static void freePlatformThreadRegisters(PlatformThreadRegisters& regs)
+{
+#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN) && !OS(QNX)
+ pthread_attr_destroy(&regs);
+#else
+ UNUSED_PARAM(regs);
+#endif
+}
+
+void MachineThreads::gatherFromOtherThread(ConservativeRoots& conservativeRoots, Thread* thread)
+{
+ suspendThread(thread->platformThread);
+
+ PlatformThreadRegisters regs;
+ size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);
+
+ conservativeRoots.add(static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));
+
+ void* stackPointer = otherThreadStackPointer(regs);
+ void* stackBase = thread->stackBase;
+ swapIfBackwards(stackPointer, stackBase);
+ conservativeRoots.add(stackPointer, stackBase);
+
+ resumeThread(thread->platformThread);
+
+ freePlatformThreadRegisters(regs);
+}
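+
+// A note on the two spans scanned above: stack growth direction is
+// platform-dependent, so swapIfBackwards() orders each pair of bounds before
+// the scan. On a typical downward-growing stack, for example, the suspended
+// thread's stack pointer sits numerically below thread->stackBase, and the
+// range scanned is [stackPointer, stackBase).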
+
+void MachineThreads::gatherConservativeRoots(ConservativeRoots& conservativeRoots, void* stackCurrent)
+{
+ gatherFromCurrentThread(conservativeRoots, stackCurrent);
+
+ if (m_threadSpecific) {
+ PlatformThread currentPlatformThread = getCurrentPlatformThread();
+
+ MutexLocker lock(m_registeredThreadsMutex);
+
+#ifndef NDEBUG
+ // Forbid malloc during the gather phase. The gather phase suspends
+ // threads, so a malloc during gather would risk a deadlock with a
+ // thread that had been suspended while holding the malloc lock.
+ fastMallocForbid();
+#endif
+        // It is safe to access the registered-threads list because we hold
+        // m_registeredThreadsMutex for the duration of this loop.
+ for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
+ if (!equalThread(thread->platformThread, currentPlatformThread))
+ gatherFromOtherThread(conservativeRoots, thread);
+ }
+#ifndef NDEBUG
+ fastMallocAllow();
+#endif
+ }
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/MachineStackMarker.h b/Source/JavaScriptCore/heap/MachineStackMarker.h
new file mode 100644
index 000000000..69c4537bd
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MachineStackMarker.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
+ * Copyright (C) 2001 Peter Kelly (pmk@post.com)
+ * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef MachineThreads_h
+#define MachineThreads_h
+
+#include <pthread.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/ThreadingPrimitives.h>
+
+namespace JSC {
+
+    class ConservativeRoots;
+    class Heap;
+
+ class MachineThreads {
+ WTF_MAKE_NONCOPYABLE(MachineThreads);
+ public:
+ MachineThreads(Heap*);
+ ~MachineThreads();
+
+ void gatherConservativeRoots(ConservativeRoots&, void* stackCurrent);
+
+ void makeUsableFromMultipleThreads();
+ void addCurrentThread(); // Only needs to be called by clients that can use the same heap from multiple threads.
+
+ private:
+ void gatherFromCurrentThread(ConservativeRoots&, void* stackCurrent);
+
+ class Thread;
+
+ static void removeThread(void*);
+ void removeCurrentThread();
+
+ void gatherFromOtherThread(ConservativeRoots&, Thread*);
+
+ Heap* m_heap;
+ Mutex m_registeredThreadsMutex;
+ Thread* m_registeredThreads;
+ pthread_key_t m_threadSpecific;
+ };
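+
+    // A sketch of the expected call pattern (names outside the MachineThreads
+    // API are hypothetical):
+    //
+    //     MachineThreads machineThreads(&heap);
+    //     machineThreads.makeUsableFromMultipleThreads();
+    //     machineThreads.addCurrentThread(); // On each thread that uses the heap.
+    //     ...
+    //     machineThreads.gatherConservativeRoots(conservativeRoots, stackCurrent);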
+
+} // namespace JSC
+
+#endif // MachineThreads_h
diff --git a/Source/JavaScriptCore/heap/MarkStack.cpp b/Source/JavaScriptCore/heap/MarkStack.cpp
new file mode 100644
index 000000000..02cf328d4
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkStack.cpp
@@ -0,0 +1,480 @@
+/*
+ * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MarkStack.h"
+
+#include "ConservativeRoots.h"
+#include "Heap.h"
+#include "Options.h"
+#include "JSArray.h"
+#include "JSCell.h"
+#include "JSObject.h"
+#include "ScopeChain.h"
+#include "Structure.h"
+#include "WriteBarrier.h"
+#include <wtf/MainThread.h>
+
+namespace JSC {
+
+MarkStackSegmentAllocator::MarkStackSegmentAllocator()
+ : m_nextFreeSegment(0)
+{
+}
+
+MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
+{
+ shrinkReserve();
+}
+
+MarkStackSegment* MarkStackSegmentAllocator::allocate()
+{
+ {
+ MutexLocker locker(m_lock);
+ if (m_nextFreeSegment) {
+ MarkStackSegment* result = m_nextFreeSegment;
+ m_nextFreeSegment = result->m_previous;
+ return result;
+ }
+ }
+
+ return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Options::gcMarkStackSegmentSize));
+}
+
+void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
+{
+ MutexLocker locker(m_lock);
+ segment->m_previous = m_nextFreeSegment;
+ m_nextFreeSegment = segment;
+}
+
+void MarkStackSegmentAllocator::shrinkReserve()
+{
+ MarkStackSegment* segments;
+ {
+ MutexLocker locker(m_lock);
+ segments = m_nextFreeSegment;
+ m_nextFreeSegment = 0;
+ }
+ while (segments) {
+ MarkStackSegment* toFree = segments;
+ segments = segments->m_previous;
+ OSAllocator::decommitAndRelease(toFree, Options::gcMarkStackSegmentSize);
+ }
+}
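+
+// Released segments are kept on a LIFO free list so a later allocate() can
+// reuse the most recently released segment without going back to OSAllocator;
+// shrinkReserve() is the only point where that memory is returned to the OS.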
+
+MarkStackArray::MarkStackArray(MarkStackSegmentAllocator& allocator)
+ : m_allocator(allocator)
+ , m_segmentCapacity(MarkStackSegment::capacityFromSize(Options::gcMarkStackSegmentSize))
+ , m_top(0)
+ , m_numberOfPreviousSegments(0)
+{
+ m_topSegment = m_allocator.allocate();
+#if !ASSERT_DISABLED
+ m_topSegment->m_top = 0;
+#endif
+ m_topSegment->m_previous = 0;
+}
+
+MarkStackArray::~MarkStackArray()
+{
+ ASSERT(!m_topSegment->m_previous);
+ m_allocator.release(m_topSegment);
+}
+
+void MarkStackArray::expand()
+{
+ ASSERT(m_topSegment->m_top == m_segmentCapacity);
+
+ m_numberOfPreviousSegments++;
+
+ MarkStackSegment* nextSegment = m_allocator.allocate();
+#if !ASSERT_DISABLED
+ nextSegment->m_top = 0;
+#endif
+ nextSegment->m_previous = m_topSegment;
+ m_topSegment = nextSegment;
+ setTopForEmptySegment();
+ validatePrevious();
+}
+
+bool MarkStackArray::refill()
+{
+ validatePrevious();
+ if (top())
+ return true;
+ MarkStackSegment* toFree = m_topSegment;
+ MarkStackSegment* previous = m_topSegment->m_previous;
+ if (!previous)
+ return false;
+ ASSERT(m_numberOfPreviousSegments);
+ m_numberOfPreviousSegments--;
+ m_topSegment = previous;
+ m_allocator.release(toFree);
+ setTopForFullSegment();
+ validatePrevious();
+ return true;
+}
+
+bool MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
+{
+ ASSERT(m_segmentCapacity == other.m_segmentCapacity);
+ validatePrevious();
+ other.validatePrevious();
+
+ // Fast check: see if the other mark stack already has enough segments.
+ if (other.m_numberOfPreviousSegments + 1 >= Options::maximumNumberOfSharedSegments)
+ return false;
+
+ size_t numberOfCellsToKeep = Options::minimumNumberOfCellsToKeep;
+ ASSERT(m_top > numberOfCellsToKeep || m_topSegment->m_previous);
+
+ // Looks like we should donate! Give the other mark stack all of our
+ // previous segments, and then top it off.
+ MarkStackSegment* previous = m_topSegment->m_previous;
+ while (previous) {
+ ASSERT(m_numberOfPreviousSegments);
+
+ MarkStackSegment* current = previous;
+ previous = current->m_previous;
+
+ current->m_previous = other.m_topSegment->m_previous;
+ other.m_topSegment->m_previous = current;
+
+ m_numberOfPreviousSegments--;
+ other.m_numberOfPreviousSegments++;
+ }
+ ASSERT(!m_numberOfPreviousSegments);
+ m_topSegment->m_previous = 0;
+ validatePrevious();
+ other.validatePrevious();
+
+    // Now top off. We want to keep at least numberOfCellsToKeep cells, but if
+    // we really have a lot of work, we donate half of what we have.
+ if (m_top > numberOfCellsToKeep * 2)
+ numberOfCellsToKeep = m_top / 2;
+ while (m_top > numberOfCellsToKeep)
+ other.append(removeLast());
+
+ return true;
+}
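+
+// A worked example of the top-off above, assuming m_top == 1000 and
+// Options::minimumNumberOfCellsToKeep == 100 (an illustrative value): since
+// 1000 > 200, we keep 1000 / 2 == 500 cells and donate the other 500.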
+
+void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other)
+{
+ ASSERT(m_segmentCapacity == other.m_segmentCapacity);
+ validatePrevious();
+ other.validatePrevious();
+
+ // If other has an entire segment, steal it and return.
+ if (other.m_topSegment->m_previous) {
+ ASSERT(other.m_topSegment->m_previous->m_top == m_segmentCapacity);
+
+ // First remove a segment from other.
+ MarkStackSegment* current = other.m_topSegment->m_previous;
+ other.m_topSegment->m_previous = current->m_previous;
+ other.m_numberOfPreviousSegments--;
+
+ ASSERT(!!other.m_numberOfPreviousSegments == !!other.m_topSegment->m_previous);
+
+ // Now add it to this.
+ current->m_previous = m_topSegment->m_previous;
+ m_topSegment->m_previous = current;
+ m_numberOfPreviousSegments++;
+
+ validatePrevious();
+ other.validatePrevious();
+ return;
+ }
+
+ // Otherwise drain 1/Nth of the shared array where N is the number of
+ // workers, or Options::minimumNumberOfCellsToKeep, whichever is bigger.
+ size_t numberOfCellsToSteal = std::max((size_t)Options::minimumNumberOfCellsToKeep, other.size() / Options::numberOfGCMarkers);
+ while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
+ append(other.removeLast());
+}
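+
+// A worked example of the drain above, assuming other.size() == 9000,
+// Options::numberOfGCMarkers == 8 and Options::minimumNumberOfCellsToKeep
+// == 100 (illustrative values): we steal max(100, 9000 / 8) == 1125 cells.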
+
+#if ENABLE(PARALLEL_GC)
+void MarkStackThreadSharedData::markingThreadMain()
+{
+ WTF::registerGCThread();
+ SlotVisitor slotVisitor(*this);
+ ParallelModeEnabler enabler(slotVisitor);
+ slotVisitor.drainFromShared(SlotVisitor::SlaveDrain);
+}
+
+void* MarkStackThreadSharedData::markingThreadStartFunc(void* shared)
+{
+ static_cast<MarkStackThreadSharedData*>(shared)->markingThreadMain();
+ return 0;
+}
+#endif
+
+MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData)
+ : m_globalData(globalData)
+ , m_sharedMarkStack(m_segmentAllocator)
+ , m_numberOfActiveParallelMarkers(0)
+ , m_parallelMarkersShouldExit(false)
+{
+#if ENABLE(PARALLEL_GC)
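+    // Marker 0 is the thread that drives the collection itself, so only
+    // numberOfGCMarkers - 1 helper threads need to be started here.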
+ for (unsigned i = 1; i < Options::numberOfGCMarkers; ++i) {
+ m_markingThreads.append(createThread(markingThreadStartFunc, this, "JavaScriptCore::Marking"));
+ ASSERT(m_markingThreads.last());
+ }
+#endif
+}
+
+MarkStackThreadSharedData::~MarkStackThreadSharedData()
+{
+#if ENABLE(PARALLEL_GC)
+ // Destroy our marking threads.
+ {
+ MutexLocker locker(m_markingLock);
+ m_parallelMarkersShouldExit = true;
+ m_markingCondition.broadcast();
+ }
+ for (unsigned i = 0; i < m_markingThreads.size(); ++i)
+ waitForThreadCompletion(m_markingThreads[i], 0);
+#endif
+}
+
+void MarkStackThreadSharedData::reset()
+{
+ ASSERT(!m_numberOfActiveParallelMarkers);
+ ASSERT(!m_parallelMarkersShouldExit);
+ ASSERT(m_sharedMarkStack.isEmpty());
+
+#if ENABLE(PARALLEL_GC)
+ m_segmentAllocator.shrinkReserve();
+ m_opaqueRoots.clear();
+#else
+ ASSERT(m_opaqueRoots.isEmpty());
+#endif
+
+ m_weakReferenceHarvesters.removeAll();
+}
+
+void MarkStack::reset()
+{
+ m_visitCount = 0;
+ ASSERT(m_stack.isEmpty());
+#if ENABLE(PARALLEL_GC)
+ ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
+#else
+ m_opaqueRoots.clear();
+#endif
+}
+
+void MarkStack::append(ConservativeRoots& conservativeRoots)
+{
+ JSCell** roots = conservativeRoots.roots();
+ size_t size = conservativeRoots.size();
+ for (size_t i = 0; i < size; ++i)
+ internalAppend(roots[i]);
+}
+
+ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
+{
+#if ENABLE(SIMPLE_HEAP_PROFILING)
+    visitor.m_visitedTypeCounts.count(cell);
+#endif
+
+ ASSERT(Heap::isMarked(cell));
+
+ if (isJSString(cell)) {
+ JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
+ return;
+ }
+
+ if (isJSFinalObject(cell)) {
+ JSObject::visitChildren(const_cast<JSCell*>(cell), visitor);
+ return;
+ }
+
+ if (isJSArray(cell)) {
+ JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
+ return;
+ }
+
+ cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
+}
+
+void SlotVisitor::donateSlow()
+{
+ // Refuse to donate if shared has more entries than I do.
+ if (m_shared.m_sharedMarkStack.size() > m_stack.size())
+ return;
+ MutexLocker locker(m_shared.m_markingLock);
+ if (m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack)) {
+ // Only wake up threads if the shared stack is big enough; otherwise assume that
+ // it's more profitable for us to just scan this ourselves later.
+ if (m_shared.m_sharedMarkStack.size() >= Options::sharedStackWakeupThreshold)
+ m_shared.m_markingCondition.broadcast();
+ }
+}
+
+void SlotVisitor::drain()
+{
+ ASSERT(m_isInParallelMode);
+
+#if ENABLE(PARALLEL_GC)
+ if (Options::numberOfGCMarkers > 1) {
+ while (!m_stack.isEmpty()) {
+ m_stack.refill();
+ for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance; m_stack.canRemoveLast() && countdown--;)
+ visitChildren(*this, m_stack.removeLast());
+ donateKnownParallel();
+ }
+
+ mergeOpaqueRootsIfNecessary();
+ return;
+ }
+#endif
+
+ while (!m_stack.isEmpty()) {
+ m_stack.refill();
+ while (m_stack.canRemoveLast())
+ visitChildren(*this, m_stack.removeLast());
+ }
+}
+
+void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
+{
+ ASSERT(m_isInParallelMode);
+
+ ASSERT(Options::numberOfGCMarkers);
+
+ bool shouldBeParallel;
+
+#if ENABLE(PARALLEL_GC)
+ shouldBeParallel = Options::numberOfGCMarkers > 1;
+#else
+ ASSERT(Options::numberOfGCMarkers == 1);
+ shouldBeParallel = false;
+#endif
+
+ if (!shouldBeParallel) {
+ // This call should be a no-op.
+ ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
+ ASSERT(m_stack.isEmpty());
+ ASSERT(m_shared.m_sharedMarkStack.isEmpty());
+ return;
+ }
+
+#if ENABLE(PARALLEL_GC)
+ {
+ MutexLocker locker(m_shared.m_markingLock);
+ m_shared.m_numberOfActiveParallelMarkers++;
+ }
+ while (true) {
+ {
+ MutexLocker locker(m_shared.m_markingLock);
+ m_shared.m_numberOfActiveParallelMarkers--;
+
+ // How we wait differs depending on drain mode.
+ if (sharedDrainMode == MasterDrain) {
+ // Wait until either termination is reached, or until there is some work
+ // for us to do.
+ while (true) {
+ // Did we reach termination?
+ if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
+ return;
+
+ // Is there work to be done?
+ if (!m_shared.m_sharedMarkStack.isEmpty())
+ break;
+
+ // Otherwise wait.
+ m_shared.m_markingCondition.wait(m_shared.m_markingLock);
+ }
+ } else {
+ ASSERT(sharedDrainMode == SlaveDrain);
+
+ // Did we detect termination? If so, let the master know.
+ if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
+ m_shared.m_markingCondition.broadcast();
+
+ while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit)
+ m_shared.m_markingCondition.wait(m_shared.m_markingLock);
+
+ // Is the VM exiting? If so, exit this thread.
+ if (m_shared.m_parallelMarkersShouldExit)
+ return;
+ }
+
+ m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack);
+ m_shared.m_numberOfActiveParallelMarkers++;
+ }
+
+ drain();
+ }
+#endif
+}
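+
+// Termination protocol for the loop above: a marker counts as active while it
+// is draining outside the lock. Once the shared stack is empty and the active
+// count reaches zero, no new work can appear, so a slave broadcasts and the
+// master returns; slaves then sleep until more work arrives or
+// m_parallelMarkersShouldExit tells them to shut down.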
+
+void MarkStack::mergeOpaqueRoots()
+{
+ ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
+ {
+ MutexLocker locker(m_shared.m_opaqueRootsLock);
+ HashSet<void*>::iterator begin = m_opaqueRoots.begin();
+ HashSet<void*>::iterator end = m_opaqueRoots.end();
+ for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
+ m_shared.m_opaqueRoots.add(*iter);
+ }
+ m_opaqueRoots.clear();
+}
+
+void SlotVisitor::harvestWeakReferences()
+{
+ for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
+ current->visitWeakReferences(*this);
+}
+
+void SlotVisitor::finalizeUnconditionalFinalizers()
+{
+ while (m_shared.m_unconditionalFinalizers.hasNext())
+ m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
+}
+
+#if ENABLE(GC_VALIDATION)
+void MarkStack::validate(JSCell* cell)
+{
+ if (!cell)
+ CRASH();
+
+ if (!cell->structure())
+ CRASH();
+
+    // Both the cell's structure and the cell's structure's structure are
+    // Structure instances, so their ClassInfo pointers should match.
+ if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo())
+ CRASH();
+}
+#else
+void MarkStack::validate(JSCell*)
+{
+}
+#endif
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/MarkStack.h b/Source/JavaScriptCore/heap/MarkStack.h
new file mode 100644
index 000000000..1478011d9
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkStack.h
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MarkStack_h
+#define MarkStack_h
+
+#include "HandleTypes.h"
+#include "Options.h"
+#include "JSValue.h"
+#include "Register.h"
+#include "UnconditionalFinalizer.h"
+#include "VTableSpectrum.h"
+#include "WeakReferenceHarvester.h"
+#include <wtf/HashMap.h>
+#include <wtf/HashSet.h>
+#include <wtf/Vector.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/OSAllocator.h>
+#include <wtf/PageBlock.h>
+
+namespace JSC {
+
+ class ConservativeRoots;
+ class JSGlobalData;
+ class MarkStack;
+ class ParallelModeEnabler;
+ class Register;
+ class SlotVisitor;
+ template<typename T> class WriteBarrierBase;
+ template<typename T> class JITWriteBarrier;
+
+ struct MarkStackSegment {
+ MarkStackSegment* m_previous;
+#if !ASSERT_DISABLED
+ size_t m_top;
+#endif
+
+ const JSCell** data()
+ {
+ return bitwise_cast<const JSCell**>(this + 1);
+ }
+
+ static size_t capacityFromSize(size_t size)
+ {
+ return (size - sizeof(MarkStackSegment)) / sizeof(const JSCell*);
+ }
+
+ static size_t sizeFromCapacity(size_t capacity)
+ {
+ return sizeof(MarkStackSegment) + capacity * sizeof(const JSCell*);
+ }
+ };
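+
+    // A segment is laid out as a MarkStackSegment header followed immediately
+    // by its JSCell* payload. For example, assuming 8-byte pointers and a 4 KB
+    // Options::gcMarkStackSegmentSize (an illustrative value), a segment holds
+    // (4096 - sizeof(MarkStackSegment)) / 8 cells.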
+
+ class MarkStackSegmentAllocator {
+ public:
+ MarkStackSegmentAllocator();
+ ~MarkStackSegmentAllocator();
+
+ MarkStackSegment* allocate();
+ void release(MarkStackSegment*);
+
+ void shrinkReserve();
+
+ private:
+ Mutex m_lock;
+ MarkStackSegment* m_nextFreeSegment;
+ };
+
+ class MarkStackArray {
+ public:
+ MarkStackArray(MarkStackSegmentAllocator&);
+ ~MarkStackArray();
+
+ void append(const JSCell*);
+
+ bool canRemoveLast();
+ const JSCell* removeLast();
+ bool refill();
+
+ bool isEmpty();
+
+        bool canDonateSomeCells(); // Returns false if you should definitely not call donateSomeCellsTo().
+ bool donateSomeCellsTo(MarkStackArray& other); // Returns true if some cells were donated.
+
+ void stealSomeCellsFrom(MarkStackArray& other);
+
+ size_t size();
+
+ private:
+ MarkStackSegment* m_topSegment;
+
+ void expand();
+
+ MarkStackSegmentAllocator& m_allocator;
+
+ size_t m_segmentCapacity;
+ size_t m_top;
+ size_t m_numberOfPreviousSegments;
+
+ size_t postIncTop()
+ {
+ size_t result = m_top++;
+ ASSERT(result == m_topSegment->m_top++);
+ return result;
+ }
+
+ size_t preDecTop()
+ {
+ size_t result = --m_top;
+ ASSERT(result == --m_topSegment->m_top);
+ return result;
+ }
+
+ void setTopForFullSegment()
+ {
+ ASSERT(m_topSegment->m_top == m_segmentCapacity);
+ m_top = m_segmentCapacity;
+ }
+
+ void setTopForEmptySegment()
+ {
+ ASSERT(!m_topSegment->m_top);
+ m_top = 0;
+ }
+
+ size_t top()
+ {
+ ASSERT(m_top == m_topSegment->m_top);
+ return m_top;
+ }
+
+#if ASSERT_DISABLED
+ void validatePrevious() { }
+#else
+ void validatePrevious()
+ {
+ unsigned count = 0;
+ for (MarkStackSegment* current = m_topSegment->m_previous; current; current = current->m_previous)
+ count++;
+ ASSERT(count == m_numberOfPreviousSegments);
+ }
+#endif
+ };
+
+ class MarkStackThreadSharedData {
+ public:
+ MarkStackThreadSharedData(JSGlobalData*);
+ ~MarkStackThreadSharedData();
+
+ void reset();
+
+ private:
+ friend class MarkStack;
+ friend class SlotVisitor;
+
+#if ENABLE(PARALLEL_GC)
+ void markingThreadMain();
+        static void* markingThreadStartFunc(void* shared);
+#endif
+
+ JSGlobalData* m_globalData;
+
+ MarkStackSegmentAllocator m_segmentAllocator;
+
+ Vector<ThreadIdentifier> m_markingThreads;
+
+ Mutex m_markingLock;
+ ThreadCondition m_markingCondition;
+ MarkStackArray m_sharedMarkStack;
+ unsigned m_numberOfActiveParallelMarkers;
+ bool m_parallelMarkersShouldExit;
+
+ Mutex m_opaqueRootsLock;
+ HashSet<void*> m_opaqueRoots;
+
+ ListableHandler<WeakReferenceHarvester>::List m_weakReferenceHarvesters;
+ ListableHandler<UnconditionalFinalizer>::List m_unconditionalFinalizers;
+ };
+
+ class MarkStack {
+ WTF_MAKE_NONCOPYABLE(MarkStack);
+ friend class HeapRootVisitor; // Allowed to mark a JSValue* or JSCell** directly.
+
+ public:
+ MarkStack(MarkStackThreadSharedData&);
+ ~MarkStack();
+
+ void append(ConservativeRoots&);
+
+ template<typename T> void append(JITWriteBarrier<T>*);
+ template<typename T> void append(WriteBarrierBase<T>*);
+ void appendValues(WriteBarrierBase<Unknown>*, size_t count);
+
+ template<typename T>
+ void appendUnbarrieredPointer(T**);
+
+ void addOpaqueRoot(void*);
+ bool containsOpaqueRoot(void*);
+ int opaqueRootCount();
+
+ bool isEmpty() { return m_stack.isEmpty(); }
+
+ void reset();
+
+ size_t visitCount() const { return m_visitCount; }
+
+#if ENABLE(SIMPLE_HEAP_PROFILING)
+ VTableSpectrum m_visitedTypeCounts;
+#endif
+
+ void addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
+ {
+ m_shared.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
+ }
+
+ void addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
+ {
+ m_shared.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
+ }
+
+ protected:
+ static void validate(JSCell*);
+
+ void append(JSValue*);
+ void append(JSValue*, size_t count);
+ void append(JSCell**);
+
+ void internalAppend(JSCell*);
+ void internalAppend(JSValue);
+
+ void mergeOpaqueRoots();
+
+ void mergeOpaqueRootsIfNecessary()
+ {
+ if (m_opaqueRoots.isEmpty())
+ return;
+ mergeOpaqueRoots();
+ }
+
+ void mergeOpaqueRootsIfProfitable()
+ {
+ if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold)
+ return;
+ mergeOpaqueRoots();
+ }
+
+ MarkStackArray m_stack;
+ HashSet<void*> m_opaqueRoots; // Handle-owning data structures not visible to the garbage collector.
+
+#if !ASSERT_DISABLED
+ public:
+ bool m_isCheckingForDefaultMarkViolation;
+ bool m_isDraining;
+#endif
+ protected:
+ friend class ParallelModeEnabler;
+
+ size_t m_visitCount;
+ bool m_isInParallelMode;
+
+ MarkStackThreadSharedData& m_shared;
+ };
+
+ inline MarkStack::MarkStack(MarkStackThreadSharedData& shared)
+ : m_stack(shared.m_segmentAllocator)
+#if !ASSERT_DISABLED
+ , m_isCheckingForDefaultMarkViolation(false)
+ , m_isDraining(false)
+#endif
+ , m_visitCount(0)
+ , m_isInParallelMode(false)
+ , m_shared(shared)
+ {
+ }
+
+ inline MarkStack::~MarkStack()
+ {
+ ASSERT(m_stack.isEmpty());
+ }
+
+ inline void MarkStack::addOpaqueRoot(void* root)
+ {
+#if ENABLE(PARALLEL_GC)
+ if (Options::numberOfGCMarkers == 1) {
+ // Put directly into the shared HashSet.
+ m_shared.m_opaqueRoots.add(root);
+ return;
+ }
+ // Put into the local set, but merge with the shared one every once in
+ // a while to make sure that the local sets don't grow too large.
+ mergeOpaqueRootsIfProfitable();
+ m_opaqueRoots.add(root);
+#else
+ m_opaqueRoots.add(root);
+#endif
+ }
+
+ inline bool MarkStack::containsOpaqueRoot(void* root)
+ {
+ ASSERT(!m_isInParallelMode);
+#if ENABLE(PARALLEL_GC)
+ ASSERT(m_opaqueRoots.isEmpty());
+ return m_shared.m_opaqueRoots.contains(root);
+#else
+ return m_opaqueRoots.contains(root);
+#endif
+ }
+
+ inline int MarkStack::opaqueRootCount()
+ {
+ ASSERT(!m_isInParallelMode);
+#if ENABLE(PARALLEL_GC)
+ ASSERT(m_opaqueRoots.isEmpty());
+ return m_shared.m_opaqueRoots.size();
+#else
+ return m_opaqueRoots.size();
+#endif
+ }
+
+ inline void MarkStackArray::append(const JSCell* cell)
+ {
+ if (m_top == m_segmentCapacity)
+ expand();
+ m_topSegment->data()[postIncTop()] = cell;
+ }
+
+ inline bool MarkStackArray::canRemoveLast()
+ {
+ return !!m_top;
+ }
+
+ inline const JSCell* MarkStackArray::removeLast()
+ {
+ return m_topSegment->data()[preDecTop()];
+ }
+
+ inline bool MarkStackArray::isEmpty()
+ {
+ if (m_top)
+ return false;
+ if (m_topSegment->m_previous) {
+ ASSERT(m_topSegment->m_previous->m_top == m_segmentCapacity);
+ return false;
+ }
+ return true;
+ }
+
+ inline bool MarkStackArray::canDonateSomeCells()
+ {
+ size_t numberOfCellsToKeep = Options::minimumNumberOfCellsToKeep;
+        // Check whether we have enough cells to warrant donating any.
+ if (m_top <= numberOfCellsToKeep) {
+ // This indicates that we might not want to donate anything; check if we have
+ // another full segment. If not, then don't donate.
+ if (!m_topSegment->m_previous)
+ return false;
+
+ ASSERT(m_topSegment->m_previous->m_top == m_segmentCapacity);
+ }
+
+ return true;
+ }
+
+ inline size_t MarkStackArray::size()
+ {
+ return m_top + m_segmentCapacity * m_numberOfPreviousSegments;
+ }
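+
+    // For example, with m_segmentCapacity == 500, two full previous segments
+    // and m_top == 10, size() reports 10 + 500 * 2 == 1010 cells.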
+
+ ALWAYS_INLINE void MarkStack::append(JSValue* slot, size_t count)
+ {
+ for (size_t i = 0; i < count; ++i) {
+ JSValue& value = slot[i];
+ if (!value)
+ continue;
+ internalAppend(value);
+ }
+ }
+
+ template<typename T>
+ inline void MarkStack::appendUnbarrieredPointer(T** slot)
+ {
+ ASSERT(slot);
+ JSCell* cell = *slot;
+ if (cell)
+ internalAppend(cell);
+ }
+
+ ALWAYS_INLINE void MarkStack::append(JSValue* slot)
+ {
+ ASSERT(slot);
+ internalAppend(*slot);
+ }
+
+ ALWAYS_INLINE void MarkStack::append(JSCell** slot)
+ {
+ ASSERT(slot);
+ internalAppend(*slot);
+ }
+
+ ALWAYS_INLINE void MarkStack::internalAppend(JSValue value)
+ {
+ ASSERT(value);
+ if (!value.isCell())
+ return;
+ internalAppend(value.asCell());
+ }
+
+ class ParallelModeEnabler {
+ public:
+ ParallelModeEnabler(MarkStack& stack)
+ : m_stack(stack)
+ {
+ ASSERT(!m_stack.m_isInParallelMode);
+ m_stack.m_isInParallelMode = true;
+ }
+
+ ~ParallelModeEnabler()
+ {
+ ASSERT(m_stack.m_isInParallelMode);
+ m_stack.m_isInParallelMode = false;
+ }
+
+ private:
+ MarkStack& m_stack;
+ };
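+
+    // ParallelModeEnabler is a scope guard; markingThreadMain() uses it as:
+    //
+    //     ParallelModeEnabler enabler(slotVisitor);
+    //     slotVisitor.drainFromShared(SlotVisitor::SlaveDrain);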
+
+} // namespace JSC
+
+#endif
diff --git a/Source/JavaScriptCore/heap/MarkedBlock.cpp b/Source/JavaScriptCore/heap/MarkedBlock.cpp
new file mode 100644
index 000000000..771c9c082
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkedBlock.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MarkedBlock.h"
+
+#include "JSCell.h"
+#include "JSObject.h"
+#include "ScopeChain.h"
+
+namespace JSC {
+
+MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize)
+{
+ PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages);
+ if (!static_cast<bool>(allocation))
+ CRASH();
+ return new (NotNull, allocation.base()) MarkedBlock(allocation, heap, cellSize);
+}
+
+MarkedBlock* MarkedBlock::recycle(MarkedBlock* block, size_t cellSize)
+{
+ return new (NotNull, block) MarkedBlock(block->m_allocation, block->m_heap, cellSize);
+}
+
+void MarkedBlock::destroy(MarkedBlock* block)
+{
+ block->m_allocation.deallocate();
+}
+
+MarkedBlock::MarkedBlock(const PageAllocationAligned& allocation, Heap* heap, size_t cellSize)
+ : m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
+ , m_endAtom(atomsPerBlock - m_atomsPerCell + 1)
+ , m_state(New) // All cells start out unmarked.
+ , m_allocation(allocation)
+ , m_heap(heap)
+{
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+}
+
+inline void MarkedBlock::callDestructor(JSCell* cell)
+{
+ // A previous eager sweep may already have run cell's destructor.
+ if (cell->isZapped())
+ return;
+
+#if ENABLE(SIMPLE_HEAP_PROFILING)
+    // The first word of a not-yet-zapped cell is its vtable pointer.
+    m_heap->m_destroyedTypeCounts.countVPtr(*reinterpret_cast<void**>(cell));
+#endif
+ if (cell->classInfo() != &JSFinalObject::s_info)
+ cell->methodTable()->destroy(cell);
+
+ cell->zap();
+}
+
+template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode>
+MarkedBlock::FreeCell* MarkedBlock::specializedSweep()
+{
+ ASSERT(blockState != Allocated && blockState != FreeListed);
+
+ // This produces a free list that is ordered in reverse through the block.
+ // This is fine, since the allocation code makes no assumptions about the
+ // order of the free list.
+ FreeCell* head = 0;
+ for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
+ if (blockState == Marked && m_marks.get(i))
+ continue;
+
+ JSCell* cell = reinterpret_cast<JSCell*>(&atoms()[i]);
+ if (blockState == Zapped && !cell->isZapped())
+ continue;
+
+ if (blockState != New)
+ callDestructor(cell);
+
+ if (sweepMode == SweepToFreeList) {
+ FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
+ freeCell->next = head;
+ head = freeCell;
+ }
+ }
+
+ m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Zapped);
+ return head;
+}
+
+MarkedBlock::FreeCell* MarkedBlock::sweep(SweepMode sweepMode)
+{
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+ switch (m_state) {
+ case New:
+ ASSERT(sweepMode == SweepToFreeList);
+ return specializedSweep<New, SweepToFreeList>();
+ case FreeListed:
+ // Happens when a block transitions to fully allocated.
+ ASSERT(sweepMode == SweepToFreeList);
+ return 0;
+ case Allocated:
+ ASSERT_NOT_REACHED();
+ return 0;
+ case Marked:
+ return sweepMode == SweepToFreeList
+ ? specializedSweep<Marked, SweepToFreeList>()
+ : specializedSweep<Marked, SweepOnly>();
+ case Zapped:
+ return sweepMode == SweepToFreeList
+ ? specializedSweep<Zapped, SweepToFreeList>()
+ : specializedSweep<Zapped, SweepOnly>();
+ }
+
+ ASSERT_NOT_REACHED();
+ return 0;
+}
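+
+// Block life cycle as exercised by sweep() above: a block starts New, becomes
+// FreeListed when swept for allocation, Allocated once its free list has been
+// consumed, Marked by a collection, and Zapped when a sweep or zapFreeList()
+// restores canonical liveness data.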
+
+void MarkedBlock::zapFreeList(FreeCell* firstFreeCell)
+{
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+ if (m_state == Marked) {
+ // If the block is in the Marked state then we know that:
+ // 1) It was not used for allocation during the previous allocation cycle.
+ // 2) It may have dead objects, and we only know them to be dead by the
+ // fact that their mark bits are unset.
+ // Hence if the block is Marked we need to leave it Marked.
+
+ ASSERT(!firstFreeCell);
+
+ return;
+ }
+
+ if (m_state == Zapped) {
+ // If the block is in the Zapped state then we know that someone already
+ // zapped it for us. This could not have happened during a GC, but might
+ // be the result of someone having done a GC scan to perform some operation
+ // over all live objects (or all live blocks). It also means that somebody
+ // had allocated in this block since the last GC, swept all dead objects
+ // onto the free list, left the block in the FreeListed state, then the heap
+ // scan happened, and canonicalized the block, leading to all dead objects
+ // being zapped. Therefore, it is safe for us to simply do nothing, since
+ // dead objects will have 0 in their vtables and live objects will have
+ // non-zero vtables, which is consistent with the block being zapped.
+
+ ASSERT(!firstFreeCell);
+
+ return;
+ }
+
+ ASSERT(m_state == FreeListed);
+
+ // Roll back to a coherent state for Heap introspection. Cells newly
+ // allocated from our free list are not currently marked, so we need another
+ // way to tell what's live vs dead. We use zapping for that.
+
+ FreeCell* next;
+ for (FreeCell* current = firstFreeCell; current; current = next) {
+ next = current->next;
+ reinterpret_cast<JSCell*>(current)->zap();
+ }
+
+ m_state = Zapped;
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/MarkedBlock.h b/Source/JavaScriptCore/heap/MarkedBlock.h
new file mode 100644
index 000000000..8c665dd5b
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkedBlock.h
@@ -0,0 +1,432 @@
+/*
+ * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
+ * Copyright (C) 2001 Peter Kelly (pmk@post.com)
+ * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef MarkedBlock_h
+#define MarkedBlock_h
+
+#include "CardSet.h"
+
+#include <wtf/Bitmap.h>
+#include <wtf/DoublyLinkedList.h>
+#include <wtf/HashFunctions.h>
+#include <wtf/PageAllocationAligned.h>
+#include <wtf/StdLibExtras.h>
+#include <wtf/Vector.h>
+
+// Set to 1 to log state transitions of blocks.
+#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0
+
+#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
+#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do { \
+ printf("%s:%d %s: block %s = %p, %d\n", \
+ __FILE__, __LINE__, __FUNCTION__, #block, (block), (block)->m_state); \
+ } while (false)
+#else
+#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
+#endif
+
+namespace JSC {
+
+ class Heap;
+ class JSCell;
+
+ typedef uintptr_t Bits;
+
+ static const size_t KB = 1024;
+ static const size_t MB = 1024 * 1024;
+
+ bool isZapped(const JSCell*);
+
+ // A marked block is a page-aligned container for heap-allocated objects.
+ // Objects are allocated within cells of the marked block. For a given
+ // marked block, all cells have the same size. Objects smaller than the
+ // cell size may be allocated in the marked block, in which case the
+ // allocation suffers from internal fragmentation: wasted space whose
+ // size is equal to the difference between the cell size and the object
+ // size.
+
+ class MarkedBlock : public DoublyLinkedListNode<MarkedBlock> {
+ friend class WTF::DoublyLinkedListNode<MarkedBlock>;
+ public:
+ // Ensure natural alignment for native types whilst recognizing that the smallest
+ // object the heap will commonly allocate is four words.
+ static const size_t atomSize = 4 * sizeof(void*);
+ static const size_t atomShift = 5;
+ static const size_t blockSize = 16 * KB;
+ static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.
+
+ static const size_t atomsPerBlock = blockSize / atomSize; // ~0.4% overhead
+ static const size_t atomMask = atomsPerBlock - 1;
+ static const int cardShift = 8; // This is log2 of bytes per card.
+ static const size_t bytesPerCard = 1 << cardShift;
+ static const int cardCount = blockSize / bytesPerCard;
+ static const int cardMask = cardCount - 1;
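+        // With the constants above, a 16 KB block holds 16384 / 256 == 64 cards
+        // and, on 64-bit targets (atomSize == 32), 512 atoms.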
+
+ struct FreeCell {
+ FreeCell* next;
+ };
+
+ struct VoidFunctor {
+ typedef void ReturnType;
+ void returnValue() { }
+ };
+
+ static MarkedBlock* create(Heap*, size_t cellSize);
+ static MarkedBlock* recycle(MarkedBlock*, size_t cellSize);
+ static void destroy(MarkedBlock*);
+
+ static bool isAtomAligned(const void*);
+ static MarkedBlock* blockFor(const void*);
+ static size_t firstAtom();
+
+ Heap* heap() const;
+
+ void* allocate();
+
+ enum SweepMode { SweepOnly, SweepToFreeList };
+ FreeCell* sweep(SweepMode = SweepOnly);
+
+ // While allocating from a free list, MarkedBlock temporarily has bogus
+ // cell liveness data. To restore accurate cell liveness data, call one
+ // of these functions:
+ void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
+ void zapFreeList(FreeCell* firstFreeCell); // Call this to undo the free list.
+
+ void clearMarks();
+ size_t markCount();
+ bool markCountIsZero(); // Faster than markCount().
+
+ size_t cellSize();
+
+ size_t size();
+ size_t capacity();
+
+ bool isMarked(const void*);
+ bool testAndSetMarked(const void*);
+ bool isLive(const JSCell*);
+ bool isLiveCell(const void*);
+ void setMarked(const void*);
+
+#if ENABLE(GGC)
+ void setDirtyObject(const void* atom)
+ {
+ ASSERT(MarkedBlock::blockFor(atom) == this);
+ m_cards.markCardForAtom(atom);
+ }
+
+ uint8_t* addressOfCardFor(const void* atom)
+ {
+ ASSERT(MarkedBlock::blockFor(atom) == this);
+ return &m_cards.cardForAtom(atom);
+ }
+
+ static inline size_t offsetOfCards()
+ {
+ return OBJECT_OFFSETOF(MarkedBlock, m_cards);
+ }
+
+ static inline size_t offsetOfMarks()
+ {
+ return OBJECT_OFFSETOF(MarkedBlock, m_marks);
+ }
+
+ typedef Vector<JSCell*, 32> DirtyCellVector;
+ inline void gatherDirtyCells(DirtyCellVector&);
+ template <int size> inline void gatherDirtyCellsWithSize(DirtyCellVector&);
+#endif
+
+ template <typename Functor> void forEachCell(Functor&);
+
+ private:
+ static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.
+
+ enum BlockState { New, FreeListed, Allocated, Marked, Zapped };
+
+ typedef char Atom[atomSize];
+
+ MarkedBlock(const PageAllocationAligned&, Heap*, size_t cellSize);
+ Atom* atoms();
+ size_t atomNumber(const void*);
+ void callDestructor(JSCell*);
+ template<BlockState, SweepMode> FreeCell* specializedSweep();
+
+#if ENABLE(GGC)
+ CardSet<bytesPerCard, blockSize> m_cards;
+#endif
+
+ size_t m_atomsPerCell;
+ size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
+#if ENABLE(PARALLEL_GC)
+ WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic> m_marks;
+#else
+ WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
+#endif
+ BlockState m_state;
+ PageAllocationAligned m_allocation;
+ Heap* m_heap;
+ MarkedBlock* m_prev;
+ MarkedBlock* m_next;
+ };
+
+ inline size_t MarkedBlock::firstAtom()
+ {
+ return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
+ }
+
+ inline MarkedBlock::Atom* MarkedBlock::atoms()
+ {
+ return reinterpret_cast<Atom*>(this);
+ }
+
+ inline bool MarkedBlock::isAtomAligned(const void* p)
+ {
+ return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
+ }
+
+ inline MarkedBlock* MarkedBlock::blockFor(const void* p)
+ {
+ return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
+ }
+
+ inline Heap* MarkedBlock::heap() const
+ {
+ return m_heap;
+ }
+
+ inline void MarkedBlock::didConsumeFreeList()
+ {
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+ ASSERT(m_state == FreeListed);
+ m_state = Allocated;
+ }
+
+ inline void MarkedBlock::clearMarks()
+ {
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+ ASSERT(m_state != New && m_state != FreeListed);
+ m_marks.clearAll();
+
+ // This will become true at the end of the mark phase. We set it now to
+ // avoid an extra pass to do so later.
+ m_state = Marked;
+ }
+
+ inline size_t MarkedBlock::markCount()
+ {
+ return m_marks.count();
+ }
+
+ inline bool MarkedBlock::markCountIsZero()
+ {
+ return m_marks.isEmpty();
+ }
+
+ inline size_t MarkedBlock::cellSize()
+ {
+ return m_atomsPerCell * atomSize;
+ }
+
+ inline size_t MarkedBlock::size()
+ {
+ return markCount() * cellSize();
+ }
+
+ inline size_t MarkedBlock::capacity()
+ {
+ return m_allocation.size();
+ }
+
+ inline size_t MarkedBlock::atomNumber(const void* p)
+ {
+ return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
+ }
+
+ inline bool MarkedBlock::isMarked(const void* p)
+ {
+ return m_marks.get(atomNumber(p));
+ }
+
+ inline bool MarkedBlock::testAndSetMarked(const void* p)
+ {
+ return m_marks.concurrentTestAndSet(atomNumber(p));
+ }
+
+ inline void MarkedBlock::setMarked(const void* p)
+ {
+ m_marks.set(atomNumber(p));
+ }
+
+ inline bool MarkedBlock::isLive(const JSCell* cell)
+ {
+ switch (m_state) {
+ case Allocated:
+ return true;
+ case Zapped:
+ if (isZapped(cell)) {
+ // Object dead in previous collection, not allocated since previous collection: mark bit should not be set.
+ ASSERT(!m_marks.get(atomNumber(cell)));
+ return false;
+ }
+
+ // Newly allocated objects: mark bit not set.
+ // Objects that survived prior collection: mark bit set.
+ return true;
+ case Marked:
+ return m_marks.get(atomNumber(cell));
+
+ case New:
+ case FreeListed:
+ ASSERT_NOT_REACHED();
+ return false;
+ }
+
+ ASSERT_NOT_REACHED();
+ return false;
+ }
+
+ inline bool MarkedBlock::isLiveCell(const void* p)
+ {
+ ASSERT(MarkedBlock::isAtomAligned(p));
+ size_t atomNumber = this->atomNumber(p);
+ size_t firstAtom = this->firstAtom();
+ if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
+ return false;
+ if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
+ return false;
+
+ return isLive(static_cast<const JSCell*>(p));
+ }
+
+ template <typename Functor> inline void MarkedBlock::forEachCell(Functor& functor)
+ {
+ for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
+ JSCell* cell = reinterpret_cast<JSCell*>(&atoms()[i]);
+ if (!isLive(cell))
+ continue;
+
+ functor(cell);
+ }
+ }
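+
+    // A minimal functor sketch for forEachCell (hypothetical):
+    //
+    //     struct CountCells {
+    //         size_t count;
+    //         CountCells() : count(0) { }
+    //         void operator()(JSCell*) { ++count; }
+    //     };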
+
+#if ENABLE(GGC)
+template <int _cellSize> void MarkedBlock::gatherDirtyCellsWithSize(DirtyCellVector& dirtyCells)
+{
+ if (m_cards.testAndClear(0)) {
+ char* ptr = reinterpret_cast<char*>(&atoms()[firstAtom()]);
+ const char* end = reinterpret_cast<char*>(this) + bytesPerCard;
+ while (ptr < end) {
+ JSCell* cell = reinterpret_cast<JSCell*>(ptr);
+ if (isMarked(cell))
+ dirtyCells.append(cell);
+ ptr += _cellSize;
+ }
+ }
+
+ const size_t cellOffset = firstAtom() * atomSize % _cellSize;
+ for (size_t i = 1; i < m_cards.cardCount; i++) {
+ if (!m_cards.testAndClear(i))
+ continue;
+ char* ptr = reinterpret_cast<char*>(this) + i * bytesPerCard + cellOffset;
+ char* end = reinterpret_cast<char*>(this) + (i + 1) * bytesPerCard;
+
+ while (ptr < end) {
+ JSCell* cell = reinterpret_cast<JSCell*>(ptr);
+ if (isMarked(cell))
+ dirtyCells.append(cell);
+ ptr += _cellSize;
+ }
+ }
+}
+
+void MarkedBlock::gatherDirtyCells(DirtyCellVector& dirtyCells)
+{
+ COMPILE_ASSERT((int)m_cards.cardCount == (int)cardCount, MarkedBlockCardCountsMatch);
+
+ ASSERT(m_state != New && m_state != FreeListed);
+
+ // This is an optimisation to avoid having to walk the set of marked
+ // blocks twice during GC.
+ m_state = Marked;
+
+ if (markCountIsZero())
+ return;
+
+ size_t cellSize = this->cellSize();
+ if (cellSize == 32) {
+ gatherDirtyCellsWithSize<32>(dirtyCells);
+ return;
+ }
+ if (cellSize == 64) {
+ gatherDirtyCellsWithSize<64>(dirtyCells);
+ return;
+ }
+
+ const size_t firstCellOffset = firstAtom() * atomSize % cellSize;
+
+ if (m_cards.testAndClear(0)) {
+ char* ptr = reinterpret_cast<char*>(this) + firstAtom() * atomSize;
+ char* end = reinterpret_cast<char*>(this) + bytesPerCard;
+ while (ptr < end) {
+ JSCell* cell = reinterpret_cast<JSCell*>(ptr);
+ if (isMarked(cell))
+ dirtyCells.append(cell);
+ ptr += cellSize;
+ }
+ }
+ for (size_t i = 1; i < m_cards.cardCount; i++) {
+ if (!m_cards.testAndClear(i))
+ continue;
+ char* ptr = reinterpret_cast<char*>(this) + firstCellOffset + cellSize * ((i * bytesPerCard + cellSize - 1 - firstCellOffset) / cellSize);
+ char* end = reinterpret_cast<char*>(this) + std::min((i + 1) * bytesPerCard, m_endAtom * atomSize);
+
+ while (ptr < end) {
+ JSCell* cell = reinterpret_cast<JSCell*>(ptr);
+ if (isMarked(cell))
+ dirtyCells.append(cell);
+ ptr += cellSize;
+ }
+ }
+}
+#endif
+
+} // namespace JSC
+
+namespace WTF {
+
+ struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
+ static unsigned hash(JSC::MarkedBlock* const& key)
+ {
+ // Aligned VM regions tend to be monotonically increasing integers,
+ // which is a great hash function, but we have to remove the low bits,
+ // since they're always zero, which is a terrible hash function!
+ return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
+ }
+ };
+
+ template<> struct DefaultHash<JSC::MarkedBlock*> {
+ typedef MarkedBlockHash Hash;
+ };
+
+} // namespace WTF
+
+#endif // MarkedBlock_h
diff --git a/Source/JavaScriptCore/heap/MarkedBlockSet.h b/Source/JavaScriptCore/heap/MarkedBlockSet.h
new file mode 100644
index 000000000..022a17389
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkedBlockSet.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MarkedBlockSet_h
+#define MarkedBlockSet_h
+
+#include "MarkedBlock.h"
+#include "TinyBloomFilter.h"
+#include <wtf/HashSet.h>
+
+namespace JSC {
+
+class MarkedBlock;
+
+class MarkedBlockSet {
+public:
+ void add(MarkedBlock*);
+ void remove(MarkedBlock*);
+
+ TinyBloomFilter filter() const;
+ const HashSet<MarkedBlock*>& set() const;
+
+private:
+ void recomputeFilter();
+
+ TinyBloomFilter m_filter;
+ HashSet<MarkedBlock*> m_set;
+};
+
+inline void MarkedBlockSet::add(MarkedBlock* block)
+{
+ m_filter.add(reinterpret_cast<Bits>(block));
+ m_set.add(block);
+}
+
+inline void MarkedBlockSet::remove(MarkedBlock* block)
+{
+ int oldCapacity = m_set.capacity();
+ m_set.remove(block);
+ if (m_set.capacity() != oldCapacity) // Indicates we've removed a lot of blocks.
+ recomputeFilter();
+}
+
+inline void MarkedBlockSet::recomputeFilter()
+{
+ TinyBloomFilter filter;
+ for (HashSet<MarkedBlock*>::iterator it = m_set.begin(); it != m_set.end(); ++it)
+ filter.add(reinterpret_cast<Bits>(*it));
+ m_filter = filter;
+}
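+
+// Removing a block does not clear its bits from the Bloom filter; stale bits
+// only cause false positives, which are harmless, so the filter is recomputed
+// lazily, when the hash set's capacity change suggests many removals.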
+
+inline TinyBloomFilter MarkedBlockSet::filter() const
+{
+ return m_filter;
+}
+
+inline const HashSet<MarkedBlock*>& MarkedBlockSet::set() const
+{
+ return m_set;
+}
+
+} // namespace JSC
+
+#endif // MarkedBlockSet_h
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.cpp b/Source/JavaScriptCore/heap/MarkedSpace.cpp
new file mode 100644
index 000000000..acbd8acac
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkedSpace.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include "config.h"
+#include "MarkedSpace.h"
+
+#include "JSGlobalObject.h"
+#include "JSLock.h"
+#include "JSObject.h"
+#include "ScopeChain.h"
+
+namespace JSC {
+
+class Structure;
+
+MarkedSpace::MarkedSpace(Heap* heap)
+ : m_waterMark(0)
+ , m_nurseryWaterMark(0)
+ , m_highWaterMark(0)
+ , m_heap(heap)
+{
+ for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep)
+ sizeClassFor(cellSize).cellSize = cellSize;
+
+ for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep)
+ sizeClassFor(cellSize).cellSize = cellSize;
+}
+
+void MarkedSpace::addBlock(SizeClass& sizeClass, MarkedBlock* block)
+{
+ ASSERT(!sizeClass.currentBlock);
+ ASSERT(!sizeClass.firstFreeCell);
+
+ sizeClass.blockList.append(block);
+ sizeClass.currentBlock = block;
+ sizeClass.firstFreeCell = block->sweep(MarkedBlock::SweepToFreeList);
+}
+
+void MarkedSpace::removeBlock(MarkedBlock* block)
+{
+ SizeClass& sizeClass = sizeClassFor(block->cellSize());
+ if (sizeClass.currentBlock == block)
+ sizeClass.currentBlock = 0;
+ sizeClass.blockList.remove(block);
+}
+
+void MarkedSpace::resetAllocator()
+{
+ m_waterMark = 0;
+ m_nurseryWaterMark = 0;
+
+ for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep)
+ sizeClassFor(cellSize).resetAllocator();
+
+ for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep)
+ sizeClassFor(cellSize).resetAllocator();
+}
+
+void MarkedSpace::canonicalizeCellLivenessData()
+{
+ for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep)
+ sizeClassFor(cellSize).zapFreeList();
+
+ for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep)
+ sizeClassFor(cellSize).zapFreeList();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.h b/Source/JavaScriptCore/heap/MarkedSpace.h
new file mode 100644
index 000000000..751fe2fee
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkedSpace.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
+ * Copyright (C) 2001 Peter Kelly (pmk@post.com)
+ * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef MarkedSpace_h
+#define MarkedSpace_h
+
+#include "MachineStackMarker.h"
+#include "MarkedBlock.h"
+#include "PageAllocationAligned.h"
+#include <wtf/Bitmap.h>
+#include <wtf/DoublyLinkedList.h>
+#include <wtf/FixedArray.h>
+#include <wtf/HashSet.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/Vector.h>
+
+#define ASSERT_CLASS_FITS_IN_CELL(class) COMPILE_ASSERT(sizeof(class) <= MarkedSpace::maxCellSize, class_fits_in_cell)
+
+namespace JSC {
+
+class Heap;
+class JSCell;
+class LiveObjectIterator;
+class WeakGCHandle;
+class SlotVisitor;
+
+class MarkedSpace {
+ WTF_MAKE_NONCOPYABLE(MarkedSpace);
+public:
+ static const size_t maxCellSize = 2048;
+
+ struct SizeClass {
+ SizeClass();
+ void resetAllocator();
+ void zapFreeList();
+
+ MarkedBlock::FreeCell* firstFreeCell;
+ MarkedBlock* currentBlock;
+ DoublyLinkedList<MarkedBlock> blockList;
+ size_t cellSize;
+ };
+
+ MarkedSpace(Heap*);
+
+ SizeClass& sizeClassFor(size_t);
+ void* allocate(SizeClass&);
+
+ void resetAllocator();
+
+ void addBlock(SizeClass&, MarkedBlock*);
+ void removeBlock(MarkedBlock*);
+
+ void canonicalizeCellLivenessData();
+
+ size_t waterMark();
+ size_t highWaterMark();
+ size_t nurseryWaterMark();
+ void setHighWaterMark(size_t);
+
+ template<typename Functor> typename Functor::ReturnType forEachBlock(Functor&); // Safe to remove the current item while iterating.
+ template<typename Functor> typename Functor::ReturnType forEachBlock();
+
+private:
+ // [ 32... 256 ]
+ static const size_t preciseStep = MarkedBlock::atomSize;
+ static const size_t preciseCutoff = 256;
+ static const size_t preciseCount = preciseCutoff / preciseStep;
+
+ // [ 512... 2048 ]
+ static const size_t impreciseStep = preciseCutoff;
+ static const size_t impreciseCutoff = maxCellSize;
+ static const size_t impreciseCount = impreciseCutoff / impreciseStep;
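+
+    // For example, assuming MarkedBlock::atomSize is 32 (per the range comments
+    // above), sizeClassFor(40) returns m_preciseSizeClasses[(40 - 1) / 32], the
+    // 64-byte class, and sizeClassFor(300) returns
+    // m_impreciseSizeClasses[(300 - 1) / 256], the 512-byte class.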
+
+ FixedArray<SizeClass, preciseCount> m_preciseSizeClasses;
+ FixedArray<SizeClass, impreciseCount> m_impreciseSizeClasses;
+ size_t m_waterMark;
+ size_t m_nurseryWaterMark;
+ size_t m_highWaterMark;
+ Heap* m_heap;
+};
+
+inline size_t MarkedSpace::waterMark()
+{
+ return m_waterMark;
+}
+
+inline size_t MarkedSpace::highWaterMark()
+{
+ return m_highWaterMark;
+}
+
+inline size_t MarkedSpace::nurseryWaterMark()
+{
+ return m_nurseryWaterMark;
+}
+
+inline void MarkedSpace::setHighWaterMark(size_t highWaterMark)
+{
+ m_highWaterMark = highWaterMark;
+}
+
+inline MarkedSpace::SizeClass& MarkedSpace::sizeClassFor(size_t bytes)
+{
+ ASSERT(bytes && bytes <= maxCellSize);
+ if (bytes <= preciseCutoff)
+ return m_preciseSizeClasses[(bytes - 1) / preciseStep];
+ return m_impreciseSizeClasses[(bytes - 1) / impreciseStep];
+}
+
+inline void* MarkedSpace::allocate(SizeClass& sizeClass)
+{
+ MarkedBlock::FreeCell* firstFreeCell = sizeClass.firstFreeCell;
+ if (!firstFreeCell) {
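+        // Lazily sweep blocks in this size class until one yields a free cell.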
+ for (MarkedBlock*& block = sizeClass.currentBlock; block; block = block->next()) {
+ firstFreeCell = block->sweep(MarkedBlock::SweepToFreeList);
+ if (firstFreeCell)
+ break;
+ m_nurseryWaterMark += block->capacity() - block->size();
+ m_waterMark += block->capacity();
+ block->didConsumeFreeList();
+ }
+
+ if (!firstFreeCell)
+ return 0;
+ }
+
+ ASSERT(firstFreeCell);
+
+ sizeClass.firstFreeCell = firstFreeCell->next;
+ return firstFreeCell;
+}
+
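+// Illustrative call pattern (a sketch; 'space', 'bytes', and the retry logic
+// are hypothetical stand-ins for the caller):
+//
+//     MarkedSpace::SizeClass& sizeClass = space.sizeClassFor(bytes);
+//     if (void* result = space.allocate(sizeClass))
+//         return result;
+//     // ... add a fresh MarkedBlock via space.addBlock(sizeClass, block),
+//     // then retry space.allocate(sizeClass) ...
+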
+template <typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachBlock(Functor& functor)
+{
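+    // Capture 'next' before invoking the functor, so the functor may safely
+    // remove 'block' from its list.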
+ for (size_t i = 0; i < preciseCount; ++i) {
+ SizeClass& sizeClass = m_preciseSizeClasses[i];
+ MarkedBlock* next;
+ for (MarkedBlock* block = sizeClass.blockList.head(); block; block = next) {
+ next = block->next();
+ functor(block);
+ }
+ }
+
+ for (size_t i = 0; i < impreciseCount; ++i) {
+ SizeClass& sizeClass = m_impreciseSizeClasses[i];
+ MarkedBlock* next;
+ for (MarkedBlock* block = sizeClass.blockList.head(); block; block = next) {
+ next = block->next();
+ functor(block);
+ }
+ }
+
+ return functor.returnValue();
+}
+
+template <typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachBlock()
+{
+ Functor functor;
+ return forEachBlock(functor);
+}
+
+inline MarkedSpace::SizeClass::SizeClass()
+ : firstFreeCell(0)
+ , currentBlock(0)
+ , cellSize(0)
+{
+}
+
+inline void MarkedSpace::SizeClass::resetAllocator()
+{
+ currentBlock = blockList.head();
+}
+
+inline void MarkedSpace::SizeClass::zapFreeList()
+{
+ if (!currentBlock) {
+ ASSERT(!firstFreeCell);
+ return;
+ }
+
+ currentBlock->zapFreeList(firstFreeCell);
+ firstFreeCell = 0;
+}
+
+} // namespace JSC
+
+#endif // MarkedSpace_h
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.h b/Source/JavaScriptCore/heap/SlotVisitor.h
new file mode 100644
index 000000000..142d8ca49
--- /dev/null
+++ b/Source/JavaScriptCore/heap/SlotVisitor.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SlotVisitor_h
+#define SlotVisitor_h
+
+#include "MarkStack.h"
+
+namespace JSC {
+
+class Heap;
+
+class SlotVisitor : public MarkStack {
+ friend class HeapRootVisitor;
+public:
+ SlotVisitor(MarkStackThreadSharedData&);
+
+ void donate()
+ {
+ ASSERT(m_isInParallelMode);
+ if (Options::numberOfGCMarkers == 1)
+ return;
+
+ donateKnownParallel();
+ }
+
+ void drain();
+
+ void donateAndDrain()
+ {
+ donate();
+ drain();
+ }
+
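+    // In parallel marking, the master thread drains the shared stack and hands
+    // out work, while slave threads wait on it for donated cells.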
+ enum SharedDrainMode { SlaveDrain, MasterDrain };
+ void drainFromShared(SharedDrainMode);
+
+ void harvestWeakReferences();
+ void finalizeUnconditionalFinalizers();
+
+private:
+ void donateSlow();
+
+ void donateKnownParallel()
+ {
+ if (!m_stack.canDonateSomeCells())
+ return;
+ donateSlow();
+ }
+};
+
+inline SlotVisitor::SlotVisitor(MarkStackThreadSharedData& shared)
+ : MarkStack(shared)
+{
+}
+
+} // namespace JSC
+
+#endif // SlotVisitor_h
diff --git a/Source/JavaScriptCore/heap/Strong.h b/Source/JavaScriptCore/heap/Strong.h
new file mode 100644
index 000000000..a9389fa53
--- /dev/null
+++ b/Source/JavaScriptCore/heap/Strong.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Strong_h
+#define Strong_h
+
+#include "Assertions.h"
+#include "Handle.h"
+#include "HandleHeap.h"
+
+namespace JSC {
+
+class JSGlobalData;
+
+// A strongly referenced handle that prevents the object it points to from being garbage collected.
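+//
+// Typical use (a sketch; 'globalData' stands for the owning VM's JSGlobalData):
+//
+//     Strong<JSFunction> protectedFunction(globalData, function);
+//     // 'function' now survives garbage collection until protectedFunction
+//     // is cleared or destroyed.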
+template <typename T> class Strong : public Handle<T> {
+ using Handle<T>::slot;
+ using Handle<T>::setSlot;
+
+public:
+ typedef typename Handle<T>::ExternalType ExternalType;
+
+ Strong()
+ : Handle<T>()
+ {
+ }
+
+ Strong(JSGlobalData&, ExternalType = ExternalType());
+
+ Strong(JSGlobalData&, Handle<T>);
+
+ Strong(const Strong& other)
+ : Handle<T>()
+ {
+ if (!other.slot())
+ return;
+ setSlot(HandleHeap::heapFor(other.slot())->allocate());
+ set(other.get());
+ }
+
+ template <typename U> Strong(const Strong<U>& other)
+ : Handle<T>()
+ {
+ if (!other.slot())
+ return;
+ setSlot(HandleHeap::heapFor(other.slot())->allocate());
+ set(other.get());
+ }
+
+ enum HashTableDeletedValueTag { HashTableDeletedValue };
+ bool isHashTableDeletedValue() const { return slot() == hashTableDeletedValue(); }
+ Strong(HashTableDeletedValueTag)
+ : Handle<T>(hashTableDeletedValue())
+ {
+ }
+
+ ~Strong()
+ {
+ clear();
+ }
+
+ void swap(Strong& other)
+ {
+ Handle<T>::swap(other);
+ }
+
+ void set(JSGlobalData&, ExternalType);
+
+ template <typename U> Strong& operator=(const Strong<U>& other)
+ {
+ if (!other.slot()) {
+ clear();
+ return *this;
+ }
+
+ set(*HandleHeap::heapFor(other.slot())->globalData(), other.get());
+ return *this;
+ }
+
+ Strong& operator=(const Strong& other)
+ {
+ if (!other.slot()) {
+ clear();
+ return *this;
+ }
+
+ set(*HandleHeap::heapFor(other.slot())->globalData(), other.get());
+ return *this;
+ }
+
+ void clear()
+ {
+ if (!slot())
+ return;
+ HandleHeap::heapFor(slot())->deallocate(slot());
+ setSlot(0);
+ }
+
+private:
+ static HandleSlot hashTableDeletedValue() { return reinterpret_cast<HandleSlot>(-1); }
+
+ void set(ExternalType externalType)
+ {
+ ASSERT(slot());
+ JSValue value = HandleTypes<T>::toJSValue(externalType);
+ HandleHeap::heapFor(slot())->writeBarrier(slot(), value);
+ *slot() = value;
+ }
+};
+
+template<class T> inline void swap(Strong<T>& a, Strong<T>& b)
+{
+ a.swap(b);
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct VectorTraits<JSC::Strong<T> > : SimpleClassVectorTraits {
+ static const bool canCompareWithMemcmp = false;
+};
+
+template<typename P> struct HashTraits<JSC::Strong<P> > : SimpleClassHashTraits<JSC::Strong<P> > { };
+
+}
+
+#endif // Strong_h
diff --git a/Source/JavaScriptCore/heap/StrongInlines.h b/Source/JavaScriptCore/heap/StrongInlines.h
new file mode 100644
index 000000000..46049096a
--- /dev/null
+++ b/Source/JavaScriptCore/heap/StrongInlines.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef StrongInlines_h
+#define StrongInlines_h
+
+namespace JSC {
+
+template <typename T>
+inline Strong<T>::Strong(JSGlobalData& globalData, ExternalType value)
+ : Handle<T>(globalData.heap.handleHeap()->allocate())
+{
+ set(value);
+}
+
+template <typename T>
+inline Strong<T>::Strong(JSGlobalData& globalData, Handle<T> handle)
+ : Handle<T>(globalData.heap.handleHeap()->allocate())
+{
+ set(handle.get());
+}
+
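+// Allocates this handle's slot on first use, so default-constructed Strongs
+// cost nothing until they are actually set.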
+template <typename T>
+inline void Strong<T>::set(JSGlobalData& globalData, ExternalType value)
+{
+ if (!slot())
+ setSlot(globalData.heap.handleHeap()->allocate());
+ set(value);
+}
+
+} // namespace JSC
+
+#endif // StrongInlines_h
diff --git a/Source/JavaScriptCore/heap/TinyBloomFilter.h b/Source/JavaScriptCore/heap/TinyBloomFilter.h
new file mode 100644
index 000000000..82b586309
--- /dev/null
+++ b/Source/JavaScriptCore/heap/TinyBloomFilter.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TinyBloomFilter_h
+#define TinyBloomFilter_h
+
+#include <stdint.h> // for uintptr_t
+
+namespace JSC {
+
+typedef uintptr_t Bits;
+
+class TinyBloomFilter {
+public:
+ TinyBloomFilter();
+
+ void add(Bits);
+    bool ruleOut(Bits) const; // True means the bits were definitely never added; always true for 0.
+
+private:
+ Bits m_bits;
+};
+
+inline TinyBloomFilter::TinyBloomFilter()
+ : m_bits(0)
+{
+}
+
+inline void TinyBloomFilter::add(Bits bits)
+{
+ m_bits |= bits;
+}
+
+inline bool TinyBloomFilter::ruleOut(Bits bits) const
+{
+ if (!bits)
+ return true;
+
+ if ((bits & m_bits) != bits)
+ return true;
+
+ return false;
+}
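+
+// Illustrative use (a sketch; 'block' and 'candidate' are hypothetical):
+//
+//     TinyBloomFilter filter;
+//     filter.add(reinterpret_cast<Bits>(block));
+//     ...
+//     if (filter.ruleOut(reinterpret_cast<Bits>(candidate)))
+//         return; // definitely not something we added
+//     // otherwise, fall back to an exact membership test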
+
+} // namespace JSC
+
+#endif // TinyBloomFilter_h
diff --git a/Source/JavaScriptCore/heap/UnconditionalFinalizer.h b/Source/JavaScriptCore/heap/UnconditionalFinalizer.h
new file mode 100644
index 000000000..26029d046
--- /dev/null
+++ b/Source/JavaScriptCore/heap/UnconditionalFinalizer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UnconditionalFinalizer_h
+#define UnconditionalFinalizer_h
+
+#include "ListableHandler.h"
+
+namespace JSC {
+
+// An unconditional finalizer is useful for caches that you would like to
+// destroy on each GC. This is currently used for the bytecode stream
+// associated with each CodeBlock.
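+//
+// A hypothetical client (a sketch, not actual JSC code):
+//
+//     class BytecodeCache : public UnconditionalFinalizer {
+//         virtual void finalizeUnconditionally() { m_cachedStream.clear(); }
+//     };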
+
+class UnconditionalFinalizer : public ListableHandler<UnconditionalFinalizer> {
+public:
+ virtual void finalizeUnconditionally() = 0;
+
+protected:
+ virtual ~UnconditionalFinalizer() { }
+};
+
+}
+
+#endif // UnconditionalFinalizer_h
diff --git a/Source/JavaScriptCore/heap/VTableSpectrum.cpp b/Source/JavaScriptCore/heap/VTableSpectrum.cpp
new file mode 100644
index 000000000..acb494034
--- /dev/null
+++ b/Source/JavaScriptCore/heap/VTableSpectrum.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "VTableSpectrum.h"
+
+#include "JSObject.h"
+#include "Structure.h"
+#include <algorithm>
+#include <stdio.h>
+#include <string.h>
+#include <wtf/Platform.h>
+#include <wtf/Vector.h>
+
+#if PLATFORM(MAC)
+#include <dlfcn.h>
+#endif
+
+namespace JSC {
+
+VTableSpectrum::VTableSpectrum()
+{
+}
+
+VTableSpectrum::~VTableSpectrum()
+{
+}
+
+void VTableSpectrum::countVPtr(void* vTablePointer)
+{
+ add(vTablePointer);
+}
+
+void VTableSpectrum::count(JSCell* cell)
+{
+ // FIXME: we need to change this class to count ClassInfos rather than vptrs
+ UNUSED_PARAM(cell);
+}
+
+void VTableSpectrum::dump(FILE* output, const char* comment)
+{
+ fprintf(output, "%s:\n", comment);
+
+ Vector<KeyAndCount> list = buildList();
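+    // buildList() returns entries sorted by ascending count, so we walk it
+    // backwards to print the most common vtable pointers first.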
+
+ for (size_t index = list.size(); index-- > 0;) {
+ KeyAndCount item = list.at(index);
+#if PLATFORM(MAC)
+ Dl_info info;
+ if (dladdr(item.key, &info)) {
+            const char* findResult = strrchr(info.dli_fname, '/');
+ const char* strippedFileName;
+
+ if (findResult)
+ strippedFileName = findResult + 1;
+ else
+ strippedFileName = info.dli_fname;
+
+ fprintf(output, " %s:%s(%p): %lu\n", strippedFileName, info.dli_sname, item.key, item.count);
+ continue;
+ }
+#endif
+ fprintf(output, " %p: %lu\n", item.key, item.count);
+ }
+
+ fflush(output);
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/VTableSpectrum.h b/Source/JavaScriptCore/heap/VTableSpectrum.h
new file mode 100644
index 000000000..8a9737e9b
--- /dev/null
+++ b/Source/JavaScriptCore/heap/VTableSpectrum.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VTableSpectrum_h
+#define VTableSpectrum_h
+
+#include <stdio.h>
+#include <wtf/Spectrum.h>
+
+namespace JSC {
+
+class JSCell;
+
+class VTableSpectrum : Spectrum<void*> {
+public:
+ VTableSpectrum();
+ ~VTableSpectrum();
+
+ void countVPtr(void*);
+ void count(JSCell*);
+
+ void dump(FILE* output, const char* comment);
+};
+
+} // namespace JSC
+
+#endif // VTableSpectrum_h
diff --git a/Source/JavaScriptCore/heap/Weak.h b/Source/JavaScriptCore/heap/Weak.h
new file mode 100644
index 000000000..f0c028d71
--- /dev/null
+++ b/Source/JavaScriptCore/heap/Weak.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Weak_h
+#define Weak_h
+
+#include "Assertions.h"
+#include "Handle.h"
+#include "HandleHeap.h"
+#include "JSGlobalData.h"
+
+namespace JSC {
+
+// A weakly referenced handle that becomes 0 when the value it points to is garbage collected.
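+//
+// Typical use (a sketch; 'globalData' stands for the owning VM's JSGlobalData):
+//
+//     Weak<JSObject> cachedObject(globalData, object);
+//     ...
+//     if (!cachedObject.get()) {
+//         // 'object' has been collected; recompute or bail out.
+//     }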
+template <typename T> class Weak : public Handle<T> {
+ using Handle<T>::slot;
+ using Handle<T>::setSlot;
+
+public:
+ typedef typename Handle<T>::ExternalType ExternalType;
+
+ Weak()
+ : Handle<T>()
+ {
+ }
+
+ Weak(JSGlobalData& globalData, ExternalType value = ExternalType(), WeakHandleOwner* weakOwner = 0, void* context = 0)
+ : Handle<T>(globalData.heap.handleHeap()->allocate())
+ {
+ HandleHeap::heapFor(slot())->makeWeak(slot(), weakOwner, context);
+ set(value);
+ }
+
+ enum AdoptTag { Adopt };
+ template<typename U> Weak(AdoptTag, Handle<U> handle)
+ : Handle<T>(handle.slot())
+ {
+ validateCell(get());
+ }
+
+ Weak(const Weak& other)
+ : Handle<T>()
+ {
+ if (!other.slot())
+ return;
+ setSlot(HandleHeap::heapFor(other.slot())->copyWeak(other.slot()));
+ }
+
+ template <typename U> Weak(const Weak<U>& other)
+ : Handle<T>()
+ {
+ if (!other.slot())
+ return;
+ setSlot(HandleHeap::heapFor(other.slot())->copyWeak(other.slot()));
+ }
+
+ enum HashTableDeletedValueTag { HashTableDeletedValue };
+ bool isHashTableDeletedValue() const { return slot() == hashTableDeletedValue(); }
+ Weak(HashTableDeletedValueTag)
+ : Handle<T>(hashTableDeletedValue())
+ {
+ }
+
+ ~Weak()
+ {
+ clear();
+ }
+
+ void swap(Weak& other)
+ {
+ Handle<T>::swap(other);
+ }
+
+ ExternalType get() const { return HandleTypes<T>::getFromSlot(slot()); }
+
+ void clear()
+ {
+ if (!slot())
+ return;
+ HandleHeap::heapFor(slot())->deallocate(slot());
+ setSlot(0);
+ }
+
+ void set(JSGlobalData& globalData, ExternalType value, WeakHandleOwner* weakOwner = 0, void* context = 0)
+ {
+ if (!slot()) {
+ setSlot(globalData.heap.handleHeap()->allocate());
+ HandleHeap::heapFor(slot())->makeWeak(slot(), weakOwner, context);
+ }
+ ASSERT(HandleHeap::heapFor(slot())->hasWeakOwner(slot(), weakOwner));
+ set(value);
+ }
+
+ template <typename U> Weak& operator=(const Weak<U>& other)
+ {
+ clear();
+ if (other.slot())
+ setSlot(HandleHeap::heapFor(other.slot())->copyWeak(other.slot()));
+ return *this;
+ }
+
+ Weak& operator=(const Weak& other)
+ {
+ clear();
+ if (other.slot())
+ setSlot(HandleHeap::heapFor(other.slot())->copyWeak(other.slot()));
+ return *this;
+ }
+
+ HandleSlot leakHandle()
+ {
+ ASSERT(HandleHeap::heapFor(slot())->hasFinalizer(slot()));
+ HandleSlot result = slot();
+ setSlot(0);
+ return result;
+ }
+
+private:
+ static HandleSlot hashTableDeletedValue() { return reinterpret_cast<HandleSlot>(-1); }
+
+ void set(ExternalType externalType)
+ {
+ ASSERT(slot());
+ JSValue value = HandleTypes<T>::toJSValue(externalType);
+ HandleHeap::heapFor(slot())->writeBarrier(slot(), value);
+ *slot() = value;
+ }
+};
+
+template<class T> inline void swap(Weak<T>& a, Weak<T>& b)
+{
+ a.swap(b);
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct VectorTraits<JSC::Weak<T> > : SimpleClassVectorTraits {
+ static const bool canCompareWithMemcmp = false;
+};
+
+template<typename P> struct HashTraits<JSC::Weak<P> > : SimpleClassHashTraits<JSC::Weak<P> > { };
+
+}
+
+#endif // Weak_h
diff --git a/Source/JavaScriptCore/heap/WeakReferenceHarvester.h b/Source/JavaScriptCore/heap/WeakReferenceHarvester.h
new file mode 100644
index 000000000..90b4deed0
--- /dev/null
+++ b/Source/JavaScriptCore/heap/WeakReferenceHarvester.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef WeakReferenceHarvester_h
+#define WeakReferenceHarvester_h
+
+#include "ListableHandler.h"
+
+namespace JSC {
+
+class MarkStack;
+class MarkStackThreadSharedData;
+class SlotVisitor;
+
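+// A weak reference harvester is called back during marking so it can visit its
+// weak references and keep alive the ones whose targets turned out to be
+// marked. A hypothetical client (a sketch):
+//
+//     class WeakCache : public WeakReferenceHarvester {
+//         virtual void visitWeakReferences(SlotVisitor& visitor) { /* ... */ }
+//     };
+//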
+class WeakReferenceHarvester : public ListableHandler<WeakReferenceHarvester> {
+public:
+ virtual void visitWeakReferences(SlotVisitor&) = 0;
+
+protected:
+ WeakReferenceHarvester()
+ {
+ }
+
+ virtual ~WeakReferenceHarvester() { }
+};
+
+} // namespace JSC
+
+#endif // WeakReferenceHarvester_h
diff --git a/Source/JavaScriptCore/heap/WriteBarrierSupport.cpp b/Source/JavaScriptCore/heap/WriteBarrierSupport.cpp
new file mode 100644
index 000000000..5ca33c861
--- /dev/null
+++ b/Source/JavaScriptCore/heap/WriteBarrierSupport.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "WriteBarrierSupport.h"
+
+namespace JSC {
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+GlobalSamplingCounter WriteBarrierCounters::usesWithBarrierFromCpp;
+GlobalSamplingCounter WriteBarrierCounters::usesWithoutBarrierFromCpp;
+GlobalSamplingCounter WriteBarrierCounters::usesWithBarrierFromJit;
+GlobalSamplingCounter WriteBarrierCounters::usesForPropertiesFromJit;
+GlobalSamplingCounter WriteBarrierCounters::usesForVariablesFromJit;
+GlobalSamplingCounter WriteBarrierCounters::usesWithoutBarrierFromJit;
+
+void WriteBarrierCounters::initialize()
+{
+ usesWithBarrierFromCpp.name("WithBarrierFromCpp");
+ usesWithoutBarrierFromCpp.name("WithoutBarrierFromCpp");
+ usesWithBarrierFromJit.name("WithBarrierFromJit");
+    usesForPropertiesFromJit.name("ForPropertiesFromJit");
+    usesForVariablesFromJit.name("ForVariablesFromJit");
+ usesWithoutBarrierFromJit.name("WithoutBarrierFromJit");
+}
+#else
+char WriteBarrierCounters::usesWithBarrierFromCpp;
+char WriteBarrierCounters::usesWithoutBarrierFromCpp;
+#endif
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/WriteBarrierSupport.h b/Source/JavaScriptCore/heap/WriteBarrierSupport.h
new file mode 100644
index 000000000..00b9bb97f
--- /dev/null
+++ b/Source/JavaScriptCore/heap/WriteBarrierSupport.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WriteBarrierSupport_h
+#define WriteBarrierSupport_h
+
+#include "SamplingCounter.h"
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+// This allows the JIT to distinguish the kind of write a barrier protects. The
+// JIT currently uses it for profiling; it could also let the GC implementation
+// specialize the JIT's write barrier code for different kinds of target
+// objects.
+enum WriteBarrierUseKind {
+ // This allows specialization for access to the property storage (either
+ // array element or property), but not for any other kind of property
+ // accesses (such as writes that are a consequence of setter execution).
+ WriteBarrierForPropertyAccess,
+
+ // This allows specialization for variable accesses (such as global or
+ // scoped variables).
+ WriteBarrierForVariableAccess,
+
+    // This captures all other forms of write barrier. It should always be
+    // correct to use a generic access write barrier, even when storing to
+    // properties, so when specialization is unnecessary, prefer the generic
+    // form.
+ WriteBarrierForGenericAccess
+};
+
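+// For example (in profiling builds only; otherwise these hooks compile away), a
+// C++ barrier site pairs its store with WriteBarrierCounters::countWriteBarrier(),
+// while the JIT increments the counter returned by jitCounterFor(useKind) next
+// to the barrier code it emits.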
+class WriteBarrierCounters {
+private:
+ WriteBarrierCounters() { }
+
+public:
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ static GlobalSamplingCounter usesWithBarrierFromCpp;
+ static GlobalSamplingCounter usesWithoutBarrierFromCpp;
+ static GlobalSamplingCounter usesWithBarrierFromJit;
+ static GlobalSamplingCounter usesForPropertiesFromJit;
+ static GlobalSamplingCounter usesForVariablesFromJit;
+ static GlobalSamplingCounter usesWithoutBarrierFromJit;
+
+ static void initialize();
+
+ static GlobalSamplingCounter& jitCounterFor(WriteBarrierUseKind useKind)
+ {
+ switch (useKind) {
+ case WriteBarrierForPropertyAccess:
+ return usesForPropertiesFromJit;
+ case WriteBarrierForVariableAccess:
+ return usesForVariablesFromJit;
+ default:
+ ASSERT(useKind == WriteBarrierForGenericAccess);
+ return usesWithBarrierFromJit;
+ }
+ }
+#else
+ // These are necessary to work around not having conditional exports.
+ static char usesWithBarrierFromCpp;
+ static char usesWithoutBarrierFromCpp;
+#endif // ENABLE(WRITE_BARRIER_PROFILING)
+
+ static void countWriteBarrier()
+ {
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ WriteBarrierCounters::usesWithBarrierFromCpp.count();
+#endif
+ }
+};
+
+} // namespace JSC
+
+#endif // WriteBarrierSupport_h
+