path: root/Source/JavaScriptCore/heap
author     Oswald Buddenhagen <oswald.buddenhagen@qt.io>  2017-05-30 12:48:17 +0200
committer  Oswald Buddenhagen <oswald.buddenhagen@qt.io>  2017-05-30 12:48:17 +0200
commit     881da28418d380042aa95a97f0cbd42560a64f7c (patch)
tree       a794dff3274695e99c651902dde93d934ea7a5af /Source/JavaScriptCore/heap
parent     7e104c57a70fdf551bb3d22a5d637cdcbc69dbea (diff)
parent     0fcedcd17cc00d3dd44c718b3cb36c1033319671 (diff)
download   qtwebkit-881da28418d380042aa95a97f0cbd42560a64f7c.tar.gz
Merge 'wip/next' into dev
Change-Id: Iff9ee5e23bb326c4371ec8ed81d56f2f05d680e9
Diffstat (limited to 'Source/JavaScriptCore/heap')
-rw-r--r--  Source/JavaScriptCore/heap/BlockAllocator.cpp  168
-rw-r--r--  Source/JavaScriptCore/heap/BlockAllocator.h  295
-rw-r--r--  Source/JavaScriptCore/heap/CellState.h  60
-rw-r--r--  Source/JavaScriptCore/heap/CodeBlockSet.cpp  142
-rw-r--r--  Source/JavaScriptCore/heap/CodeBlockSet.h  115
-rw-r--r--  Source/JavaScriptCore/heap/ConservativeRoots.cpp  57
-rw-r--r--  Source/JavaScriptCore/heap/ConservativeRoots.h  11
-rw-r--r--  Source/JavaScriptCore/heap/CopiedAllocator.h  12
-rw-r--r--  Source/JavaScriptCore/heap/CopiedBlock.cpp  94
-rw-r--r--  Source/JavaScriptCore/heap/CopiedBlock.h  120
-rw-r--r--  Source/JavaScriptCore/heap/CopiedBlockInlines.h  33
-rw-r--r--  Source/JavaScriptCore/heap/CopiedSpace.cpp  221
-rw-r--r--  Source/JavaScriptCore/heap/CopiedSpace.h  65
-rw-r--r--  Source/JavaScriptCore/heap/CopiedSpaceInlines.h  116
-rw-r--r--  Source/JavaScriptCore/heap/CopyBarrier.h  198
-rw-r--r--  Source/JavaScriptCore/heap/CopyToken.h  40
-rw-r--r--  Source/JavaScriptCore/heap/CopyVisitor.cpp  41
-rw-r--r--  Source/JavaScriptCore/heap/CopyVisitor.h  17
-rw-r--r--  Source/JavaScriptCore/heap/CopyVisitorInlines.h  37
-rw-r--r--  Source/JavaScriptCore/heap/CopyWorkList.h  79
-rw-r--r--  Source/JavaScriptCore/heap/DFGCodeBlocks.cpp  102
-rw-r--r--  Source/JavaScriptCore/heap/DFGCodeBlocks.h  99
-rw-r--r--  Source/JavaScriptCore/heap/DeferGC.cpp  37
-rw-r--r--  Source/JavaScriptCore/heap/DeferGC.h  101
-rw-r--r--  Source/JavaScriptCore/heap/EdenGCActivityCallback.cpp  99
-rw-r--r--  Source/JavaScriptCore/heap/EdenGCActivityCallback.h  59
-rw-r--r--  Source/JavaScriptCore/heap/FullGCActivityCallback.cpp  115
-rw-r--r--  Source/JavaScriptCore/heap/FullGCActivityCallback.h  64
-rw-r--r--  Source/JavaScriptCore/heap/GCActivityCallback.cpp  224
-rw-r--r--  Source/JavaScriptCore/heap/GCActivityCallback.h  130
-rw-r--r--  Source/JavaScriptCore/heap/GCAssertions.h  22
-rw-r--r--  Source/JavaScriptCore/heap/GCIncomingRefCounted.h  115
-rw-r--r--  Source/JavaScriptCore/heap/GCIncomingRefCountedInlines.h  130
-rw-r--r--  Source/JavaScriptCore/heap/GCIncomingRefCountedSet.h (renamed from Source/JavaScriptCore/heap/HeapBlock.h)  62
-rw-r--r--  Source/JavaScriptCore/heap/GCIncomingRefCountedSetInlines.h  92
-rw-r--r--  Source/JavaScriptCore/heap/GCLogging.cpp  139
-rw-r--r--  Source/JavaScriptCore/heap/GCLogging.h  59
-rw-r--r--  Source/JavaScriptCore/heap/GCSegmentedArray.h  167
-rw-r--r--  Source/JavaScriptCore/heap/GCSegmentedArrayInlines.h  230
-rw-r--r--  Source/JavaScriptCore/heap/GCThread.cpp  138
-rw-r--r--  Source/JavaScriptCore/heap/GCThreadSharedData.cpp  188
-rw-r--r--  Source/JavaScriptCore/heap/GCThreadSharedData.h  122
-rw-r--r--  Source/JavaScriptCore/heap/Handle.h  4
-rw-r--r--  Source/JavaScriptCore/heap/HandleBlock.h  13
-rw-r--r--  Source/JavaScriptCore/heap/HandleBlockInlines.h  19
-rw-r--r--  Source/JavaScriptCore/heap/HandleSet.cpp  11
-rw-r--r--  Source/JavaScriptCore/heap/HandleSet.h  12
-rw-r--r--  Source/JavaScriptCore/heap/HandleStack.cpp  2
-rw-r--r--  Source/JavaScriptCore/heap/HandleStack.h  2
-rw-r--r--  Source/JavaScriptCore/heap/Heap.cpp  1555
-rw-r--r--  Source/JavaScriptCore/heap/Heap.h  762
-rw-r--r--  Source/JavaScriptCore/heap/HeapHelperPool.cpp  47
-rw-r--r--  Source/JavaScriptCore/heap/HeapHelperPool.h (renamed from Source/JavaScriptCore/heap/VTableSpectrum.h)  24
-rw-r--r--  Source/JavaScriptCore/heap/HeapInlines.h  358
-rw-r--r--  Source/JavaScriptCore/heap/HeapIterationScope.h (renamed from Source/JavaScriptCore/heap/SuperRegion.h)  42
-rw-r--r--  Source/JavaScriptCore/heap/HeapObserver.h  42
-rw-r--r--  Source/JavaScriptCore/heap/HeapOperation.h  35
-rw-r--r--  Source/JavaScriptCore/heap/HeapRootVisitor.h  9
-rw-r--r--  Source/JavaScriptCore/heap/HeapStatistics.cpp  56
-rw-r--r--  Source/JavaScriptCore/heap/HeapStatistics.h  9
-rw-r--r--  Source/JavaScriptCore/heap/HeapTimer.cpp  99
-rw-r--r--  Source/JavaScriptCore/heap/HeapTimer.h  24
-rw-r--r--  Source/JavaScriptCore/heap/HeapVerifier.cpp  290
-rw-r--r--  Source/JavaScriptCore/heap/HeapVerifier.h  104
-rw-r--r--  Source/JavaScriptCore/heap/IncrementalSweeper.cpp  99
-rw-r--r--  Source/JavaScriptCore/heap/IncrementalSweeper.h  33
-rw-r--r--  Source/JavaScriptCore/heap/JITStubRoutineSet.cpp  2
-rw-r--r--  Source/JavaScriptCore/heap/JITStubRoutineSet.h  4
-rw-r--r--  Source/JavaScriptCore/heap/ListableHandler.h  13
-rw-r--r--  Source/JavaScriptCore/heap/LiveObjectData.h  47
-rw-r--r--  Source/JavaScriptCore/heap/LiveObjectList.cpp  41
-rw-r--r--  Source/JavaScriptCore/heap/LiveObjectList.h  57
-rw-r--r--  Source/JavaScriptCore/heap/Local.h  2
-rw-r--r--  Source/JavaScriptCore/heap/MachineStackMarker.cpp  985
-rw-r--r--  Source/JavaScriptCore/heap/MachineStackMarker.h  169
-rw-r--r--  Source/JavaScriptCore/heap/MarkStack.cpp  78
-rw-r--r--  Source/JavaScriptCore/heap/MarkStack.h  90
-rw-r--r--  Source/JavaScriptCore/heap/MarkStackInlines.h  119
-rw-r--r--  Source/JavaScriptCore/heap/MarkedAllocator.cpp  175
-rw-r--r--  Source/JavaScriptCore/heap/MarkedAllocator.h  91
-rw-r--r--  Source/JavaScriptCore/heap/MarkedBlock.cpp  166
-rw-r--r--  Source/JavaScriptCore/heap/MarkedBlock.h  163
-rw-r--r--  Source/JavaScriptCore/heap/MarkedBlockSet.h  2
-rw-r--r--  Source/JavaScriptCore/heap/MarkedSpace.cpp  266
-rw-r--r--  Source/JavaScriptCore/heap/MarkedSpace.h  192
-rw-r--r--  Source/JavaScriptCore/heap/OpaqueRootSet.h  94
-rw-r--r--  Source/JavaScriptCore/heap/PassWeak.h  203
-rw-r--r--  Source/JavaScriptCore/heap/Region.h  319
-rw-r--r--  Source/JavaScriptCore/heap/SlotVisitor.cpp  450
-rw-r--r--  Source/JavaScriptCore/heap/SlotVisitor.h  82
-rw-r--r--  Source/JavaScriptCore/heap/SlotVisitorInlines.h  141
-rw-r--r--  Source/JavaScriptCore/heap/Strong.h  8
-rw-r--r--  Source/JavaScriptCore/heap/TinyBloomFilter.h  6
-rw-r--r--  Source/JavaScriptCore/heap/UnconditionalFinalizer.h  3
-rw-r--r--  Source/JavaScriptCore/heap/VTableSpectrum.cpp  89
-rw-r--r--  Source/JavaScriptCore/heap/Weak.cpp  1
-rw-r--r--  Source/JavaScriptCore/heap/Weak.h  21
-rw-r--r--  Source/JavaScriptCore/heap/WeakBlock.cpp  49
-rw-r--r--  Source/JavaScriptCore/heap/WeakBlock.h  40
-rw-r--r--  Source/JavaScriptCore/heap/WeakHandleOwner.cpp  2
-rw-r--r--  Source/JavaScriptCore/heap/WeakInlines.h  39
-rw-r--r--  Source/JavaScriptCore/heap/WeakReferenceHarvester.h  2
-rw-r--r--  Source/JavaScriptCore/heap/WeakSet.cpp  21
-rw-r--r--  Source/JavaScriptCore/heap/WeakSet.h  9
-rw-r--r--  Source/JavaScriptCore/heap/WriteBarrierBuffer.cpp (renamed from Source/JavaScriptCore/heap/SuperRegion.cpp)  59
-rw-r--r--  Source/JavaScriptCore/heap/WriteBarrierBuffer.h (renamed from Source/JavaScriptCore/heap/GCThread.h)  55
-rw-r--r--  Source/JavaScriptCore/heap/WriteBarrierSupport.cpp  2
107 files changed, 7984 insertions, 4439 deletions
diff --git a/Source/JavaScriptCore/heap/BlockAllocator.cpp b/Source/JavaScriptCore/heap/BlockAllocator.cpp
deleted file mode 100644
index aebee6a4e..000000000
--- a/Source/JavaScriptCore/heap/BlockAllocator.cpp
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "BlockAllocator.h"
-
-#include "CopiedBlock.h"
-#include "CopyWorkList.h"
-#include "MarkedBlock.h"
-#include "WeakBlock.h"
-#include <wtf/CurrentTime.h>
-
-namespace JSC {
-
-BlockAllocator::BlockAllocator()
- : m_superRegion()
- , m_copiedRegionSet(CopiedBlock::blockSize)
- , m_markedRegionSet(MarkedBlock::blockSize)
- , m_fourKBBlockRegionSet(WeakBlock::blockSize)
- , m_workListRegionSet(CopyWorkListSegment::blockSize)
- , m_numberOfEmptyRegions(0)
- , m_isCurrentlyAllocating(false)
- , m_blockFreeingThreadShouldQuit(false)
- , m_blockFreeingThread(createThread(blockFreeingThreadStartFunc, this, "JavaScriptCore::BlockFree"))
-{
- RELEASE_ASSERT(m_blockFreeingThread);
- m_regionLock.Init();
-}
-
-BlockAllocator::~BlockAllocator()
-{
- releaseFreeRegions();
- {
- MutexLocker locker(m_emptyRegionConditionLock);
- m_blockFreeingThreadShouldQuit = true;
- m_emptyRegionCondition.broadcast();
- }
- waitForThreadCompletion(m_blockFreeingThread);
- ASSERT(allRegionSetsAreEmpty());
- ASSERT(m_emptyRegions.isEmpty());
-}
-
-bool BlockAllocator::allRegionSetsAreEmpty() const
-{
- return m_copiedRegionSet.isEmpty()
- && m_markedRegionSet.isEmpty()
- && m_fourKBBlockRegionSet.isEmpty()
- && m_workListRegionSet.isEmpty();
-}
-
-void BlockAllocator::releaseFreeRegions()
-{
- while (true) {
- Region* region;
- {
- SpinLockHolder locker(&m_regionLock);
- if (!m_numberOfEmptyRegions)
- region = 0;
- else {
- region = m_emptyRegions.removeHead();
- RELEASE_ASSERT(region);
- m_numberOfEmptyRegions--;
- }
- }
-
- if (!region)
- break;
-
- region->destroy();
- }
-}
-
-void BlockAllocator::waitForRelativeTimeWhileHoldingLock(double relative)
-{
- if (m_blockFreeingThreadShouldQuit)
- return;
-
- m_emptyRegionCondition.timedWait(m_emptyRegionConditionLock, currentTime() + relative);
-}
-
-void BlockAllocator::waitForRelativeTime(double relative)
-{
- // If this returns early, that's fine, so long as it doesn't do it too
- // frequently. It would only be a bug if this function failed to return
- // when it was asked to do so.
-
- MutexLocker locker(m_emptyRegionConditionLock);
- waitForRelativeTimeWhileHoldingLock(relative);
-}
-
-void BlockAllocator::blockFreeingThreadStartFunc(void* blockAllocator)
-{
- static_cast<BlockAllocator*>(blockAllocator)->blockFreeingThreadMain();
-}
-
-void BlockAllocator::blockFreeingThreadMain()
-{
- size_t currentNumberOfEmptyRegions;
- while (!m_blockFreeingThreadShouldQuit) {
- // Generally wait for one second before scavenging free blocks. This
- // may return early, particularly when we're being asked to quit.
- waitForRelativeTime(1.0);
- if (m_blockFreeingThreadShouldQuit)
- break;
-
- if (m_isCurrentlyAllocating) {
- m_isCurrentlyAllocating = false;
- continue;
- }
-
- // Sleep until there is actually work to do rather than waking up every second to check.
- {
- MutexLocker locker(m_emptyRegionConditionLock);
- SpinLockHolder regionLocker(&m_regionLock);
- while (!m_numberOfEmptyRegions && !m_blockFreeingThreadShouldQuit) {
- m_regionLock.Unlock();
- m_emptyRegionCondition.wait(m_emptyRegionConditionLock);
- m_regionLock.Lock();
- }
- currentNumberOfEmptyRegions = m_numberOfEmptyRegions;
- }
-
- size_t desiredNumberOfEmptyRegions = currentNumberOfEmptyRegions / 2;
-
- while (!m_blockFreeingThreadShouldQuit) {
- Region* region;
- {
- SpinLockHolder locker(&m_regionLock);
- if (m_numberOfEmptyRegions <= desiredNumberOfEmptyRegions)
- region = 0;
- else {
- region = m_emptyRegions.removeHead();
- RELEASE_ASSERT(region);
- m_numberOfEmptyRegions--;
- }
- }
-
- if (!region)
- break;
-
- region->destroy();
- }
- }
-}
-
-} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/BlockAllocator.h b/Source/JavaScriptCore/heap/BlockAllocator.h
deleted file mode 100644
index afd3259fe..000000000
--- a/Source/JavaScriptCore/heap/BlockAllocator.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef BlockAllocator_h
-#define BlockAllocator_h
-
-#include "HeapBlock.h"
-#include "Region.h"
-#include <wtf/DoublyLinkedList.h>
-#include <wtf/Forward.h>
-#include <wtf/PageAllocationAligned.h>
-#include <wtf/TCSpinLock.h>
-#include <wtf/Threading.h>
-
-namespace JSC {
-
-class BlockAllocator;
-class CopiedBlock;
-class CopyWorkListSegment;
-class HandleBlock;
-class VM;
-class MarkStackSegment;
-class MarkedBlock;
-class WeakBlock;
-
-// Simple allocator to reduce VM cost by holding onto blocks of memory for
-// short periods of time and then freeing them on a secondary thread.
-
-class BlockAllocator {
-public:
- BlockAllocator();
- ~BlockAllocator();
-
- template <typename T> DeadBlock* allocate();
- DeadBlock* allocateCustomSize(size_t blockSize, size_t blockAlignment);
- template <typename T> void deallocate(T*);
- template <typename T> void deallocateCustomSize(T*);
-
-private:
- void waitForRelativeTimeWhileHoldingLock(double relative);
- void waitForRelativeTime(double relative);
-
- void blockFreeingThreadMain();
- static void blockFreeingThreadStartFunc(void* heap);
-
- struct RegionSet {
- RegionSet(size_t blockSize)
- : m_numberOfPartialRegions(0)
- , m_blockSize(blockSize)
- {
- }
-
- bool isEmpty() const
- {
- return m_fullRegions.isEmpty() && m_partialRegions.isEmpty();
- }
-
- DoublyLinkedList<Region> m_fullRegions;
- DoublyLinkedList<Region> m_partialRegions;
- size_t m_numberOfPartialRegions;
- size_t m_blockSize;
- };
-
- DeadBlock* tryAllocateFromRegion(RegionSet&, DoublyLinkedList<Region>&, size_t&);
-
- bool allRegionSetsAreEmpty() const;
- void releaseFreeRegions();
-
- template <typename T> RegionSet& regionSetFor();
-
- SuperRegion m_superRegion;
- RegionSet m_copiedRegionSet;
- RegionSet m_markedRegionSet;
- // WeakBlocks and MarkStackSegments use the same RegionSet since they're the same size.
- RegionSet m_fourKBBlockRegionSet;
- RegionSet m_workListRegionSet;
-
- DoublyLinkedList<Region> m_emptyRegions;
- size_t m_numberOfEmptyRegions;
-
- bool m_isCurrentlyAllocating;
- bool m_blockFreeingThreadShouldQuit;
- SpinLock m_regionLock;
- Mutex m_emptyRegionConditionLock;
- ThreadCondition m_emptyRegionCondition;
- ThreadIdentifier m_blockFreeingThread;
-};
-
-inline DeadBlock* BlockAllocator::tryAllocateFromRegion(RegionSet& set, DoublyLinkedList<Region>& regions, size_t& numberOfRegions)
-{
- if (numberOfRegions) {
- ASSERT(!regions.isEmpty());
- Region* region = regions.head();
- ASSERT(!region->isFull());
-
- if (region->isEmpty()) {
- ASSERT(region == m_emptyRegions.head());
- m_numberOfEmptyRegions--;
- set.m_numberOfPartialRegions++;
- region = m_emptyRegions.removeHead()->reset(set.m_blockSize);
- set.m_partialRegions.push(region);
- }
-
- DeadBlock* block = region->allocate();
-
- if (region->isFull()) {
- set.m_numberOfPartialRegions--;
- set.m_fullRegions.push(set.m_partialRegions.removeHead());
- }
-
- return block;
- }
- return 0;
-}
-
-template<typename T>
-inline DeadBlock* BlockAllocator::allocate()
-{
- RegionSet& set = regionSetFor<T>();
- DeadBlock* block;
- m_isCurrentlyAllocating = true;
- {
- SpinLockHolder locker(&m_regionLock);
- if ((block = tryAllocateFromRegion(set, set.m_partialRegions, set.m_numberOfPartialRegions)))
- return block;
- if ((block = tryAllocateFromRegion(set, m_emptyRegions, m_numberOfEmptyRegions)))
- return block;
- }
-
- Region* newRegion = Region::create(&m_superRegion, T::blockSize);
-
- SpinLockHolder locker(&m_regionLock);
- m_emptyRegions.push(newRegion);
- m_numberOfEmptyRegions++;
- block = tryAllocateFromRegion(set, m_emptyRegions, m_numberOfEmptyRegions);
- ASSERT(block);
- return block;
-}
-
-inline DeadBlock* BlockAllocator::allocateCustomSize(size_t blockSize, size_t blockAlignment)
-{
- size_t realSize = WTF::roundUpToMultipleOf(blockAlignment, blockSize);
- Region* newRegion = Region::createCustomSize(&m_superRegion, realSize, blockAlignment);
- DeadBlock* block = newRegion->allocate();
- ASSERT(block);
- return block;
-}
-
-template<typename T>
-inline void BlockAllocator::deallocate(T* block)
-{
- RegionSet& set = regionSetFor<T>();
- bool shouldWakeBlockFreeingThread = false;
- {
- SpinLockHolder locker(&m_regionLock);
- Region* region = block->region();
- ASSERT(!region->isEmpty());
- if (region->isFull())
- set.m_fullRegions.remove(region);
- else {
- set.m_partialRegions.remove(region);
- set.m_numberOfPartialRegions--;
- }
-
- region->deallocate(block);
-
- if (region->isEmpty()) {
- m_emptyRegions.push(region);
- shouldWakeBlockFreeingThread = !m_numberOfEmptyRegions;
- m_numberOfEmptyRegions++;
- } else {
- set.m_partialRegions.push(region);
- set.m_numberOfPartialRegions++;
- }
- }
-
- if (shouldWakeBlockFreeingThread) {
- MutexLocker mutexLocker(m_emptyRegionConditionLock);
- m_emptyRegionCondition.signal();
- }
-}
-
-template<typename T>
-inline void BlockAllocator::deallocateCustomSize(T* block)
-{
- Region* region = block->region();
- ASSERT(region->isCustomSize());
- region->deallocate(block);
- region->destroy();
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<CopiedBlock>()
-{
- return m_copiedRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<MarkedBlock>()
-{
- return m_markedRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<WeakBlock>()
-{
- return m_fourKBBlockRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<MarkStackSegment>()
-{
- return m_fourKBBlockRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<CopyWorkListSegment>()
-{
- return m_workListRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HandleBlock>()
-{
- return m_fourKBBlockRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<CopiedBlock> >()
-{
- return m_copiedRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<MarkedBlock> >()
-{
- return m_markedRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<WeakBlock> >()
-{
- return m_fourKBBlockRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<MarkStackSegment> >()
-{
- return m_fourKBBlockRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<CopyWorkListSegment> >()
-{
- return m_workListRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<HandleBlock> >()
-{
- return m_fourKBBlockRegionSet;
-}
-
-template <typename T>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor()
-{
- RELEASE_ASSERT_NOT_REACHED();
- return *(RegionSet*)0;
-}
-
-} // namespace JSC
-
-#endif // BlockAllocator_h
diff --git a/Source/JavaScriptCore/heap/CellState.h b/Source/JavaScriptCore/heap/CellState.h
new file mode 100644
index 000000000..edc716cd0
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CellState.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CellState_h
+#define CellState_h
+
+namespace JSC {
+
+enum class CellState : uint8_t {
+ // The object is black as far as this GC is concerned. When not in GC, this just means that it's an
+ // old gen object. Note that we deliberately arrange OldBlack to be zero, so that the store barrier on
+ // a target object "from" is just:
+ //
+ // if (!from->cellState())
+ // slowPath(from);
+ //
+ // There is a bunch of code in the LLInt and JITs that rely on this being the case. You'd have to
+ // change a lot of code if you ever wanted the store barrier to be anything but a non-zero check on
+ // cellState.
+ OldBlack = 0,
+
+ // The object is in eden. During GC, this means that the object has not been marked yet.
+ NewWhite = 1,
+
+ // The object is grey - i.e. it will be scanned - but it either belongs to old gen (if this is eden
+ // GC) or it is grey a second time in this current GC (because a concurrent store barrier requested
+ // re-greying).
+ OldGrey = 2,
+
+ // The object is grey - i.e. it will be scanned - and this is the first time in this GC that we are
+ // going to scan it. If this is an eden GC, this also means that the object is in eden.
+ NewGrey = 3
+};
+
+} // namespace JSC
+
+#endif // CellState_h
+
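[Annotation] The comment in CellState.h above relies on OldBlack being the only zero value, so the write barrier's fast path reduces to a single zero test on the state byte. Below is a minimal standalone sketch of that idea; the Cell struct and writeBarrierSlowPath are illustrative stand-ins, not JSC's actual types or API.

    // Standalone sketch: why OldBlack == 0 makes the barrier a plain zero check.
    #include <cstdint>
    #include <cstdio>

    enum class CellState : uint8_t { OldBlack = 0, NewWhite = 1, OldGrey = 2, NewGrey = 3 };

    struct Cell {
        CellState state;
    };

    // Hypothetical slow path: a real collector would re-grey "from" and put it
    // on a mark stack or remembered set so it gets rescanned.
    static void writeBarrierSlowPath(Cell* from)
    {
        from->state = CellState::OldGrey;
        std::printf("re-greyed %p\n", static_cast<void*>(from));
    }

    // Fast path: because OldBlack is the unique zero value, "already black?"
    // is a byte load followed by a branch on zero.
    inline void writeBarrier(Cell* from)
    {
        if (static_cast<uint8_t>(from->state) == 0)
            writeBarrierSlowPath(from);
    }

    int main()
    {
        Cell oldCell { CellState::OldBlack };
        Cell newCell { CellState::NewGrey };
        writeBarrier(&oldCell); // takes the slow path
        writeBarrier(&newCell); // nothing to do
    }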
diff --git a/Source/JavaScriptCore/heap/CodeBlockSet.cpp b/Source/JavaScriptCore/heap/CodeBlockSet.cpp
new file mode 100644
index 000000000..0cfcd1fed
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CodeBlockSet.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CodeBlockSet.h"
+
+#include "CodeBlock.h"
+#include "JSCInlines.h"
+#include "SlotVisitor.h"
+#include <wtf/CommaPrinter.h>
+
+namespace JSC {
+
+static const bool verbose = false;
+
+CodeBlockSet::CodeBlockSet()
+{
+}
+
+CodeBlockSet::~CodeBlockSet()
+{
+}
+
+void CodeBlockSet::add(CodeBlock* codeBlock)
+{
+ LockHolder locker(&m_lock);
+ bool isNewEntry = m_newCodeBlocks.add(codeBlock).isNewEntry;
+ ASSERT_UNUSED(isNewEntry, isNewEntry);
+}
+
+void CodeBlockSet::promoteYoungCodeBlocks(const LockHolder&)
+{
+ ASSERT(m_lock.isLocked());
+ m_oldCodeBlocks.add(m_newCodeBlocks.begin(), m_newCodeBlocks.end());
+ m_newCodeBlocks.clear();
+}
+
+void CodeBlockSet::clearMarksForFullCollection()
+{
+ LockHolder locker(&m_lock);
+ for (CodeBlock* codeBlock : m_oldCodeBlocks)
+ codeBlock->clearVisitWeaklyHasBeenCalled();
+
+ // We promote after we clear marks on the old generation CodeBlocks because
+ // none of the young generations CodeBlocks need to be cleared.
+ promoteYoungCodeBlocks(locker);
+}
+
+void CodeBlockSet::lastChanceToFinalize()
+{
+ LockHolder locker(&m_lock);
+ for (CodeBlock* codeBlock : m_newCodeBlocks)
+ codeBlock->classInfo()->methodTable.destroy(codeBlock);
+
+ for (CodeBlock* codeBlock : m_oldCodeBlocks)
+ codeBlock->classInfo()->methodTable.destroy(codeBlock);
+}
+
+void CodeBlockSet::deleteUnmarkedAndUnreferenced(HeapOperation collectionType)
+{
+ LockHolder locker(&m_lock);
+ HashSet<CodeBlock*>& set = collectionType == EdenCollection ? m_newCodeBlocks : m_oldCodeBlocks;
+ Vector<CodeBlock*> unmarked;
+ for (CodeBlock* codeBlock : set) {
+ if (Heap::isMarked(codeBlock))
+ continue;
+ unmarked.append(codeBlock);
+ }
+
+ for (CodeBlock* codeBlock : unmarked) {
+ codeBlock->classInfo()->methodTable.destroy(codeBlock);
+ set.remove(codeBlock);
+ }
+
+ // Any remaining young CodeBlocks are live and need to be promoted to the set of old CodeBlocks.
+ if (collectionType == EdenCollection)
+ promoteYoungCodeBlocks(locker);
+}
+
+bool CodeBlockSet::contains(const LockHolder&, void* candidateCodeBlock)
+{
+ RELEASE_ASSERT(m_lock.isLocked());
+ CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
+ if (!HashSet<CodeBlock*>::isValidValue(codeBlock))
+ return false;
+ return m_oldCodeBlocks.contains(codeBlock) || m_newCodeBlocks.contains(codeBlock) || m_currentlyExecuting.contains(codeBlock);
+}
+
+void CodeBlockSet::writeBarrierCurrentlyExecutingCodeBlocks(Heap* heap)
+{
+ LockHolder locker(&m_lock);
+ if (verbose)
+ dataLog("Remembering ", m_currentlyExecuting.size(), " code blocks.\n");
+ for (CodeBlock* codeBlock : m_currentlyExecuting)
+ heap->writeBarrier(codeBlock);
+
+ // It's safe to clear this set because we won't delete the CodeBlocks
+ // in it until the next GC, and we'll recompute it at that time.
+ m_currentlyExecuting.clear();
+}
+
+void CodeBlockSet::dump(PrintStream& out) const
+{
+ CommaPrinter comma;
+ out.print("{old = [");
+ for (CodeBlock* codeBlock : m_oldCodeBlocks)
+ out.print(comma, pointerDump(codeBlock));
+ out.print("], new = [");
+ comma = CommaPrinter();
+ for (CodeBlock* codeBlock : m_newCodeBlocks)
+ out.print(comma, pointerDump(codeBlock));
+ out.print("], currentlyExecuting = [");
+ comma = CommaPrinter();
+ for (CodeBlock* codeBlock : m_currentlyExecuting)
+ out.print(comma, pointerDump(codeBlock));
+ out.print("]}");
+}
+
+} // namespace JSC
+
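[Annotation] deleteUnmarkedAndUnreferenced above sweeps only the young set during an eden collection and then promotes the survivors, while a full collection sweeps the old set (the young blocks having already been promoted when marks were cleared). A simplified, standalone sketch of that sweep-and-promote pattern, using stand-in types rather than CodeBlock:

    #include <unordered_set>

    enum class CollectionScope { Eden, Full };

    struct ObjectStub { bool marked; };

    struct GenerationalSetSketch {
        std::unordered_set<ObjectStub*> oldObjects;
        std::unordered_set<ObjectStub*> newObjects;

        void sweep(CollectionScope scope)
        {
            auto& set = scope == CollectionScope::Eden ? newObjects : oldObjects;
            for (auto it = set.begin(); it != set.end();) {
                if ((*it)->marked)
                    ++it;
                else
                    it = set.erase(it); // unmarked: dead, drop it
            }
            if (scope == CollectionScope::Eden)
                promote(); // eden survivors become old-generation members
        }

        void promote()
        {
            oldObjects.insert(newObjects.begin(), newObjects.end());
            newObjects.clear();
        }
    };

    int main()
    {
        GenerationalSetSketch set;
        ObjectStub survivor { true }, garbage { false };
        set.newObjects.insert(&survivor);
        set.newObjects.insert(&garbage);
        set.sweep(CollectionScope::Eden);
        // survivor is now in oldObjects; garbage has been dropped.
        return set.oldObjects.count(&survivor) == 1 ? 0 : 1;
    }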
diff --git a/Source/JavaScriptCore/heap/CodeBlockSet.h b/Source/JavaScriptCore/heap/CodeBlockSet.h
new file mode 100644
index 000000000..56507c052
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CodeBlockSet.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CodeBlockSet_h
+#define CodeBlockSet_h
+
+#include "GCSegmentedArray.h"
+#include "HeapOperation.h"
+#include <wtf/HashSet.h>
+#include <wtf/Lock.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/PrintStream.h>
+#include <wtf/RefPtr.h>
+
+namespace JSC {
+
+class CodeBlock;
+class Heap;
+class JSCell;
+class SlotVisitor;
+
+// CodeBlockSet tracks all CodeBlocks. Every CodeBlock starts out with one
+// reference coming in from GC. The GC is responsible for freeing CodeBlocks
+// once they hasOneRef() and nobody is running code from that CodeBlock.
+
+class CodeBlockSet {
+ WTF_MAKE_NONCOPYABLE(CodeBlockSet);
+
+public:
+ CodeBlockSet();
+ ~CodeBlockSet();
+
+ void lastChanceToFinalize();
+
+ // Add a CodeBlock. This is only called by CodeBlock constructors.
+ void add(CodeBlock*);
+
+ // Clear all mark bits for all CodeBlocks.
+ void clearMarksForFullCollection();
+
+ // Mark a pointer that may be a CodeBlock that belongs to the set of DFG
+ // blocks. This is defined in CodeBlock.h.
+private:
+ void mark(const LockHolder&, CodeBlock* candidateCodeBlock);
+public:
+ void mark(const LockHolder&, void* candidateCodeBlock);
+
+ // Delete all code blocks that are only referenced by this set (i.e. owned
+ // by this set), and that have not been marked.
+ void deleteUnmarkedAndUnreferenced(HeapOperation);
+
+ // Add all currently executing CodeBlocks to the remembered set to be
+ // re-scanned during the next collection.
+ void writeBarrierCurrentlyExecutingCodeBlocks(Heap*);
+
+ bool contains(const LockHolder&, void* candidateCodeBlock);
+ Lock& getLock() { return m_lock; }
+
+ // Visits each CodeBlock in the heap until the visitor function returns true
+ // to indicate that it is done iterating, or until every CodeBlock has been
+ // visited.
+ template<typename Functor> void iterate(Functor& functor)
+ {
+ LockHolder locker(m_lock);
+ for (auto& codeBlock : m_oldCodeBlocks) {
+ bool done = functor(codeBlock);
+ if (done)
+ return;
+ }
+
+ for (auto& codeBlock : m_newCodeBlocks) {
+ bool done = functor(codeBlock);
+ if (done)
+ return;
+ }
+ }
+
+ void dump(PrintStream&) const;
+
+private:
+ void promoteYoungCodeBlocks(const LockHolder&);
+
+ HashSet<CodeBlock*> m_oldCodeBlocks;
+ HashSet<CodeBlock*> m_newCodeBlocks;
+ HashSet<CodeBlock*> m_currentlyExecuting;
+ Lock m_lock;
+};
+
+} // namespace JSC
+
+#endif // CodeBlockSet_h
+
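[Annotation] The iterate() template above visits every old and then every new CodeBlock and stops as soon as the functor returns true. A self-contained sketch of that early-stop contract, with simplified stand-in types (and without the locking the real set takes):

    #include <cstdio>
    #include <unordered_set>

    struct CodeBlockStub { int id; };

    struct CodeBlockSetSketch {
        std::unordered_set<CodeBlockStub*> oldBlocks;
        std::unordered_set<CodeBlockStub*> newBlocks;

        template<typename Functor> void iterate(Functor& functor)
        {
            for (CodeBlockStub* block : oldBlocks) {
                if (functor(block))
                    return; // functor said it is done
            }
            for (CodeBlockStub* block : newBlocks) {
                if (functor(block))
                    return;
            }
        }
    };

    int main()
    {
        CodeBlockSetSketch set;
        CodeBlockStub a { 1 }, b { 2 }, c { 3 };
        set.oldBlocks.insert(&a);
        set.newBlocks.insert(&b);
        set.newBlocks.insert(&c);

        int visited = 0;
        auto stopAfterTwo = [&](CodeBlockStub*) { return ++visited == 2; };
        set.iterate(stopAfterTwo); // visits two blocks, then stops
        std::printf("visited %d code blocks\n", visited);
    }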
diff --git a/Source/JavaScriptCore/heap/ConservativeRoots.cpp b/Source/JavaScriptCore/heap/ConservativeRoots.cpp
index 752ce2775..e1e12002e 100644
--- a/Source/JavaScriptCore/heap/ConservativeRoots.cpp
+++ b/Source/JavaScriptCore/heap/ConservativeRoots.cpp
@@ -27,16 +27,19 @@
#include "ConservativeRoots.h"
#include "CodeBlock.h"
+#include "CodeBlockSet.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
-#include "DFGCodeBlocks.h"
+#include "HeapInlines.h"
#include "JSCell.h"
#include "JSObject.h"
+#include "JSCInlines.h"
#include "Structure.h"
+#include <wtf/OSAllocator.h>
namespace JSC {
-ConservativeRoots::ConservativeRoots(const MarkedBlockSet* blocks, CopiedSpace* copiedSpace)
+ConservativeRoots::ConservativeRoots(MarkedBlockSet* blocks, CopiedSpace* copiedSpace)
: m_roots(m_inlineRoots)
, m_size(0)
, m_capacity(inlineCapacity)
@@ -68,20 +71,8 @@ inline void ConservativeRoots::genericAddPointer(void* p, TinyBloomFilter filter
markHook.mark(p);
m_copiedSpace->pinIfNecessary(p);
-
- MarkedBlock* candidate = MarkedBlock::blockFor(p);
- if (filter.ruleOut(reinterpret_cast<Bits>(candidate))) {
- ASSERT(!candidate || !m_blocks->set().contains(candidate));
- return;
- }
-
- if (!MarkedBlock::isAtomAligned(p))
- return;
- if (!m_blocks->set().contains(candidate))
- return;
-
- if (!candidate->isLiveCell(p))
+ if (!Heap::isPointerGCObject(filter, *m_blocks, p))
return;
if (m_size == m_capacity)
@@ -91,12 +82,17 @@ inline void ConservativeRoots::genericAddPointer(void* p, TinyBloomFilter filter
}
template<typename MarkHook>
+SUPPRESS_ASAN
void ConservativeRoots::genericAddSpan(void* begin, void* end, MarkHook& markHook)
{
- ASSERT(begin <= end);
- ASSERT((static_cast<char*>(end) - static_cast<char*>(begin)) < 0x1000000);
- ASSERT(isPointerAligned(begin));
- ASSERT(isPointerAligned(end));
+ if (begin > end) {
+ void* swapTemp = begin;
+ begin = end;
+ end = swapTemp;
+ }
+
+ RELEASE_ASSERT(isPointerAligned(begin));
+ RELEASE_ASSERT(isPointerAligned(end));
TinyBloomFilter filter = m_blocks->filter(); // Make a local copy of filter to show the compiler it won't alias, and can be register-allocated.
for (char** it = static_cast<char**>(begin); it != static_cast<char**>(end); ++it)
@@ -119,31 +115,32 @@ void ConservativeRoots::add(void* begin, void* end, JITStubRoutineSet& jitStubRo
genericAddSpan(begin, end, jitStubRoutines);
}
-template<typename T, typename U>
class CompositeMarkHook {
public:
- CompositeMarkHook(T& first, U& second)
- : m_first(first)
- , m_second(second)
+ CompositeMarkHook(JITStubRoutineSet& stubRoutines, CodeBlockSet& codeBlocks, const LockHolder& locker)
+ : m_stubRoutines(stubRoutines)
+ , m_codeBlocks(codeBlocks)
+ , m_codeBlocksLocker(locker)
{
}
void mark(void* address)
{
- m_first.mark(address);
- m_second.mark(address);
+ m_stubRoutines.mark(address);
+ m_codeBlocks.mark(m_codeBlocksLocker, address);
}
private:
- T& m_first;
- U& m_second;
+ JITStubRoutineSet& m_stubRoutines;
+ CodeBlockSet& m_codeBlocks;
+ const LockHolder& m_codeBlocksLocker;
};
void ConservativeRoots::add(
- void* begin, void* end, JITStubRoutineSet& jitStubRoutines, DFGCodeBlocks& dfgCodeBlocks)
+ void* begin, void* end, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks)
{
- CompositeMarkHook<JITStubRoutineSet, DFGCodeBlocks> markHook(
- jitStubRoutines, dfgCodeBlocks);
+ LockHolder locker(codeBlocks.getLock());
+ CompositeMarkHook markHook(jitStubRoutines, codeBlocks, locker);
genericAddSpan(begin, end, markHook);
}
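[Annotation] genericAddPointer above now funnels the candidate test through Heap::isPointerGCObject, replacing the open-coded checks that were deleted: mask the pointer down to its block, rule it out cheaply with the block set's tiny Bloom filter, then fall back to exact alignment/containment/liveness checks. A rough standalone sketch of that filtering order, with simplified stand-ins for TinyBloomFilter and the block set:

    #include <cstddef>
    #include <cstdint>
    #include <unordered_set>

    using Bits = uintptr_t;
    static const size_t blockSize = 16 * 1024; // illustrative block granule

    struct TinyBloomFilterSketch {
        Bits bits { 0 };
        void add(Bits p) { bits |= p; }
        // If p has any bit set that no added value had, p was never added.
        bool ruleOut(Bits p) const { return (p & ~bits) != 0; }
    };

    static void* blockFor(const void* p)
    {
        return reinterpret_cast<void*>(reinterpret_cast<Bits>(p) & ~(blockSize - 1));
    }

    static bool mightBeGCPointer(const void* p, const TinyBloomFilterSketch& filter,
        const std::unordered_set<void*>& blocks)
    {
        void* candidate = blockFor(p);
        if (filter.ruleOut(reinterpret_cast<Bits>(candidate)))
            return false; // cheap negative: definitely not one of our blocks
        if (reinterpret_cast<Bits>(p) & (sizeof(void*) - 1))
            return false; // not pointer-aligned, cannot be a cell
        // Exact check; the real heap additionally asks the block whether p is a live cell.
        return blocks.count(candidate) != 0;
    }

    int main()
    {
        TinyBloomFilterSketch filter;
        std::unordered_set<void*> blocks;
        void* block = reinterpret_cast<void*>(Bits(32) * blockSize); // pretend block address
        filter.add(reinterpret_cast<Bits>(block));
        blocks.insert(block);
        void* interior = reinterpret_cast<void*>(reinterpret_cast<Bits>(block) + 64);
        return mightBeGCPointer(interior, filter, blocks) ? 0 : 1;
    }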
diff --git a/Source/JavaScriptCore/heap/ConservativeRoots.h b/Source/JavaScriptCore/heap/ConservativeRoots.h
index 219bdcc8e..634354857 100644
--- a/Source/JavaScriptCore/heap/ConservativeRoots.h
+++ b/Source/JavaScriptCore/heap/ConservativeRoots.h
@@ -27,24 +27,21 @@
#define ConservativeRoots_h
#include "Heap.h"
-#include <wtf/OSAllocator.h>
-#include <wtf/Vector.h>
namespace JSC {
-class DFGCodeBlocks;
-class Heap;
+class CodeBlockSet;
class JITStubRoutineSet;
class JSCell;
class ConservativeRoots {
public:
- ConservativeRoots(const MarkedBlockSet*, CopiedSpace*);
+ ConservativeRoots(MarkedBlockSet*, CopiedSpace*);
~ConservativeRoots();
void add(void* begin, void* end);
void add(void* begin, void* end, JITStubRoutineSet&);
- void add(void* begin, void* end, JITStubRoutineSet&, DFGCodeBlocks&);
+ void add(void* begin, void* end, JITStubRoutineSet&, CodeBlockSet&);
size_t size();
JSCell** roots();
@@ -64,7 +61,7 @@ private:
JSCell** m_roots;
size_t m_size;
size_t m_capacity;
- const MarkedBlockSet* m_blocks;
+ MarkedBlockSet* m_blocks;
CopiedSpace* m_copiedSpace;
JSCell* m_inlineRoots[inlineCapacity];
};
diff --git a/Source/JavaScriptCore/heap/CopiedAllocator.h b/Source/JavaScriptCore/heap/CopiedAllocator.h
index 32b84f008..143117fdc 100644
--- a/Source/JavaScriptCore/heap/CopiedAllocator.h
+++ b/Source/JavaScriptCore/heap/CopiedAllocator.h
@@ -28,7 +28,6 @@
#include "CopiedBlock.h"
#include <wtf/CheckedBoolean.h>
-#include <wtf/DataLog.h>
namespace JSC {
@@ -38,13 +37,14 @@ public:
bool fastPathShouldSucceed(size_t bytes) const;
CheckedBoolean tryAllocate(size_t bytes, void** outPtr);
+ CheckedBoolean tryAllocateDuringCopying(size_t bytes, void** outPtr);
CheckedBoolean tryReallocate(void *oldPtr, size_t oldBytes, size_t newBytes);
void* forceAllocate(size_t bytes);
CopiedBlock* resetCurrentBlock();
void setCurrentBlock(CopiedBlock*);
size_t currentCapacity();
- bool isValid() { return !!m_currentBlock; }
+ bool isValid() const { return !!m_currentBlock; }
CopiedBlock* currentBlock() { return m_currentBlock; }
@@ -93,6 +93,14 @@ inline CheckedBoolean CopiedAllocator::tryAllocate(size_t bytes, void** outPtr)
return true;
}
+inline CheckedBoolean CopiedAllocator::tryAllocateDuringCopying(size_t bytes, void** outPtr)
+{
+ if (!tryAllocate(bytes, outPtr))
+ return false;
+ m_currentBlock->reportLiveBytesDuringCopying(bytes);
+ return true;
+}
+
inline CheckedBoolean CopiedAllocator::tryReallocate(
void* oldPtr, size_t oldBytes, size_t newBytes)
{
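[Annotation] tryAllocate and the new tryAllocateDuringCopying above are bump allocations out of the current block's remaining space; the "during copying" variant additionally credits the block's live-byte count, since the copying thread owns the block. A simplified sketch of the bump fast path (not JSC's actual field layout):

    #include <cstddef>

    struct BumpBlockSketch {
        char* payload;     // start of the block's payload
        size_t capacity;   // total payload bytes
        size_t remaining;  // bytes still free
    };

    static bool tryAllocateSketch(BumpBlockSketch& block, size_t bytes, void** outPtr)
    {
        bytes = (bytes + 7) & ~size_t(7); // keep successive allocations 8-byte aligned
        if (bytes > block.remaining)
            return false; // caller's slow path grabs a fresh block
        block.remaining -= bytes;
        *outPtr = block.payload + (block.capacity - block.remaining) - bytes;
        return true;
    }

    int main()
    {
        static char storage[1024];
        BumpBlockSketch block { storage, sizeof(storage), sizeof(storage) };
        void* p = nullptr;
        bool ok = tryAllocateSketch(block, 24, &p); // first allocation lands at the start of the payload
        return ok && p == storage ? 0 : 1;
    }

A failed fast path is what sends CopiedSpace::tryAllocateSlowCase (further down in this diff) off to account for the filled block and allocate a new one.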
diff --git a/Source/JavaScriptCore/heap/CopiedBlock.cpp b/Source/JavaScriptCore/heap/CopiedBlock.cpp
new file mode 100644
index 000000000..13798a198
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CopiedBlock.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CopiedBlock.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+static const bool computeBalance = false;
+static size_t balance;
+
+CopiedBlock* CopiedBlock::createNoZeroFill(Heap& heap, size_t capacity)
+{
+ if (computeBalance) {
+ balance++;
+ if (!(balance % 10))
+ dataLog("CopiedBlock Balance: ", balance, "\n");
+ }
+ CopiedBlock* block = new (NotNull, fastAlignedMalloc(CopiedBlock::blockSize, capacity)) CopiedBlock(capacity);
+ heap.didAllocateBlock(capacity);
+ return block;
+}
+
+void CopiedBlock::destroy(Heap& heap, CopiedBlock* copiedBlock)
+{
+ if (computeBalance) {
+ balance--;
+ if (!(balance % 10))
+ dataLog("CopiedBlock Balance: ", balance, "\n");
+ }
+ size_t capacity = copiedBlock->capacity();
+ copiedBlock->~CopiedBlock();
+ fastAlignedFree(copiedBlock);
+ heap.didFreeBlock(capacity);
+}
+
+CopiedBlock* CopiedBlock::create(Heap& heap, size_t capacity)
+{
+ CopiedBlock* newBlock = createNoZeroFill(heap, capacity);
+ newBlock->zeroFillWilderness();
+ return newBlock;
+}
+
+void CopiedBlock::zeroFillWilderness()
+{
+#if USE(JSVALUE64)
+ memset(wilderness(), 0, wildernessSize());
+#else
+ JSValue emptyValue;
+ JSValue* limit = reinterpret_cast_ptr<JSValue*>(wildernessEnd());
+ for (JSValue* currentValue = reinterpret_cast_ptr<JSValue*>(wilderness()); currentValue < limit; currentValue++)
+ *currentValue = emptyValue;
+#endif
+}
+
+CopiedBlock::CopiedBlock(size_t capacity)
+ : DoublyLinkedListNode<CopiedBlock>()
+ , m_capacity(capacity)
+ , m_remaining(payloadCapacity())
+ , m_isPinned(false)
+ , m_isOld(false)
+ , m_liveBytes(0)
+#ifndef NDEBUG
+ , m_liveObjects(0)
+#endif
+{
+ ASSERT(is8ByteAligned(reinterpret_cast<void*>(m_remaining)));
+}
+
+} // namespace JSC
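[Annotation] createNoZeroFill and destroy above pair an aligned allocation with placement new and a manual destructor call, so the block header and its payload share one chunk whose base address can be recovered by masking pointer bits. A standalone sketch of that pattern, using std::aligned_alloc in place of WTF::fastAlignedMalloc (note that std::aligned_alloc requires the size to be a multiple of the alignment, which the sketch assumes):

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct BlockSketch {
        size_t capacity;
        explicit BlockSketch(size_t c) : capacity(c) { }
        // payload bytes follow the header inside the same allocation
    };

    static const size_t blockAlignment = 32 * 1024; // stand-in for CopiedBlock::blockSize

    static BlockSketch* createBlock(size_t capacity)
    {
        void* memory = std::aligned_alloc(blockAlignment, capacity); // aligned so a block base can be found by masking
        if (!memory)
            return nullptr;
        return new (memory) BlockSketch(capacity); // placement-new the header at the front
    }

    static void destroyBlock(BlockSketch* block)
    {
        block->~BlockSketch(); // run the destructor explicitly...
        std::free(block);      // ...then release the raw aligned storage
    }

    int main()
    {
        BlockSketch* block = createBlock(blockAlignment); // capacity == alignment keeps aligned_alloc happy
        if (!block)
            return 1;
        destroyBlock(block);
        return 0;
    }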
diff --git a/Source/JavaScriptCore/heap/CopiedBlock.h b/Source/JavaScriptCore/heap/CopiedBlock.h
index 86cca8cbe..dd3b44532 100644
--- a/Source/JavaScriptCore/heap/CopiedBlock.h
+++ b/Source/JavaScriptCore/heap/CopiedBlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,33 +26,35 @@
#ifndef CopiedBlock_h
#define CopiedBlock_h
-#include "BlockAllocator.h"
#include "CopyWorkList.h"
-#include "HeapBlock.h"
#include "JSCJSValue.h"
#include "Options.h"
-#include <wtf/Atomics.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
+#include <wtf/DoublyLinkedList.h>
+#include <wtf/Lock.h>
namespace JSC {
class CopiedSpace;
-class CopiedBlock : public HeapBlock<CopiedBlock> {
+class CopiedBlock : public DoublyLinkedListNode<CopiedBlock> {
+ friend class WTF::DoublyLinkedListNode<CopiedBlock>;
friend class CopiedSpace;
friend class CopiedAllocator;
public:
- static CopiedBlock* create(DeadBlock*);
- static CopiedBlock* createNoZeroFill(DeadBlock*);
+ static CopiedBlock* create(Heap&, size_t = blockSize);
+ static CopiedBlock* createNoZeroFill(Heap&, size_t = blockSize);
+ static void destroy(Heap&, CopiedBlock*);
void pin();
bool isPinned();
+ bool isOld();
bool isOversize();
+ void didPromote();
unsigned liveBytes();
- void reportLiveBytes(JSCell*, unsigned);
+ void reportLiveBytes(LockHolder&, JSCell*, CopyToken, unsigned);
+ void reportLiveBytesDuringCopying(unsigned);
void didSurviveGC();
void didEvacuateBytes(unsigned);
bool shouldEvacuate();
@@ -81,79 +83,65 @@ public:
bool hasWorkList();
CopyWorkList& workList();
+ Lock& workListLock() { return m_workListLock; }
private:
- CopiedBlock(Region*);
+ CopiedBlock(size_t);
void zeroFillWilderness(); // Can be called at any time to zero-fill to the end of the block.
-#if ENABLE(PARALLEL_GC)
- SpinLock m_workListLock;
-#endif
- OwnPtr<CopyWorkList> m_workList;
+ void checkConsistency();
- size_t m_remaining;
- uintptr_t m_isPinned;
- unsigned m_liveBytes;
-};
-
-inline CopiedBlock* CopiedBlock::createNoZeroFill(DeadBlock* block)
-{
- Region* region = block->region();
- return new(NotNull, block) CopiedBlock(region);
-}
+ CopiedBlock* m_prev;
+ CopiedBlock* m_next;
-inline CopiedBlock* CopiedBlock::create(DeadBlock* block)
-{
- CopiedBlock* newBlock = createNoZeroFill(block);
- newBlock->zeroFillWilderness();
- return newBlock;
-}
+ size_t m_capacity;
-inline void CopiedBlock::zeroFillWilderness()
-{
-#if USE(JSVALUE64)
- memset(wilderness(), 0, wildernessSize());
-#else
- JSValue emptyValue;
- JSValue* limit = reinterpret_cast_ptr<JSValue*>(wildernessEnd());
- for (JSValue* currentValue = reinterpret_cast_ptr<JSValue*>(wilderness()); currentValue < limit; currentValue++)
- *currentValue = emptyValue;
-#endif
-}
+ Lock m_workListLock;
+ std::unique_ptr<CopyWorkList> m_workList;
-inline CopiedBlock::CopiedBlock(Region* region)
- : HeapBlock<CopiedBlock>(region)
- , m_remaining(payloadCapacity())
- , m_isPinned(false)
- , m_liveBytes(0)
-{
-#if ENABLE(PARALLEL_GC)
- m_workListLock.Init();
+ size_t m_remaining;
+ bool m_isPinned : 1;
+ bool m_isOld : 1;
+ unsigned m_liveBytes;
+#ifndef NDEBUG
+ unsigned m_liveObjects;
#endif
- ASSERT(is8ByteAligned(reinterpret_cast<void*>(m_remaining)));
-}
+};
inline void CopiedBlock::didSurviveGC()
{
+ checkConsistency();
+ ASSERT(isOld());
m_liveBytes = 0;
+#ifndef NDEBUG
+ m_liveObjects = 0;
+#endif
m_isPinned = false;
if (m_workList)
- m_workList.clear();
+ m_workList = nullptr;
}
inline void CopiedBlock::didEvacuateBytes(unsigned bytes)
{
ASSERT(m_liveBytes >= bytes);
+ ASSERT(m_liveObjects);
+ checkConsistency();
m_liveBytes -= bytes;
+#ifndef NDEBUG
+ m_liveObjects--;
+#endif
+ checkConsistency();
}
inline bool CopiedBlock::canBeRecycled()
{
+ checkConsistency();
return !m_liveBytes;
}
inline bool CopiedBlock::shouldEvacuate()
{
+ checkConsistency();
return static_cast<double>(m_liveBytes) / static_cast<double>(payloadCapacity()) <= Options::minCopiedBlockUtilization();
}
@@ -161,7 +149,7 @@ inline void CopiedBlock::pin()
{
m_isPinned = true;
if (m_workList)
- m_workList.clear();
+ m_workList = nullptr;
}
inline bool CopiedBlock::isPinned()
@@ -169,24 +157,35 @@ inline bool CopiedBlock::isPinned()
return m_isPinned;
}
+inline bool CopiedBlock::isOld()
+{
+ return m_isOld;
+}
+
+inline void CopiedBlock::didPromote()
+{
+ m_isOld = true;
+}
+
inline bool CopiedBlock::isOversize()
{
- return region()->isCustomSize();
+ return m_capacity != blockSize;
}
inline unsigned CopiedBlock::liveBytes()
{
+ checkConsistency();
return m_liveBytes;
}
inline char* CopiedBlock::payload()
{
- return reinterpret_cast<char*>(this) + ((sizeof(CopiedBlock) + 7) & ~7);
+ return reinterpret_cast<char*>(this) + WTF::roundUpToMultipleOf<sizeof(double)>(sizeof(CopiedBlock));
}
inline char* CopiedBlock::payloadEnd()
{
- return reinterpret_cast<char*>(this) + region()->blockSize();
+ return reinterpret_cast<char*>(this) + m_capacity;
}
inline size_t CopiedBlock::payloadCapacity()
@@ -231,7 +230,7 @@ inline size_t CopiedBlock::size()
inline size_t CopiedBlock::capacity()
{
- return region()->blockSize();
+ return m_capacity;
}
inline bool CopiedBlock::hasWorkList()
@@ -244,6 +243,11 @@ inline CopyWorkList& CopiedBlock::workList()
return *m_workList;
}
+inline void CopiedBlock::checkConsistency()
+{
+ ASSERT(!!m_liveBytes == !!m_liveObjects);
+}
+
} // namespace JSC
#endif
diff --git a/Source/JavaScriptCore/heap/CopiedBlockInlines.h b/Source/JavaScriptCore/heap/CopiedBlockInlines.h
index 0068abcdd..c6fb76217 100644
--- a/Source/JavaScriptCore/heap/CopiedBlockInlines.h
+++ b/Source/JavaScriptCore/heap/CopiedBlockInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,13 +30,19 @@
#include "Heap.h"
namespace JSC {
-
-inline void CopiedBlock::reportLiveBytes(JSCell* owner, unsigned bytes)
+
+inline void CopiedBlock::reportLiveBytes(LockHolder&, JSCell* owner, CopyToken token, unsigned bytes)
{
-#if ENABLE(PARALLEL_GC)
- SpinLockHolder locker(&m_workListLock);
+ checkConsistency();
+#ifndef NDEBUG
+ m_liveObjects++;
#endif
m_liveBytes += bytes;
+ checkConsistency();
+ ASSERT(m_liveBytes <= m_capacity);
+
+ if (isPinned())
+ return;
if (!shouldEvacuate()) {
pin();
@@ -44,9 +50,22 @@ inline void CopiedBlock::reportLiveBytes(JSCell* owner, unsigned bytes)
}
if (!m_workList)
- m_workList = adoptPtr(new CopyWorkList(Heap::heap(owner)->blockAllocator()));
+ m_workList = std::make_unique<CopyWorkList>();
+
+ m_workList->append(CopyWorklistItem(owner, token));
+}
- m_workList->append(owner);
+inline void CopiedBlock::reportLiveBytesDuringCopying(unsigned bytes)
+{
+ checkConsistency();
+ // This doesn't need to be locked because the thread that calls this function owns the current block.
+ m_isOld = true;
+#ifndef NDEBUG
+ m_liveObjects++;
+#endif
+ m_liveBytes += bytes;
+ checkConsistency();
+ ASSERT(m_liveBytes <= CopiedBlock::blockSize);
}
} // namespace JSC
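[Annotation] reportLiveBytes above, together with shouldEvacuate and pin in CopiedBlock.h, implements the evacuation decision: a block whose live data sits above a utilization threshold is pinned in place (copying it would reclaim little), while sparse blocks stay eligible for evacuation. A small standalone sketch of that threshold test; the 70% value is illustrative, not the actual Options::minCopiedBlockUtilization() default:

    #include <cstddef>

    static const double minUtilization = 0.7; // illustrative threshold

    static bool shouldEvacuateSketch(size_t liveBytes, size_t payloadCapacity)
    {
        return static_cast<double>(liveBytes) / static_cast<double>(payloadCapacity) <= minUtilization;
    }

    static bool shouldPinSketch(size_t liveBytes, size_t payloadCapacity)
    {
        // Mirrors reportLiveBytes() above: a block too full to be worth
        // evacuating is pinned so the copy phase leaves it where it is.
        return !shouldEvacuateSketch(liveBytes, payloadCapacity);
    }

    int main()
    {
        // A block 90% full is pinned; one 30% full is left evacuable.
        return shouldPinSketch(900, 1000) && !shouldPinSketch(300, 1000) ? 0 : 1;
    }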
diff --git a/Source/JavaScriptCore/heap/CopiedSpace.cpp b/Source/JavaScriptCore/heap/CopiedSpace.cpp
index b23e87397..47656ed15 100644
--- a/Source/JavaScriptCore/heap/CopiedSpace.cpp
+++ b/Source/JavaScriptCore/heap/CopiedSpace.cpp
@@ -28,39 +28,55 @@
#include "CopiedSpaceInlines.h"
#include "GCActivityCallback.h"
-#include "Operations.h"
-#include "Options.h"
+#include "JSCInlines.h"
namespace JSC {
CopiedSpace::CopiedSpace(Heap* heap)
: m_heap(heap)
- , m_toSpace(0)
- , m_fromSpace(0)
, m_inCopyingPhase(false)
, m_shouldDoCopyPhase(false)
, m_numberOfLoanedBlocks(0)
+ , m_bytesRemovedFromOldSpaceDueToReallocation(0)
{
- m_toSpaceLock.Init();
}
CopiedSpace::~CopiedSpace()
{
- while (!m_toSpace->isEmpty())
- m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_toSpace->removeHead()));
+ while (!m_oldGen.toSpace->isEmpty())
+ CopiedBlock::destroy(*heap(), m_oldGen.toSpace->removeHead());
- while (!m_fromSpace->isEmpty())
- m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_fromSpace->removeHead()));
+ while (!m_oldGen.fromSpace->isEmpty())
+ CopiedBlock::destroy(*heap(), m_oldGen.fromSpace->removeHead());
- while (!m_oversizeBlocks.isEmpty())
- m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(m_oversizeBlocks.removeHead()));
+ while (!m_oldGen.oversizeBlocks.isEmpty())
+ CopiedBlock::destroy(*heap(), m_oldGen.oversizeBlocks.removeHead());
+
+ while (!m_newGen.toSpace->isEmpty())
+ CopiedBlock::destroy(*heap(), m_newGen.toSpace->removeHead());
+
+ while (!m_newGen.fromSpace->isEmpty())
+ CopiedBlock::destroy(*heap(), m_newGen.fromSpace->removeHead());
+
+ while (!m_newGen.oversizeBlocks.isEmpty())
+ CopiedBlock::destroy(*heap(), m_newGen.oversizeBlocks.removeHead());
+
+ ASSERT(m_oldGen.toSpace->isEmpty());
+ ASSERT(m_oldGen.fromSpace->isEmpty());
+ ASSERT(m_oldGen.oversizeBlocks.isEmpty());
+ ASSERT(m_newGen.toSpace->isEmpty());
+ ASSERT(m_newGen.fromSpace->isEmpty());
+ ASSERT(m_newGen.oversizeBlocks.isEmpty());
}
void CopiedSpace::init()
{
- m_toSpace = &m_blocks1;
- m_fromSpace = &m_blocks2;
+ m_oldGen.toSpace = &m_oldGen.blocks1;
+ m_oldGen.fromSpace = &m_oldGen.blocks2;
+ m_newGen.toSpace = &m_newGen.blocks1;
+ m_newGen.fromSpace = &m_newGen.blocks2;
+
allocateBlock();
}
@@ -69,7 +85,7 @@ CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
if (isOversize(bytes))
return tryAllocateOversize(bytes, outPtr);
- ASSERT(m_heap->vm()->apiLock().currentThreadIsHoldingLock());
+ ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
m_heap->didAllocate(m_allocator.currentCapacity());
allocateBlock();
@@ -82,17 +98,18 @@ CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
{
ASSERT(isOversize(bytes));
- CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocateCustomSize(sizeof(CopiedBlock) + bytes, CopiedBlock::blockSize));
- m_oversizeBlocks.push(block);
- m_blockFilter.add(reinterpret_cast<Bits>(block));
+ CopiedBlock* block = CopiedBlock::create(*m_heap, WTF::roundUpToMultipleOf<sizeof(double)>(sizeof(CopiedBlock) + bytes));
+ m_newGen.oversizeBlocks.push(block);
+ m_newGen.blockFilter.add(reinterpret_cast<Bits>(block));
m_blockSet.add(block);
+ ASSERT(!block->isOld());
CopiedAllocator allocator;
allocator.setCurrentBlock(block);
*outPtr = allocator.forceAllocate(bytes);
allocator.resetCurrentBlock();
- m_heap->didAllocate(block->region()->blockSize());
+ m_heap->didAllocate(block->capacity());
return true;
}
@@ -138,9 +155,16 @@ CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, si
CopiedBlock* oldBlock = CopiedSpace::blockFor(oldPtr);
if (oldBlock->isOversize()) {
- m_oversizeBlocks.remove(oldBlock);
+ // FIXME: Eagerly deallocating the old space block probably buys more confusion than
+ // value.
+ // https://bugs.webkit.org/show_bug.cgi?id=144750
+ if (oldBlock->isOld()) {
+ m_bytesRemovedFromOldSpaceDueToReallocation += oldBlock->size();
+ m_oldGen.oversizeBlocks.remove(oldBlock);
+ } else
+ m_newGen.oversizeBlocks.remove(oldBlock);
m_blockSet.remove(oldBlock);
- m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(oldBlock));
+ CopiedBlock::destroy(*heap(), oldBlock);
}
*ptr = newPtr;
@@ -165,96 +189,82 @@ void CopiedSpace::doneFillingBlock(CopiedBlock* block, CopiedBlock** exchange)
block->zeroFillWilderness();
{
- SpinLockHolder locker(&m_toSpaceLock);
- m_toSpace->push(block);
+ // Always put the block into the old gen because it's being promoted!
+ LockHolder locker(&m_toSpaceLock);
+ m_oldGen.toSpace->push(block);
m_blockSet.add(block);
- m_blockFilter.add(reinterpret_cast<Bits>(block));
+ m_oldGen.blockFilter.add(reinterpret_cast<Bits>(block));
}
{
- MutexLocker locker(m_loanedBlocksLock);
+ LockHolder locker(m_loanedBlocksLock);
ASSERT(m_numberOfLoanedBlocks > 0);
ASSERT(m_inCopyingPhase);
m_numberOfLoanedBlocks--;
- if (!m_numberOfLoanedBlocks)
- m_loanedBlocksCondition.signal();
}
}
-void CopiedSpace::startedCopying()
+void CopiedSpace::didStartFullCollection()
{
- std::swap(m_fromSpace, m_toSpace);
-
- m_blockFilter.reset();
- m_allocator.resetCurrentBlock();
-
- CopiedBlock* next = 0;
- size_t totalLiveBytes = 0;
- size_t totalUsableBytes = 0;
- for (CopiedBlock* block = m_fromSpace->head(); block; block = next) {
- next = block->next();
- if (!block->isPinned() && block->canBeRecycled()) {
- recycleEvacuatedBlock(block);
- continue;
- }
- totalLiveBytes += block->liveBytes();
- totalUsableBytes += block->payloadCapacity();
- }
+ ASSERT(heap()->operationInProgress() == FullCollection);
+ ASSERT(m_oldGen.fromSpace->isEmpty());
+ ASSERT(m_newGen.fromSpace->isEmpty());
- CopiedBlock* block = m_oversizeBlocks.head();
- while (block) {
- CopiedBlock* next = block->next();
- if (block->isPinned()) {
- m_blockFilter.add(reinterpret_cast<Bits>(block));
- totalLiveBytes += block->payloadCapacity();
- totalUsableBytes += block->payloadCapacity();
- block->didSurviveGC();
- } else {
- m_oversizeBlocks.remove(block);
- m_blockSet.remove(block);
- m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(block));
- }
- block = next;
- }
+#ifndef NDEBUG
+ for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next())
+ ASSERT(!block->liveBytes());
- double markedSpaceBytes = m_heap->objectSpace().capacity();
- double totalFragmentation = ((double)totalLiveBytes + markedSpaceBytes) / ((double)totalUsableBytes + markedSpaceBytes);
- m_shouldDoCopyPhase = totalFragmentation <= Options::minHeapUtilization();
- if (!m_shouldDoCopyPhase)
- return;
+ for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next())
+ ASSERT(!block->liveBytes());
+#endif
- ASSERT(m_shouldDoCopyPhase);
- ASSERT(!m_inCopyingPhase);
- ASSERT(!m_numberOfLoanedBlocks);
- m_inCopyingPhase = true;
+ for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next())
+ block->didSurviveGC();
+
+ for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next())
+ block->didSurviveGC();
}
void CopiedSpace::doneCopying()
{
- {
- MutexLocker locker(m_loanedBlocksLock);
- while (m_numberOfLoanedBlocks > 0)
- m_loanedBlocksCondition.wait(m_loanedBlocksLock);
- }
-
- ASSERT(m_inCopyingPhase == m_shouldDoCopyPhase);
+ RELEASE_ASSERT(!m_numberOfLoanedBlocks);
+ RELEASE_ASSERT(m_inCopyingPhase == m_shouldDoCopyPhase);
m_inCopyingPhase = false;
- while (!m_fromSpace->isEmpty()) {
- CopiedBlock* block = m_fromSpace->removeHead();
- // All non-pinned blocks in from-space should have been reclaimed as they were evacuated.
- ASSERT(block->isPinned() || !m_shouldDoCopyPhase);
- block->didSurviveGC();
+ DoublyLinkedList<CopiedBlock>* toSpace;
+ DoublyLinkedList<CopiedBlock>* fromSpace;
+ TinyBloomFilter* blockFilter;
+ if (heap()->operationInProgress() == FullCollection) {
+ toSpace = m_oldGen.toSpace;
+ fromSpace = m_oldGen.fromSpace;
+ blockFilter = &m_oldGen.blockFilter;
+ } else {
+ toSpace = m_newGen.toSpace;
+ fromSpace = m_newGen.fromSpace;
+ blockFilter = &m_newGen.blockFilter;
+ }
+
+ while (!fromSpace->isEmpty()) {
+ CopiedBlock* block = fromSpace->removeHead();
// We don't add the block to the blockSet because it was never removed.
ASSERT(m_blockSet.contains(block));
- m_blockFilter.add(reinterpret_cast<Bits>(block));
- m_toSpace->push(block);
+ blockFilter->add(reinterpret_cast<Bits>(block));
+ block->didSurviveGC();
+ toSpace->push(block);
+ }
+
+ if (heap()->operationInProgress() == EdenCollection) {
+ m_oldGen.toSpace->append(*m_newGen.toSpace);
+ m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks);
+ m_oldGen.blockFilter.add(m_newGen.blockFilter);
+ m_newGen.blockFilter.reset();
}
- if (!m_toSpace->head())
- allocateBlock();
- else
- m_allocator.setCurrentBlock(m_toSpace->head());
+ ASSERT(m_newGen.toSpace->isEmpty());
+ ASSERT(m_newGen.fromSpace->isEmpty());
+ ASSERT(m_newGen.oversizeBlocks.isEmpty());
+
+ allocateBlock();
m_shouldDoCopyPhase = false;
}
@@ -263,13 +273,22 @@ size_t CopiedSpace::size()
{
size_t calculatedSize = 0;
- for (CopiedBlock* block = m_toSpace->head(); block; block = block->next())
+ for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next())
+ calculatedSize += block->size();
+
+ for (CopiedBlock* block = m_oldGen.fromSpace->head(); block; block = block->next())
+ calculatedSize += block->size();
+
+ for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next())
calculatedSize += block->size();
- for (CopiedBlock* block = m_fromSpace->head(); block; block = block->next())
+ for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next())
calculatedSize += block->size();
- for (CopiedBlock* block = m_oversizeBlocks.head(); block; block = block->next())
+ for (CopiedBlock* block = m_newGen.fromSpace->head(); block; block = block->next())
+ calculatedSize += block->size();
+
+ for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next())
calculatedSize += block->size();
return calculatedSize;
@@ -279,13 +298,22 @@ size_t CopiedSpace::capacity()
{
size_t calculatedCapacity = 0;
- for (CopiedBlock* block = m_toSpace->head(); block; block = block->next())
+ for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next())
+ calculatedCapacity += block->capacity();
+
+ for (CopiedBlock* block = m_oldGen.fromSpace->head(); block; block = block->next())
+ calculatedCapacity += block->capacity();
+
+ for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next())
+ calculatedCapacity += block->capacity();
+
+ for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next())
calculatedCapacity += block->capacity();
- for (CopiedBlock* block = m_fromSpace->head(); block; block = block->next())
+ for (CopiedBlock* block = m_newGen.fromSpace->head(); block; block = block->next())
calculatedCapacity += block->capacity();
- for (CopiedBlock* block = m_oversizeBlocks.head(); block; block = block->next())
+ for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next())
calculatedCapacity += block->capacity();
return calculatedCapacity;
@@ -311,9 +339,12 @@ static bool isBlockListPagedOut(double deadline, DoublyLinkedList<CopiedBlock>*
bool CopiedSpace::isPagedOut(double deadline)
{
- return isBlockListPagedOut(deadline, m_toSpace)
- || isBlockListPagedOut(deadline, m_fromSpace)
- || isBlockListPagedOut(deadline, &m_oversizeBlocks);
+ return isBlockListPagedOut(deadline, m_oldGen.toSpace)
+ || isBlockListPagedOut(deadline, m_oldGen.fromSpace)
+ || isBlockListPagedOut(deadline, &m_oldGen.oversizeBlocks)
+ || isBlockListPagedOut(deadline, m_newGen.toSpace)
+ || isBlockListPagedOut(deadline, m_newGen.fromSpace)
+ || isBlockListPagedOut(deadline, &m_newGen.oversizeBlocks);
}
} // namespace JSC
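
A minimal sketch of the generational hand-off that the doneCopying() changes above perform: survivors of the collected from-space return to that generation's to-space, and an Eden collection then promotes the whole new generation into the old one. std::list stands in for WTF::DoublyLinkedList and every name here is illustrative, not JSC API.

#include <list>

struct Block {};

struct Generation {
    std::list<Block*> toSpace;
    std::list<Block*> fromSpace;
};

enum class Collection { Eden, Full };

// Sketch of the post-copy hand-off performed by CopiedSpace::doneCopying().
void doneCopyingSketch(Generation& oldGen, Generation& newGen, Collection type)
{
    Generation& collected = (type == Collection::Full) ? oldGen : newGen;

    // Blocks that survived evacuation return to the collected generation's to-space.
    collected.toSpace.splice(collected.toSpace.end(), collected.fromSpace);

    // An Eden collection then promotes every surviving new-generation block.
    if (type == Collection::Eden)
        oldGen.toSpace.splice(oldGen.toSpace.end(), newGen.toSpace);
}
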
diff --git a/Source/JavaScriptCore/heap/CopiedSpace.h b/Source/JavaScriptCore/heap/CopiedSpace.h
index 65ca04ef6..dfb720bcd 100644
--- a/Source/JavaScriptCore/heap/CopiedSpace.h
+++ b/Source/JavaScriptCore/heap/CopiedSpace.h
@@ -27,18 +27,13 @@
#define CopiedSpace_h
#include "CopiedAllocator.h"
-#include "HeapBlock.h"
+#include "HeapOperation.h"
#include "TinyBloomFilter.h"
#include <wtf/Assertions.h>
#include <wtf/CheckedBoolean.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashSet.h>
-#include <wtf/OSAllocator.h>
-#include <wtf/PageAllocationAligned.h>
-#include <wtf/PageBlock.h>
-#include <wtf/StdLibExtras.h>
-#include <wtf/TCSpinLock.h>
-#include <wtf/ThreadingPrimitives.h>
+#include <wtf/Lock.h>
namespace JSC {
@@ -47,7 +42,7 @@ class CopiedBlock;
class CopiedSpace {
friend class CopyVisitor;
- friend class GCThreadSharedData;
+ friend class Heap;
friend class SlotVisitor;
friend class JIT;
public:
@@ -60,9 +55,12 @@ public:
CopiedAllocator& allocator() { return m_allocator; }
+ void didStartFullCollection();
+
+ template <HeapOperation collectionType>
void startedCopying();
void doneCopying();
- bool isInCopyPhase() { return m_inCopyingPhase; }
+ bool isInCopyPhase() const { return m_inCopyingPhase; }
void pin(CopiedBlock*);
bool isPinned(void*);
@@ -76,10 +74,19 @@ public:
size_t capacity();
bool isPagedOut(double deadline);
- bool shouldDoCopyPhase() { return m_shouldDoCopyPhase; }
+ bool shouldDoCopyPhase() const { return m_shouldDoCopyPhase; }
static CopiedBlock* blockFor(void*);
+ Heap* heap() const { return m_heap; }
+
+ size_t takeBytesRemovedFromOldSpaceDueToReallocation()
+ {
+ size_t result = 0;
+ std::swap(m_bytesRemovedFromOldSpaceDueToReallocation, result);
+ return result;
+ }
+
private:
static bool isOversize(size_t);
@@ -91,34 +98,46 @@ private:
CopiedBlock* allocateBlockForCopyingPhase();
void doneFillingBlock(CopiedBlock*, CopiedBlock**);
- void recycleEvacuatedBlock(CopiedBlock*);
+ void recycleEvacuatedBlock(CopiedBlock*, HeapOperation collectionType);
void recycleBorrowedBlock(CopiedBlock*);
Heap* m_heap;
CopiedAllocator m_allocator;
- TinyBloomFilter m_blockFilter;
HashSet<CopiedBlock*> m_blockSet;
- SpinLock m_toSpaceLock;
+ Lock m_toSpaceLock;
+
+ struct CopiedGeneration {
+ CopiedGeneration()
+ : toSpace(0)
+ , fromSpace(0)
+ {
+ }
+
+ DoublyLinkedList<CopiedBlock>* toSpace;
+ DoublyLinkedList<CopiedBlock>* fromSpace;
+
+ DoublyLinkedList<CopiedBlock> blocks1;
+ DoublyLinkedList<CopiedBlock> blocks2;
+ DoublyLinkedList<CopiedBlock> oversizeBlocks;
+
+ TinyBloomFilter blockFilter;
+ };
+
+ CopiedGeneration m_oldGen;
+ CopiedGeneration m_newGen;
- DoublyLinkedList<CopiedBlock>* m_toSpace;
- DoublyLinkedList<CopiedBlock>* m_fromSpace;
-
- DoublyLinkedList<CopiedBlock> m_blocks1;
- DoublyLinkedList<CopiedBlock> m_blocks2;
- DoublyLinkedList<CopiedBlock> m_oversizeBlocks;
-
bool m_inCopyingPhase;
bool m_shouldDoCopyPhase;
- Mutex m_loanedBlocksLock;
- ThreadCondition m_loanedBlocksCondition;
+ Lock m_loanedBlocksLock;
size_t m_numberOfLoanedBlocks;
+
+ size_t m_bytesRemovedFromOldSpaceDueToReallocation;
static const size_t s_maxAllocationSize = CopiedBlock::blockSize / 2;
- static const size_t s_initialBlockNum = 16;
static const size_t s_blockMask = ~(CopiedBlock::blockSize - 1);
};
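
The CopiedGeneration struct above keeps the classic semispace arrangement per generation: two backing lists (blocks1/blocks2) and two role pointers (toSpace/fromSpace) that are simply swapped when that generation is collected, so no blocks move just to change roles. A hedged, self-contained model of that layout, with names invented for illustration.

#include <list>
#include <utility>

struct Block {};

// Illustrative model of CopiedSpace's per-generation semispaces.
struct GenerationSketch {
    std::list<Block*> blocks1;
    std::list<Block*> blocks2;
    std::list<Block*>* toSpace = &blocks1;
    std::list<Block*>* fromSpace = &blocks2;

    // Starting a collection of this generation swaps the roles of the two lists;
    // survivors are later pushed back into the (previously drained) to-space.
    void startCollection() { std::swap(toSpace, fromSpace); }
};
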
diff --git a/Source/JavaScriptCore/heap/CopiedSpaceInlines.h b/Source/JavaScriptCore/heap/CopiedSpaceInlines.h
index 47f2414f3..6509b0739 100644
--- a/Source/JavaScriptCore/heap/CopiedSpaceInlines.h
+++ b/Source/JavaScriptCore/heap/CopiedSpaceInlines.h
@@ -29,15 +29,14 @@
#include "CopiedBlock.h"
#include "CopiedSpace.h"
#include "Heap.h"
-#include "HeapBlock.h"
#include "VM.h"
-#include <wtf/CheckedBoolean.h>
namespace JSC {
inline bool CopiedSpace::contains(CopiedBlock* block)
{
- return !m_blockFilter.ruleOut(reinterpret_cast<Bits>(block)) && m_blockSet.contains(block);
+ return (!m_newGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block)) || !m_oldGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block)))
+ && m_blockSet.contains(block);
}
inline bool CopiedSpace::contains(void* ptr, CopiedBlock*& result)
@@ -92,40 +91,41 @@ inline void CopiedSpace::pinIfNecessary(void* opaquePointer)
pin(block);
}
-inline void CopiedSpace::recycleEvacuatedBlock(CopiedBlock* block)
+inline void CopiedSpace::recycleEvacuatedBlock(CopiedBlock* block, HeapOperation collectionType)
{
ASSERT(block);
ASSERT(block->canBeRecycled());
ASSERT(!block->m_isPinned);
{
- SpinLockHolder locker(&m_toSpaceLock);
+ LockHolder locker(&m_toSpaceLock);
m_blockSet.remove(block);
- m_fromSpace->remove(block);
+ if (collectionType == EdenCollection)
+ m_newGen.fromSpace->remove(block);
+ else
+ m_oldGen.fromSpace->remove(block);
}
- m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));
+ CopiedBlock::destroy(*heap(), block);
}
inline void CopiedSpace::recycleBorrowedBlock(CopiedBlock* block)
{
- m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));
+ CopiedBlock::destroy(*heap(), block);
{
- MutexLocker locker(m_loanedBlocksLock);
+ LockHolder locker(m_loanedBlocksLock);
ASSERT(m_numberOfLoanedBlocks > 0);
ASSERT(m_inCopyingPhase);
m_numberOfLoanedBlocks--;
- if (!m_numberOfLoanedBlocks)
- m_loanedBlocksCondition.signal();
}
}
inline CopiedBlock* CopiedSpace::allocateBlockForCopyingPhase()
{
ASSERT(m_inCopyingPhase);
- CopiedBlock* block = CopiedBlock::createNoZeroFill(m_heap->blockAllocator().allocate<CopiedBlock>());
+ CopiedBlock* block = CopiedBlock::createNoZeroFill(*m_heap);
{
- MutexLocker locker(m_loanedBlocksLock);
+ LockHolder locker(m_loanedBlocksLock);
m_numberOfLoanedBlocks++;
}
@@ -135,15 +135,14 @@ inline CopiedBlock* CopiedSpace::allocateBlockForCopyingPhase()
inline void CopiedSpace::allocateBlock()
{
- if (m_heap->shouldCollect())
- m_heap->collect(Heap::DoNotSweep);
+ m_heap->collectIfNecessaryOrDefer();
m_allocator.resetCurrentBlock();
- CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocate<CopiedBlock>());
+ CopiedBlock* block = CopiedBlock::create(*m_heap);
- m_toSpace->push(block);
- m_blockFilter.add(reinterpret_cast<Bits>(block));
+ m_newGen.toSpace->push(block);
+ m_newGen.blockFilter.add(reinterpret_cast<Bits>(block));
m_blockSet.add(block);
m_allocator.setCurrentBlock(block);
}
@@ -151,6 +150,7 @@ inline void CopiedSpace::allocateBlock()
inline CheckedBoolean CopiedSpace::tryAllocate(size_t bytes, void** outPtr)
{
ASSERT(!m_heap->vm()->isInitializingObject());
+ ASSERT(bytes);
if (!m_allocator.tryAllocate(bytes, outPtr))
return tryAllocateSlowCase(bytes, outPtr);
@@ -174,7 +174,85 @@ inline CopiedBlock* CopiedSpace::blockFor(void* ptr)
return reinterpret_cast<CopiedBlock*>(reinterpret_cast<size_t>(ptr) & s_blockMask);
}
+template <HeapOperation collectionType>
+inline void CopiedSpace::startedCopying()
+{
+ DoublyLinkedList<CopiedBlock>* fromSpace;
+ DoublyLinkedList<CopiedBlock>* oversizeBlocks;
+ TinyBloomFilter* blockFilter;
+ if (collectionType == FullCollection) {
+ ASSERT(m_oldGen.fromSpace->isEmpty());
+ ASSERT(m_newGen.fromSpace->isEmpty());
+
+ m_oldGen.toSpace->append(*m_newGen.toSpace);
+ m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks);
+
+ ASSERT(m_newGen.toSpace->isEmpty());
+ ASSERT(m_newGen.fromSpace->isEmpty());
+ ASSERT(m_newGen.oversizeBlocks.isEmpty());
+
+ std::swap(m_oldGen.fromSpace, m_oldGen.toSpace);
+ fromSpace = m_oldGen.fromSpace;
+ oversizeBlocks = &m_oldGen.oversizeBlocks;
+ blockFilter = &m_oldGen.blockFilter;
+ } else {
+ std::swap(m_newGen.fromSpace, m_newGen.toSpace);
+ fromSpace = m_newGen.fromSpace;
+ oversizeBlocks = &m_newGen.oversizeBlocks;
+ blockFilter = &m_newGen.blockFilter;
+ }
+
+ blockFilter->reset();
+ m_allocator.resetCurrentBlock();
+
+ CopiedBlock* next = 0;
+ size_t totalLiveBytes = 0;
+ size_t totalUsableBytes = 0;
+ for (CopiedBlock* block = fromSpace->head(); block; block = next) {
+ next = block->next();
+ if (!block->isPinned() && block->canBeRecycled()) {
+ recycleEvacuatedBlock(block, collectionType);
+ continue;
+ }
+ ASSERT(block->liveBytes() <= CopiedBlock::blockSize);
+ totalLiveBytes += block->liveBytes();
+ totalUsableBytes += block->payloadCapacity();
+ block->didPromote();
+ }
+
+ CopiedBlock* block = oversizeBlocks->head();
+ while (block) {
+ CopiedBlock* next = block->next();
+ if (block->isPinned()) {
+ blockFilter->add(reinterpret_cast<Bits>(block));
+ totalLiveBytes += block->payloadCapacity();
+ totalUsableBytes += block->payloadCapacity();
+ block->didPromote();
+ } else {
+ oversizeBlocks->remove(block);
+ m_blockSet.remove(block);
+ CopiedBlock::destroy(*heap(), block);
+ }
+ block = next;
+ }
+
+ double markedSpaceBytes = m_heap->objectSpace().capacity();
+ double totalUtilization = static_cast<double>(totalLiveBytes + markedSpaceBytes) / static_cast<double>(totalUsableBytes + markedSpaceBytes);
+ m_shouldDoCopyPhase = m_heap->operationInProgress() == EdenCollection || totalUtilization <= Options::minHeapUtilization();
+ if (!m_shouldDoCopyPhase) {
+ if (Options::logGC())
+ dataLog("Skipped copying, ");
+ return;
+ }
+
+ if (Options::logGC())
+ dataLogF("Did copy, ");
+ ASSERT(m_shouldDoCopyPhase);
+ ASSERT(!m_numberOfLoanedBlocks);
+ ASSERT(!m_inCopyingPhase);
+ m_inCopyingPhase = true;
+}
+
} // namespace JSC
#endif // CopiedSpaceInlines_h
-
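
The copy-phase heuristic in startedCopying() above compares a utilization ratio, (totalLiveBytes + markedSpaceBytes) / (totalUsableBytes + markedSpaceBytes), against Options::minHeapUtilization(); Eden collections always copy, while full collections copy only when utilization falls to or below the threshold. A small worked example with made-up numbers; the 0.5 threshold is only a stand-in for the runtime option.

#include <cstdio>

// Worked example of the utilization check in startedCopying() above. The 0.5
// threshold is a stand-in for Options::minHeapUtilization(), a runtime option.
int main()
{
    double totalLiveBytes = 6.0 * 1024 * 1024;    // live data in the evacuated blocks
    double totalUsableBytes = 24.0 * 1024 * 1024; // payload capacity of those blocks
    double markedSpaceBytes = 16.0 * 1024 * 1024; // capacity of the marked (non-copied) space

    double utilization = (totalLiveBytes + markedSpaceBytes)
        / (totalUsableBytes + markedSpaceBytes);

    // (6 + 16) / (24 + 16) = 0.55: above a 0.5 threshold, so a full collection
    // would skip the copy phase, while an Eden collection copies unconditionally.
    std::printf("utilization = %.2f\n", utilization);
    return 0;
}
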
diff --git a/Source/JavaScriptCore/heap/CopyBarrier.h b/Source/JavaScriptCore/heap/CopyBarrier.h
new file mode 100644
index 000000000..123eeb8af
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CopyBarrier.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CopyBarrier_h
+#define CopyBarrier_h
+
+#include "Heap.h"
+
+namespace JSC {
+
+enum class CopyState {
+ // The backing store is not planned to get copied in this epoch. If you keep a pointer to the backing
+ // store on the stack, it will not get copied. If you don't keep it on the stack, it may get copied
+ // starting at the next handshake (that is, it may transition from ToSpace to CopyPlanned, but
+ // CopyPlanned means ToSpace prior to the handshake that starts the copy phase).
+ ToSpace,
+
+ // The marking phase has selected this backing store to be copied. If we are not yet in the copying
+ // phase, this backing store is still in to-space. All that is needed in such a case is to mask off the
+ // low bits. If we are in the copying phase, this means that the object points to from-space. The
+ // barrier should first copy the object - or wait for copying to finish - before using the object.
+ CopyPlanned,
+
+ // The object is being copied right now. Anyone wanting to use the object must wait for the object to
+ // finish being copied. Notifications about copying use the ParkingLot combined with these bits. If the
+ // state is CopyingAndWaiting, then when the copying finishes, whatever thread was doing it will
+ // unparkAll() on the address of the CopyBarrierBase. So, to wait for copying to finish, CAS this to
+ // CopyingAndWaiting and then parkConditionally on the barrier address.
+ Copying,
+
+ // The object is being copied right now, and there are one or more threads parked. Those threads want
+ // to be unparked when copying is done. So, whichever thread does the copying needs to call unparkAll()
+ // on the barrier address after copying is done.
+ CopyingAndWaiting
+};
+
+class CopyBarrierBase {
+public:
+ static const unsigned spaceBits = 3;
+
+ CopyBarrierBase()
+ : m_value(nullptr)
+ {
+ }
+
+ bool operator!() const { return !m_value; }
+
+ explicit operator bool() const { return m_value; }
+
+ void* getWithoutBarrier() const
+ {
+ return m_value;
+ }
+
+ // Use this version of get() if you only want to execute the barrier slow path if some condition
+ // holds, and you only want to evaluate that condition after first checking the barrier's
+    // state. Usually, you just want to use get().
+ template<typename Functor>
+ void* getPredicated(const JSCell* owner, const Functor& functor) const
+ {
+ void* result = m_value;
+ if (UNLIKELY(bitwise_cast<uintptr_t>(result) & spaceBits)) {
+ if (functor())
+ return Heap::copyBarrier(owner, m_value);
+ }
+ return result;
+ }
+
+ // When we are in the concurrent copying phase, this method may lock the barrier object (i.e. the field
+ // pointing to copied space) and call directly into the owning object's copyBackingStore() method.
+ void* get(const JSCell* owner) const
+ {
+ return getPredicated(owner, [] () -> bool { return true; });
+ }
+
+ CopyState copyState() const
+ {
+ return static_cast<CopyState>(bitwise_cast<uintptr_t>(m_value) & spaceBits);
+ }
+
+ // This only works when you know that there is nobody else concurrently messing with this CopyBarrier.
+ // That's hard to guarantee, though there are a few unusual places where this ends up being safe.
+ // Usually you want to use CopyBarrier::weakCAS().
+ void setCopyState(CopyState copyState)
+ {
+ WTF::storeStoreFence();
+ uintptr_t value = bitwise_cast<uintptr_t>(m_value);
+ value &= ~static_cast<uintptr_t>(spaceBits);
+ value |= static_cast<uintptr_t>(copyState);
+ m_value = bitwise_cast<void*>(value);
+ }
+
+ void clear() { m_value = nullptr; }
+
+protected:
+ CopyBarrierBase(VM& vm, const JSCell* owner, void* value)
+ {
+ this->set(vm, owner, value);
+ }
+
+ void set(VM& vm, const JSCell* owner, void* value)
+ {
+ this->m_value = value;
+ vm.heap.writeBarrier(owner);
+ }
+
+ void setWithoutBarrier(void* value)
+ {
+ this->m_value = value;
+ }
+
+ bool weakCASWithoutBarrier(
+ void* oldPointer, CopyState oldCopyState, void* newPointer, CopyState newCopyState)
+ {
+ uintptr_t oldValue = bitwise_cast<uintptr_t>(oldPointer) | static_cast<uintptr_t>(oldCopyState);
+ uintptr_t newValue = bitwise_cast<uintptr_t>(newPointer) | static_cast<uintptr_t>(newCopyState);
+ return WTF::weakCompareAndSwap(
+ &m_value, bitwise_cast<void*>(oldValue), bitwise_cast<void*>(newValue));
+ }
+
+private:
+ mutable void* m_value;
+};
+
+template <typename T>
+class CopyBarrier : public CopyBarrierBase {
+public:
+ CopyBarrier()
+ {
+ }
+
+ CopyBarrier(VM& vm, const JSCell* owner, T& value)
+ : CopyBarrierBase(vm, owner, &value)
+ {
+ }
+
+ CopyBarrier(VM& vm, const JSCell* owner, T* value)
+ : CopyBarrierBase(vm, owner, value)
+ {
+ }
+
+ T* getWithoutBarrier() const
+ {
+ return bitwise_cast<T*>(CopyBarrierBase::getWithoutBarrier());
+ }
+
+ T* get(const JSCell* owner) const
+ {
+ return bitwise_cast<T*>(CopyBarrierBase::get(owner));
+ }
+
+ template<typename Functor>
+ T* getPredicated(const JSCell* owner, const Functor& functor) const
+ {
+ return bitwise_cast<T*>(CopyBarrierBase::getPredicated(owner, functor));
+ }
+
+ void set(VM& vm, const JSCell* owner, T* value)
+ {
+ CopyBarrierBase::set(vm, owner, value);
+ }
+
+ void setWithoutBarrier(T* value)
+ {
+ CopyBarrierBase::setWithoutBarrier(value);
+ }
+
+ bool weakCASWithoutBarrier(T* oldPointer, CopyState oldCopyState, T* newPointer, CopyState newCopyState)
+ {
+ return CopyBarrierBase::weakCASWithoutBarrier(oldPointer, oldCopyState, newPointer, newCopyState);
+ }
+};
+
+} // namespace JSC
+
+#endif // CopyBarrier_h
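
CopyBarrierBase above stores a CopyState in the low three bits of the backing-store pointer (spaceBits), relying on the store being at least 8-byte aligned, and transitions states with a weak CAS. Below is a standalone sketch of that tagging scheme using std::atomic rather than WTF primitives; it models the idea, not the JSC implementation.

#include <atomic>
#include <cassert>
#include <cstdint>

// Sketch of the low-bit state tagging used by CopyBarrierBase: backing stores are
// assumed to be at least 8-byte aligned, so the low three bits can hold a state.
enum class State : std::uintptr_t { ToSpace = 0, CopyPlanned = 1, Copying = 2, CopyingAndWaiting = 3 };

class TaggedBarrier {
public:
    void set(void* pointer, State state)
    {
        assert(!(reinterpret_cast<std::uintptr_t>(pointer) & stateMask)); // alignment keeps low bits free
        m_value.store(reinterpret_cast<std::uintptr_t>(pointer) | static_cast<std::uintptr_t>(state));
    }

    void* pointer() const { return reinterpret_cast<void*>(m_value.load() & ~stateMask); }
    State state() const { return static_cast<State>(m_value.load() & stateMask); }

    // Analogue of weakCASWithoutBarrier(): move from one (pointer, state) pair to
    // another only if no other thread changed the field in the meantime.
    bool tryTransition(void* oldPointer, State oldState, void* newPointer, State newState)
    {
        std::uintptr_t expected = reinterpret_cast<std::uintptr_t>(oldPointer) | static_cast<std::uintptr_t>(oldState);
        std::uintptr_t desired = reinterpret_cast<std::uintptr_t>(newPointer) | static_cast<std::uintptr_t>(newState);
        return m_value.compare_exchange_weak(expected, desired);
    }

private:
    static const std::uintptr_t stateMask = 7;
    std::atomic<std::uintptr_t> m_value { 0 };
};
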
diff --git a/Source/JavaScriptCore/heap/CopyToken.h b/Source/JavaScriptCore/heap/CopyToken.h
new file mode 100644
index 000000000..e8f810905
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CopyToken.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CopyToken_h
+#define CopyToken_h
+
+namespace JSC {
+
+enum CopyToken {
+ ButterflyCopyToken,
+ TypedArrayVectorCopyToken,
+ MapBackingStoreCopyToken,
+ DirectArgumentsOverridesCopyToken
+};
+
+} // namespace JSC
+
+#endif // CopyToken_h
diff --git a/Source/JavaScriptCore/heap/CopyVisitor.cpp b/Source/JavaScriptCore/heap/CopyVisitor.cpp
index 2e2172f76..2e7dd14dc 100644
--- a/Source/JavaScriptCore/heap/CopyVisitor.cpp
+++ b/Source/JavaScriptCore/heap/CopyVisitor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,41 +26,26 @@
#include "config.h"
#include "CopyVisitor.h"
+#include "CopiedSpaceInlines.h"
#include "CopyVisitorInlines.h"
-#include "CopyWorkList.h"
-#include "GCThreadSharedData.h"
-#include "JSCell.h"
-#include "JSCellInlines.h"
-#include "JSObject.h"
-#include <wtf/Threading.h>
namespace JSC {
-CopyVisitor::CopyVisitor(GCThreadSharedData& shared)
- : m_shared(shared)
+CopyVisitor::CopyVisitor(Heap& heap)
+ : m_heap(heap)
{
+ ASSERT(!m_copiedAllocator.isValid());
+ CopiedBlock* block = nullptr;
+ m_heap.m_storageSpace.doneFillingBlock(nullptr, &block);
+ m_copiedAllocator.setCurrentBlock(block);
}
-void CopyVisitor::copyFromShared()
+CopyVisitor::~CopyVisitor()
{
- size_t next, end;
- m_shared.getNextBlocksToCopy(next, end);
- while (next < end) {
- for (; next < end; ++next) {
- CopiedBlock* block = m_shared.m_blocksToCopy[next];
- if (!block->hasWorkList())
- continue;
-
- CopyWorkList& workList = block->workList();
- for (CopyWorkList::iterator it = workList.begin(); it != workList.end(); ++it)
- visitCell(*it);
-
- ASSERT(!block->liveBytes());
- m_shared.m_copiedSpace->recycleEvacuatedBlock(block);
- }
- m_shared.getNextBlocksToCopy(next, end);
- }
- ASSERT(next == end);
+ if (m_copiedAllocator.isValid())
+ m_heap.m_storageSpace.doneFillingBlock(m_copiedAllocator.resetCurrentBlock(), nullptr);
+
+ WTF::releaseFastMallocFreeMemoryForThisThread();
}
} // namespace JSC
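
The CopyVisitor constructor and destructor above turn block borrowing into RAII: construction borrows a block from the CopiedSpace via doneFillingBlock(nullptr, &block), and destruction returns whatever was being filled. A tiny sketch of the same borrow/return shape with invented types; BlockSource and its members are illustrative, not JSC API.

// Acquire a block on construction, hand back whatever was being filled on destruction.
struct BlockSource {
    int* borrow() { return new int[128](); }      // stands in for borrowing a CopiedBlock
    void giveBack(int* block) { delete[] block; } // stands in for doneFillingBlock()
};

class VisitorSketch {
public:
    explicit VisitorSketch(BlockSource& source)
        : m_source(source)
        , m_block(source.borrow())
    {
    }

    ~VisitorSketch()
    {
        if (m_block)
            m_source.giveBack(m_block);
    }

private:
    BlockSource& m_source;
    int* m_block;
};
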
diff --git a/Source/JavaScriptCore/heap/CopyVisitor.h b/Source/JavaScriptCore/heap/CopyVisitor.h
index da92ba5b5..d0c255bb8 100644
--- a/Source/JavaScriptCore/heap/CopyVisitor.h
+++ b/Source/JavaScriptCore/heap/CopyVisitor.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,21 +27,20 @@
#define CopyVisitor_h
#include "CopiedSpace.h"
+#include <wtf/Noncopyable.h>
namespace JSC {
-class GCThreadSharedData;
-class JSCell;
+class Heap;
class CopyVisitor {
+ WTF_MAKE_NONCOPYABLE(CopyVisitor);
public:
- CopyVisitor(GCThreadSharedData&);
+ CopyVisitor(Heap&);
+ ~CopyVisitor();
void copyFromShared();
- void startCopying();
- void doneCopying();
-
// Low-level API for copying, appropriate for cases where the object's heap references
// are discontiguous or if the object occurs frequently enough that you need to focus on
// performance. Use this with care as it is easy to shoot yourself in the foot.
@@ -51,9 +50,9 @@ public:
private:
void* allocateNewSpaceSlow(size_t);
- void visitCell(JSCell*);
+ void visitItem(CopyWorklistItem);
- GCThreadSharedData& m_shared;
+ Heap& m_heap;
CopiedAllocator m_copiedAllocator;
};
diff --git a/Source/JavaScriptCore/heap/CopyVisitorInlines.h b/Source/JavaScriptCore/heap/CopyVisitorInlines.h
index 4e087b8db..70cc67298 100644
--- a/Source/JavaScriptCore/heap/CopyVisitorInlines.h
+++ b/Source/JavaScriptCore/heap/CopyVisitorInlines.h
@@ -26,22 +26,15 @@
#ifndef CopyVisitorInlines_h
#define CopyVisitorInlines_h
-#include "ClassInfo.h"
#include "CopyVisitor.h"
-#include "GCThreadSharedData.h"
-#include "JSCell.h"
-#include "JSDestructibleObject.h"
+#include "Heap.h"
namespace JSC {
-inline void CopyVisitor::visitCell(JSCell* cell)
-{
- ASSERT(cell->structure()->classInfo()->methodTable.copyBackingStore == JSObject::copyBackingStore);
- JSObject::copyBackingStore(cell, *this);
-}
-
inline bool CopyVisitor::checkIfShouldCopy(void* oldPtr)
{
+ if (!oldPtr)
+ return false;
CopiedBlock* block = CopiedSpace::blockFor(oldPtr);
if (block->isOversize() || block->isPinned())
return false;
@@ -51,7 +44,7 @@ inline bool CopyVisitor::checkIfShouldCopy(void* oldPtr)
inline void* CopyVisitor::allocateNewSpace(size_t bytes)
{
void* result = 0; // Compilers don't realize that this will be assigned.
- if (LIKELY(m_copiedAllocator.tryAllocate(bytes, &result)))
+ if (LIKELY(m_copiedAllocator.tryAllocateDuringCopying(bytes, &result)))
return result;
result = allocateNewSpaceSlow(bytes);
@@ -62,31 +55,15 @@ inline void* CopyVisitor::allocateNewSpace(size_t bytes)
inline void* CopyVisitor::allocateNewSpaceSlow(size_t bytes)
{
CopiedBlock* newBlock = 0;
- m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock(), &newBlock);
+ m_heap.m_storageSpace.doneFillingBlock(m_copiedAllocator.resetCurrentBlock(), &newBlock);
m_copiedAllocator.setCurrentBlock(newBlock);
void* result = 0;
- CheckedBoolean didSucceed = m_copiedAllocator.tryAllocate(bytes, &result);
+ CheckedBoolean didSucceed = m_copiedAllocator.tryAllocateDuringCopying(bytes, &result);
ASSERT(didSucceed);
return result;
}
-inline void CopyVisitor::startCopying()
-{
- ASSERT(!m_copiedAllocator.isValid());
- CopiedBlock* block = 0;
- m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock(), &block);
- m_copiedAllocator.setCurrentBlock(block);
-}
-
-inline void CopyVisitor::doneCopying()
-{
- if (!m_copiedAllocator.isValid())
- return;
-
- m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock(), 0);
-}
-
inline void CopyVisitor::didCopy(void* ptr, size_t bytes)
{
CopiedBlock* block = CopiedSpace::blockFor(ptr);
@@ -94,10 +71,8 @@ inline void CopyVisitor::didCopy(void* ptr, size_t bytes)
ASSERT(!block->isPinned());
block->didEvacuateBytes(bytes);
-
}
} // namespace JSC
#endif // CopyVisitorInlines_h
-
diff --git a/Source/JavaScriptCore/heap/CopyWorkList.h b/Source/JavaScriptCore/heap/CopyWorkList.h
index 164e2ddce..f1921ce6a 100644
--- a/Source/JavaScriptCore/heap/CopyWorkList.h
+++ b/Source/JavaScriptCore/heap/CopyWorkList.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,51 +26,85 @@
#ifndef CopyWorkList_h
#define CopyWorkList_h
-#include <wtf/Vector.h>
+#include "CopyToken.h"
+#include <wtf/DoublyLinkedList.h>
namespace JSC {
class JSCell;
-class CopyWorkListSegment : public HeapBlock<CopyWorkListSegment> {
+class CopyWorklistItem {
public:
- static CopyWorkListSegment* create(DeadBlock* block)
+ CopyWorklistItem()
+ : m_value(0)
{
- return new (NotNull, block) CopyWorkListSegment(block->region());
+ }
+
+ CopyWorklistItem(JSCell* cell, CopyToken token)
+ : m_value(bitwise_cast<uintptr_t>(cell) | static_cast<uintptr_t>(token))
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(cell) & static_cast<uintptr_t>(mask)));
+ ASSERT(static_cast<uintptr_t>(token) <= mask);
+ }
+
+ JSCell* cell() const { return bitwise_cast<JSCell*>(m_value & ~static_cast<uintptr_t>(mask)); }
+ CopyToken token() const { return static_cast<CopyToken>(m_value & mask); }
+
+private:
+ static const unsigned requiredAlignment = 8;
+ static const unsigned mask = requiredAlignment - 1;
+
+ uintptr_t m_value;
+};
+
+class CopyWorkListSegment : public DoublyLinkedListNode<CopyWorkListSegment> {
+ friend class WTF::DoublyLinkedListNode<CopyWorkListSegment>;
+public:
+ static CopyWorkListSegment* create()
+ {
+ return new (NotNull, fastMalloc(blockSize)) CopyWorkListSegment();
+ }
+
+ static void destroy(CopyWorkListSegment* segment)
+ {
+ segment->~CopyWorkListSegment();
+ fastFree(segment);
}
size_t size() { return m_size; }
bool isFull() { return reinterpret_cast<char*>(&data()[size()]) >= endOfBlock(); }
- JSCell* get(size_t index) { return data()[index]; }
+ CopyWorklistItem get(size_t index) { return data()[index]; }
- void append(JSCell* cell)
+ void append(CopyWorklistItem item)
{
ASSERT(!isFull());
- data()[m_size] = cell;
+ data()[m_size] = item;
m_size += 1;
}
static const size_t blockSize = 512;
private:
- CopyWorkListSegment(Region* region)
- : HeapBlock<CopyWorkListSegment>(region)
+ CopyWorkListSegment()
+ : DoublyLinkedListNode<CopyWorkListSegment>()
, m_size(0)
{
}
- JSCell** data() { return reinterpret_cast<JSCell**>(this + 1); }
+ CopyWorklistItem* data() { return reinterpret_cast<CopyWorklistItem*>(this + 1); }
char* endOfBlock() { return reinterpret_cast<char*>(this) + blockSize; }
+ CopyWorkListSegment* m_prev;
+ CopyWorkListSegment* m_next;
size_t m_size;
};
class CopyWorkListIterator {
friend class CopyWorkList;
public:
- JSCell* get() { return m_currentSegment->get(m_currentIndex); }
- JSCell* operator*() { return get(); }
- JSCell* operator->() { return get(); }
+ CopyWorklistItem get() { return m_currentSegment->get(m_currentIndex); }
+ CopyWorklistItem operator*() { return get(); }
+ CopyWorklistItem operator->() { return get(); }
CopyWorkListIterator& operator++()
{
@@ -114,40 +148,39 @@ private:
};
class CopyWorkList {
+ WTF_MAKE_FAST_ALLOCATED;
public:
typedef CopyWorkListIterator iterator;
- CopyWorkList(BlockAllocator&);
+ CopyWorkList();
~CopyWorkList();
- void append(JSCell*);
+ void append(CopyWorklistItem);
iterator begin();
iterator end();
private:
DoublyLinkedList<CopyWorkListSegment> m_segments;
- BlockAllocator& m_blockAllocator;
};
-inline CopyWorkList::CopyWorkList(BlockAllocator& blockAllocator)
- : m_blockAllocator(blockAllocator)
+inline CopyWorkList::CopyWorkList()
{
}
inline CopyWorkList::~CopyWorkList()
{
while (!m_segments.isEmpty())
- m_blockAllocator.deallocate(CopyWorkListSegment::destroy(m_segments.removeHead()));
+ CopyWorkListSegment::destroy(m_segments.removeHead());
}
-inline void CopyWorkList::append(JSCell* cell)
+inline void CopyWorkList::append(CopyWorklistItem item)
{
if (m_segments.isEmpty() || m_segments.tail()->isFull())
- m_segments.append(CopyWorkListSegment::create(m_blockAllocator.allocate<CopyWorkListSegment>()));
+ m_segments.append(CopyWorkListSegment::create());
ASSERT(!m_segments.tail()->isFull());
- m_segments.tail()->append(cell);
+ m_segments.tail()->append(item);
}
inline CopyWorkList::iterator CopyWorkList::begin()
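
CopyWorkListSegment above is placement-new'd at the start of a fixed 512-byte allocation, with the worklist items stored in the bytes immediately after the header and isFull() derived from the block size. A self-contained sketch of that layout; plain malloc stands in for fastMalloc and the names are illustrative.

#include <cstddef>
#include <cstdlib>
#include <new>

// Sketch of CopyWorkListSegment's layout: a header placement-new'd at the start of
// a fixed-size allocation, with the items stored in the bytes right after it.
struct SegmentSketch {
    static const std::size_t blockSize = 512;

    static SegmentSketch* create() { return new (std::malloc(blockSize)) SegmentSketch(); }
    static void destroy(SegmentSketch* segment) { segment->~SegmentSketch(); std::free(segment); }

    void** items() { return reinterpret_cast<void**>(this + 1); }
    char* endOfBlock() { return reinterpret_cast<char*>(this) + blockSize; }
    bool isFull() { return reinterpret_cast<char*>(items() + m_size) >= endOfBlock(); }

    // Callers check isFull() and start a new segment before appending to a full one.
    void append(void* item) { items()[m_size++] = item; }

    std::size_t m_size = 0;
};
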
diff --git a/Source/JavaScriptCore/heap/DFGCodeBlocks.cpp b/Source/JavaScriptCore/heap/DFGCodeBlocks.cpp
deleted file mode 100644
index e3cc75919..000000000
--- a/Source/JavaScriptCore/heap/DFGCodeBlocks.cpp
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGCodeBlocks.h"
-
-#include "CodeBlock.h"
-#include "SlotVisitor.h"
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-#if ENABLE(DFG_JIT)
-
-DFGCodeBlocks::DFGCodeBlocks() { }
-
-DFGCodeBlocks::~DFGCodeBlocks()
-{
- Vector<OwnPtr<CodeBlock>, 16> toRemove;
-
- for (HashSet<CodeBlock*>::iterator iter = m_set.begin(); iter != m_set.end(); ++iter) {
- if ((*iter)->m_dfgData->isJettisoned)
- toRemove.append(adoptPtr(*iter));
- }
-}
-
-void DFGCodeBlocks::jettison(PassOwnPtr<CodeBlock> codeBlockPtr)
-{
- // We don't want to delete it now; we just want its pointer.
- CodeBlock* codeBlock = codeBlockPtr.leakPtr();
-
- ASSERT(codeBlock);
- ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);
-
- // It should not have already been jettisoned.
- ASSERT(!codeBlock->m_dfgData->isJettisoned);
-
- // We should have this block already.
- ASSERT(m_set.find(codeBlock) != m_set.end());
-
- codeBlock->m_dfgData->isJettisoned = true;
-}
-
-void DFGCodeBlocks::clearMarks()
-{
- for (HashSet<CodeBlock*>::iterator iter = m_set.begin(); iter != m_set.end(); ++iter) {
- (*iter)->m_dfgData->mayBeExecuting = false;
- (*iter)->m_dfgData->visitAggregateHasBeenCalled = false;
- }
-}
-
-void DFGCodeBlocks::deleteUnmarkedJettisonedCodeBlocks()
-{
- Vector<OwnPtr<CodeBlock>, 16> toRemove;
-
- for (HashSet<CodeBlock*>::iterator iter = m_set.begin(); iter != m_set.end(); ++iter) {
- if ((*iter)->m_dfgData->isJettisoned && !(*iter)->m_dfgData->mayBeExecuting)
- toRemove.append(adoptPtr(*iter));
- }
-}
-
-void DFGCodeBlocks::traceMarkedCodeBlocks(SlotVisitor& visitor)
-{
- for (HashSet<CodeBlock*>::iterator iter = m_set.begin(); iter != m_set.end(); ++iter) {
- if ((*iter)->m_dfgData->mayBeExecuting)
- (*iter)->visitAggregate(visitor);
- }
-}
-
-#else // ENABLE(DFG_JIT)
-
-void DFGCodeBlocks::jettison(PassOwnPtr<CodeBlock>)
-{
-}
-
-#endif // ENABLE(DFG_JIT)
-
-} // namespace JSC
-
-
diff --git a/Source/JavaScriptCore/heap/DFGCodeBlocks.h b/Source/JavaScriptCore/heap/DFGCodeBlocks.h
deleted file mode 100644
index 06fd5ed3d..000000000
--- a/Source/JavaScriptCore/heap/DFGCodeBlocks.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGCodeBlocks_h
-#define DFGCodeBlocks_h
-
-#include <wtf/FastAllocBase.h>
-#include <wtf/HashSet.h>
-#include <wtf/PassOwnPtr.h>
-
-namespace JSC {
-
-class CodeBlock;
-class SlotVisitor;
-
-// DFGCodeBlocks notifies the garbage collector about optimized code blocks that
-// have different marking behavior depending on whether or not they are on the
-// stack, and that may be jettisoned. Jettisoning is the process of discarding
-// a code block after all calls to it have been unlinked. This class takes special
-// care to ensure that if there are still call frames that are using the code
-// block, then it should not be immediately deleted, but rather, it should be
-// deleted once we know that there are no longer any references to it from any
-// call frames. This class takes its name from the DFG compiler; only code blocks
-// compiled by the DFG need special marking behavior if they are on the stack, and
-// only those code blocks may be jettisoned.
-
-#if ENABLE(DFG_JIT)
-class DFGCodeBlocks {
- WTF_MAKE_FAST_ALLOCATED;
-
-public:
- DFGCodeBlocks();
- ~DFGCodeBlocks();
-
- // Inform the collector that a code block has been jettisoned form its
- // executable and should only be kept alive if there are call frames that use
- // it. This is typically called either from a recompilation trigger, or from
- // an unconditional finalizer associated with a CodeBlock that had weak
- // references, where some subset of those references were dead.
- void jettison(PassOwnPtr<CodeBlock>);
-
- // Clear all mark bits associated with DFG code blocks.
- void clearMarks();
-
- // Mark a pointer that may be a CodeBlock that belongs to the set of DFG code
- // blocks. This is defined inline in CodeBlock.h
- void mark(void* candidateCodeBlock);
-
- // Delete all jettisoned code blocks that have not been marked (i.e. are not referenced
- // from call frames).
- void deleteUnmarkedJettisonedCodeBlocks();
-
- // Trace all marked code blocks (i.e. are referenced from call frames). The CodeBlock
- // is free to make use of m_dfgData->isMarked and m_dfgData->isJettisoned.
- void traceMarkedCodeBlocks(SlotVisitor&);
-
-private:
- friend class CodeBlock;
-
- HashSet<CodeBlock*> m_set;
-};
-#else
-class DFGCodeBlocks {
- WTF_MAKE_FAST_ALLOCATED;
-
-public:
- void jettison(PassOwnPtr<CodeBlock>);
- void clearMarks() { }
- void mark(void*) { }
- void deleteUnmarkedJettisonedCodeBlocks() { }
- void traceMarkedCodeBlocks(SlotVisitor&) { }
-};
-#endif
-
-} // namespace JSC
-
-#endif
diff --git a/Source/JavaScriptCore/heap/DeferGC.cpp b/Source/JavaScriptCore/heap/DeferGC.cpp
new file mode 100644
index 000000000..dd66c6384
--- /dev/null
+++ b/Source/JavaScriptCore/heap/DeferGC.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DeferGC.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+#ifndef NDEBUG
+WTF::ThreadSpecificKey DisallowGC::s_isGCDisallowedOnCurrentThread = 0;
+#endif
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/DeferGC.h b/Source/JavaScriptCore/heap/DeferGC.h
new file mode 100644
index 000000000..d29eec854
--- /dev/null
+++ b/Source/JavaScriptCore/heap/DeferGC.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DeferGC_h
+#define DeferGC_h
+
+#include "Heap.h"
+#include <wtf/Noncopyable.h>
+#include <wtf/ThreadSpecific.h>
+
+namespace JSC {
+
+class DeferGC {
+ WTF_MAKE_NONCOPYABLE(DeferGC);
+public:
+ DeferGC(Heap& heap)
+ : m_heap(heap)
+ {
+ m_heap.incrementDeferralDepth();
+ }
+
+ ~DeferGC()
+ {
+ m_heap.decrementDeferralDepthAndGCIfNeeded();
+ }
+
+private:
+ Heap& m_heap;
+};
+
+class DeferGCForAWhile {
+ WTF_MAKE_NONCOPYABLE(DeferGCForAWhile);
+public:
+ DeferGCForAWhile(Heap& heap)
+ : m_heap(heap)
+ {
+ m_heap.incrementDeferralDepth();
+ }
+
+ ~DeferGCForAWhile()
+ {
+ m_heap.decrementDeferralDepth();
+ }
+
+private:
+ Heap& m_heap;
+};
+
+#ifndef NDEBUG
+class DisallowGC {
+ WTF_MAKE_NONCOPYABLE(DisallowGC);
+public:
+ DisallowGC()
+ {
+ WTF::threadSpecificSet(s_isGCDisallowedOnCurrentThread, reinterpret_cast<void*>(true));
+ }
+
+ ~DisallowGC()
+ {
+ WTF::threadSpecificSet(s_isGCDisallowedOnCurrentThread, reinterpret_cast<void*>(false));
+ }
+
+ static bool isGCDisallowedOnCurrentThread()
+ {
+ return !!WTF::threadSpecificGet(s_isGCDisallowedOnCurrentThread);
+ }
+ static void initialize()
+ {
+ WTF::threadSpecificKeyCreate(&s_isGCDisallowedOnCurrentThread, 0);
+ }
+
+ JS_EXPORT_PRIVATE static WTF::ThreadSpecificKey s_isGCDisallowedOnCurrentThread;
+};
+#endif // NDEBUG
+
+} // namespace JSC
+
+#endif // DeferGC_h
+
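
DeferGC above is a scope guard: while any guard is alive the deferral depth is non-zero and collections are postponed; when the last guard is destroyed, decrementDeferralDepthAndGCIfNeeded() may run the deferred collection (DeferGCForAWhile drops the depth without triggering one). A self-contained model of that mechanism, with MiniHeap and the helper names invented for illustration.

#include <cstdio>

// Model of the DeferGC mechanism: a deferral depth plus a "collect when the last
// guard leaves" flag. Not the JSC Heap, just the shape of the two calls above.
struct MiniHeap {
    unsigned deferralDepth = 0;
    bool collectionRequested = false;

    void requestCollection()
    {
        if (deferralDepth) {
            collectionRequested = true; // remember it; run when deferral ends
            return;
        }
        collect();
    }

    void collect() { std::puts("collecting"); collectionRequested = false; }

    void incrementDeferralDepth() { deferralDepth++; }
    void decrementDeferralDepthAndCollectIfNeeded()
    {
        if (!--deferralDepth && collectionRequested)
            collect();
    }
};

struct DeferGCSketch {
    explicit DeferGCSketch(MiniHeap& heap) : m_heap(heap) { m_heap.incrementDeferralDepth(); }
    ~DeferGCSketch() { m_heap.decrementDeferralDepthAndCollectIfNeeded(); }
    MiniHeap& m_heap;
};

int main()
{
    MiniHeap heap;
    {
        DeferGCSketch defer(heap);
        heap.requestCollection(); // deferred: nothing printed yet
    } // guard destroyed here: "collecting" is printed now
    return 0;
}
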
diff --git a/Source/JavaScriptCore/heap/EdenGCActivityCallback.cpp b/Source/JavaScriptCore/heap/EdenGCActivityCallback.cpp
new file mode 100644
index 000000000..1db8f40b6
--- /dev/null
+++ b/Source/JavaScriptCore/heap/EdenGCActivityCallback.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "EdenGCActivityCallback.h"
+
+#include "VM.h"
+
+namespace JSC {
+
+#if USE(CF) || USE(GLIB) || PLATFORM(QT)
+
+EdenGCActivityCallback::EdenGCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap)
+{
+}
+
+void EdenGCActivityCallback::doCollection()
+{
+ m_vm->heap.collect(EdenCollection);
+}
+
+double EdenGCActivityCallback::lastGCLength()
+{
+ return m_vm->heap.lastEdenGCLength();
+}
+
+double EdenGCActivityCallback::deathRate()
+{
+ Heap* heap = &m_vm->heap;
+ size_t sizeBefore = heap->sizeBeforeLastEdenCollection();
+ size_t sizeAfter = heap->sizeAfterLastEdenCollection();
+ if (!sizeBefore)
+ return 1.0;
+ if (sizeAfter > sizeBefore) {
+ // GC caused the heap to grow(!)
+        // This could happen if we visited more extra memory than was reported allocated.
+ // We don't return a negative death rate, since that would schedule the next GC in the past.
+ return 0;
+ }
+ return static_cast<double>(sizeBefore - sizeAfter) / static_cast<double>(sizeBefore);
+}
+
+double EdenGCActivityCallback::gcTimeSlice(size_t bytes)
+{
+ return std::min((static_cast<double>(bytes) / MB) * Options::percentCPUPerMBForEdenTimer(), Options::collectionTimerMaxPercentCPU());
+}
+
+#else
+
+EdenGCActivityCallback::EdenGCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap->vm())
+{
+}
+
+void EdenGCActivityCallback::doCollection()
+{
+}
+
+double EdenGCActivityCallback::lastGCLength()
+{
+ return 0;
+}
+
+double EdenGCActivityCallback::deathRate()
+{
+ return 0;
+}
+
+double EdenGCActivityCallback::gcTimeSlice(size_t)
+{
+ return 0;
+}
+
+#endif // USE(CF) || USE(GLIB) || PLATFORM(QT)
+
+} // namespace JSC
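
The Eden timer above derives its scheduling inputs from a death rate, (sizeBefore - sizeAfter) / sizeBefore, clamped to 0 when the heap grew, and from a per-megabyte CPU budget with an upper cap. A worked example with made-up numbers; the 2% per MB and 5% cap only stand in for Options::percentCPUPerMBForEdenTimer() and Options::collectionTimerMaxPercentCPU(), which are runtime options.

#include <algorithm>
#include <cstdio>

// Worked example of the deathRate()/gcTimeSlice() arithmetic above.
int main()
{
    double sizeBefore = 32.0 * 1024 * 1024; // heap size before the last Eden collection
    double sizeAfter = 8.0 * 1024 * 1024;   // heap size after it

    // (32 - 8) / 32 = 0.75; a shrink to zero would give 1.0, growth gives 0.
    double deathRate = sizeAfter > sizeBefore ? 0.0 : (sizeBefore - sizeAfter) / sizeBefore;

    const double MB = 1024.0 * 1024.0;
    double bytesAllocated = 10.0 * MB;
    double timeSlice = std::min((bytesAllocated / MB) * 0.02, 0.05); // 0.20 capped to 0.05

    std::printf("death rate %.2f, gc time slice %.2f\n", deathRate, timeSlice);
    return 0;
}
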
diff --git a/Source/JavaScriptCore/heap/EdenGCActivityCallback.h b/Source/JavaScriptCore/heap/EdenGCActivityCallback.h
new file mode 100644
index 000000000..214ab43dc
--- /dev/null
+++ b/Source/JavaScriptCore/heap/EdenGCActivityCallback.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EdenGCActivityCallback_h
+#define EdenGCActivityCallback_h
+
+#include "GCActivityCallback.h"
+
+namespace JSC {
+
+class JS_EXPORT_PRIVATE EdenGCActivityCallback : public GCActivityCallback {
+public:
+ EdenGCActivityCallback(Heap*);
+
+ virtual void doCollection() override;
+
+protected:
+#if USE(CF)
+ EdenGCActivityCallback(Heap* heap, CFRunLoopRef runLoop)
+ : GCActivityCallback(heap, runLoop)
+ {
+ }
+#endif
+
+ virtual double lastGCLength() override;
+ virtual double gcTimeSlice(size_t bytes) override;
+ virtual double deathRate() override;
+};
+
+inline RefPtr<GCActivityCallback> GCActivityCallback::createEdenTimer(Heap* heap)
+{
+ return s_shouldCreateGCTimer ? adoptRef(new EdenGCActivityCallback(heap)) : nullptr;
+}
+
+} // namespace JSC
+
+#endif // EdenGCActivityCallback_h
diff --git a/Source/JavaScriptCore/heap/FullGCActivityCallback.cpp b/Source/JavaScriptCore/heap/FullGCActivityCallback.cpp
new file mode 100644
index 000000000..d2161138f
--- /dev/null
+++ b/Source/JavaScriptCore/heap/FullGCActivityCallback.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FullGCActivityCallback.h"
+
+#include "VM.h"
+
+namespace JSC {
+
+#if USE(CF) || USE(GLIB) || PLATFORM(QT)
+
+#if !PLATFORM(IOS)
+const double pagingTimeOut = 0.1; // Time in seconds to allow opportunistic timer to iterate over all blocks to see if the Heap is paged out.
+#endif
+
+FullGCActivityCallback::FullGCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap)
+{
+}
+
+void FullGCActivityCallback::doCollection()
+{
+ Heap& heap = m_vm->heap;
+ m_didSyncGCRecently = false;
+
+#if !PLATFORM(IOS)
+ double startTime = WTF::monotonicallyIncreasingTime();
+ if (heap.isPagedOut(startTime + pagingTimeOut)) {
+ cancel();
+ heap.increaseLastFullGCLength(pagingTimeOut);
+ return;
+ }
+#endif
+
+ heap.collect(FullCollection);
+}
+
+double FullGCActivityCallback::lastGCLength()
+{
+ return m_vm->heap.lastFullGCLength();
+}
+
+double FullGCActivityCallback::deathRate()
+{
+ Heap* heap = &m_vm->heap;
+ size_t sizeBefore = heap->sizeBeforeLastFullCollection();
+ size_t sizeAfter = heap->sizeAfterLastFullCollection();
+ if (!sizeBefore)
+ return 1.0;
+ if (sizeAfter > sizeBefore) {
+ // GC caused the heap to grow(!)
+        // This could happen if we visited more extra memory than was reported allocated.
+ // We don't return a negative death rate, since that would schedule the next GC in the past.
+ return 0;
+ }
+ return static_cast<double>(sizeBefore - sizeAfter) / static_cast<double>(sizeBefore);
+}
+
+double FullGCActivityCallback::gcTimeSlice(size_t bytes)
+{
+ return std::min((static_cast<double>(bytes) / MB) * Options::percentCPUPerMBForFullTimer(), Options::collectionTimerMaxPercentCPU());
+}
+
+#else
+
+FullGCActivityCallback::FullGCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap)
+{
+}
+
+void FullGCActivityCallback::doCollection()
+{
+}
+
+double FullGCActivityCallback::lastGCLength()
+{
+ return 0;
+}
+
+double FullGCActivityCallback::deathRate()
+{
+ return 0;
+}
+
+double FullGCActivityCallback::gcTimeSlice(size_t)
+{
+ return 0;
+}
+
+#endif // USE(CF) || USE(GLIB) || PLATFORM(QT)
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/FullGCActivityCallback.h b/Source/JavaScriptCore/heap/FullGCActivityCallback.h
new file mode 100644
index 000000000..e727592e2
--- /dev/null
+++ b/Source/JavaScriptCore/heap/FullGCActivityCallback.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FullGCActivityCallback_h
+#define FullGCActivityCallback_h
+
+#include "GCActivityCallback.h"
+
+namespace JSC {
+
+class JS_EXPORT_PRIVATE FullGCActivityCallback : public GCActivityCallback {
+public:
+ FullGCActivityCallback(Heap*);
+
+ virtual void doCollection() override;
+
+ bool didSyncGCRecently() const { return m_didSyncGCRecently; }
+ void setDidSyncGCRecently() { m_didSyncGCRecently = true; }
+
+protected:
+#if USE(CF)
+ FullGCActivityCallback(Heap* heap, CFRunLoopRef runLoop)
+ : GCActivityCallback(heap, runLoop)
+ {
+ }
+#endif
+
+ virtual double lastGCLength() override;
+ virtual double gcTimeSlice(size_t bytes) override;
+ virtual double deathRate() override;
+
+ bool m_didSyncGCRecently { false };
+};
+
+inline RefPtr<FullGCActivityCallback> GCActivityCallback::createFullTimer(Heap* heap)
+{
+ return s_shouldCreateGCTimer ? adoptRef(new FullGCActivityCallback(heap)) : nullptr;
+}
+
+} // namespace JSC
+
+#endif // FullGCActivityCallback_h
diff --git a/Source/JavaScriptCore/heap/GCActivityCallback.cpp b/Source/JavaScriptCore/heap/GCActivityCallback.cpp
new file mode 100644
index 000000000..02dc0ef8b
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCActivityCallback.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GCActivityCallback.h"
+
+#include "Heap.h"
+#include "JSLock.h"
+#include "JSObject.h"
+#include "VM.h"
+
+#include <wtf/RetainPtr.h>
+#include <wtf/WTFThreadData.h>
+
+#if PLATFORM(EFL)
+#include <wtf/MainThread.h>
+#elif USE(GLIB) && !PLATFORM(QT)
+#include <glib.h>
+#endif
+
+namespace JSC {
+
+bool GCActivityCallback::s_shouldCreateGCTimer = true;
+
+#if USE(CF) || USE(GLIB) || PLATFORM(QT)
+
+const double timerSlop = 2.0; // Fudge factor to avoid performance cost of resetting timer.
+
+#if USE(CF)
+GCActivityCallback::GCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap->vm(), CFRunLoopGetCurrent())
+{
+}
+
+GCActivityCallback::GCActivityCallback(Heap* heap, CFRunLoopRef runLoop)
+ : GCActivityCallback(heap->vm(), runLoop)
+{
+}
+#elif PLATFORM(EFL)
+GCActivityCallback::GCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap->vm(), WTF::isMainThread())
+{
+}
+#elif PLATFORM(QT) || USE(GLIB)
+GCActivityCallback::GCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap->vm())
+{
+}
+#endif
+
+void GCActivityCallback::doWork()
+{
+ Heap* heap = &m_vm->heap;
+ if (!isEnabled())
+ return;
+
+ JSLockHolder locker(m_vm);
+ if (heap->isDeferred()) {
+ scheduleTimer(0);
+ return;
+ }
+
+ doCollection();
+}
+
+#if USE(CF)
+void GCActivityCallback::scheduleTimer(double newDelay)
+{
+ if (newDelay * timerSlop > m_delay)
+ return;
+ double delta = m_delay - newDelay;
+ m_delay = newDelay;
+ m_nextFireTime = WTF::currentTime() + newDelay;
+ CFRunLoopTimerSetNextFireDate(m_timer.get(), CFRunLoopTimerGetNextFireDate(m_timer.get()) - delta);
+}
+
+void GCActivityCallback::cancelTimer()
+{
+ m_delay = s_decade;
+ m_nextFireTime = 0;
+ CFRunLoopTimerSetNextFireDate(m_timer.get(), CFAbsoluteTimeGetCurrent() + s_decade);
+}
+#elif PLATFORM(EFL)
+void GCActivityCallback::scheduleTimer(double newDelay)
+{
+ if (newDelay * timerSlop > m_delay)
+ return;
+
+ stop();
+ m_delay = newDelay;
+
+ ASSERT(!m_timer);
+ m_timer = add(newDelay, this);
+}
+
+void GCActivityCallback::cancelTimer()
+{
+ m_delay = s_hour;
+ stop();
+}
+#elif PLATFORM(QT)
+void GCActivityCallback::scheduleTimer(double newDelay)
+{
+ if (newDelay * timerSlop > m_delay)
+ return;
+ m_delay = newDelay;
+ m_timer.start(newDelay * 1000, this);
+}
+
+void GCActivityCallback::cancelTimer()
+{
+ m_delay = s_hour;
+ m_timer.stop();
+}
+#elif USE(GLIB)
+void GCActivityCallback::scheduleTimer(double newDelay)
+{
+ ASSERT(newDelay >= 0);
+ if (m_delay != -1 && newDelay * timerSlop > m_delay)
+ return;
+
+ m_delay = newDelay;
+ if (!m_delay) {
+ g_source_set_ready_time(m_timer.get(), 0);
+ return;
+ }
+
+ auto delayDuration = std::chrono::duration<double>(m_delay);
+ auto safeDelayDuration = std::chrono::microseconds::max();
+ if (delayDuration < safeDelayDuration)
+ safeDelayDuration = std::chrono::duration_cast<std::chrono::microseconds>(delayDuration);
+ gint64 currentTime = g_get_monotonic_time();
+ gint64 targetTime = currentTime + std::min<gint64>(G_MAXINT64 - currentTime, safeDelayDuration.count());
+ ASSERT(targetTime >= currentTime);
+ g_source_set_ready_time(m_timer.get(), targetTime);
+}
+
+void GCActivityCallback::cancelTimer()
+{
+ m_delay = -1;
+ g_source_set_ready_time(m_timer.get(), -1);
+}
+#endif
+
+void GCActivityCallback::didAllocate(size_t bytes)
+{
+#if PLATFORM(EFL)
+ if (!isEnabled())
+ return;
+
+ ASSERT(WTF::isMainThread());
+#endif
+
+ // The first byte allocated in an allocation cycle will report 0 bytes to didAllocate.
+ // We pretend it's one byte so that we don't ignore this allocation entirely.
+ if (!bytes)
+ bytes = 1;
+ double bytesExpectedToReclaim = static_cast<double>(bytes) * deathRate();
+ double newDelay = lastGCLength() / gcTimeSlice(bytesExpectedToReclaim);
+ scheduleTimer(newDelay);
+}
+
+void GCActivityCallback::willCollect()
+{
+ cancelTimer();
+}
+
+void GCActivityCallback::cancel()
+{
+ cancelTimer();
+}
+
+#else
+
+GCActivityCallback::GCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap->vm())
+{
+}
+
+void GCActivityCallback::doWork()
+{
+}
+
+void GCActivityCallback::didAllocate(size_t)
+{
+}
+
+void GCActivityCallback::willCollect()
+{
+}
+
+void GCActivityCallback::cancel()
+{
+}
+
+#endif
+
+}
+
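
The scheduling arithmetic in didAllocate() above is easy to lose in the diff: the bytes expected to be reclaimed (allocation size times the last death rate) set a CPU budget via gcTimeSlice(), and the next timer delay is the last GC pause divided by that budget. Below is a standalone sketch of that computation; the option values and byte counts are illustrative assumptions, not values taken from this patch.

    // Standalone sketch of the delay computation in GCActivityCallback::didAllocate().
    // percentCPUPerMB and maxPercentCPU stand in for Options::percentCPUPerMBForFullTimer()
    // and Options::collectionTimerMaxPercentCPU(); the numbers are assumptions, not JSC's defaults.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main()
    {
        const double MB = 1024.0 * 1024.0;
        const double percentCPUPerMB = 0.0003;   // assumed stand-in for the Option
        const double maxPercentCPU = 0.05;       // assumed stand-in for the Option

        double lastGCLength = 0.01;              // seconds spent in the last full collection (example)
        double deathRate = 0.5;                  // fraction of the heap the last collection freed (example)
        size_t bytesAllocated = 8 * 1024 * 1024; // bytes reported to didAllocate() (example)

        // didAllocate(): the bytes we expect to reclaim scale the collector's CPU budget...
        double bytesExpectedToReclaim = static_cast<double>(bytesAllocated) * deathRate;
        double gcTimeSlice = std::min((bytesExpectedToReclaim / MB) * percentCPUPerMB, maxPercentCPU);
        // ...and the timer delay amortizes the last pause over that budget: 0.01 / 0.0012 ~= 8.3 s here.
        double newDelay = lastGCLength / gcTimeSlice;

        std::printf("next full GC scheduled in %.1f seconds\n", newDelay);
        return 0;
    }
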
diff --git a/Source/JavaScriptCore/heap/GCActivityCallback.h b/Source/JavaScriptCore/heap/GCActivityCallback.h
new file mode 100644
index 000000000..a99133cf3
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCActivityCallback.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCActivityCallback_h
+#define GCActivityCallback_h
+
+#include "HeapTimer.h"
+#include <wtf/RefPtr.h>
+
+#if USE(CF)
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+namespace JSC {
+
+class FullGCActivityCallback;
+class Heap;
+
+class JS_EXPORT_PRIVATE GCActivityCallback : public HeapTimer, public ThreadSafeRefCounted<GCActivityCallback> {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ static RefPtr<FullGCActivityCallback> createFullTimer(Heap*);
+ static RefPtr<GCActivityCallback> createEdenTimer(Heap*);
+
+ GCActivityCallback(Heap*);
+
+ virtual void doWork() override;
+
+ virtual void doCollection() = 0;
+
+ virtual void didAllocate(size_t);
+ virtual void willCollect();
+ virtual void cancel();
+ bool isEnabled() const { return m_enabled; }
+ void setEnabled(bool enabled) { m_enabled = enabled; }
+
+ static bool s_shouldCreateGCTimer;
+
+#if USE(CF) || PLATFORM(EFL)
+ double nextFireTime() const { return m_nextFireTime; }
+#endif
+
+protected:
+ virtual double lastGCLength() = 0;
+ virtual double gcTimeSlice(size_t bytes) = 0;
+ virtual double deathRate() = 0;
+
+#if USE(CF)
+ GCActivityCallback(VM* vm, CFRunLoopRef runLoop)
+ : HeapTimer(vm, runLoop)
+ , m_enabled(true)
+ , m_delay(s_decade)
+ {
+ }
+#elif PLATFORM(EFL)
+ static constexpr double s_hour = 3600;
+ GCActivityCallback(VM* vm, bool flag)
+ : HeapTimer(vm)
+ , m_enabled(flag)
+ , m_delay(s_hour)
+ {
+ }
+#elif PLATFORM(QT)
+ static constexpr double s_hour = 3600;
+ GCActivityCallback(VM* vm)
+ : HeapTimer(vm)
+ , m_enabled(true)
+ , m_delay(s_hour)
+ {
+ }
+#elif USE(GLIB)
+ GCActivityCallback(VM* vm)
+ : HeapTimer(vm)
+ , m_enabled(true)
+ , m_delay(-1)
+ {
+ }
+#else
+ GCActivityCallback(VM* vm)
+ : HeapTimer(vm)
+ , m_enabled(true)
+ {
+ }
+#endif
+
+ bool m_enabled;
+
+#if USE(CF)
+protected:
+ GCActivityCallback(Heap*, CFRunLoopRef);
+#endif
+#if USE(CF) || USE(GLIB) || PLATFORM(QT)
+protected:
+ void cancelTimer();
+ void scheduleTimer(double);
+
+private:
+ double m_delay;
+ double m_nextFireTime { 0 };
+#endif
+};
+
+} // namespace JSC
+
+#endif
diff --git a/Source/JavaScriptCore/heap/GCAssertions.h b/Source/JavaScriptCore/heap/GCAssertions.h
index 7c7054deb..836244fe6 100644
--- a/Source/JavaScriptCore/heap/GCAssertions.h
+++ b/Source/JavaScriptCore/heap/GCAssertions.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Igalia S.L.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,12 +27,13 @@
#ifndef GCAssertions_h
#define GCAssertions_h
+#include <type_traits>
#include <wtf/Assertions.h>
#if ENABLE(GC_VALIDATION)
#define ASSERT_GC_OBJECT_LOOKS_VALID(cell) do { \
RELEASE_ASSERT(cell);\
- RELEASE_ASSERT(cell->unvalidatedStructure()->unvalidatedStructure() == cell->unvalidatedStructure()->unvalidatedStructure()->unvalidatedStructure()); \
+ RELEASE_ASSERT(cell->structure()->structure() == cell->structure()->structure()->structure()); \
} while (0)
#define ASSERT_GC_OBJECT_INHERITS(object, classInfo) do {\
@@ -39,15 +41,23 @@
RELEASE_ASSERT(object->inherits(classInfo)); \
} while (0)
+// Used to avoid triggering -Wundefined-bool-conversion.
+#define ASSERT_THIS_GC_OBJECT_LOOKS_VALID() do { \
+ RELEASE_ASSERT(this->structure()->structure() == this->structure()->structure()->structure()); \
+} while (0)
+
+#define ASSERT_THIS_GC_OBJECT_INHERITS(classInfo) do {\
+ ASSERT_THIS_GC_OBJECT_LOOKS_VALID(); \
+ RELEASE_ASSERT(this->inherits(classInfo)); \
+} while (0)
+
#else
#define ASSERT_GC_OBJECT_LOOKS_VALID(cell) do { (void)cell; } while (0)
#define ASSERT_GC_OBJECT_INHERITS(object, classInfo) do { (void)object; (void)classInfo; } while (0)
+#define ASSERT_THIS_GC_OBJECT_LOOKS_VALID()
+#define ASSERT_THIS_GC_OBJECT_INHERITS(classInfo) do { (void)classInfo; } while (0)
#endif
-#if COMPILER_SUPPORTS(HAS_TRIVIAL_DESTRUCTOR)
-#define ASSERT_HAS_TRIVIAL_DESTRUCTOR(klass) COMPILE_ASSERT(__has_trivial_destructor(klass), klass##_has_trivial_destructor_check)
-#else
-#define ASSERT_HAS_TRIVIAL_DESTRUCTOR(klass)
-#endif
+#define STATIC_ASSERT_IS_TRIVIALLY_DESTRUCTIBLE(klass) static_assert(std::is_trivially_destructible<klass>::value, #klass " must have a trivial destructor")
#endif // GCAssertions_h
diff --git a/Source/JavaScriptCore/heap/GCIncomingRefCounted.h b/Source/JavaScriptCore/heap/GCIncomingRefCounted.h
new file mode 100644
index 000000000..3854d0a14
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCIncomingRefCounted.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCIncomingRefCounted_h
+#define GCIncomingRefCounted_h
+
+#include <wtf/DeferrableRefCounted.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class JSCell;
+
+// A C-heap-allocated object that may have additional reference counts
+// due to incoming references from the heap, which are tracked in
+// reverse: the object knows its incoming references. Such objects also
+// have the invariant that they don't have references back into the GC
+// heap.
+
+template<typename T>
+class GCIncomingRefCounted : public DeferrableRefCounted<T> {
+public:
+ GCIncomingRefCounted()
+ : m_encodedPointer(0)
+ {
+ }
+
+ ~GCIncomingRefCounted()
+ {
+ if (hasVectorOfCells())
+ delete vectorOfCells();
+ }
+
+ size_t numberOfIncomingReferences() const
+ {
+ if (!hasAnyIncoming())
+ return 0;
+ if (hasSingleton())
+ return 1;
+ return vectorOfCells()->size();
+ }
+
+ JSCell* incomingReferenceAt(size_t index) const
+ {
+ ASSERT(hasAnyIncoming());
+ if (hasSingleton()) {
+ ASSERT(!index);
+ return singleton();
+ }
+ return vectorOfCells()->at(index);
+ }
+
+ // It's generally not a good idea to call this directly, since if this
+ // returns true, you're supposed to add this object to the GC's list.
+ // Call GCIncomingRefCountedSet::addReference() instead.
+ bool addIncomingReference(JSCell*);
+
+ // A filter function returns true if we wish to keep the incoming
+ // reference, and false if we don't. This may delete the object,
+ // and if it does so, this returns true. In general, you don't want
+ // to use this with a filter function that can return false unless
+ // you're also walking the GC's list.
+ template<typename FilterFunctionType>
+ bool filterIncomingReferences(FilterFunctionType&);
+
+private:
+ static uintptr_t singletonFlag() { return 1; }
+
+ bool hasVectorOfCells() const { return !(m_encodedPointer & singletonFlag()); }
+ bool hasAnyIncoming() const { return !!m_encodedPointer; }
+ bool hasSingleton() const { return hasAnyIncoming() && !hasVectorOfCells(); }
+
+ JSCell* singleton() const
+ {
+ ASSERT(hasSingleton());
+ return bitwise_cast<JSCell*>(m_encodedPointer & ~singletonFlag());
+ }
+
+ Vector<JSCell*>* vectorOfCells() const
+ {
+ ASSERT(hasVectorOfCells());
+ return bitwise_cast<Vector<JSCell*>*>(m_encodedPointer);
+ }
+
+ // Singleton flag is set: this is a JSCell*.
+ // Singleton flag not set: this is a pointer to a vector of cells.
+ uintptr_t m_encodedPointer;
+};
+
+} // namespace JSC
+
+#endif // GCIncomingRefCounted_h
+
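
m_encodedPointer above packs two representations into one word: with the low bit set it holds the lone incoming JSCell*, and with the bit clear it points at a heap-allocated Vector of cells. The following is a minimal sketch of that tagging idiom, using a plain struct instead of JSCell and std::vector instead of WTF::Vector, purely for illustration.

    // Sketch of the low-bit tagging behind GCIncomingRefCounted::m_encodedPointer.
    // Assumes pointers are at least 2-byte aligned, so the low bit is free to use as a tag.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Cell { int payload; };

    class IncomingRefs {
    public:
        static constexpr uintptr_t singletonFlag = 1;

        bool empty() const { return !m_encoded; }
        bool isSingleton() const { return m_encoded & singletonFlag; }

        void add(Cell* cell)
        {
            if (empty()) {
                // First reference: store the cell directly, tagged with the singleton bit.
                m_encoded = reinterpret_cast<uintptr_t>(cell) | singletonFlag;
                return;
            }
            if (isSingleton()) {
                // Second reference: spill the singleton into a heap-allocated vector.
                auto* vec = new std::vector<Cell*> { singleton() };
                vec->push_back(cell);
                m_encoded = reinterpret_cast<uintptr_t>(vec);
                return;
            }
            vector()->push_back(cell);
        }

        Cell* singleton() const
        {
            assert(isSingleton());
            return reinterpret_cast<Cell*>(m_encoded & ~singletonFlag);
        }

        std::vector<Cell*>* vector() const
        {
            assert(!empty() && !isSingleton());
            return reinterpret_cast<std::vector<Cell*>*>(m_encoded);
        }

    private:
        uintptr_t m_encoded { 0 };
    };

    int main()
    {
        Cell a { 1 }, b { 2 };
        IncomingRefs refs;
        refs.add(&a);             // stored inline, tagged with the singleton bit
        assert(refs.isSingleton());
        refs.add(&b);             // spills to a vector of cells
        assert(refs.vector()->size() == 2);
        delete refs.vector();     // the real class frees this in its destructor
        return 0;
    }
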
diff --git a/Source/JavaScriptCore/heap/GCIncomingRefCountedInlines.h b/Source/JavaScriptCore/heap/GCIncomingRefCountedInlines.h
new file mode 100644
index 000000000..41330ecb9
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCIncomingRefCountedInlines.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCIncomingRefCountedInlines_h
+#define GCIncomingRefCountedInlines_h
+
+#include "GCIncomingRefCounted.h"
+#include "Heap.h"
+
+namespace JSC {
+
+template<typename T>
+bool GCIncomingRefCounted<T>::addIncomingReference(JSCell* cell)
+{
+ if (!hasAnyIncoming()) {
+ m_encodedPointer = bitwise_cast<uintptr_t>(cell) | singletonFlag();
+ this->setIsDeferred(true);
+ ASSERT(hasSingleton());
+ return true;
+ }
+
+ ASSERT(Heap::heap(incomingReferenceAt(0)) == Heap::heap(cell));
+
+ if (hasSingleton()) {
+ Vector<JSCell*>* vector = new Vector<JSCell*>();
+ vector->append(singleton());
+ vector->append(cell);
+ m_encodedPointer = bitwise_cast<uintptr_t>(vector);
+ ASSERT(hasVectorOfCells());
+ return false;
+ }
+
+ vectorOfCells()->append(cell);
+ return false;
+}
+
+template<typename T>
+template<typename FilterFunctionType>
+bool GCIncomingRefCounted<T>::filterIncomingReferences(FilterFunctionType& filterFunction)
+{
+ const bool verbose = false;
+
+ if (verbose)
+ dataLog("Filtering incoming references.\n");
+
+ if (!hasAnyIncoming()) {
+ ASSERT(!this->isDeferred());
+ ASSERT(this->refCount());
+ if (verbose)
+ dataLog(" Has no incoming.\n");
+ return false;
+ }
+
+ ASSERT(this->isDeferred());
+
+ if (hasSingleton()) {
+ if (filterFunction(singleton())) {
+ if (verbose)
+ dataLog(" Singleton passed.\n");
+ return false;
+ }
+
+ if (verbose)
+ dataLog(" Removing singleton.\n");
+ m_encodedPointer = 0;
+ ASSERT(!hasAnyIncoming());
+ this->setIsDeferred(false);
+ return true;
+ }
+
+ if (verbose)
+ dataLog(" Has ", vectorOfCells()->size(), " entries.\n");
+ for (size_t i = 0; i < vectorOfCells()->size(); ++i) {
+ if (filterFunction(vectorOfCells()->at(i)))
+ continue;
+ vectorOfCells()->at(i--) = vectorOfCells()->last();
+ vectorOfCells()->removeLast();
+ }
+
+ if (vectorOfCells()->size() >= 2) {
+ if (verbose)
+ dataLog(" Still has ", vectorOfCells()->size(), " entries.\n");
+ return false;
+ }
+
+ if (vectorOfCells()->isEmpty()) {
+ if (verbose)
+ dataLog(" Removing.\n");
+ delete vectorOfCells();
+ m_encodedPointer = 0;
+ ASSERT(!hasAnyIncoming());
+ this->setIsDeferred(false);
+ return true;
+ }
+
+ if (verbose)
+ dataLog(" Shrinking to singleton.\n");
+ JSCell* singleton = vectorOfCells()->at(0);
+ delete vectorOfCells();
+ m_encodedPointer = bitwise_cast<uintptr_t>(singleton) | singletonFlag();
+ ASSERT(hasSingleton());
+ return false;
+}
+
+} // namespace JSC
+
+#endif // GCIncomingRefCountedInlines_h
+
diff --git a/Source/JavaScriptCore/heap/HeapBlock.h b/Source/JavaScriptCore/heap/GCIncomingRefCountedSet.h
index 6f2a74c08..5e7254a2c 100644
--- a/Source/JavaScriptCore/heap/HeapBlock.h
+++ b/Source/JavaScriptCore/heap/GCIncomingRefCountedSet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,51 +23,37 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef HeapBlock_h
-#define HeapBlock_h
+#ifndef GCIncomingRefCountedSet_h
+#define GCIncomingRefCountedSet_h
-#include <wtf/DoublyLinkedList.h>
-#include <wtf/StdLibExtras.h>
+#include "GCIncomingRefCounted.h"
namespace JSC {
-enum AllocationEffort { AllocationCanFail, AllocationMustSucceed };
-
-class Region;
-
-#if COMPILER(GCC)
-#define CLASS_IF_GCC class
-#else
-#define CLASS_IF_GCC
-#endif
-
+// T = some subtype of GCIncomingRefCounted; it must support a
+// gcSizeEstimateInBytes() method.
template<typename T>
-class HeapBlock : public DoublyLinkedListNode<T> {
- friend CLASS_IF_GCC DoublyLinkedListNode<T>;
+class GCIncomingRefCountedSet {
public:
- static HeapBlock* destroy(HeapBlock* block) WARN_UNUSED_RETURN
- {
- static_cast<T*>(block)->~T();
- return block;
- }
-
- HeapBlock(Region* region)
- : DoublyLinkedListNode<T>()
- , m_region(region)
- , m_prev(0)
- , m_next(0)
- {
- ASSERT(m_region);
- }
-
- Region* region() const { return m_region; }
-
+ GCIncomingRefCountedSet();
+ ~GCIncomingRefCountedSet();
+
+ // Returns true if the native object is new to this set.
+ bool addReference(JSCell*, T*);
+
+ void sweep();
+
+ size_t size() const { return m_bytes; }
+
private:
- Region* m_region;
- T* m_prev;
- T* m_next;
+ static bool removeAll(JSCell*);
+ static bool removeDead(JSCell*);
+
+ Vector<T*> m_vector;
+ size_t m_bytes;
};
} // namespace JSC
-#endif
+#endif // GCIncomingRefCountedSet_h
+
diff --git a/Source/JavaScriptCore/heap/GCIncomingRefCountedSetInlines.h b/Source/JavaScriptCore/heap/GCIncomingRefCountedSetInlines.h
new file mode 100644
index 000000000..52c55e2d1
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCIncomingRefCountedSetInlines.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCIncomingRefCountedSetInlines_h
+#define GCIncomingRefCountedSetInlines_h
+
+#include "GCIncomingRefCountedSet.h"
+#include "VM.h"
+
+namespace JSC {
+
+template<typename T>
+GCIncomingRefCountedSet<T>::GCIncomingRefCountedSet()
+ : m_bytes(0)
+{
+}
+
+template<typename T>
+GCIncomingRefCountedSet<T>::~GCIncomingRefCountedSet()
+{
+ for (size_t i = m_vector.size(); i--;)
+ m_vector[i]->filterIncomingReferences(removeAll);
+}
+
+template<typename T>
+bool GCIncomingRefCountedSet<T>::addReference(JSCell* cell, T* object)
+{
+ if (!object->addIncomingReference(cell)) {
+ ASSERT(object->isDeferred());
+ ASSERT(object->numberOfIncomingReferences());
+ return false;
+ }
+ m_vector.append(object);
+ m_bytes += object->gcSizeEstimateInBytes();
+ ASSERT(object->isDeferred());
+ ASSERT(object->numberOfIncomingReferences());
+ return true;
+}
+
+template<typename T>
+void GCIncomingRefCountedSet<T>::sweep()
+{
+ for (size_t i = 0; i < m_vector.size(); ++i) {
+ T* object = m_vector[i];
+ size_t size = object->gcSizeEstimateInBytes();
+ ASSERT(object->isDeferred());
+ ASSERT(object->numberOfIncomingReferences());
+ if (!object->filterIncomingReferences(removeDead))
+ continue;
+ m_bytes -= size;
+ m_vector[i--] = m_vector.last();
+ m_vector.removeLast();
+ }
+}
+
+template<typename T>
+bool GCIncomingRefCountedSet<T>::removeAll(JSCell*)
+{
+ return false;
+}
+
+template<typename T>
+bool GCIncomingRefCountedSet<T>::removeDead(JSCell* cell)
+{
+ return Heap::isMarked(cell);
+}
+
+} // namespace JSC
+
+#endif // GCIncomingRefCountedSetInlines_h
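
sweep() above removes entries with the usual unordered-erase idiom: a dead slot is overwritten by the last element, the tail is popped, and the same index is revisited. Here is a small self-contained sketch of that idiom; the integer vector and the keepEntry() predicate are stand-ins chosen for illustration, not JSC code.

    // Sketch of the swap-with-last removal used in GCIncomingRefCountedSet<T>::sweep()
    // (and in filterIncomingReferences): removal is O(1) at the cost of element order.
    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for the "is this entry still referenced?" check.
    static bool keepEntry(int value) { return value % 2 == 0; }

    static void sweepVector(std::vector<int>& entries)
    {
        for (size_t i = 0; i < entries.size(); ++i) {
            if (keepEntry(entries[i]))
                continue;
            entries[i--] = entries.back(); // revisit this slot on the next iteration
            entries.pop_back();
        }
    }

    int main()
    {
        std::vector<int> entries { 1, 2, 3, 4, 5, 6 };
        sweepVector(entries);
        assert(entries.size() == 3); // only the even entries survive
        return 0;
    }
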
diff --git a/Source/JavaScriptCore/heap/GCLogging.cpp b/Source/JavaScriptCore/heap/GCLogging.cpp
new file mode 100644
index 000000000..1504637a0
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCLogging.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GCLogging.h"
+
+#include "ClassInfo.h"
+#include "Heap.h"
+#include "HeapIterationScope.h"
+#include "JSCell.h"
+#include "JSCellInlines.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+const char* GCLogging::levelAsString(Level level)
+{
+ switch (level) {
+ case None:
+ return "None";
+ case Basic:
+ return "Basic";
+ case Verbose:
+ return "Verbose";
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return "";
+ }
+}
+
+class LoggingFunctor {
+public:
+ LoggingFunctor(SlotVisitor& slotVisitor)
+ : m_slotVisitor(slotVisitor)
+ {
+ m_savedMarkStack.resize(m_slotVisitor.markStack().size());
+ m_slotVisitor.markStack().fillVector(m_savedMarkStack);
+ }
+
+ ~LoggingFunctor()
+ {
+ reviveCells();
+ }
+
+ IterationStatus operator()(JSCell* cell)
+ {
+ m_liveCells.append(cell);
+ MarkedBlock::blockFor(cell)->clearMarked(cell);
+ return IterationStatus::Continue;
+ }
+
+ void log()
+ {
+ m_slotVisitor.clearMarkStack();
+ for (JSCell* cell : m_liveCells) {
+ cell->methodTable()->visitChildren(cell, m_slotVisitor);
+ dataLog("\n", *cell, ":\n", m_slotVisitor);
+ for (const JSCell* neighbor : m_slotVisitor.markStack())
+ MarkedBlock::blockFor(neighbor)->clearMarked(neighbor);
+ m_slotVisitor.clearMarkStack();
+ }
+ m_slotVisitor.reset();
+ }
+
+ void reviveCells()
+ {
+ for (JSCell* cell : m_liveCells)
+ MarkedBlock::blockFor(cell)->setMarked(cell);
+
+ for (const JSCell* cell : m_savedMarkStack) {
+ m_slotVisitor.markStack().append(cell);
+ cell->setCellState(CellState::OldGrey);
+ }
+ }
+
+ typedef void ReturnType;
+
+ void returnValue() { };
+
+private:
+ Vector<const JSCell*> m_savedMarkStack;
+ Vector<JSCell*> m_liveCells;
+ SlotVisitor& m_slotVisitor;
+};
+
+void GCLogging::dumpObjectGraph(Heap* heap)
+{
+ LoggingFunctor loggingFunctor(heap->m_slotVisitor);
+ HeapIterationScope iterationScope(*heap);
+ heap->objectSpace().forEachLiveCell(iterationScope, loggingFunctor);
+ loggingFunctor.log();
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::GCLogging::Level level)
+{
+ switch (level) {
+ case JSC::GCLogging::Level::None:
+ out.print("None");
+ return;
+ case JSC::GCLogging::Level::Basic:
+ out.print("Basic");
+ return;
+ case JSC::GCLogging::Level::Verbose:
+ out.print("Verbose");
+ return;
+ default:
+ out.print("Level=", level - JSC::GCLogging::Level::None);
+ return;
+ }
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/heap/GCLogging.h b/Source/JavaScriptCore/heap/GCLogging.h
new file mode 100644
index 000000000..650d3fc04
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCLogging.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCLogging_h
+#define GCLogging_h
+
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+class Heap;
+
+class GCLogging {
+public:
+ enum Level : uint8_t {
+ None = 0,
+ Basic,
+ Verbose
+ };
+
+ static const char* levelAsString(Level);
+ static void dumpObjectGraph(Heap*);
+};
+
+typedef GCLogging::Level gcLogLevel;
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::GCLogging::Level);
+
+} // namespace WTF
+
+#endif // GCLogging_h
diff --git a/Source/JavaScriptCore/heap/GCSegmentedArray.h b/Source/JavaScriptCore/heap/GCSegmentedArray.h
new file mode 100644
index 000000000..8aeba1025
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCSegmentedArray.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCSegmentedArray_h
+#define GCSegmentedArray_h
+
+#include <wtf/DoublyLinkedList.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+template <typename T>
+class GCArraySegment : public DoublyLinkedListNode<GCArraySegment<T>> {
+ friend class WTF::DoublyLinkedListNode<GCArraySegment<T>>;
+public:
+ GCArraySegment()
+ : DoublyLinkedListNode<GCArraySegment<T>>()
+#if !ASSERT_DISABLED
+ , m_top(0)
+#endif
+ {
+ }
+
+ static GCArraySegment* create();
+ static void destroy(GCArraySegment*);
+
+ T* data()
+ {
+ return bitwise_cast<T*>(this + 1);
+ }
+
+ static const size_t blockSize = 4 * KB;
+
+ GCArraySegment* m_prev;
+ GCArraySegment* m_next;
+#if !ASSERT_DISABLED
+ size_t m_top;
+#endif
+};
+
+template <typename T> class GCSegmentedArrayIterator;
+
+template <typename T>
+class GCSegmentedArray {
+ friend class GCSegmentedArrayIterator<T>;
+ friend class GCSegmentedArrayIterator<const T>;
+public:
+ GCSegmentedArray();
+ ~GCSegmentedArray();
+
+ void append(T);
+
+ bool canRemoveLast();
+ const T removeLast();
+ bool refill();
+
+ size_t size();
+ bool isEmpty();
+
+ void fillVector(Vector<T>&);
+ void clear();
+
+ typedef GCSegmentedArrayIterator<T> iterator;
+ iterator begin() const { return GCSegmentedArrayIterator<T>(m_segments.head(), m_top); }
+ iterator end() const { return GCSegmentedArrayIterator<T>(); }
+
+protected:
+ template <size_t size> struct CapacityFromSize {
+ static const size_t value = (size - sizeof(GCArraySegment<T>)) / sizeof(T);
+ };
+
+ void expand();
+
+ size_t postIncTop();
+ size_t preDecTop();
+ void setTopForFullSegment();
+ void setTopForEmptySegment();
+ size_t top();
+
+ void validatePrevious();
+
+ DoublyLinkedList<GCArraySegment<T>> m_segments;
+
+ JS_EXPORT_PRIVATE static const size_t s_segmentCapacity = CapacityFromSize<GCArraySegment<T>::blockSize>::value;
+ size_t m_top;
+ size_t m_numberOfSegments;
+};
+
+template <typename T>
+class GCSegmentedArrayIterator {
+ friend class GCSegmentedArray<T>;
+public:
+ GCSegmentedArrayIterator()
+ : m_currentSegment(0)
+ , m_currentOffset(0)
+ {
+ }
+
+ T& get() { return m_currentSegment->data()[m_currentOffset]; }
+ T& operator*() { return get(); }
+ T& operator->() { return get(); }
+
+ bool operator==(const GCSegmentedArrayIterator& other)
+ {
+ return m_currentSegment == other.m_currentSegment && m_currentOffset == other.m_currentOffset;
+ }
+
+ bool operator!=(const GCSegmentedArrayIterator& other)
+ {
+ return !(*this == other);
+ }
+
+ GCSegmentedArrayIterator& operator++()
+ {
+ ASSERT(m_currentSegment);
+
+ m_currentOffset++;
+
+ if (m_currentOffset >= m_offsetLimit) {
+ m_currentSegment = m_currentSegment->next();
+ m_currentOffset = 0;
+ m_offsetLimit = GCSegmentedArray<T>::s_segmentCapacity;
+ }
+
+ return *this;
+ }
+
+private:
+ GCSegmentedArrayIterator(GCArraySegment<T>* start, size_t top)
+ : m_currentSegment(start)
+ , m_currentOffset(0)
+ , m_offsetLimit(top)
+ {
+ if (!m_offsetLimit)
+ m_currentSegment = nullptr;
+ }
+
+ GCArraySegment<T>* m_currentSegment;
+ size_t m_currentOffset;
+ size_t m_offsetLimit;
+};
+
+} // namespace JSC
+
+#endif // GCSegmentedArray_h
diff --git a/Source/JavaScriptCore/heap/GCSegmentedArrayInlines.h b/Source/JavaScriptCore/heap/GCSegmentedArrayInlines.h
new file mode 100644
index 000000000..88e43cc9b
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCSegmentedArrayInlines.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCSegmentedArrayInlines_h
+#define GCSegmentedArrayInlines_h
+
+#include "GCSegmentedArray.h"
+
+namespace JSC {
+
+template <typename T>
+GCSegmentedArray<T>::GCSegmentedArray()
+ : m_top(0)
+ , m_numberOfSegments(0)
+{
+ m_segments.push(GCArraySegment<T>::create());
+ m_numberOfSegments++;
+}
+
+template <typename T>
+GCSegmentedArray<T>::~GCSegmentedArray()
+{
+ ASSERT(m_numberOfSegments == 1);
+ ASSERT(m_segments.size() == 1);
+ GCArraySegment<T>::destroy(m_segments.removeHead());
+ m_numberOfSegments--;
+ ASSERT(!m_numberOfSegments);
+ ASSERT(!m_segments.size());
+}
+
+template <typename T>
+void GCSegmentedArray<T>::clear()
+{
+ if (!m_segments.head())
+ return;
+ GCArraySegment<T>* next;
+ for (GCArraySegment<T>* current = m_segments.head(); current->next(); current = next) {
+ next = current->next();
+ m_segments.remove(current);
+ GCArraySegment<T>::destroy(current);
+ }
+ m_top = 0;
+ m_numberOfSegments = 1;
+#if !ASSERT_DISABLED
+ m_segments.head()->m_top = 0;
+#endif
+}
+
+template <typename T>
+void GCSegmentedArray<T>::expand()
+{
+ ASSERT(m_segments.head()->m_top == s_segmentCapacity);
+
+ GCArraySegment<T>* nextSegment = GCArraySegment<T>::create();
+ m_numberOfSegments++;
+
+#if !ASSERT_DISABLED
+ nextSegment->m_top = 0;
+#endif
+
+ m_segments.push(nextSegment);
+ setTopForEmptySegment();
+ validatePrevious();
+}
+
+template <typename T>
+bool GCSegmentedArray<T>::refill()
+{
+ validatePrevious();
+ if (top())
+ return true;
+ GCArraySegment<T>::destroy(m_segments.removeHead());
+ ASSERT(m_numberOfSegments > 1);
+ m_numberOfSegments--;
+ setTopForFullSegment();
+ validatePrevious();
+ return true;
+}
+
+template <typename T>
+void GCSegmentedArray<T>::fillVector(Vector<T>& vector)
+{
+ ASSERT(vector.size() == size());
+
+ GCArraySegment<T>* currentSegment = m_segments.head();
+ if (!currentSegment)
+ return;
+
+ unsigned count = 0;
+ for (unsigned i = 0; i < m_top; ++i) {
+ ASSERT(currentSegment->data()[i]);
+ vector[count++] = currentSegment->data()[i];
+ }
+
+ currentSegment = currentSegment->next();
+ while (currentSegment) {
+ for (unsigned i = 0; i < s_segmentCapacity; ++i) {
+ ASSERT(currentSegment->data()[i]);
+ vector[count++] = currentSegment->data()[i];
+ }
+ currentSegment = currentSegment->next();
+ }
+}
+
+template <typename T>
+inline GCArraySegment<T>* GCArraySegment<T>::create()
+{
+ return new (NotNull, fastMalloc(blockSize)) GCArraySegment<T>();
+}
+
+template <typename T>
+inline void GCArraySegment<T>::destroy(GCArraySegment* segment)
+{
+ segment->~GCArraySegment();
+ fastFree(segment);
+}
+
+template <typename T>
+inline size_t GCSegmentedArray<T>::postIncTop()
+{
+ size_t result = m_top++;
+ ASSERT(result == m_segments.head()->m_top++);
+ return result;
+}
+
+template <typename T>
+inline size_t GCSegmentedArray<T>::preDecTop()
+{
+ size_t result = --m_top;
+ ASSERT(result == --m_segments.head()->m_top);
+ return result;
+}
+
+template <typename T>
+inline void GCSegmentedArray<T>::setTopForFullSegment()
+{
+ ASSERT(m_segments.head()->m_top == s_segmentCapacity);
+ m_top = s_segmentCapacity;
+}
+
+template <typename T>
+inline void GCSegmentedArray<T>::setTopForEmptySegment()
+{
+ ASSERT(!m_segments.head()->m_top);
+ m_top = 0;
+}
+
+template <typename T>
+inline size_t GCSegmentedArray<T>::top()
+{
+ ASSERT(m_top == m_segments.head()->m_top);
+ return m_top;
+}
+
+template <typename T>
+#if ASSERT_DISABLED
+inline void GCSegmentedArray<T>::validatePrevious() { }
+#else
+inline void GCSegmentedArray<T>::validatePrevious()
+{
+ unsigned count = 0;
+ for (GCArraySegment<T>* current = m_segments.head(); current; current = current->next())
+ count++;
+ ASSERT(m_segments.size() == m_numberOfSegments);
+}
+#endif
+
+template <typename T>
+inline void GCSegmentedArray<T>::append(T value)
+{
+ if (m_top == s_segmentCapacity)
+ expand();
+ m_segments.head()->data()[postIncTop()] = value;
+}
+
+template <typename T>
+inline bool GCSegmentedArray<T>::canRemoveLast()
+{
+ return !!m_top;
+}
+
+template <typename T>
+inline const T GCSegmentedArray<T>::removeLast()
+{
+ return m_segments.head()->data()[preDecTop()];
+}
+
+template <typename T>
+inline bool GCSegmentedArray<T>::isEmpty()
+{
+ if (m_top)
+ return false;
+ if (m_segments.head()->next()) {
+ ASSERT(m_segments.head()->next()->m_top == s_segmentCapacity);
+ return false;
+ }
+ return true;
+}
+
+template <typename T>
+inline size_t GCSegmentedArray<T>::size()
+{
+ return m_top + s_segmentCapacity * (m_numberOfSegments - 1);
+}
+
+} // namespace JSC
+
+#endif // GCSegmentedArrayInlines_h
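
GCSegmentedArray grows by pushing whole 4 KB segments onto a doubly linked list, so append() never reallocates or copies elements; s_segmentCapacity is simply how many T fit in a block after the segment header, and size() adds the partially filled head segment to the full tail segments. A back-of-the-envelope sketch of that arithmetic follows; the 32-byte header is an assumed figure for a 64-bit build, not a measured sizeof.

    // Rough check of GCSegmentedArray's capacity and size() arithmetic.
    // assumedHeaderSize stands in for sizeof(GCArraySegment<T>); 32 bytes is a guess for
    // a 64-bit build (two list pointers, the debug-only m_top, padding), not a measured value.
    #include <cstddef>
    #include <cstdio>

    int main()
    {
        const size_t blockSize = 4 * 1024;        // GCArraySegment<T>::blockSize
        const size_t assumedHeaderSize = 32;      // stand-in for sizeof(GCArraySegment<T>)
        const size_t elementSize = sizeof(void*); // T = JSCell*, 8 bytes on a 64-bit target

        // CapacityFromSize: how many elements fit in one block after the header.
        size_t capacity = (blockSize - assumedHeaderSize) / elementSize; // 508 under these assumptions

        // size(): partially filled head segment plus every full tail segment.
        size_t numberOfSegments = 3;
        size_t top = 100;
        size_t total = top + capacity * (numberOfSegments - 1); // 100 + 508 * 2 = 1116

        std::printf("capacity per segment: %zu, total entries: %zu\n", capacity, total);
        return 0;
    }
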
diff --git a/Source/JavaScriptCore/heap/GCThread.cpp b/Source/JavaScriptCore/heap/GCThread.cpp
deleted file mode 100644
index aa868f1b3..000000000
--- a/Source/JavaScriptCore/heap/GCThread.cpp
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "GCThread.h"
-
-#include "CopyVisitor.h"
-#include "CopyVisitorInlines.h"
-#include "GCThreadSharedData.h"
-#include "SlotVisitor.h"
-#include <wtf/MainThread.h>
-#include <wtf/PassOwnPtr.h>
-
-namespace JSC {
-
-GCThread::GCThread(GCThreadSharedData& shared, SlotVisitor* slotVisitor, CopyVisitor* copyVisitor)
- : m_threadID(0)
- , m_shared(shared)
- , m_slotVisitor(WTF::adoptPtr(slotVisitor))
- , m_copyVisitor(WTF::adoptPtr(copyVisitor))
-{
-}
-
-ThreadIdentifier GCThread::threadID()
-{
- ASSERT(m_threadID);
- return m_threadID;
-}
-
-void GCThread::initializeThreadID(ThreadIdentifier threadID)
-{
- ASSERT(!m_threadID);
- m_threadID = threadID;
-}
-
-SlotVisitor* GCThread::slotVisitor()
-{
- ASSERT(m_slotVisitor);
- return m_slotVisitor.get();
-}
-
-CopyVisitor* GCThread::copyVisitor()
-{
- ASSERT(m_copyVisitor);
- return m_copyVisitor.get();
-}
-
-GCPhase GCThread::waitForNextPhase()
-{
- MutexLocker locker(m_shared.m_phaseLock);
- while (m_shared.m_gcThreadsShouldWait)
- m_shared.m_phaseCondition.wait(m_shared.m_phaseLock);
-
- m_shared.m_numberOfActiveGCThreads--;
- if (!m_shared.m_numberOfActiveGCThreads)
- m_shared.m_activityCondition.signal();
-
- while (m_shared.m_currentPhase == NoPhase)
- m_shared.m_phaseCondition.wait(m_shared.m_phaseLock);
- m_shared.m_numberOfActiveGCThreads++;
- return m_shared.m_currentPhase;
-}
-
-void GCThread::gcThreadMain()
-{
- GCPhase currentPhase;
-#if ENABLE(PARALLEL_GC)
- WTF::registerGCThread();
-#endif
- // Wait for the main thread to finish creating and initializing us. The main thread grabs this lock before
- // creating this thread. We aren't guaranteed to have a valid threadID until the main thread releases this lock.
- {
- MutexLocker locker(m_shared.m_phaseLock);
- }
- {
- ParallelModeEnabler enabler(*m_slotVisitor);
- while ((currentPhase = waitForNextPhase()) != Exit) {
- // Note: Each phase is responsible for its own termination conditions. The comments below describe
- // how each phase reaches termination.
- switch (currentPhase) {
- case Mark:
- m_slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
- // GCThreads only return from drainFromShared() if the main thread sets the m_parallelMarkersShouldExit
- // flag in the GCThreadSharedData. The only way the main thread sets that flag is if it realizes
- // that all of the various subphases in Heap::markRoots() have been fully finished and there is
- // no more marking work to do and all of the GCThreads are idle, meaning no more work can be generated.
- break;
- case Copy:
- // We don't have to call startCopying() because it's called for us on the main thread to avoid a
- // race condition.
- m_copyVisitor->copyFromShared();
- // We know we're done copying when we return from copyFromShared() because we would
- // only do so if there were no more chunks of copying work left to do. When there is no
- // more copying work to do, the main thread will wait in CopiedSpace::doneCopying() until
- // all of the blocks that the GCThreads borrowed have been returned. doneCopying()
- // returns our borrowed CopiedBlock, allowing the copying phase to finish.
- m_copyVisitor->doneCopying();
- break;
- case NoPhase:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- case Exit:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
- }
-}
-
-void GCThread::gcThreadStartFunc(void* data)
-{
- GCThread* thread = static_cast<GCThread*>(data);
- thread->gcThreadMain();
-}
-
-} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.cpp b/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
deleted file mode 100644
index b39ab5763..000000000
--- a/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "GCThreadSharedData.h"
-
-#include "CopyVisitor.h"
-#include "CopyVisitorInlines.h"
-#include "GCThread.h"
-#include "VM.h"
-#include "MarkStack.h"
-#include "SlotVisitor.h"
-#include "SlotVisitorInlines.h"
-
-namespace JSC {
-
-#if ENABLE(PARALLEL_GC)
-void GCThreadSharedData::resetChildren()
-{
- for (size_t i = 0; i < m_gcThreads.size(); ++i)
- m_gcThreads[i]->slotVisitor()->reset();
-}
-
-size_t GCThreadSharedData::childVisitCount()
-{
- unsigned long result = 0;
- for (unsigned i = 0; i < m_gcThreads.size(); ++i)
- result += m_gcThreads[i]->slotVisitor()->visitCount();
- return result;
-}
-#endif
-
-GCThreadSharedData::GCThreadSharedData(VM* vm)
- : m_vm(vm)
- , m_copiedSpace(&vm->heap.m_storageSpace)
- , m_shouldHashCons(false)
- , m_sharedMarkStack(vm->heap.blockAllocator())
- , m_numberOfActiveParallelMarkers(0)
- , m_parallelMarkersShouldExit(false)
- , m_copyIndex(0)
- , m_numberOfActiveGCThreads(0)
- , m_gcThreadsShouldWait(false)
- , m_currentPhase(NoPhase)
-{
- m_copyLock.Init();
-#if ENABLE(PARALLEL_GC)
- // Grab the lock so the new GC threads can be properly initialized before they start running.
- MutexLocker locker(m_phaseLock);
- for (unsigned i = 1; i < Options::numberOfGCMarkers(); ++i) {
- m_numberOfActiveGCThreads++;
- SlotVisitor* slotVisitor = new SlotVisitor(*this);
- CopyVisitor* copyVisitor = new CopyVisitor(*this);
- GCThread* newThread = new GCThread(*this, slotVisitor, copyVisitor);
- ThreadIdentifier threadID = createThread(GCThread::gcThreadStartFunc, newThread, "JavaScriptCore::Marking");
- newThread->initializeThreadID(threadID);
- m_gcThreads.append(newThread);
- }
-
- // Wait for all the GCThreads to get to the right place.
- while (m_numberOfActiveGCThreads)
- m_activityCondition.wait(m_phaseLock);
-#endif
-}
-
-GCThreadSharedData::~GCThreadSharedData()
-{
-#if ENABLE(PARALLEL_GC)
- // Destroy our marking threads.
- {
- MutexLocker markingLocker(m_markingLock);
- MutexLocker phaseLocker(m_phaseLock);
- ASSERT(m_currentPhase == NoPhase);
- m_parallelMarkersShouldExit = true;
- m_gcThreadsShouldWait = false;
- m_currentPhase = Exit;
- m_phaseCondition.broadcast();
- }
- for (unsigned i = 0; i < m_gcThreads.size(); ++i) {
- waitForThreadCompletion(m_gcThreads[i]->threadID());
- delete m_gcThreads[i];
- }
-#endif
-}
-
-void GCThreadSharedData::reset()
-{
- ASSERT(m_sharedMarkStack.isEmpty());
-
-#if ENABLE(PARALLEL_GC)
- m_opaqueRoots.clear();
-#else
- ASSERT(m_opaqueRoots.isEmpty());
-#endif
- m_weakReferenceHarvesters.removeAll();
-
- if (m_shouldHashCons) {
- m_vm->resetNewStringsSinceLastHashCons();
- m_shouldHashCons = false;
- }
-}
-
-void GCThreadSharedData::startNextPhase(GCPhase phase)
-{
- MutexLocker phaseLocker(m_phaseLock);
- ASSERT(!m_gcThreadsShouldWait);
- ASSERT(m_currentPhase == NoPhase);
- m_gcThreadsShouldWait = true;
- m_currentPhase = phase;
- m_phaseCondition.broadcast();
-}
-
-void GCThreadSharedData::endCurrentPhase()
-{
- ASSERT(m_gcThreadsShouldWait);
- MutexLocker locker(m_phaseLock);
- m_currentPhase = NoPhase;
- m_gcThreadsShouldWait = false;
- m_phaseCondition.broadcast();
- while (m_numberOfActiveGCThreads)
- m_activityCondition.wait(m_phaseLock);
-}
-
-void GCThreadSharedData::didStartMarking()
-{
- MutexLocker markingLocker(m_markingLock);
- m_parallelMarkersShouldExit = false;
- startNextPhase(Mark);
-}
-
-void GCThreadSharedData::didFinishMarking()
-{
- {
- MutexLocker markingLocker(m_markingLock);
- m_parallelMarkersShouldExit = true;
- m_markingCondition.broadcast();
- }
-
- ASSERT(m_currentPhase == Mark);
- endCurrentPhase();
-}
-
-void GCThreadSharedData::didStartCopying()
-{
- {
- SpinLockHolder locker(&m_copyLock);
- WTF::copyToVector(m_copiedSpace->m_blockSet, m_blocksToCopy);
- m_copyIndex = 0;
- }
-
- // We do this here so that we avoid a race condition where the main thread can
- // blow through all of the copying work before the GCThreads fully wake up.
- // The GCThreads then request a block from the CopiedSpace when the copying phase
- // has completed, which isn't allowed.
- for (size_t i = 0; i < m_gcThreads.size(); i++)
- m_gcThreads[i]->copyVisitor()->startCopying();
-
- startNextPhase(Copy);
-}
-
-void GCThreadSharedData::didFinishCopying()
-{
- ASSERT(m_currentPhase == Copy);
- endCurrentPhase();
-}
-
-} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.h b/Source/JavaScriptCore/heap/GCThreadSharedData.h
deleted file mode 100644
index 47a53ebd8..000000000
--- a/Source/JavaScriptCore/heap/GCThreadSharedData.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef GCThreadSharedData_h
-#define GCThreadSharedData_h
-
-#include "ListableHandler.h"
-#include "MarkStack.h"
-#include "MarkedBlock.h"
-#include "UnconditionalFinalizer.h"
-#include "WeakReferenceHarvester.h"
-#include <wtf/HashSet.h>
-#include <wtf/TCSpinLock.h>
-#include <wtf/Threading.h>
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-class GCThread;
-class VM;
-class CopiedSpace;
-class CopyVisitor;
-
-enum GCPhase {
- NoPhase,
- Mark,
- Copy,
- Exit
-};
-
-class GCThreadSharedData {
-public:
- GCThreadSharedData(VM*);
- ~GCThreadSharedData();
-
- void reset();
-
- void didStartMarking();
- void didFinishMarking();
- void didStartCopying();
- void didFinishCopying();
-
-#if ENABLE(PARALLEL_GC)
- void resetChildren();
- size_t childVisitCount();
- size_t childDupStrings();
-#endif
-
-private:
- friend class GCThread;
- friend class SlotVisitor;
- friend class CopyVisitor;
-
- void getNextBlocksToCopy(size_t&, size_t&);
- void startNextPhase(GCPhase);
- void endCurrentPhase();
-
- VM* m_vm;
- CopiedSpace* m_copiedSpace;
-
- bool m_shouldHashCons;
-
- Vector<GCThread*> m_gcThreads;
-
- Mutex m_markingLock;
- ThreadCondition m_markingCondition;
- MarkStackArray m_sharedMarkStack;
- unsigned m_numberOfActiveParallelMarkers;
- bool m_parallelMarkersShouldExit;
-
- Mutex m_opaqueRootsLock;
- HashSet<void*> m_opaqueRoots;
-
- SpinLock m_copyLock;
- Vector<CopiedBlock*> m_blocksToCopy;
- size_t m_copyIndex;
- static const size_t s_blockFragmentLength = 32;
-
- Mutex m_phaseLock;
- ThreadCondition m_phaseCondition;
- ThreadCondition m_activityCondition;
- unsigned m_numberOfActiveGCThreads;
- bool m_gcThreadsShouldWait;
- GCPhase m_currentPhase;
-
- ListableHandler<WeakReferenceHarvester>::List m_weakReferenceHarvesters;
- ListableHandler<UnconditionalFinalizer>::List m_unconditionalFinalizers;
-};
-
-inline void GCThreadSharedData::getNextBlocksToCopy(size_t& start, size_t& end)
-{
- SpinLockHolder locker(&m_copyLock);
- start = m_copyIndex;
- end = std::min(m_blocksToCopy.size(), m_copyIndex + s_blockFragmentLength);
- m_copyIndex = end;
-}
-
-} // namespace JSC
-
-#endif
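For context, the deleted getNextBlocksToCopy() above is a classic chunked work-claiming scheme: each GC helper thread grabs a fixed-size slice of the shared block vector under a lock, which the new Heap code replaces with WTF::ParallelVectorIterator. A minimal standalone sketch of the same pattern, with illustrative names rather than the JSC types:

    #include <algorithm>
    #include <mutex>
    #include <vector>

    struct WorkQueue {
        std::mutex lock;
        std::vector<int> items;
        size_t nextIndex = 0;
        static constexpr size_t sliceLength = 32; // analogous to s_blockFragmentLength

        // Claim the next contiguous slice [start, end); returns false once the queue is drained.
        bool claimSlice(size_t& start, size_t& end)
        {
            std::lock_guard<std::mutex> holder(lock);
            start = nextIndex;
            end = std::min(items.size(), nextIndex + sliceLength);
            nextIndex = end;
            return start < end;
        }
    };

Each worker loops on claimSlice() and processes items[start, end), so lock contention is amortized over sliceLength items per acquisition.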
diff --git a/Source/JavaScriptCore/heap/Handle.h b/Source/JavaScriptCore/heap/Handle.h
index 28ac30cd9..c924e041d 100644
--- a/Source/JavaScriptCore/heap/Handle.h
+++ b/Source/JavaScriptCore/heap/Handle.h
@@ -52,9 +52,7 @@ class HandleBase {
public:
bool operator!() const { return !m_slot || !*m_slot; }
- // This conversion operator allows implicit conversion to bool but not to other integer types.
- typedef JSValue (HandleBase::*UnspecifiedBoolType);
- operator UnspecifiedBoolType*() const { return (m_slot && *m_slot) ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0; }
+ explicit operator bool() const { return m_slot && *m_slot; }
HandleSlot slot() const { return m_slot; }
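The hunk above swaps the pre-C++11 "unspecified bool type" trick for an explicit conversion operator. A small self-contained illustration of why explicit matters, using a hypothetical Handle-like class rather than the JSC type:

    #include <cassert>

    class MaybeValue {
    public:
        explicit MaybeValue(int* slot) : m_slot(slot) { }

        // Contextual conversion: usable in if/while/assert and with operator!...
        explicit operator bool() const { return m_slot && *m_slot; }

    private:
        int* m_slot;
    };

    int main()
    {
        int value = 1;
        MaybeValue present(&value);
        MaybeValue absent(nullptr);

        assert(present);        // contextual conversion to bool is allowed
        assert(!absent);
        // int n = present + 1; // ...but arithmetic no longer compiles, which is the
        //                      // misuse the old hand-rolled idiom existed to prevent.
        return 0;
    }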
diff --git a/Source/JavaScriptCore/heap/HandleBlock.h b/Source/JavaScriptCore/heap/HandleBlock.h
index 962d37c5e..ca2895695 100644
--- a/Source/JavaScriptCore/heap/HandleBlock.h
+++ b/Source/JavaScriptCore/heap/HandleBlock.h
@@ -26,17 +26,18 @@
#ifndef HandleBlock_h
#define HandleBlock_h
-#include "HeapBlock.h"
+#include <wtf/DoublyLinkedList.h>
namespace JSC {
-class DeadBlock;
class HandleSet;
class HandleNode;
-class HandleBlock : public HeapBlock<HandleBlock> {
+class HandleBlock : public DoublyLinkedListNode<HandleBlock> {
+ friend class WTF::DoublyLinkedListNode<HandleBlock>;
public:
- static HandleBlock* create(DeadBlock*, HandleSet*);
+ static HandleBlock* create(HandleSet*);
+ static void destroy(HandleBlock*);
static HandleBlock* blockFor(HandleNode*);
static const size_t blockSize = 4 * KB;
@@ -48,13 +49,15 @@ public:
unsigned nodeCapacity();
private:
- HandleBlock(Region*, HandleSet*);
+ HandleBlock(HandleSet*);
char* payload();
char* payloadEnd();
static const size_t s_blockMask = ~(blockSize - 1);
+ HandleBlock* m_prev;
+ HandleBlock* m_next;
HandleSet* m_handleSet;
};
diff --git a/Source/JavaScriptCore/heap/HandleBlockInlines.h b/Source/JavaScriptCore/heap/HandleBlockInlines.h
index 7c771935e..9e29bffd1 100644
--- a/Source/JavaScriptCore/heap/HandleBlockInlines.h
+++ b/Source/JavaScriptCore/heap/HandleBlockInlines.h
@@ -26,26 +26,31 @@
#ifndef HandleBlockInlines_h
#define HandleBlockInlines_h
-#include "BlockAllocator.h"
#include "HandleBlock.h"
+#include <wtf/FastMalloc.h>
namespace JSC {
-inline HandleBlock* HandleBlock::create(DeadBlock* block, HandleSet* handleSet)
+inline HandleBlock* HandleBlock::create(HandleSet* handleSet)
{
- Region* region = block->region();
- return new (NotNull, block) HandleBlock(region, handleSet);
+ return new (NotNull, fastAlignedMalloc(blockSize, blockSize)) HandleBlock(handleSet);
}
-inline HandleBlock::HandleBlock(Region* region, HandleSet* handleSet)
- : HeapBlock<HandleBlock>(region)
+inline void HandleBlock::destroy(HandleBlock* block)
+{
+ block->~HandleBlock();
+ fastAlignedFree(block);
+}
+
+inline HandleBlock::HandleBlock(HandleSet* handleSet)
+ : DoublyLinkedListNode<HandleBlock>()
, m_handleSet(handleSet)
{
}
inline char* HandleBlock::payloadEnd()
{
- return reinterpret_cast<char*>(this) + region()->blockSize();
+ return reinterpret_cast<char*>(this) + blockSize;
}
inline char* HandleBlock::payload()
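The switch to fastAlignedMalloc(blockSize, blockSize) is what keeps HandleBlock::blockFor() working: because every block starts on a blockSize boundary, any pointer into the block can be mapped back to the block header by masking off the low bits with s_blockMask. A standalone sketch of that invariant, assuming C++17 std::aligned_alloc in place of WTF's allocator:

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    int main()
    {
        const std::size_t blockSize = 4 * 1024;
        const std::uintptr_t blockMask = ~(static_cast<std::uintptr_t>(blockSize) - 1);

        // aligned_alloc requires size to be a multiple of alignment; blockSize == alignment here.
        void* block = std::aligned_alloc(blockSize, blockSize);
        char* interior = static_cast<char*>(block) + 123; // any pointer into the block's payload

        void* recovered = reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(interior) & blockMask);
        assert(recovered == block); // blockFor()-style recovery from an interior pointer

        std::free(block);
        return 0;
    }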
diff --git a/Source/JavaScriptCore/heap/HandleSet.cpp b/Source/JavaScriptCore/heap/HandleSet.cpp
index fdb554448..dec8370eb 100644
--- a/Source/JavaScriptCore/heap/HandleSet.cpp
+++ b/Source/JavaScriptCore/heap/HandleSet.cpp
@@ -30,14 +30,13 @@
#include "HandleBlockInlines.h"
#include "HeapRootVisitor.h"
#include "JSObject.h"
-#include "Operations.h"
+#include "JSCInlines.h"
#include <wtf/DataLog.h>
namespace JSC {
HandleSet::HandleSet(VM* vm)
: m_vm(vm)
- , m_nextToFinalize(0)
{
grow();
}
@@ -45,12 +44,12 @@ HandleSet::HandleSet(VM* vm)
HandleSet::~HandleSet()
{
while (!m_blockList.isEmpty())
- m_vm->heap.blockAllocator().deallocate(HandleBlock::destroy(m_blockList.removeHead()));
+ HandleBlock::destroy(m_blockList.removeHead());
}
void HandleSet::grow()
{
- HandleBlock* newBlock = HandleBlock::create(m_vm->heap.blockAllocator().allocate<HandleBlock>(), this);
+ HandleBlock* newBlock = HandleBlock::create(this);
m_blockList.append(newBlock);
for (int i = newBlock->nodeCapacity() - 1; i >= 0; --i) {
@@ -73,10 +72,6 @@ void HandleSet::visitStrongHandles(HeapRootVisitor& heapRootVisitor)
void HandleSet::writeBarrier(HandleSlot slot, const JSValue& value)
{
- // Forbid assignment to handles during the finalization phase, since it would violate many GC invariants.
- // File a bug with stack trace if you hit this.
- RELEASE_ASSERT(!m_nextToFinalize);
-
if (!value == !*slot && slot->isCell() == value.isCell())
return;
diff --git a/Source/JavaScriptCore/heap/HandleSet.h b/Source/JavaScriptCore/heap/HandleSet.h
index 58251f66a..f9737882e 100644
--- a/Source/JavaScriptCore/heap/HandleSet.h
+++ b/Source/JavaScriptCore/heap/HandleSet.h
@@ -35,12 +35,10 @@
namespace JSC {
-class HandleBlock;
class HandleSet;
class HeapRootVisitor;
class VM;
class JSValue;
-class SlotVisitor;
class HandleNode {
public:
@@ -100,7 +98,6 @@ private:
SentinelLinkedList<Node> m_strongList;
SentinelLinkedList<Node> m_immediateList;
SinglyLinkedList<Node> m_freeList;
- Node* m_nextToFinalize;
};
inline HandleSet* HandleSet::heapFor(HandleSlot handle)
@@ -125,10 +122,6 @@ inline HandleSet::Node* HandleSet::toNode(HandleSlot handle)
inline HandleSlot HandleSet::allocate()
{
- // Forbid assignment to handles during the finalization phase, since it would violate many GC invariants.
- // File a bug with stack trace if you hit this.
- RELEASE_ASSERT(!m_nextToFinalize);
-
if (m_freeList.isEmpty())
grow();
@@ -141,11 +134,6 @@ inline HandleSlot HandleSet::allocate()
inline void HandleSet::deallocate(HandleSlot handle)
{
HandleSet::Node* node = toNode(handle);
- if (node == m_nextToFinalize) {
- ASSERT(m_nextToFinalize->next());
- m_nextToFinalize = m_nextToFinalize->next();
- }
-
SentinelLinkedList<HandleSet::Node>::remove(node);
m_freeList.push(node);
}
diff --git a/Source/JavaScriptCore/heap/HandleStack.cpp b/Source/JavaScriptCore/heap/HandleStack.cpp
index 41b2ada5f..178bbccf3 100644
--- a/Source/JavaScriptCore/heap/HandleStack.cpp
+++ b/Source/JavaScriptCore/heap/HandleStack.cpp
@@ -28,7 +28,7 @@
#include "HeapRootVisitor.h"
#include "JSObject.h"
-#include "Operations.h"
+#include "JSCInlines.h"
namespace JSC {
diff --git a/Source/JavaScriptCore/heap/HandleStack.h b/Source/JavaScriptCore/heap/HandleStack.h
index a7ce97650..8df8684ec 100644
--- a/Source/JavaScriptCore/heap/HandleStack.h
+++ b/Source/JavaScriptCore/heap/HandleStack.h
@@ -53,7 +53,7 @@ public:
void visit(HeapRootVisitor&);
private:
- void grow();
+ JS_EXPORT_PRIVATE void grow();
void zapTo(Frame&);
HandleSlot findFirstAfter(HandleSlot);
diff --git a/Source/JavaScriptCore/heap/Heap.cpp b/Source/JavaScriptCore/heap/Heap.cpp
index 35a9bf71f..10eaa0205 100644
--- a/Source/JavaScriptCore/heap/Heap.cpp
+++ b/Source/JavaScriptCore/heap/Heap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2009, 2011, 2013-2015 Apple Inc. All rights reserved.
* Copyright (C) 2007 Eric Seidel <eric@webkit.org>
*
* This library is free software; you can redistribute it and/or
@@ -26,33 +26,45 @@
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "CopyVisitorInlines.h"
+#include "DFGWorklist.h"
+#include "EdenGCActivityCallback.h"
+#include "FullGCActivityCallback.h"
#include "GCActivityCallback.h"
+#include "GCIncomingRefCountedSetInlines.h"
+#include "HeapHelperPool.h"
+#include "HeapIterationScope.h"
#include "HeapRootVisitor.h"
#include "HeapStatistics.h"
+#include "HeapVerifier.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
-#include "VM.h"
+#include "JSCInlines.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
-#include "JSONObject.h"
-#include "Operations.h"
+#include "JSVirtualMachineInternal.h"
+#include "SamplingProfiler.h"
#include "Tracing.h"
+#include "TypeProfilerLog.h"
#include "UnlinkedCodeBlock.h"
+#include "VM.h"
#include "WeakSetInlines.h"
#include <algorithm>
-#include <wtf/RAMSize.h>
#include <wtf/CurrentTime.h>
+#include <wtf/ParallelVectorIterator.h>
+#include <wtf/ProcessID.h>
+#include <wtf/RAMSize.h>
using namespace std;
-using namespace JSC;
namespace JSC {
-namespace {
+namespace {
static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
+#define ENABLE_GC_LOGGING 0
+
#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
@@ -68,82 +80,129 @@ static type name arguments;
struct GCTimer {
GCTimer(const char* name)
- : m_time(0)
- , m_min(100000000)
- , m_max(0)
- , m_count(0)
- , m_name(name)
+ : name(name)
{
}
~GCTimer()
{
- dataLogF("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min*1000, m_max*1000);
+ logData(allCollectionData, "(All)");
+ logData(edenCollectionData, "(Eden)");
+ logData(fullCollectionData, "(Full)");
+ }
+
+ struct TimeRecord {
+ TimeRecord()
+ : time(0)
+ , min(std::numeric_limits<double>::infinity())
+ , max(0)
+ , count(0)
+ {
+ }
+
+ double time;
+ double min;
+ double max;
+ size_t count;
+ };
+
+ void logData(const TimeRecord& data, const char* extra)
+ {
+ dataLogF("[%d] %s (Parent: %s) %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n",
+ getCurrentProcessID(),
+ name,
+ parent ? parent->name : "nullptr",
+ extra,
+ data.time * 1000,
+ data.time * 1000 / data.count,
+ data.min * 1000,
+ data.max * 1000,
+ data.count);
}
- double m_time;
- double m_min;
- double m_max;
- size_t m_count;
- const char* m_name;
+
+ void updateData(TimeRecord& data, double duration)
+ {
+ if (duration < data.min)
+ data.min = duration;
+ if (duration > data.max)
+ data.max = duration;
+ data.count++;
+ data.time += duration;
+ }
+
+ void didFinishPhase(HeapOperation collectionType, double duration)
+ {
+ TimeRecord& data = collectionType == EdenCollection ? edenCollectionData : fullCollectionData;
+ updateData(data, duration);
+ updateData(allCollectionData, duration);
+ }
+
+ static GCTimer* s_currentGlobalTimer;
+
+ TimeRecord allCollectionData;
+ TimeRecord fullCollectionData;
+ TimeRecord edenCollectionData;
+ const char* name;
+ GCTimer* parent { nullptr };
};
+GCTimer* GCTimer::s_currentGlobalTimer = nullptr;
+
struct GCTimerScope {
- GCTimerScope(GCTimer* timer)
- : m_timer(timer)
- , m_start(WTF::currentTime())
+ GCTimerScope(GCTimer& timer, HeapOperation collectionType)
+ : timer(timer)
+ , start(WTF::monotonicallyIncreasingTime())
+ , collectionType(collectionType)
{
+ timer.parent = GCTimer::s_currentGlobalTimer;
+ GCTimer::s_currentGlobalTimer = &timer;
}
~GCTimerScope()
{
- double delta = WTF::currentTime() - m_start;
- if (delta < m_timer->m_min)
- m_timer->m_min = delta;
- if (delta > m_timer->m_max)
- m_timer->m_max = delta;
- m_timer->m_count++;
- m_timer->m_time += delta;
- }
- GCTimer* m_timer;
- double m_start;
+ double delta = WTF::monotonicallyIncreasingTime() - start;
+ timer.didFinishPhase(collectionType, delta);
+ GCTimer::s_currentGlobalTimer = timer.parent;
+ }
+ GCTimer& timer;
+ double start;
+ HeapOperation collectionType;
};
struct GCCounter {
GCCounter(const char* name)
- : m_name(name)
- , m_count(0)
- , m_total(0)
- , m_min(10000000)
- , m_max(0)
+ : name(name)
+ , count(0)
+ , total(0)
+ , min(10000000)
+ , max(0)
{
}
- void count(size_t amount)
+ void add(size_t amount)
{
- m_count++;
- m_total += amount;
- if (amount < m_min)
- m_min = amount;
- if (amount > m_max)
- m_max = amount;
+ count++;
+ total += amount;
+ if (amount < min)
+ min = amount;
+ if (amount > max)
+ max = amount;
}
~GCCounter()
{
- dataLogF("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max);
+ dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), name, total, total / count, min, max);
}
- const char* m_name;
- size_t m_count;
- size_t m_total;
- size_t m_min;
- size_t m_max;
+ const char* name;
+ size_t count;
+ size_t total;
+ size_t min;
+ size_t max;
};
-#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer)
-#define COND_GCPHASE(cond, name1, name2) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name1##Timer, (#name1)); DEFINE_GC_LOGGING_GLOBAL(GCTimer, name2##Timer, (#name2)); GCTimerScope name1##CondTimerScope(cond ? &name1##Timer : &name2##Timer)
-#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)
+#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(name##Timer, m_operationInProgress)
+#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.add(value); } while (false)
#else
#define GCPHASE(name) do { } while (false)
-#define COND_GCPHASE(cond, name1, name2) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)
#endif
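The rewritten GCTimer/GCTimerScope pair above is a scoped-timing pattern: the scope's constructor pushes its timer as the current parent and records a start time, and the destructor folds the elapsed time into the per-collection-type record before restoring the previous parent. A minimal sketch of the same RAII idea with generic names, not the JSC macros:

    #include <chrono>
    #include <cstddef>

    struct PhaseTimer {
        const char* name = "";
        double totalSeconds = 0;
        std::size_t count = 0;
        PhaseTimer* parent = nullptr;

        static PhaseTimer* current; // innermost phase currently being timed
    };
    PhaseTimer* PhaseTimer::current = nullptr;

    class PhaseScope {
    public:
        explicit PhaseScope(PhaseTimer& timer)
            : m_timer(timer)
            , m_start(std::chrono::steady_clock::now())
        {
            // Push this timer as the new innermost phase, remembering its parent.
            m_timer.parent = PhaseTimer::current;
            PhaseTimer::current = &m_timer;
        }

        ~PhaseScope()
        {
            std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - m_start;
            m_timer.totalSeconds += elapsed.count();
            m_timer.count++;
            PhaseTimer::current = m_timer.parent; // pop back to the enclosing phase
        }

    private:
        PhaseTimer& m_timer;
        std::chrono::steady_clock::time_point m_start;
    };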
@@ -166,12 +225,12 @@ static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
static inline bool isValidSharedInstanceThreadState(VM* vm)
{
- return vm->apiLock().currentThreadIsHoldingLock();
+ return vm->currentThreadIsHoldingAPILock();
}
static inline bool isValidThreadState(VM* vm)
{
- if (vm->identifierTable != wtfThreadData().currentIdentifierTable())
+ if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
return false;
if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
@@ -181,12 +240,17 @@ static inline bool isValidThreadState(VM* vm)
}
struct MarkObject : public MarkedBlock::VoidFunctor {
- void operator()(JSCell* cell)
+ inline void visit(JSCell* cell)
{
if (cell->isZapped())
return;
Heap::heap(cell)->setMarked(cell);
}
+ IterationStatus operator()(JSCell* cell)
+ {
+ visit(cell);
+ return IterationStatus::Continue;
+ }
};
struct Count : public MarkedBlock::CountFunctor {
@@ -194,30 +258,36 @@ struct Count : public MarkedBlock::CountFunctor {
};
struct CountIfGlobalObject : MarkedBlock::CountFunctor {
- void operator()(JSCell* cell) {
+ inline void visit(JSCell* cell)
+ {
if (!cell->isObject())
return;
if (!asObject(cell)->isGlobalObject())
return;
count(1);
}
+ IterationStatus operator()(JSCell* cell)
+ {
+ visit(cell);
+ return IterationStatus::Continue;
+ }
};
class RecordType {
public:
- typedef PassOwnPtr<TypeCountSet> ReturnType;
+ typedef std::unique_ptr<TypeCountSet> ReturnType;
RecordType();
- void operator()(JSCell*);
+ IterationStatus operator()(JSCell*);
ReturnType returnValue();
private:
const char* typeName(JSCell*);
- OwnPtr<TypeCountSet> m_typeCountSet;
+ std::unique_ptr<TypeCountSet> m_typeCountSet;
};
inline RecordType::RecordType()
- : m_typeCountSet(adoptPtr(new TypeCountSet))
+ : m_typeCountSet(std::make_unique<TypeCountSet>())
{
}
@@ -229,47 +299,72 @@ inline const char* RecordType::typeName(JSCell* cell)
return info->className;
}
-inline void RecordType::operator()(JSCell* cell)
+inline IterationStatus RecordType::operator()(JSCell* cell)
{
m_typeCountSet->add(typeName(cell));
+ return IterationStatus::Continue;
}
-inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
+inline std::unique_ptr<TypeCountSet> RecordType::returnValue()
{
- return m_typeCountSet.release();
+ return WTFMove(m_typeCountSet);
}
} // anonymous namespace
Heap::Heap(VM* vm, HeapType heapType)
: m_heapType(heapType)
- , m_ramSize(ramSize())
+ , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
, m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
, m_sizeAfterLastCollect(0)
- , m_bytesAllocatedLimit(m_minBytesPerCycle)
- , m_bytesAllocated(0)
- , m_bytesAbandoned(0)
+ , m_sizeAfterLastFullCollect(0)
+ , m_sizeBeforeLastFullCollect(0)
+ , m_sizeAfterLastEdenCollect(0)
+ , m_sizeBeforeLastEdenCollect(0)
+ , m_bytesAllocatedThisCycle(0)
+ , m_bytesAbandonedSinceLastFullCollect(0)
+ , m_maxEdenSize(m_minBytesPerCycle)
+ , m_maxHeapSize(m_minBytesPerCycle)
+ , m_shouldDoFullCollection(false)
+ , m_totalBytesVisited(0)
+ , m_totalBytesCopied(0)
, m_operationInProgress(NoOperation)
- , m_blockAllocator()
, m_objectSpace(this)
, m_storageSpace(this)
+ , m_extraMemorySize(0)
+ , m_deprecatedExtraMemorySize(0)
, m_machineThreads(this)
- , m_sharedData(vm)
- , m_slotVisitor(m_sharedData)
- , m_copyVisitor(m_sharedData)
+ , m_slotVisitor(*this)
, m_handleSet(vm)
, m_isSafeToCollect(false)
+ , m_writeBarrierBuffer(256)
, m_vm(vm)
- , m_lastGCLength(0)
- , m_lastCodeDiscardTime(WTF::currentTime())
- , m_activityCallback(DefaultGCActivityCallback::create(this))
- , m_sweeper(IncrementalSweeper::create(this))
+ // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
+ // schedule the timer if we've never done a collection.
+ , m_lastFullGCLength(0.01)
+ , m_lastEdenGCLength(0.01)
+ , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
+ , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
+#if USE(CF)
+ , m_sweeper(std::make_unique<IncrementalSweeper>(this, CFRunLoopGetCurrent()))
+#else
+ , m_sweeper(std::make_unique<IncrementalSweeper>(this))
+#endif
+ , m_deferralDepth(0)
+#if USE(CF)
+ , m_delayedReleaseRecursionCount(0)
+#endif
+ , m_helperClient(&heapHelperPool())
{
m_storageSpace.init();
+ if (Options::verifyHeap())
+ m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
}
Heap::~Heap()
{
+ for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
+ WeakBlock::destroy(*this, block);
}
bool Heap::isPagedOut(double deadline)
@@ -281,40 +376,61 @@ bool Heap::isPagedOut(double deadline)
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
- RELEASE_ASSERT(!m_vm->dynamicGlobalObject);
+ RELEASE_ASSERT(!m_vm->entryScope);
RELEASE_ASSERT(m_operationInProgress == NoOperation);
+ m_codeBlocks.lastChanceToFinalize();
m_objectSpace.lastChanceToFinalize();
+ releaseDelayedReleasedObjects();
+
+ sweepAllLogicallyEmptyWeakBlocks();
+}
+
+void Heap::releaseDelayedReleasedObjects()
+{
+#if USE(CF)
+ // We need to guard against the case that releasing an object can create more objects due to the
+ // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
+ // back here and could try to recursively release objects. We guard that with a recursive entry
+ // count. Only the initial call will release objects; recursive calls simply return and let
+ // the initial call to the function take care of any objects created during release time.

+ // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
+ // and use a temp Vector for the actual releasing.
+ if (!m_delayedReleaseRecursionCount++) {
+ while (!m_delayedReleaseObjects.isEmpty()) {
+ ASSERT(m_vm->currentThreadIsHoldingAPILock());
+
+ Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
-#if ENABLE(SIMPLE_HEAP_PROFILING)
- m_slotVisitor.m_visitedTypeCounts.dump(WTF::dataFile(), "Visited Type Counts");
- m_destroyedTypeCounts.dump(WTF::dataFile(), "Destroyed Type Counts");
+ {
+ // We need to drop locks before calling out to arbitrary code.
+ JSLock::DropAllLocks dropAllLocks(m_vm);
+
+ objectsToRelease.clear();
+ }
+ }
+ }
+ m_delayedReleaseRecursionCount--;
#endif
}
-void Heap::reportExtraMemoryCostSlowCase(size_t cost)
+void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
{
- // Our frequency of garbage collection tries to balance memory use against speed
- // by collecting based on the number of newly created values. However, for values
- // that hold on to a great deal of memory that's not in the form of other JS values,
- // that is not good enough - in some cases a lot of those objects can pile up and
- // use crazy amounts of memory without a GC happening. So we track these extra
- // memory costs. Only unusually large objects are noted, and we only keep track
- // of this extra cost until the next GC. In garbage collected languages, most values
- // are either very short lived temporaries, or have extremely long lifetimes. So
- // if a large value survives one garbage collection, there is not much point to
- // collecting more frequently as long as it stays alive.
+ didAllocate(size);
+ collectIfNecessaryOrDefer();
+}
- didAllocate(cost);
- if (shouldCollect())
- collect(DoNotSweep);
+void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
+{
+ m_deprecatedExtraMemorySize += size;
+ reportExtraMemoryAllocatedSlowCase(size);
}
void Heap::reportAbandonedObjectGraph()
{
// Our clients don't know exactly how much memory they
// are abandoning so we just guess for them.
- double abandonedBytes = 0.10 * m_sizeAfterLastCollect;
+ double abandonedBytes = 0.1 * m_sizeAfterLastCollect;
// We want to accelerate the next collection. Because memory has just
// been abandoned, the next collection has the potential to
@@ -325,14 +441,17 @@ void Heap::reportAbandonedObjectGraph()
void Heap::didAbandon(size_t bytes)
{
- m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
- m_bytesAbandoned += bytes;
+ if (m_fullActivityCallback) {
+ m_fullActivityCallback->didAllocate(
+ m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
+ }
+ m_bytesAbandonedSinceLastFullCollect += bytes;
}
void Heap::protect(JSValue k)
{
ASSERT(k);
- ASSERT(m_vm->apiLock().currentThreadIsHoldingLock());
+ ASSERT(m_vm->currentThreadIsHoldingAPILock());
if (!k.isCell())
return;
@@ -343,7 +462,7 @@ void Heap::protect(JSValue k)
bool Heap::unprotect(JSValue k)
{
ASSERT(k);
- ASSERT(m_vm->apiLock().currentThreadIsHoldingLock());
+ ASSERT(m_vm->currentThreadIsHoldingAPILock());
if (!k.isCell())
return false;
@@ -351,42 +470,11 @@ bool Heap::unprotect(JSValue k)
return m_protectedValues.remove(k.asCell());
}
-void Heap::jettisonDFGCodeBlock(PassOwnPtr<CodeBlock> codeBlock)
+void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
{
- m_dfgCodeBlocks.jettison(codeBlock);
-}
-
-void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
-{
- ProtectCountSet::iterator end = m_protectedValues.end();
- for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
- heapRootVisitor.visit(&it->key);
-}
-
-void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
-{
- m_tempSortingVectors.append(tempVector);
-}
-
-void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
-{
- ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
- m_tempSortingVectors.removeLast();
-}
-
-void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
-{
- typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>* > VectorOfValueStringVectors;
-
- VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
- for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
- Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempSortingVector = *it;
-
- Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
- for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
- if (vectorIt->first)
- heapRootVisitor.visit(&vectorIt->first);
- }
+ if (m_arrayBuffers.addReference(cell, buffer)) {
+ collectIfNecessaryOrDefer();
+ didAllocate(buffer->gcSizeEstimateInBytes());
}
}
@@ -397,6 +485,7 @@ void Heap::harvestWeakReferences()
void Heap::finalizeUnconditionalFinalizers()
{
+ GCPHASE(FinalizeUnconditionalFinalizers);
m_slotVisitor.finalizeUnconditionalFinalizers();
}
@@ -405,213 +494,423 @@ inline JSStack& Heap::stack()
return m_vm->interpreter->stack();
}
-void Heap::canonicalizeCellLivenessData()
+void Heap::willStartIterating()
{
- m_objectSpace.canonicalizeCellLivenessData();
+ m_objectSpace.willStartIterating();
}
-void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
+void Heap::didFinishIterating()
{
- ASSERT(isValidThreadState(m_vm));
- ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
- stack().gatherConservativeRoots(stackRoots);
- size_t stackRootCount = stackRoots.size();
- JSCell** registerRoots = stackRoots.roots();
- for (size_t i = 0; i < stackRootCount; i++) {
- setMarked(registerRoots[i]);
- roots.add(registerRoots[i]);
- }
+ m_objectSpace.didFinishIterating();
}
-void Heap::markRoots()
+void Heap::completeAllDFGPlans()
{
- SamplingRegion samplingRegion("Garbage Collection: Tracing");
+#if ENABLE(DFG_JIT)
+ DFG::completeAllPlansForVM(*m_vm);
+#endif
+}
+
+void Heap::markRoots(double gcStartTime, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
+{
+ SamplingRegion samplingRegion("Garbage Collection: Marking");
GCPHASE(MarkRoots);
ASSERT(isValidThreadState(m_vm));
-#if ENABLE(OBJECT_MARK_LOGGING)
- double gcStartTime = WTF::currentTime();
-#endif
-
- void* dummy;
-
// We gather conservative roots before clearing mark bits because conservative
// gathering uses the mark bits to determine whether a reference is valid.
- ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
- m_jitStubRoutines.clearMarks();
- {
- GCPHASE(GatherConservativeRoots);
- m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
- }
-
- ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
- m_dfgCodeBlocks.clearMarks();
- {
- GCPHASE(GatherStackRoots);
- stack().gatherConservativeRoots(
- stackRoots, m_jitStubRoutines, m_dfgCodeBlocks);
- }
+ ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
+ gatherStackRoots(conservativeRoots, stackOrigin, stackTop, calleeSavedRegisters);
+ gatherJSStackRoots(conservativeRoots);
+ gatherScratchBufferRoots(conservativeRoots);
#if ENABLE(DFG_JIT)
- ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
- {
- GCPHASE(GatherScratchBufferRoots);
- m_vm->gatherConservativeRoots(scratchBufferRoots);
- }
+ DFG::rememberCodeBlocks(*m_vm);
#endif
- {
- GCPHASE(clearMarks);
- m_objectSpace.clearMarks();
+#if ENABLE(SAMPLING_PROFILER)
+ if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
+ // Note that we need to own the lock from now until we're done
+ // marking the SamplingProfiler's data because once we verify the
+ // SamplingProfiler's stack traces, we don't want it to accumulate
+ // more stack traces before we get the chance to mark it.
+ // This lock is released inside visitSamplingProfiler().
+ samplingProfiler->getLock().lock();
+ samplingProfiler->processUnverifiedStackTraces();
}
+#endif // ENABLE(SAMPLING_PROFILER)
- m_sharedData.didStartMarking();
- SlotVisitor& visitor = m_slotVisitor;
- visitor.setup();
- HeapRootVisitor heapRootVisitor(visitor);
+ if (m_operationInProgress == FullCollection) {
+ m_opaqueRoots.clear();
+ m_slotVisitor.clearMarkStack();
+ }
- {
- ParallelModeEnabler enabler(visitor);
+ clearLivenessData();
- if (m_vm->codeBlocksBeingCompiled.size()) {
- GCPHASE(VisitActiveCodeBlock);
- for (size_t i = 0; i < m_vm->codeBlocksBeingCompiled.size(); i++)
- m_vm->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
- }
+ m_parallelMarkersShouldExit = false;
- m_vm->smallStrings.visitStrongReferences(visitor);
+ m_helperClient.setFunction(
+ [this] () {
+ SlotVisitor* slotVisitor;
+ {
+ LockHolder locker(m_parallelSlotVisitorLock);
+ if (m_availableParallelSlotVisitors.isEmpty()) {
+ std::unique_ptr<SlotVisitor> newVisitor =
+ std::make_unique<SlotVisitor>(*this);
+ slotVisitor = newVisitor.get();
+ m_parallelSlotVisitors.append(WTFMove(newVisitor));
+ } else
+ slotVisitor = m_availableParallelSlotVisitors.takeLast();
+ }
- {
- GCPHASE(VisitMachineRoots);
- MARK_LOG_ROOT(visitor, "C++ Stack");
- visitor.append(machineThreadRoots);
- visitor.donateAndDrain();
- }
- {
- GCPHASE(VisitStackRoots);
- MARK_LOG_ROOT(visitor, "Stack");
- visitor.append(stackRoots);
- visitor.donateAndDrain();
- }
-#if ENABLE(DFG_JIT)
- {
- GCPHASE(VisitScratchBufferRoots);
- MARK_LOG_ROOT(visitor, "Scratch Buffers");
- visitor.append(scratchBufferRoots);
- visitor.donateAndDrain();
- }
-#endif
- {
- GCPHASE(VisitProtectedObjects);
- MARK_LOG_ROOT(visitor, "Protected Objects");
- markProtectedObjects(heapRootVisitor);
- visitor.donateAndDrain();
- }
- {
- GCPHASE(VisitTempSortVectors);
- MARK_LOG_ROOT(visitor, "Temp Sort Vectors");
- markTempSortVectors(heapRootVisitor);
- visitor.donateAndDrain();
- }
+ WTF::registerGCThread();
- {
- GCPHASE(MarkingArgumentBuffers);
- if (m_markListSet && m_markListSet->size()) {
- MARK_LOG_ROOT(visitor, "Argument Buffers");
- MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
- visitor.donateAndDrain();
+ {
+ ParallelModeEnabler parallelModeEnabler(*slotVisitor);
+ slotVisitor->didStartMarking();
+ slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
}
- }
- if (m_vm->exception) {
- GCPHASE(MarkingException);
- MARK_LOG_ROOT(visitor, "Exceptions");
- heapRootVisitor.visit(&m_vm->exception);
- visitor.donateAndDrain();
- }
-
- {
- GCPHASE(VisitStrongHandles);
- MARK_LOG_ROOT(visitor, "Strong Handles");
- m_handleSet.visitStrongHandles(heapRootVisitor);
- visitor.donateAndDrain();
- }
-
- {
- GCPHASE(HandleStack);
- MARK_LOG_ROOT(visitor, "Handle Stack");
- m_handleStack.visit(heapRootVisitor);
- visitor.donateAndDrain();
- }
-
- {
- GCPHASE(TraceCodeBlocksAndJITStubRoutines);
- MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines");
- m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
- m_jitStubRoutines.traceMarkedStubRoutines(visitor);
- visitor.donateAndDrain();
- }
+
+ {
+ LockHolder locker(m_parallelSlotVisitorLock);
+ m_availableParallelSlotVisitors.append(slotVisitor);
+ }
+ });
+
+ m_slotVisitor.didStartMarking();
-#if ENABLE(PARALLEL_GC)
- {
- GCPHASE(Convergence);
- visitor.drainFromShared(SlotVisitor::MasterDrain);
- }
-#endif
+ HeapRootVisitor heapRootVisitor(m_slotVisitor);
+
+ {
+ ParallelModeEnabler enabler(m_slotVisitor);
+
+ m_slotVisitor.donateAndDrain();
+ visitExternalRememberedSet();
+ visitSmallStrings();
+ visitConservativeRoots(conservativeRoots);
+ visitProtectedObjects(heapRootVisitor);
+ visitArgumentBuffers(heapRootVisitor);
+ visitException(heapRootVisitor);
+ visitStrongHandles(heapRootVisitor);
+ visitHandleStack(heapRootVisitor);
+ visitSamplingProfiler();
+ traceCodeBlocksAndJITStubRoutines();
+ converge();
}
// Weak references must be marked last because their liveness depends on
// the liveness of the rest of the object graph.
+ visitWeakHandles(heapRootVisitor);
+
{
- GCPHASE(VisitingLiveWeakHandles);
- MARK_LOG_ROOT(visitor, "Live Weak Handles");
- while (true) {
- m_objectSpace.visitWeakSets(heapRootVisitor);
- harvestWeakReferences();
- if (visitor.isEmpty())
- break;
- {
- ParallelModeEnabler enabler(visitor);
- visitor.donateAndDrain();
-#if ENABLE(PARALLEL_GC)
- visitor.drainFromShared(SlotVisitor::MasterDrain);
-#endif
- }
+ std::lock_guard<Lock> lock(m_markingMutex);
+ m_parallelMarkersShouldExit = true;
+ m_markingConditionVariable.notifyAll();
+ }
+ m_helperClient.finish();
+ updateObjectCounts(gcStartTime);
+ resetVisitors();
+}
+
+void Heap::copyBackingStores()
+{
+ GCPHASE(CopyBackingStores);
+ if (m_operationInProgress == EdenCollection)
+ m_storageSpace.startedCopying<EdenCollection>();
+ else {
+ ASSERT(m_operationInProgress == FullCollection);
+ m_storageSpace.startedCopying<FullCollection>();
+ }
+
+ if (m_storageSpace.shouldDoCopyPhase()) {
+ if (m_operationInProgress == EdenCollection) {
+ // Reset the vector to be empty, but don't throw away the backing store.
+ m_blocksToCopy.shrink(0);
+ for (CopiedBlock* block = m_storageSpace.m_newGen.fromSpace->head(); block; block = block->next())
+ m_blocksToCopy.append(block);
+ } else {
+ ASSERT(m_operationInProgress == FullCollection);
+ WTF::copyToVector(m_storageSpace.m_blockSet, m_blocksToCopy);
}
+
+ ParallelVectorIterator<Vector<CopiedBlock*>> iterator(
+ m_blocksToCopy, s_blockFragmentLength);
+
+ // Note that it's safe to use the [&] capture list here, even though we're creating a task
+ // that other threads run. That's because after runFunctionInParallel() returns, the task
+ // we have created is not going to be running anymore. Hence, everything on the stack here
+ // outlives the task.
+ m_helperClient.runFunctionInParallel(
+ [&] () {
+ CopyVisitor copyVisitor(*this);
+
+ iterator.iterate(
+ [&] (CopiedBlock* block) {
+ if (!block->hasWorkList())
+ return;
+
+ CopyWorkList& workList = block->workList();
+ for (CopyWorklistItem item : workList) {
+ if (item.token() == ButterflyCopyToken) {
+ JSObject::copyBackingStore(
+ item.cell(), copyVisitor, ButterflyCopyToken);
+ continue;
+ }
+
+ item.cell()->methodTable()->copyBackingStore(
+ item.cell(), copyVisitor, item.token());
+ }
+
+ ASSERT(!block->liveBytes());
+ m_storageSpace.recycleEvacuatedBlock(block, m_operationInProgress);
+ });
+ });
}
+
+ m_storageSpace.doneCopying();
+}
- GCCOUNTER(VisitedValueCount, visitor.visitCount());
+void Heap::gatherStackRoots(ConservativeRoots& roots, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
+{
+ GCPHASE(GatherStackRoots);
+ m_jitStubRoutines.clearMarks();
+ m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, stackOrigin, stackTop, calleeSavedRegisters);
+}
- m_sharedData.didFinishMarking();
-#if ENABLE(OBJECT_MARK_LOGGING)
- size_t visitCount = visitor.visitCount();
-#if ENABLE(PARALLEL_GC)
- visitCount += m_sharedData.childVisitCount();
+void Heap::gatherJSStackRoots(ConservativeRoots& roots)
+{
+#if !ENABLE(JIT)
+ GCPHASE(GatherJSStackRoots);
+ stack().gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks);
+#else
+ UNUSED_PARAM(roots);
#endif
- MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::currentTime() - gcStartTime);
+}
+
+void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
+{
+#if ENABLE(DFG_JIT)
+ GCPHASE(GatherScratchBufferRoots);
+ m_vm->gatherConservativeRoots(roots);
+#else
+ UNUSED_PARAM(roots);
#endif
+}
+
+void Heap::clearLivenessData()
+{
+ GCPHASE(ClearLivenessData);
+ if (m_operationInProgress == FullCollection)
+ m_codeBlocks.clearMarksForFullCollection();
+
+ m_objectSpace.clearNewlyAllocated();
+ m_objectSpace.clearMarks();
+}
- visitor.reset();
-#if ENABLE(PARALLEL_GC)
- m_sharedData.resetChildren();
+void Heap::visitExternalRememberedSet()
+{
+#if JSC_OBJC_API_ENABLED
+ scanExternalRememberedSet(*m_vm, m_slotVisitor);
#endif
- m_sharedData.reset();
}
-void Heap::copyBackingStores()
+void Heap::visitSmallStrings()
{
- m_storageSpace.startedCopying();
- if (m_storageSpace.shouldDoCopyPhase()) {
- m_sharedData.didStartCopying();
- m_copyVisitor.startCopying();
- m_copyVisitor.copyFromShared();
- m_copyVisitor.doneCopying();
- // We need to wait for everybody to finish and return their CopiedBlocks
- // before signaling that the phase is complete.
- m_storageSpace.doneCopying();
- m_sharedData.didFinishCopying();
- } else
- m_storageSpace.doneCopying();
+ GCPHASE(VisitSmallStrings);
+ if (!m_vm->smallStrings.needsToBeVisited(m_operationInProgress))
+ return;
+
+ m_vm->smallStrings.visitStrongReferences(m_slotVisitor);
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("Small strings:\n", m_slotVisitor);
+ m_slotVisitor.donateAndDrain();
+}
+
+void Heap::visitConservativeRoots(ConservativeRoots& roots)
+{
+ GCPHASE(VisitConservativeRoots);
+ m_slotVisitor.append(roots);
+
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("Conservative Roots:\n", m_slotVisitor);
+
+ m_slotVisitor.donateAndDrain();
+}
+
+void Heap::visitCompilerWorklistWeakReferences()
+{
+#if ENABLE(DFG_JIT)
+ for (auto worklist : m_suspendedCompilerWorklists)
+ worklist->visitWeakReferences(m_slotVisitor);
+
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("DFG Worklists:\n", m_slotVisitor);
+#endif
+}
+
+void Heap::removeDeadCompilerWorklistEntries()
+{
+#if ENABLE(DFG_JIT)
+ GCPHASE(FinalizeDFGWorklists);
+ for (auto worklist : m_suspendedCompilerWorklists)
+ worklist->removeDeadPlans(*m_vm);
+#endif
+}
+
+void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
+{
+ GCPHASE(VisitProtectedObjects);
+
+ for (auto& pair : m_protectedValues)
+ heapRootVisitor.visit(&pair.key);
+
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("Protected Objects:\n", m_slotVisitor);
+
+ m_slotVisitor.donateAndDrain();
+}
+
+void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
+{
+ GCPHASE(MarkingArgumentBuffers);
+ if (!m_markListSet || !m_markListSet->size())
+ return;
+
+ MarkedArgumentBuffer::markLists(visitor, *m_markListSet);
+
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("Argument Buffers:\n", m_slotVisitor);
+
+ m_slotVisitor.donateAndDrain();
+}
+
+void Heap::visitException(HeapRootVisitor& visitor)
+{
+ GCPHASE(MarkingException);
+ if (!m_vm->exception() && !m_vm->lastException())
+ return;
+
+ visitor.visit(m_vm->addressOfException());
+ visitor.visit(m_vm->addressOfLastException());
+
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("Exceptions:\n", m_slotVisitor);
+
+ m_slotVisitor.donateAndDrain();
+}
+
+void Heap::visitStrongHandles(HeapRootVisitor& visitor)
+{
+ GCPHASE(VisitStrongHandles);
+ m_handleSet.visitStrongHandles(visitor);
+
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("Strong Handles:\n", m_slotVisitor);
+
+ m_slotVisitor.donateAndDrain();
+}
+
+void Heap::visitHandleStack(HeapRootVisitor& visitor)
+{
+ GCPHASE(VisitHandleStack);
+ m_handleStack.visit(visitor);
+
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("Handle Stack:\n", m_slotVisitor);
+
+ m_slotVisitor.donateAndDrain();
+}
+
+void Heap::visitSamplingProfiler()
+{
+#if ENABLE(SAMPLING_PROFILER)
+ if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
+ ASSERT(samplingProfiler->getLock().isLocked());
+ GCPHASE(VisitSamplingProfiler);
+ samplingProfiler->visit(m_slotVisitor);
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("Sampling Profiler data:\n", m_slotVisitor);
+
+ m_slotVisitor.donateAndDrain();
+ samplingProfiler->getLock().unlock();
+ }
+#endif // ENABLE(SAMPLING_PROFILER)
+}
+
+void Heap::traceCodeBlocksAndJITStubRoutines()
+{
+ GCPHASE(TraceCodeBlocksAndJITStubRoutines);
+ m_jitStubRoutines.traceMarkedStubRoutines(m_slotVisitor);
+
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);
+
+ m_slotVisitor.donateAndDrain();
+}
+
+void Heap::converge()
+{
+ GCPHASE(Convergence);
+ m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
+}
+
+void Heap::visitWeakHandles(HeapRootVisitor& visitor)
+{
+ GCPHASE(VisitingLiveWeakHandles);
+ while (true) {
+ m_objectSpace.visitWeakSets(visitor);
+ harvestWeakReferences();
+ visitCompilerWorklistWeakReferences();
+ if (m_slotVisitor.isEmpty())
+ break;
+
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("Live Weak Handles:\n", m_slotVisitor);
+
+ {
+ ParallelModeEnabler enabler(m_slotVisitor);
+ m_slotVisitor.donateAndDrain();
+ m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
+ }
+ }
+}
+
+void Heap::updateObjectCounts(double gcStartTime)
+{
+ GCCOUNTER(VisitedValueCount, m_slotVisitor.visitCount() + threadVisitCount());
+
+ if (Options::logGC() == GCLogging::Verbose) {
+ size_t visitCount = m_slotVisitor.visitCount();
+ visitCount += threadVisitCount();
+ dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
+ }
+
+ size_t bytesRemovedFromOldSpaceDueToReallocation =
+ m_storageSpace.takeBytesRemovedFromOldSpaceDueToReallocation();
+
+ if (m_operationInProgress == FullCollection) {
+ m_totalBytesVisited = 0;
+ m_totalBytesCopied = 0;
+ } else
+ m_totalBytesCopied -= bytesRemovedFromOldSpaceDueToReallocation;
+
+ m_totalBytesVisitedThisCycle = m_slotVisitor.bytesVisited() + threadBytesVisited();
+ m_totalBytesCopiedThisCycle = m_slotVisitor.bytesCopied() + threadBytesCopied();
+
+ m_totalBytesVisited += m_totalBytesVisitedThisCycle;
+ m_totalBytesCopied += m_totalBytesCopiedThisCycle;
+}
+
+void Heap::resetVisitors()
+{
+ m_slotVisitor.reset();
+
+ for (auto& parallelVisitor : m_parallelSlotVisitors)
+ parallelVisitor->reset();
+
+ ASSERT(m_sharedMarkStack.isEmpty());
+ m_weakReferenceHarvesters.removeAll();
}
size_t Heap::objectCount()
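In the new markRoots() above, parallel marking threads draw a SlotVisitor from a small pool guarded by m_parallelSlotVisitorLock, creating one lazily on first use and returning it when their drain is done. A compact sketch of that checkout/checkin pattern with generic types (illustrative only, not the JSC classes):

    #include <memory>
    #include <mutex>
    #include <vector>

    struct Visitor {
        // per-thread marking scratch state would live here
    };

    class VisitorPool {
    public:
        // Hand a visitor to the calling thread, creating one lazily if none are idle.
        Visitor* checkout()
        {
            std::lock_guard<std::mutex> locker(m_lock);
            if (m_idle.empty()) {
                m_all.push_back(std::make_unique<Visitor>());
                return m_all.back().get();
            }
            Visitor* visitor = m_idle.back();
            m_idle.pop_back();
            return visitor;
        }

        // Return a visitor to the idle list once the thread has finished draining work.
        void checkin(Visitor* visitor)
        {
            std::lock_guard<std::mutex> locker(m_lock);
            m_idle.push_back(visitor);
        }

    private:
        std::mutex m_lock;
        std::vector<std::unique_ptr<Visitor>> m_all; // owns every visitor ever created
        std::vector<Visitor*> m_idle;                // visitors not currently checked out
    };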
@@ -619,14 +918,19 @@ size_t Heap::objectCount()
return m_objectSpace.objectCount();
}
+size_t Heap::extraMemorySize()
+{
+ return m_extraMemorySize + m_deprecatedExtraMemorySize + m_arrayBuffers.size();
+}
+
size_t Heap::size()
{
- return m_objectSpace.size() + m_storageSpace.size();
+ return m_objectSpace.size() + m_storageSpace.size() + extraMemorySize();
}
size_t Heap::capacity()
{
- return m_objectSpace.capacity() + m_storageSpace.capacity();
+ return m_objectSpace.capacity() + m_storageSpace.capacity() + extraMemorySize();
}
size_t Heap::protectedGlobalObjectCount()
@@ -636,7 +940,8 @@ size_t Heap::protectedGlobalObjectCount()
size_t Heap::globalObjectCount()
{
- return m_objectSpace.forEachLiveCell<CountIfGlobalObject>();
+ HeapIterationScope iterationScope(*this);
+ return m_objectSpace.forEachLiveCell<CountIfGlobalObject>(iterationScope);
}
size_t Heap::protectedObjectCount()
@@ -644,184 +949,509 @@ size_t Heap::protectedObjectCount()
return forEachProtectedCell<Count>();
}
-PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
+std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
return forEachProtectedCell<RecordType>();
}
-PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
+std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
{
- return m_objectSpace.forEachLiveCell<RecordType>();
+ HeapIterationScope iterationScope(*this);
+ return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
}
-void Heap::deleteAllCompiledCode()
+void Heap::deleteAllCodeBlocks()
{
- // If JavaScript is running, it's not safe to delete code, since we'll end
- // up deleting code that is live on the stack.
- if (m_vm->dynamicGlobalObject)
- return;
+ // If JavaScript is running, it's not safe to delete all JavaScript code, since
+ // we'll end up returning to deleted code.
+ RELEASE_ASSERT(!m_vm->entryScope);
+ ASSERT(m_operationInProgress == NoOperation);
+
+ completeAllDFGPlans();
- for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
+ for (ExecutableBase* executable : m_executables)
+ executable->clearCode();
+}
+
+void Heap::deleteAllUnlinkedCodeBlocks()
+{
+ for (ExecutableBase* current : m_executables) {
if (!current->isFunctionExecutable())
continue;
- static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
+ static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
}
-
- m_dfgCodeBlocks.clearMarks();
- m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
}
-void Heap::deleteUnmarkedCompiledCode()
+void Heap::clearUnmarkedExecutables()
{
- ExecutableBase* next;
- for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
- next = current->next();
+ GCPHASE(ClearUnmarkedExecutables);
+ for (unsigned i = m_executables.size(); i--;) {
+ ExecutableBase* current = m_executables[i];
if (isMarked(current))
continue;
- // We do this because executable memory is limited on some platforms and because
- // CodeBlock requires eager finalization.
- ExecutableBase::clearCodeVirtual(current);
- m_compiledCode.remove(current);
+ // Eagerly dereference the Executable's JITCode in order to run watchpoint
+ // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
+ current->clearCode();
+ std::swap(m_executables[i], m_executables.last());
+ m_executables.removeLast();
}
- m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
+ m_executables.shrinkToFit();
+}
+
+void Heap::deleteUnmarkedCompiledCode()
+{
+ GCPHASE(DeleteCodeBlocks);
+ clearUnmarkedExecutables();
+ m_codeBlocks.deleteUnmarkedAndUnreferenced(m_operationInProgress);
m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
}
-void Heap::collectAllGarbage()
+void Heap::addToRememberedSet(const JSCell* cell)
+{
+ ASSERT(cell);
+ ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
+ ASSERT(cell->cellState() == CellState::OldBlack);
+ // Indicate that this object is grey and that it's one of the following:
+ // - A re-greyed object during a concurrent collection.
+ // - An old remembered object.
+ // "OldGrey" doesn't tell us which of these things is true, but we usually treat the two cases the
+ // same.
+ cell->setCellState(CellState::OldGrey);
+ m_slotVisitor.appendToMarkStack(const_cast<JSCell*>(cell));
+}
+
+void* Heap::copyBarrier(const JSCell*, void*& pointer)
+{
+ // Do nothing for now, except making sure that the low bits are masked off. This helps to
+ // simulate enough of this barrier that at least we can test the low bits assumptions.
+ pointer = bitwise_cast<void*>(
+ bitwise_cast<uintptr_t>(pointer) & ~static_cast<uintptr_t>(CopyBarrierBase::spaceBits));
+
+ return pointer;
+}
+
+void Heap::collectAndSweep(HeapOperation collectionType)
{
if (!m_isSafeToCollect)
return;
- collect(DoSweep);
+ collect(collectionType);
+
+ SamplingRegion samplingRegion("Garbage Collection: Sweeping");
+
+ DeferGCForAWhile deferGC(*this);
+ m_objectSpace.sweep();
+ m_objectSpace.shrink();
+
+ sweepAllLogicallyEmptyWeakBlocks();
}
-static double minute = 60.0;
+NEVER_INLINE void Heap::collect(HeapOperation collectionType)
+{
+ void* stackTop;
+ ALLOCATE_AND_GET_REGISTER_STATE(registers);
+
+ collectImpl(collectionType, wtfThreadData().stack().origin(), &stackTop, registers);
-void Heap::collect(SweepToggle sweepToggle)
+ sanitizeStackForVM(m_vm);
+}
+
+NEVER_INLINE void Heap::collectImpl(HeapOperation collectionType, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
{
+#if ENABLE(ALLOCATION_LOGGING)
+ dataLogF("JSC GC starting collection.\n");
+#endif
+
+ double before = 0;
+ if (Options::logGC()) {
+ dataLog("[GC: ", capacity() / 1024, " kb ");
+ before = currentTimeMS();
+ }
+
SamplingRegion samplingRegion("Garbage Collection");
- GCPHASE(Collect);
- ASSERT(vm()->apiLock().currentThreadIsHoldingLock());
- RELEASE_ASSERT(vm()->identifierTable == wtfThreadData().currentIdentifierTable());
+ if (vm()->typeProfiler()) {
+ DeferGCForAWhile awhile(*this);
+ vm()->typeProfilerLog()->processLogEntries(ASCIILiteral("GC"));
+ }
+
+ RELEASE_ASSERT(!m_deferralDepth);
+ ASSERT(vm()->currentThreadIsHoldingAPILock());
+ RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
ASSERT(m_isSafeToCollect);
JAVASCRIPTCORE_GC_BEGIN();
RELEASE_ASSERT(m_operationInProgress == NoOperation);
- m_operationInProgress = Collection;
- m_activityCallback->willCollect();
+ suspendCompilerThreads();
+ willStartCollection(collectionType);
+ GCPHASE(Collect);
- double lastGCStartTime = WTF::currentTime();
- if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
- deleteAllCompiledCode();
- m_lastCodeDiscardTime = WTF::currentTime();
- }
+ double gcStartTime = WTF::monotonicallyIncreasingTime();
+ if (m_verifier) {
+ // Verify that live objects from the last GC cycle haven't been corrupted by
+ // mutators before we begin this new GC cycle.
+ m_verifier->verify(HeapVerifier::Phase::BeforeGC);
- {
- GCPHASE(Canonicalize);
- m_objectSpace.canonicalizeCellLivenessData();
+ m_verifier->initializeGCCycle();
+ m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
}
- markRoots();
-
- {
- GCPHASE(ReapingWeakHandles);
- m_objectSpace.reapWeakSets();
- }
+ flushOldStructureIDTables();
+ stopAllocation();
+ flushWriteBarrierBuffer();
- JAVASCRIPTCORE_GC_MARKED();
+ markRoots(gcStartTime, stackOrigin, stackTop, calleeSavedRegisters);
- {
- m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
- MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
- m_objectSpace.forEachBlock(functor);
+ if (m_verifier) {
+ m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
+ m_verifier->verify(HeapVerifier::Phase::AfterMarking);
}
+ JAVASCRIPTCORE_GC_MARKED();
+
+ if (vm()->typeProfiler())
+ vm()->typeProfiler()->invalidateTypeSetCache();
+
+ reapWeakHandles();
+ pruneStaleEntriesFromWeakGCMaps();
+ sweepArrayBuffers();
+ snapshotMarkedSpace();
copyBackingStores();
- {
- GCPHASE(FinalizeUnconditionalFinalizers);
- finalizeUnconditionalFinalizers();
+ finalizeUnconditionalFinalizers();
+ removeDeadCompilerWorklistEntries();
+ deleteUnmarkedCompiledCode();
+ deleteSourceProviderCaches();
+ notifyIncrementalSweeper();
+ writeBarrierCurrentlyExecutingCodeBlocks();
+
+ resetAllocators();
+ updateAllocationLimits();
+ didFinishCollection(gcStartTime);
+ resumeCompilerThreads();
+
+ if (m_verifier) {
+ m_verifier->trimDeadObjects();
+ m_verifier->verify(HeapVerifier::Phase::AfterGC);
}
- {
- GCPHASE(finalizeSmallStrings);
- m_vm->smallStrings.finalizeSmallStrings();
+ if (Options::logGC()) {
+ double after = currentTimeMS();
+ dataLog(after - before, " ms]\n");
}
+}
- {
- GCPHASE(DeleteCodeBlocks);
- deleteUnmarkedCompiledCode();
+void Heap::suspendCompilerThreads()
+{
+#if ENABLE(DFG_JIT)
+ GCPHASE(SuspendCompilerThreads);
+ ASSERT(m_suspendedCompilerWorklists.isEmpty());
+ for (unsigned i = DFG::numberOfWorklists(); i--;) {
+ if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
+ m_suspendedCompilerWorklists.append(worklist);
+ worklist->suspendAllThreads();
+ }
}
+#endif
+}
- {
- GCPHASE(DeleteSourceProviderCaches);
- m_vm->clearSourceProviderCaches();
+void Heap::willStartCollection(HeapOperation collectionType)
+{
+ GCPHASE(StartingCollection);
+
+ if (Options::logGC())
+ dataLog("=> ");
+
+ if (shouldDoFullCollection(collectionType)) {
+ m_operationInProgress = FullCollection;
+ m_shouldDoFullCollection = false;
+ if (Options::logGC())
+ dataLog("FullCollection, ");
+ } else {
+ m_operationInProgress = EdenCollection;
+ if (Options::logGC())
+ dataLog("EdenCollection, ");
+ }
+ if (m_operationInProgress == FullCollection) {
+ m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
+ m_extraMemorySize = 0;
+ m_deprecatedExtraMemorySize = 0;
+
+ if (m_fullActivityCallback)
+ m_fullActivityCallback->willCollect();
+ } else {
+ ASSERT(m_operationInProgress == EdenCollection);
+ m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
}
- if (sweepToggle == DoSweep) {
- SamplingRegion samplingRegion("Garbage Collection: Sweeping");
- GCPHASE(Sweeping);
- m_objectSpace.sweep();
- m_objectSpace.shrink();
+ if (m_edenActivityCallback)
+ m_edenActivityCallback->willCollect();
+
+ for (auto* observer : m_observers)
+ observer->willGarbageCollect();
+}
+
+void Heap::flushOldStructureIDTables()
+{
+ GCPHASE(FlushOldStructureIDTables);
+ m_structureIDTable.flushOldTables();
+}
+
+void Heap::flushWriteBarrierBuffer()
+{
+ GCPHASE(FlushWriteBarrierBuffer);
+ if (m_operationInProgress == EdenCollection) {
+ m_writeBarrierBuffer.flush(*this);
+ return;
}
+ m_writeBarrierBuffer.reset();
+}
- m_sweeper->startSweeping(m_blockSnapshot);
- m_bytesAbandoned = 0;
+void Heap::stopAllocation()
+{
+ GCPHASE(StopAllocation);
+ m_objectSpace.stopAllocating();
+ if (m_operationInProgress == FullCollection)
+ m_storageSpace.didStartFullCollection();
+}
+void Heap::reapWeakHandles()
+{
+ GCPHASE(ReapingWeakHandles);
+ m_objectSpace.reapWeakSets();
+}
+
+void Heap::pruneStaleEntriesFromWeakGCMaps()
+{
+ GCPHASE(PruningStaleEntriesFromWeakGCMaps);
+ if (m_operationInProgress != FullCollection)
+ return;
+ for (auto& pruneCallback : m_weakGCMaps.values())
+ pruneCallback();
+}
+
+void Heap::sweepArrayBuffers()
+{
+ GCPHASE(SweepingArrayBuffers);
+ m_arrayBuffers.sweep();
+}
+
+struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
+ MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks)
+ : m_index(0)
+ , m_blocks(blocks)
{
- GCPHASE(ResetAllocators);
- m_objectSpace.resetAllocators();
}
+
+ void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }
+
+ size_t m_index;
+ Vector<MarkedBlock*>& m_blocks;
+};
+
+void Heap::snapshotMarkedSpace()
+{
+ GCPHASE(SnapshotMarkedSpace);
+
+ if (m_operationInProgress == EdenCollection) {
+ m_blockSnapshot.appendVector(m_objectSpace.blocksWithNewObjects());
+ // Sort and deduplicate the block snapshot since we might be appending to an unfinished work list.
+ std::sort(m_blockSnapshot.begin(), m_blockSnapshot.end());
+ m_blockSnapshot.shrink(std::unique(m_blockSnapshot.begin(), m_blockSnapshot.end()) - m_blockSnapshot.begin());
+ } else {
+ m_blockSnapshot.resizeToFit(m_objectSpace.blocks().set().size());
+ MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
+ m_objectSpace.forEachBlock(functor);
+ }
+}
+
+void Heap::deleteSourceProviderCaches()
+{
+ GCPHASE(DeleteSourceProviderCaches);
+ m_vm->clearSourceProviderCaches();
+}
+
+void Heap::notifyIncrementalSweeper()
+{
+ GCPHASE(NotifyIncrementalSweeper);
+
+ if (m_operationInProgress == FullCollection) {
+ if (!m_logicallyEmptyWeakBlocks.isEmpty())
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
+ }
+
+ m_sweeper->startSweeping();
+}
+
+void Heap::writeBarrierCurrentlyExecutingCodeBlocks()
+{
+ GCPHASE(WriteBarrierCurrentlyExecutingCodeBlocks);
+ m_codeBlocks.writeBarrierCurrentlyExecutingCodeBlocks(this);
+}
+
+void Heap::resetAllocators()
+{
+ GCPHASE(ResetAllocators);
+ m_objectSpace.resetAllocators();
+}
+
+void Heap::updateAllocationLimits()
+{
+ GCPHASE(UpdateAllocationLimits);
+
+ // Calculate our current heap size threshold for the purpose of figuring out when we should
+ // run another collection. This isn't the same as either size() or capacity(), though it should
+ // be somewhere between the two. The key is to match the size calculations involved in calls to
+ // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
+ // fragmentation, we may have size() much smaller than capacity(). Our collector sometimes
+ // temporarily allows very high fragmentation because it doesn't defragment old blocks in copied
+ // space.
+ size_t currentHeapSize = 0;
+
+ // For marked space, we use the total number of bytes visited. This matches the logic for
+ // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
+ // objects allocated rather than blocks used. This will underestimate capacity(), and in case
+ // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
+ // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
+ currentHeapSize += m_totalBytesVisited;
+
+ // For copied space, we use the capacity of storage space. This is because copied space may get
+ // badly fragmented between full collections. This arises when each eden collection evacuates
+ // much less than one CopiedBlock's worth of stuff. It can also happen when CopiedBlocks get
+ // pinned due to very short-lived objects. In such a case, we want to get to a full collection
+ // sooner rather than later. If we used m_totalBytesCopied, then for each CopiedBlock that an
+ // eden allocation promoted, we would only deduct the one object's size from eden size. This
+ // would mean that we could "leak" many CopiedBlocks before we did a full collection and
+ // defragmented all of them. It would be great to use m_totalBytesCopied, but we'd need to
+ // augment it with something that accounts for those fragmented blocks.
+ // FIXME: Make it possible to compute heap size using m_totalBytesCopied rather than
+ // m_storageSpace.capacity()
+ // https://bugs.webkit.org/show_bug.cgi?id=150268
+ ASSERT(m_totalBytesCopied <= m_storageSpace.size());
+ currentHeapSize += m_storageSpace.capacity();
+
+ // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
+ // extra memory reporting.
+ currentHeapSize += extraMemorySize();
- size_t currentHeapSize = size();
if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
HeapStatistics::exitWithFailure();
+ if (m_operationInProgress == FullCollection) {
+ // To avoid pathological GC churn in very small and very large heaps, we set
+ // the new allocation limit based on the current size of the heap, with a
+ // fixed minimum.
+ m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
+ m_maxEdenSize = m_maxHeapSize - currentHeapSize;
+ m_sizeAfterLastFullCollect = currentHeapSize;
+ m_bytesAbandonedSinceLastFullCollect = 0;
+ } else {
+ static const bool verbose = false;
+
+ ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
+ m_maxEdenSize = m_maxHeapSize - currentHeapSize;
+ m_sizeAfterLastEdenCollect = currentHeapSize;
+ if (verbose) {
+ dataLog("Max heap size: ", m_maxHeapSize, "\n");
+ dataLog("Current heap size: ", currentHeapSize, "\n");
+ dataLog("Size after last eden collection: ", m_sizeAfterLastEdenCollect, "\n");
+ }
+ double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
+ if (verbose)
+ dataLog("Eden to old generation ratio: ", edenToOldGenerationRatio, "\n");
+ double minEdenToOldGenerationRatio = 1.0 / 3.0;
+ if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
+ m_shouldDoFullCollection = true;
+ // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
+ m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
+ m_maxEdenSize = m_maxHeapSize - currentHeapSize;
+ if (m_fullActivityCallback) {
+ ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
+ m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
+ }
+ }
+
m_sizeAfterLastCollect = currentHeapSize;
+ m_bytesAllocatedThisCycle = 0;
- // To avoid pathological GC churn in very small and very large heaps, we set
- // the new allocation limit based on the current size of the heap, with a
- // fixed minimum.
- size_t maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
- m_bytesAllocatedLimit = maxHeapSize - currentHeapSize;
+ if (Options::logGC())
+ dataLog(currentHeapSize / 1024, " kb, ");
+}
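The full-collection branch above rebases the limits on the surviving heap, while the eden branch grows the heap budget by exactly what was promoted, which is what keeps the nursery size fixed. A minimal standalone sketch of that arithmetic (not part of the patch; minHeapSizeFor() and proportionalHeapSize() are simplified stand-ins for the real policy helpers):

#include <algorithm>
#include <cstdio>

// Stand-ins for JSC's policy helpers; the real growth curves differ.
static size_t minHeapSizeFor(size_t ramSize) { return std::min<size_t>(ramSize / 4, 1024 * 1024); }
static size_t proportionalHeapSize(size_t heapSize) { return 2 * heapSize; } // assume a 2x growth factor

int main()
{
    size_t ramSize = 512 * 1024 * 1024;

    // After a full collection the surviving heap is 24 MB: rebase the limits on it.
    size_t currentHeapSize = 24 * 1024 * 1024; // bytes visited + copied-space capacity + extra memory
    size_t maxHeapSize = std::max(minHeapSizeFor(ramSize), proportionalHeapSize(currentHeapSize));
    size_t maxEdenSize = maxHeapSize - currentHeapSize;
    size_t sizeAfterLastCollect = currentHeapSize;
    std::printf("full: maxHeapSize=%zu maxEdenSize=%zu\n", maxHeapSize, maxEdenSize);

    // A later eden collection leaves 30 MB behind: the heap budget grows by the
    // 6 MB that were promoted, so the eden budget comes out exactly the same.
    currentHeapSize = 30 * 1024 * 1024;
    maxHeapSize += currentHeapSize - sizeAfterLastCollect;
    maxEdenSize = maxHeapSize - currentHeapSize;
    std::printf("eden: maxHeapSize=%zu maxEdenSize=%zu\n", maxHeapSize, maxEdenSize);
    return 0;
}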
- m_bytesAllocated = 0;
- double lastGCEndTime = WTF::currentTime();
- m_lastGCLength = lastGCEndTime - lastGCStartTime;
+void Heap::didFinishCollection(double gcStartTime)
+{
+ GCPHASE(FinishingCollection);
+ double gcEndTime = WTF::monotonicallyIncreasingTime();
+ HeapOperation operation = m_operationInProgress;
+ if (m_operationInProgress == FullCollection)
+ m_lastFullGCLength = gcEndTime - gcStartTime;
+ else
+ m_lastEdenGCLength = gcEndTime - gcStartTime;
if (Options::recordGCPauseTimes())
- HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
- RELEASE_ASSERT(m_operationInProgress == Collection);
-
- m_operationInProgress = NoOperation;
- JAVASCRIPTCORE_GC_END();
+ HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
if (Options::useZombieMode())
zombifyDeadObjects();
- if (Options::objectsAreImmortal())
+ if (Options::useImmortalObjects())
markDeadObjects();
- if (Options::showObjectStatistics())
- HeapStatistics::showObjectStatistics(this);
+ if (Options::dumpObjectStatistics())
+ HeapStatistics::dumpObjectStatistics(this);
+
+ if (Options::logGC() == GCLogging::Verbose)
+ GCLogging::dumpObjectGraph(this);
+
+ RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
+ m_operationInProgress = NoOperation;
+ JAVASCRIPTCORE_GC_END();
+
+ for (auto* observer : m_observers)
+ observer->didGarbageCollect(operation);
+}
+
+void Heap::resumeCompilerThreads()
+{
+#if ENABLE(DFG_JIT)
+ GCPHASE(ResumeCompilerThreads);
+ for (auto worklist : m_suspendedCompilerWorklists)
+ worklist->resumeAllThreads();
+ m_suspendedCompilerWorklists.clear();
+#endif
}
void Heap::markDeadObjects()
{
- m_objectSpace.forEachDeadCell<MarkObject>();
+ HeapIterationScope iterationScope(*this);
+ m_objectSpace.forEachDeadCell<MarkObject>(iterationScope);
+}
+
+void Heap::setFullActivityCallback(PassRefPtr<FullGCActivityCallback> activityCallback)
+{
+ m_fullActivityCallback = activityCallback;
+}
+
+void Heap::setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback> activityCallback)
+{
+ m_edenActivityCallback = activityCallback;
+}
+
+GCActivityCallback* Heap::fullActivityCallback()
+{
+ return m_fullActivityCallback.get();
}
-void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
+GCActivityCallback* Heap::edenActivityCallback()
{
- m_activityCallback = activityCallback;
+ return m_edenActivityCallback.get();
}
-GCActivityCallback* Heap::activityCallback()
+void Heap::setIncrementalSweeper(std::unique_ptr<IncrementalSweeper> sweeper)
{
- return m_activityCallback.get();
+ m_sweeper = WTFMove(sweeper);
}
IncrementalSweeper* Heap::sweeper()
@@ -831,13 +1461,17 @@ IncrementalSweeper* Heap::sweeper()
void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
- activityCallback()->setEnabled(enable);
+ if (m_fullActivityCallback)
+ m_fullActivityCallback->setEnabled(enable);
+ if (m_edenActivityCallback)
+ m_edenActivityCallback->setEnabled(enable);
}
void Heap::didAllocate(size_t bytes)
{
- m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
- m_bytesAllocated += bytes;
+ if (m_edenActivityCallback)
+ m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
+ m_bytesAllocatedThisCycle += bytes;
}
bool Heap::isValidAllocation(size_t)
@@ -864,14 +1498,31 @@ void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}
-void Heap::addCompiledCode(ExecutableBase* executable)
+void Heap::addExecutable(ExecutableBase* executable)
{
- m_compiledCode.append(executable);
+ m_executables.append(executable);
+}
+
+void Heap::collectAllGarbageIfNotDoneRecently()
+{
+ if (!m_fullActivityCallback) {
+ collectAllGarbage();
+ return;
+ }
+
+ if (m_fullActivityCallback->didSyncGCRecently()) {
+        // A synchronous GC was already requested recently, so we merely accelerate the next collection.
+ reportAbandonedObjectGraph();
+ return;
+ }
+
+ m_fullActivityCallback->setDidSyncGCRecently();
+ collectAllGarbage();
}
class Zombify : public MarkedBlock::VoidFunctor {
public:
- void operator()(JSCell* cell)
+ inline void visit(JSCell* cell)
{
void** current = reinterpret_cast<void**>(cell);
@@ -882,15 +1533,111 @@ public:
void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
for (; current < limit; current++)
- *current = reinterpret_cast<void*>(0xbbadbeef);
+ *current = zombifiedBits;
+ }
+ IterationStatus operator()(JSCell* cell)
+ {
+ visit(cell);
+ return IterationStatus::Continue;
}
};
void Heap::zombifyDeadObjects()
{
// Sweep now because destructors will crash once we're zombified.
- m_objectSpace.sweep();
- m_objectSpace.forEachDeadCell<Zombify>();
+ {
+ SamplingRegion samplingRegion("Garbage Collection: Sweeping");
+ m_objectSpace.zombifySweep();
+ }
+ HeapIterationScope iterationScope(*this);
+ m_objectSpace.forEachDeadCell<Zombify>(iterationScope);
+}
+
+void Heap::flushWriteBarrierBuffer(JSCell* cell)
+{
+ m_writeBarrierBuffer.flush(*this);
+ m_writeBarrierBuffer.add(cell);
+}
+
+bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
+{
+ if (!Options::useGenerationalGC())
+ return true;
+
+ switch (requestedCollectionType) {
+ case EdenCollection:
+ return false;
+ case FullCollection:
+ return true;
+ case AnyCollection:
+ return m_shouldDoFullCollection;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+}
+
+void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
+{
+ m_logicallyEmptyWeakBlocks.append(block);
+}
+
+void Heap::sweepAllLogicallyEmptyWeakBlocks()
+{
+ if (m_logicallyEmptyWeakBlocks.isEmpty())
+ return;
+
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
+ while (sweepNextLogicallyEmptyWeakBlock()) { }
+}
+
+bool Heap::sweepNextLogicallyEmptyWeakBlock()
+{
+ if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
+ return false;
+
+ WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
+
+ block->sweep();
+ if (block->isEmpty()) {
+ std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
+ m_logicallyEmptyWeakBlocks.removeLast();
+ WeakBlock::destroy(*this, block);
+ } else
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
+
+ if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
+ return false;
+ }
+
+ return true;
+}
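Note the removal idiom above: a block that becomes empty is swapped with the last element and popped, so the sweep index never has to skip holes. A generic sketch of the same idiom (illustrative only):

#include <cstddef>
#include <utility>
#include <vector>

// O(1) unordered removal: overwrite slot i with the last element and shrink.
// The caller must re-examine index i afterwards, since a different element now
// occupies it -- which is exactly why sweepNextLogicallyEmptyWeakBlock() only
// advances the index when the block at the current slot survives.
template<typename T>
void unorderedRemove(std::vector<T>& v, size_t i)
{
    std::swap(v[i], v.back());
    v.pop_back();
}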
+
+size_t Heap::threadVisitCount()
+{
+ unsigned long result = 0;
+ for (auto& parallelVisitor : m_parallelSlotVisitors)
+ result += parallelVisitor->visitCount();
+ return result;
+}
+
+size_t Heap::threadBytesVisited()
+{
+ size_t result = 0;
+ for (auto& parallelVisitor : m_parallelSlotVisitors)
+ result += parallelVisitor->bytesVisited();
+ return result;
+}
+
+size_t Heap::threadBytesCopied()
+{
+ size_t result = 0;
+ for (auto& parallelVisitor : m_parallelSlotVisitors)
+ result += parallelVisitor->bytesCopied();
+ return result;
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/Heap.h b/Source/JavaScriptCore/heap/Heap.h
index 8266f5fd5..beee86538 100644
--- a/Source/JavaScriptCore/heap/Heap.h
+++ b/Source/JavaScriptCore/heap/Heap.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
* Copyright (C) 2001 Peter Kelly (pmk@post.com)
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2009, 2013-2015 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -22,384 +22,444 @@
#ifndef Heap_h
#define Heap_h
-#include "BlockAllocator.h"
+#include "ArrayBuffer.h"
+#include "CodeBlockSet.h"
#include "CopyVisitor.h"
-#include "DFGCodeBlocks.h"
-#include "GCThreadSharedData.h"
+#include "GCIncomingRefCountedSet.h"
#include "HandleSet.h"
#include "HandleStack.h"
+#include "HeapObserver.h"
+#include "HeapOperation.h"
#include "JITStubRoutineSet.h"
+#include "ListableHandler.h"
+#include "MachineStackMarker.h"
#include "MarkedAllocator.h"
#include "MarkedBlock.h"
#include "MarkedBlockSet.h"
#include "MarkedSpace.h"
#include "Options.h"
#include "SlotVisitor.h"
+#include "StructureIDTable.h"
+#include "TinyBloomFilter.h"
+#include "UnconditionalFinalizer.h"
#include "WeakHandleOwner.h"
+#include "WeakReferenceHarvester.h"
+#include "WriteBarrierBuffer.h"
#include "WriteBarrierSupport.h"
#include <wtf/HashCountedSet.h>
#include <wtf/HashSet.h>
-
-#define COLLECT_ON_EVERY_ALLOCATION 0
+#include <wtf/ParallelHelperPool.h>
namespace JSC {
- class CopiedSpace;
- class CodeBlock;
- class ExecutableBase;
- class GCActivityCallback;
- class GCAwareJITStubRoutine;
- class GlobalCodeBlock;
- class Heap;
- class HeapRootVisitor;
- class IncrementalSweeper;
- class JITStubRoutine;
- class JSCell;
- class VM;
- class JSStack;
- class JSValue;
- class LiveObjectIterator;
- class LLIntOffsetsExtractor;
- class MarkedArgumentBuffer;
- class WeakGCHandlePool;
- class SlotVisitor;
-
- typedef std::pair<JSValue, WTF::String> ValueStringPair;
- typedef HashCountedSet<JSCell*> ProtectCountSet;
- typedef HashCountedSet<const char*> TypeCountSet;
-
- enum OperationInProgress { NoOperation, Allocation, Collection };
-
- enum HeapType { SmallHeap, LargeHeap };
-
- class Heap {
- WTF_MAKE_NONCOPYABLE(Heap);
- public:
- friend class JIT;
- friend class GCThreadSharedData;
- static Heap* heap(const JSValue); // 0 for immediate values
- static Heap* heap(const JSCell*);
-
- // This constant determines how many blocks we iterate between checks of our
- // deadline when calling Heap::isPagedOut. Decreasing it will cause us to detect
- // overstepping our deadline more quickly, while increasing it will cause
- // our scan to run faster.
- static const unsigned s_timeCheckResolution = 16;
-
- static bool isLive(const void*);
- static bool isMarked(const void*);
- static bool testAndSetMarked(const void*);
- static void setMarked(const void*);
-
- static bool isWriteBarrierEnabled();
- static void writeBarrier(const JSCell*, JSValue);
- static void writeBarrier(const JSCell*, JSCell*);
- static uint8_t* addressOfCardFor(JSCell*);
-
- Heap(VM*, HeapType);
- ~Heap();
- JS_EXPORT_PRIVATE void lastChanceToFinalize();
-
- VM* vm() const { return m_vm; }
- MarkedSpace& objectSpace() { return m_objectSpace; }
- MachineThreads& machineThreads() { return m_machineThreads; }
-
- JS_EXPORT_PRIVATE GCActivityCallback* activityCallback();
- JS_EXPORT_PRIVATE void setActivityCallback(PassOwnPtr<GCActivityCallback>);
- JS_EXPORT_PRIVATE void setGarbageCollectionTimerEnabled(bool);
-
- JS_EXPORT_PRIVATE IncrementalSweeper* sweeper();
-
- // true if an allocation or collection is in progress
- inline bool isBusy();
-
- MarkedAllocator& allocatorForObjectWithoutDestructor(size_t bytes) { return m_objectSpace.allocatorFor(bytes); }
- MarkedAllocator& allocatorForObjectWithNormalDestructor(size_t bytes) { return m_objectSpace.normalDestructorAllocatorFor(bytes); }
- MarkedAllocator& allocatorForObjectWithImmortalStructureDestructor(size_t bytes) { return m_objectSpace.immortalStructureDestructorAllocatorFor(bytes); }
- CopiedAllocator& storageAllocator() { return m_storageSpace.allocator(); }
- CheckedBoolean tryAllocateStorage(size_t, void**);
- CheckedBoolean tryReallocateStorage(void**, size_t, size_t);
-
- typedef void (*Finalizer)(JSCell*);
- JS_EXPORT_PRIVATE void addFinalizer(JSCell*, Finalizer);
- void addCompiledCode(ExecutableBase*);
-
- void notifyIsSafeToCollect() { m_isSafeToCollect = true; }
- bool isSafeToCollect() const { return m_isSafeToCollect; }
-
- JS_EXPORT_PRIVATE void collectAllGarbage();
- enum SweepToggle { DoNotSweep, DoSweep };
- bool shouldCollect();
- void collect(SweepToggle);
-
- void reportExtraMemoryCost(size_t cost);
- JS_EXPORT_PRIVATE void reportAbandonedObjectGraph();
-
- JS_EXPORT_PRIVATE void protect(JSValue);
- JS_EXPORT_PRIVATE bool unprotect(JSValue); // True when the protect count drops to 0.
-
- void jettisonDFGCodeBlock(PassOwnPtr<CodeBlock>);
-
- JS_EXPORT_PRIVATE size_t size();
- JS_EXPORT_PRIVATE size_t capacity();
- JS_EXPORT_PRIVATE size_t objectCount();
- JS_EXPORT_PRIVATE size_t globalObjectCount();
- JS_EXPORT_PRIVATE size_t protectedObjectCount();
- JS_EXPORT_PRIVATE size_t protectedGlobalObjectCount();
- JS_EXPORT_PRIVATE PassOwnPtr<TypeCountSet> protectedObjectTypeCounts();
- JS_EXPORT_PRIVATE PassOwnPtr<TypeCountSet> objectTypeCounts();
- void showStatistics();
-
- void pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>*);
- void popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>*);
+class CodeBlock;
+class CopiedSpace;
+class EdenGCActivityCallback;
+class ExecutableBase;
+class FullGCActivityCallback;
+class GCActivityCallback;
+class GCAwareJITStubRoutine;
+class Heap;
+class HeapRootVisitor;
+class HeapVerifier;
+class IncrementalSweeper;
+class JITStubRoutine;
+class JSCell;
+class JSStack;
+class JSValue;
+class LLIntOffsetsExtractor;
+class MarkedArgumentBuffer;
+class VM;
+
+namespace DFG {
+class SpeculativeJIT;
+class Worklist;
+}
+
+static void* const zombifiedBits = reinterpret_cast<void*>(static_cast<uintptr_t>(0xdeadbeef));
+
+typedef HashCountedSet<JSCell*> ProtectCountSet;
+typedef HashCountedSet<const char*> TypeCountSet;
+
+enum HeapType { SmallHeap, LargeHeap };
+
+class Heap {
+ WTF_MAKE_NONCOPYABLE(Heap);
+public:
+ friend class JIT;
+ friend class DFG::SpeculativeJIT;
+ static Heap* heap(const JSValue); // 0 for immediate values
+ static Heap* heap(const JSCell*);
+
+ // This constant determines how many blocks we iterate between checks of our
+ // deadline when calling Heap::isPagedOut. Decreasing it will cause us to detect
+ // overstepping our deadline more quickly, while increasing it will cause
+ // our scan to run faster.
+ static const unsigned s_timeCheckResolution = 16;
+
+ static bool isLive(const void*);
+ static bool isMarked(const void*);
+ static bool testAndSetMarked(const void*);
+ static void setMarked(const void*);
+
+    // To be accurate, this function must be run after stopAllocation() is called and
+    // before liveness data is cleared.
+ static bool isPointerGCObject(TinyBloomFilter, MarkedBlockSet&, void* pointer);
+ static bool isValueGCObject(TinyBloomFilter, MarkedBlockSet&, JSValue);
+
+ void writeBarrier(const JSCell*);
+ void writeBarrier(const JSCell*, JSValue);
+ void writeBarrier(const JSCell*, JSCell*);
+
+ JS_EXPORT_PRIVATE static void* copyBarrier(const JSCell* owner, void*& copiedSpacePointer);
+
+ WriteBarrierBuffer& writeBarrierBuffer() { return m_writeBarrierBuffer; }
+ void flushWriteBarrierBuffer(JSCell*);
+
+ Heap(VM*, HeapType);
+ ~Heap();
+ void lastChanceToFinalize();
+ void releaseDelayedReleasedObjects();
+
+ VM* vm() const { return m_vm; }
+ MarkedSpace& objectSpace() { return m_objectSpace; }
+ CopiedSpace& storageSpace() { return m_storageSpace; }
+ MachineThreads& machineThreads() { return m_machineThreads; }
+
+ const SlotVisitor& slotVisitor() const { return m_slotVisitor; }
+
+ JS_EXPORT_PRIVATE GCActivityCallback* fullActivityCallback();
+ JS_EXPORT_PRIVATE GCActivityCallback* edenActivityCallback();
+ JS_EXPORT_PRIVATE void setFullActivityCallback(PassRefPtr<FullGCActivityCallback>);
+ JS_EXPORT_PRIVATE void setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback>);
+ JS_EXPORT_PRIVATE void setGarbageCollectionTimerEnabled(bool);
+
+ JS_EXPORT_PRIVATE IncrementalSweeper* sweeper();
+ JS_EXPORT_PRIVATE void setIncrementalSweeper(std::unique_ptr<IncrementalSweeper>);
+
+ void addObserver(HeapObserver* observer) { m_observers.append(observer); }
+ void removeObserver(HeapObserver* observer) { m_observers.removeFirst(observer); }
+
+    // true if a collection is in progress
+ bool isCollecting();
+ HeapOperation operationInProgress() { return m_operationInProgress; }
+ // true if an allocation or collection is in progress
+ bool isBusy();
+ MarkedSpace::Subspace& subspaceForObjectWithoutDestructor() { return m_objectSpace.subspaceForObjectsWithoutDestructor(); }
+ MarkedSpace::Subspace& subspaceForObjectDestructor() { return m_objectSpace.subspaceForObjectsWithDestructor(); }
+ template<typename ClassType> MarkedSpace::Subspace& subspaceForObjectOfType();
+ MarkedAllocator& allocatorForObjectWithoutDestructor(size_t bytes) { return m_objectSpace.allocatorFor(bytes); }
+ MarkedAllocator& allocatorForObjectWithDestructor(size_t bytes) { return m_objectSpace.destructorAllocatorFor(bytes); }
+ template<typename ClassType> MarkedAllocator& allocatorForObjectOfType(size_t bytes);
+ CopiedAllocator& storageAllocator() { return m_storageSpace.allocator(); }
+ CheckedBoolean tryAllocateStorage(JSCell* intendedOwner, size_t, void**);
+ CheckedBoolean tryReallocateStorage(JSCell* intendedOwner, void**, size_t, size_t);
+ void ascribeOwner(JSCell* intendedOwner, void*);
+
+ typedef void (*Finalizer)(JSCell*);
+ JS_EXPORT_PRIVATE void addFinalizer(JSCell*, Finalizer);
+ void addExecutable(ExecutableBase*);
+
+ void notifyIsSafeToCollect() { m_isSafeToCollect = true; }
+ bool isSafeToCollect() const { return m_isSafeToCollect; }
+
+ JS_EXPORT_PRIVATE void collectAllGarbageIfNotDoneRecently();
+ void collectAllGarbage() { collectAndSweep(FullCollection); }
+ JS_EXPORT_PRIVATE void collectAndSweep(HeapOperation collectionType = AnyCollection);
+ bool shouldCollect();
+ JS_EXPORT_PRIVATE void collect(HeapOperation collectionType = AnyCollection);
+ bool collectIfNecessaryOrDefer(); // Returns true if it did collect.
+
+ void completeAllDFGPlans();
+
+ // Use this API to report non-GC memory referenced by GC objects. Be sure to
+    // call both of these functions: Calling only one may trigger catastrophic
+ // memory growth.
+ void reportExtraMemoryAllocated(size_t);
+ void reportExtraMemoryVisited(CellState cellStateBeforeVisiting, size_t);
+
+ // Use this API to report non-GC memory if you can't use the better API above.
+ void deprecatedReportExtraMemory(size_t);
+
+ JS_EXPORT_PRIVATE void reportAbandonedObjectGraph();
+
+ JS_EXPORT_PRIVATE void protect(JSValue);
+ JS_EXPORT_PRIVATE bool unprotect(JSValue); // True when the protect count drops to 0.
+
+ JS_EXPORT_PRIVATE size_t extraMemorySize(); // Non-GC memory referenced by GC objects.
+ JS_EXPORT_PRIVATE size_t size();
+ JS_EXPORT_PRIVATE size_t capacity();
+ JS_EXPORT_PRIVATE size_t objectCount();
+ JS_EXPORT_PRIVATE size_t globalObjectCount();
+ JS_EXPORT_PRIVATE size_t protectedObjectCount();
+ JS_EXPORT_PRIVATE size_t protectedGlobalObjectCount();
+ JS_EXPORT_PRIVATE std::unique_ptr<TypeCountSet> protectedObjectTypeCounts();
+ JS_EXPORT_PRIVATE std::unique_ptr<TypeCountSet> objectTypeCounts();
+
+ HashSet<MarkedArgumentBuffer*>& markListSet();
+
+ template<typename Functor> typename Functor::ReturnType forEachProtectedCell(Functor&);
+ template<typename Functor> typename Functor::ReturnType forEachProtectedCell();
+ template<typename Functor> void forEachCodeBlock(Functor&);
+
+ HandleSet* handleSet() { return &m_handleSet; }
+ HandleStack* handleStack() { return &m_handleStack; }
+
+ void willStartIterating();
+ void didFinishIterating();
+
+ double lastFullGCLength() const { return m_lastFullGCLength; }
+ double lastEdenGCLength() const { return m_lastEdenGCLength; }
+ void increaseLastFullGCLength(double amount) { m_lastFullGCLength += amount; }
+
+ size_t sizeBeforeLastEdenCollection() const { return m_sizeBeforeLastEdenCollect; }
+ size_t sizeAfterLastEdenCollection() const { return m_sizeAfterLastEdenCollect; }
+ size_t sizeBeforeLastFullCollection() const { return m_sizeBeforeLastFullCollect; }
+ size_t sizeAfterLastFullCollection() const { return m_sizeAfterLastFullCollect; }
+
+ void deleteAllCodeBlocks();
+ void deleteAllUnlinkedCodeBlocks();
+
+ void didAllocate(size_t);
+ void didAbandon(size_t);
+
+ bool isPagedOut(double deadline);
+
+ const JITStubRoutineSet& jitStubRoutines() { return m_jitStubRoutines; }
- HashSet<MarkedArgumentBuffer*>& markListSet() { if (!m_markListSet) m_markListSet = adoptPtr(new HashSet<MarkedArgumentBuffer*>); return *m_markListSet; }
-
- template<typename Functor> typename Functor::ReturnType forEachProtectedCell(Functor&);
- template<typename Functor> typename Functor::ReturnType forEachProtectedCell();
-
- HandleSet* handleSet() { return &m_handleSet; }
- HandleStack* handleStack() { return &m_handleStack; }
-
- void canonicalizeCellLivenessData();
- void getConservativeRegisterRoots(HashSet<JSCell*>& roots);
-
- double lastGCLength() { return m_lastGCLength; }
- void increaseLastGCLength(double amount) { m_lastGCLength += amount; }
-
- JS_EXPORT_PRIVATE void deleteAllCompiledCode();
-
- void didAllocate(size_t);
- void didAbandon(size_t);
-
- bool isPagedOut(double deadline);
-
- const JITStubRoutineSet& jitStubRoutines() { return m_jitStubRoutines; }
-
- private:
- friend class CodeBlock;
- friend class CopiedBlock;
- friend class GCAwareJITStubRoutine;
- friend class HandleSet;
- friend class JITStubRoutine;
- friend class LLIntOffsetsExtractor;
- friend class MarkedSpace;
- friend class MarkedAllocator;
- friend class MarkedBlock;
- friend class CopiedSpace;
- friend class CopyVisitor;
- friend class SlotVisitor;
- friend class SuperRegion;
- friend class IncrementalSweeper;
- friend class HeapStatistics;
- friend class WeakSet;
- template<typename T> friend void* allocateCell(Heap&);
- template<typename T> friend void* allocateCell(Heap&, size_t);
-
- void* allocateWithImmortalStructureDestructor(size_t); // For use with special objects whose Structures never die.
- void* allocateWithNormalDestructor(size_t); // For use with objects that inherit directly or indirectly from JSDestructibleObject.
- void* allocateWithoutDestructor(size_t); // For use with objects without destructors.
-
- static const size_t minExtraCost = 256;
- static const size_t maxExtraCost = 1024 * 1024;
-
- class FinalizerOwner : public WeakHandleOwner {
- virtual void finalize(Handle<Unknown>, void* context);
- };
-
- JS_EXPORT_PRIVATE bool isValidAllocation(size_t);
- JS_EXPORT_PRIVATE void reportExtraMemoryCostSlowCase(size_t);
-
- void markRoots();
- void markProtectedObjects(HeapRootVisitor&);
- void markTempSortVectors(HeapRootVisitor&);
- void copyBackingStores();
- void harvestWeakReferences();
- void finalizeUnconditionalFinalizers();
- void deleteUnmarkedCompiledCode();
- void zombifyDeadObjects();
- void markDeadObjects();
-
- JSStack& stack();
- BlockAllocator& blockAllocator();
-
- const HeapType m_heapType;
- const size_t m_ramSize;
- const size_t m_minBytesPerCycle;
- size_t m_sizeAfterLastCollect;
-
- size_t m_bytesAllocatedLimit;
- size_t m_bytesAllocated;
- size_t m_bytesAbandoned;
-
- OperationInProgress m_operationInProgress;
- BlockAllocator m_blockAllocator;
- MarkedSpace m_objectSpace;
- CopiedSpace m_storageSpace;
-
-#if ENABLE(SIMPLE_HEAP_PROFILING)
- VTableSpectrum m_destroyedTypeCounts;
+ void addReference(JSCell*, ArrayBuffer*);
+
+ bool isDeferred() const { return !!m_deferralDepth || !Options::useGC(); }
+
+ StructureIDTable& structureIDTable() { return m_structureIDTable; }
+
+ CodeBlockSet& codeBlockSet() { return m_codeBlocks; }
+
+#if USE(CF)
+ template<typename T> void releaseSoon(RetainPtr<T>&&);
+#endif
+
+ static bool isZombified(JSCell* cell) { return *(void**)cell == zombifiedBits; }
+
+ void registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback);
+ void unregisterWeakGCMap(void* weakGCMap);
+
+ void addLogicallyEmptyWeakBlock(WeakBlock*);
+
+#if ENABLE(RESOURCE_USAGE)
+ size_t blockBytesAllocated() const { return m_blockBytesAllocated; }
#endif
- ProtectCountSet m_protectedValues;
- Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>* > m_tempSortingVectors;
- OwnPtr<HashSet<MarkedArgumentBuffer*> > m_markListSet;
-
- MachineThreads m_machineThreads;
-
- GCThreadSharedData m_sharedData;
- SlotVisitor m_slotVisitor;
- CopyVisitor m_copyVisitor;
-
- HandleSet m_handleSet;
- HandleStack m_handleStack;
- DFGCodeBlocks m_dfgCodeBlocks;
- JITStubRoutineSet m_jitStubRoutines;
- FinalizerOwner m_finalizerOwner;
-
- bool m_isSafeToCollect;
-
- VM* m_vm;
- double m_lastGCLength;
- double m_lastCodeDiscardTime;
-
- DoublyLinkedList<ExecutableBase> m_compiledCode;
-
- OwnPtr<GCActivityCallback> m_activityCallback;
- OwnPtr<IncrementalSweeper> m_sweeper;
- Vector<MarkedBlock*> m_blockSnapshot;
+ void didAllocateBlock(size_t capacity);
+ void didFreeBlock(size_t capacity);
+
+private:
+ friend class CodeBlock;
+ friend class CopiedBlock;
+ friend class DeferGC;
+ friend class DeferGCForAWhile;
+ friend class GCAwareJITStubRoutine;
+ friend class GCLogging;
+ friend class GCThread;
+ friend class HandleSet;
+ friend class HeapVerifier;
+ friend class JITStubRoutine;
+ friend class LLIntOffsetsExtractor;
+ friend class MarkedSpace;
+ friend class MarkedAllocator;
+ friend class MarkedBlock;
+ friend class CopiedSpace;
+ friend class CopyVisitor;
+ friend class SlotVisitor;
+ friend class IncrementalSweeper;
+ friend class HeapStatistics;
+ friend class VM;
+ friend class WeakSet;
+ template<typename T> friend void* allocateCell(Heap&);
+ template<typename T> friend void* allocateCell(Heap&, size_t);
+
+ void* allocateWithDestructor(size_t); // For use with objects with destructors.
+ void* allocateWithoutDestructor(size_t); // For use with objects without destructors.
+ template<typename ClassType> void* allocateObjectOfType(size_t); // Chooses one of the methods above based on type.
+
+ static const size_t minExtraMemory = 256;
+
+ class FinalizerOwner : public WeakHandleOwner {
+ virtual void finalize(Handle<Unknown>, void* context) override;
};
- struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
- MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks)
- : m_index(0)
- , m_blocks(blocks)
- {
- }
+ JS_EXPORT_PRIVATE bool isValidAllocation(size_t);
+ JS_EXPORT_PRIVATE void reportExtraMemoryAllocatedSlowCase(size_t);
+ JS_EXPORT_PRIVATE void deprecatedReportExtraMemorySlowCase(size_t);
+
+ void collectImpl(HeapOperation, void* stackOrigin, void* stackTop, MachineThreads::RegisterState&);
+
+ void suspendCompilerThreads();
+ void willStartCollection(HeapOperation collectionType);
+ void flushOldStructureIDTables();
+ void flushWriteBarrierBuffer();
+ void stopAllocation();
- void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }
+ void markRoots(double gcStartTime, void* stackOrigin, void* stackTop, MachineThreads::RegisterState&);
+ void gatherStackRoots(ConservativeRoots&, void* stackOrigin, void* stackTop, MachineThreads::RegisterState&);
+ void gatherJSStackRoots(ConservativeRoots&);
+ void gatherScratchBufferRoots(ConservativeRoots&);
+ void clearLivenessData();
+ void visitExternalRememberedSet();
+ void visitSmallStrings();
+ void visitConservativeRoots(ConservativeRoots&);
+ void visitCompilerWorklistWeakReferences();
+ void removeDeadCompilerWorklistEntries();
+ void visitProtectedObjects(HeapRootVisitor&);
+ void visitArgumentBuffers(HeapRootVisitor&);
+ void visitException(HeapRootVisitor&);
+ void visitStrongHandles(HeapRootVisitor&);
+ void visitHandleStack(HeapRootVisitor&);
+ void visitSamplingProfiler();
+ void traceCodeBlocksAndJITStubRoutines();
+ void converge();
+ void visitWeakHandles(HeapRootVisitor&);
+ void updateObjectCounts(double gcStartTime);
+ void resetVisitors();
+
+ void reapWeakHandles();
+ void pruneStaleEntriesFromWeakGCMaps();
+ void sweepArrayBuffers();
+ void snapshotMarkedSpace();
+ void deleteSourceProviderCaches();
+ void notifyIncrementalSweeper();
+ void writeBarrierCurrentlyExecutingCodeBlocks();
+ void resetAllocators();
+ void copyBackingStores();
+ void harvestWeakReferences();
+ void finalizeUnconditionalFinalizers();
+ void clearUnmarkedExecutables();
+ void deleteUnmarkedCompiledCode();
+ JS_EXPORT_PRIVATE void addToRememberedSet(const JSCell*);
+ void updateAllocationLimits();
+ void didFinishCollection(double gcStartTime);
+ void resumeCompilerThreads();
+ void zombifyDeadObjects();
+ void markDeadObjects();
+
+ void sweepAllLogicallyEmptyWeakBlocks();
+ bool sweepNextLogicallyEmptyWeakBlock();
+
+ bool shouldDoFullCollection(HeapOperation requestedCollectionType) const;
+
+ JSStack& stack();
- size_t m_index;
- Vector<MarkedBlock*>& m_blocks;
- };
+ void incrementDeferralDepth();
+ void decrementDeferralDepth();
+ void decrementDeferralDepthAndGCIfNeeded();
+
+ size_t threadVisitCount();
+ size_t threadBytesVisited();
+ size_t threadBytesCopied();
+
+ const HeapType m_heapType;
+ const size_t m_ramSize;
+ const size_t m_minBytesPerCycle;
+ size_t m_sizeAfterLastCollect;
+ size_t m_sizeAfterLastFullCollect;
+ size_t m_sizeBeforeLastFullCollect;
+ size_t m_sizeAfterLastEdenCollect;
+ size_t m_sizeBeforeLastEdenCollect;
+
+ size_t m_bytesAllocatedThisCycle;
+ size_t m_bytesAbandonedSinceLastFullCollect;
+ size_t m_maxEdenSize;
+ size_t m_maxHeapSize;
+ bool m_shouldDoFullCollection;
+ size_t m_totalBytesVisited;
+ size_t m_totalBytesVisitedThisCycle;
+ size_t m_totalBytesCopied;
+ size_t m_totalBytesCopiedThisCycle;
+
+ HeapOperation m_operationInProgress;
+ StructureIDTable m_structureIDTable;
+ MarkedSpace m_objectSpace;
+ CopiedSpace m_storageSpace;
+ GCIncomingRefCountedSet<ArrayBuffer> m_arrayBuffers;
+ size_t m_extraMemorySize;
+ size_t m_deprecatedExtraMemorySize;
- inline bool Heap::shouldCollect()
- {
- if (Options::gcMaxHeapSize())
- return m_bytesAllocated > Options::gcMaxHeapSize() && m_isSafeToCollect && m_operationInProgress == NoOperation;
- return m_bytesAllocated > m_bytesAllocatedLimit && m_isSafeToCollect && m_operationInProgress == NoOperation;
- }
-
- bool Heap::isBusy()
- {
- return m_operationInProgress != NoOperation;
- }
-
- inline Heap* Heap::heap(const JSCell* cell)
- {
- return MarkedBlock::blockFor(cell)->heap();
- }
-
- inline Heap* Heap::heap(const JSValue v)
- {
- if (!v.isCell())
- return 0;
- return heap(v.asCell());
- }
-
- inline bool Heap::isLive(const void* cell)
- {
- return MarkedBlock::blockFor(cell)->isLiveCell(cell);
- }
-
- inline bool Heap::isMarked(const void* cell)
- {
- return MarkedBlock::blockFor(cell)->isMarked(cell);
- }
-
- inline bool Heap::testAndSetMarked(const void* cell)
- {
- return MarkedBlock::blockFor(cell)->testAndSetMarked(cell);
- }
-
- inline void Heap::setMarked(const void* cell)
- {
- MarkedBlock::blockFor(cell)->setMarked(cell);
- }
-
- inline bool Heap::isWriteBarrierEnabled()
- {
-#if ENABLE(WRITE_BARRIER_PROFILING)
- return true;
-#else
- return false;
-#endif
- }
-
- inline void Heap::writeBarrier(const JSCell*, JSCell*)
- {
- WriteBarrierCounters::countWriteBarrier();
- }
-
- inline void Heap::writeBarrier(const JSCell*, JSValue)
- {
- WriteBarrierCounters::countWriteBarrier();
- }
-
- inline void Heap::reportExtraMemoryCost(size_t cost)
- {
- if (cost > minExtraCost)
- reportExtraMemoryCostSlowCase(cost);
- }
-
- template<typename Functor> inline typename Functor::ReturnType Heap::forEachProtectedCell(Functor& functor)
- {
- ProtectCountSet::iterator end = m_protectedValues.end();
- for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
- functor(it->key);
- m_handleSet.forEachStrongHandle(functor, m_protectedValues);
-
- return functor.returnValue();
- }
-
- template<typename Functor> inline typename Functor::ReturnType Heap::forEachProtectedCell()
- {
- Functor functor;
- return forEachProtectedCell(functor);
- }
-
- inline void* Heap::allocateWithNormalDestructor(size_t bytes)
- {
- ASSERT(isValidAllocation(bytes));
- return m_objectSpace.allocateWithNormalDestructor(bytes);
- }
+ HashSet<const JSCell*> m_copyingRememberedSet;
+
+ ProtectCountSet m_protectedValues;
+ std::unique_ptr<HashSet<MarkedArgumentBuffer*>> m_markListSet;
+
+ MachineThreads m_machineThreads;
- inline void* Heap::allocateWithImmortalStructureDestructor(size_t bytes)
- {
- ASSERT(isValidAllocation(bytes));
- return m_objectSpace.allocateWithImmortalStructureDestructor(bytes);
- }
+ SlotVisitor m_slotVisitor;
+
+ // We pool the slot visitors used by parallel marking threads. It's useful to be able to
+ // enumerate over them, and it's useful to have them cache some small amount of memory from
+ // one GC to the next. GC marking threads claim these at the start of marking, and return
+ // them at the end.
+ Vector<std::unique_ptr<SlotVisitor>> m_parallelSlotVisitors;
+ Vector<SlotVisitor*> m_availableParallelSlotVisitors;
+ Lock m_parallelSlotVisitorLock;
+
+ HandleSet m_handleSet;
+ HandleStack m_handleStack;
+ CodeBlockSet m_codeBlocks;
+ JITStubRoutineSet m_jitStubRoutines;
+ FinalizerOwner m_finalizerOwner;
- inline void* Heap::allocateWithoutDestructor(size_t bytes)
- {
- ASSERT(isValidAllocation(bytes));
- return m_objectSpace.allocateWithoutDestructor(bytes);
- }
-
- inline CheckedBoolean Heap::tryAllocateStorage(size_t bytes, void** outPtr)
- {
- return m_storageSpace.tryAllocate(bytes, outPtr);
- }
+ bool m_isSafeToCollect;
+
+ WriteBarrierBuffer m_writeBarrierBuffer;
+
+ VM* m_vm;
+ double m_lastFullGCLength;
+ double m_lastEdenGCLength;
+
+ Vector<ExecutableBase*> m_executables;
+
+ Vector<WeakBlock*> m_logicallyEmptyWeakBlocks;
+ size_t m_indexOfNextLogicallyEmptyWeakBlockToSweep { WTF::notFound };
- inline CheckedBoolean Heap::tryReallocateStorage(void** ptr, size_t oldSize, size_t newSize)
- {
- return m_storageSpace.tryReallocate(ptr, oldSize, newSize);
- }
-
- inline BlockAllocator& Heap::blockAllocator()
- {
- return m_blockAllocator;
- }
+ RefPtr<FullGCActivityCallback> m_fullActivityCallback;
+ RefPtr<GCActivityCallback> m_edenActivityCallback;
+ std::unique_ptr<IncrementalSweeper> m_sweeper;
+ Vector<MarkedBlock*> m_blockSnapshot;
+
+ Vector<HeapObserver*> m_observers;
+
+ unsigned m_deferralDepth;
+ Vector<DFG::Worklist*> m_suspendedCompilerWorklists;
+
+ std::unique_ptr<HeapVerifier> m_verifier;
+#if USE(CF)
+ Vector<RetainPtr<CFTypeRef>> m_delayedReleaseObjects;
+ unsigned m_delayedReleaseRecursionCount;
+#endif
+
+ HashMap<void*, std::function<void()>> m_weakGCMaps;
+
+ Lock m_markingMutex;
+ Condition m_markingConditionVariable;
+ MarkStackArray m_sharedMarkStack;
+ unsigned m_numberOfActiveParallelMarkers { 0 };
+ unsigned m_numberOfWaitingParallelMarkers { 0 };
+ bool m_parallelMarkersShouldExit { false };
+
+ Lock m_opaqueRootsMutex;
+ HashSet<void*> m_opaqueRoots;
+
+ Vector<CopiedBlock*> m_blocksToCopy;
+ static const size_t s_blockFragmentLength = 32;
+
+ ListableHandler<WeakReferenceHarvester>::List m_weakReferenceHarvesters;
+ ListableHandler<UnconditionalFinalizer>::List m_unconditionalFinalizers;
+
+ ParallelHelperClient m_helperClient;
+
+#if ENABLE(RESOURCE_USAGE)
+ size_t m_blockBytesAllocated { 0 };
+#endif
+};
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapHelperPool.cpp b/Source/JavaScriptCore/heap/HeapHelperPool.cpp
new file mode 100644
index 000000000..791aa756f
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapHelperPool.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "HeapHelperPool.h"
+
+#include <mutex>
+#include "Options.h"
+
+namespace JSC {
+
+ParallelHelperPool& heapHelperPool()
+{
+ static std::once_flag initializeHelperPoolOnceFlag;
+ static ParallelHelperPool* helperPool;
+ std::call_once(
+ initializeHelperPoolOnceFlag,
+ [] {
+ helperPool = new ParallelHelperPool();
+ helperPool->ensureThreads(Options::numberOfGCMarkers() - 1);
+ });
+ return *helperPool;
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/VTableSpectrum.h b/Source/JavaScriptCore/heap/HeapHelperPool.h
index a50a04f47..99144be37 100644
--- a/Source/JavaScriptCore/heap/VTableSpectrum.h
+++ b/Source/JavaScriptCore/heap/HeapHelperPool.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,27 +23,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef VTableSpectrum_h
-#define VTableSpectrum_h
+#ifndef HeapHelperPool_h
+#define HeapHelperPool_h
-#include <stdio.h>
-#include <wtf/Spectrum.h>
+#include <wtf/ParallelHelperPool.h>
namespace JSC {
-class JSCell;
-
-class VTableSpectrum : Spectrum<void*> {
-public:
- VTableSpectrum();
- ~VTableSpectrum();
-
- void countVPtr(void*);
- JS_EXPORT_PRIVATE void count(JSCell*);
-
- void dump(FILE* output, const char* comment);
-};
+ParallelHelperPool& heapHelperPool();
} // namespace JSC
-#endif // VTableSpectrum_h
+#endif // HeapHelperPool_h
diff --git a/Source/JavaScriptCore/heap/HeapInlines.h b/Source/JavaScriptCore/heap/HeapInlines.h
new file mode 100644
index 000000000..cee723430
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapInlines.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HeapInlines_h
+#define HeapInlines_h
+
+#include "CopyBarrier.h"
+#include "Heap.h"
+#include "JSCell.h"
+#include "Structure.h"
+#include <type_traits>
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+inline bool Heap::shouldCollect()
+{
+ if (isDeferred())
+ return false;
+ if (!m_isSafeToCollect)
+ return false;
+ if (m_operationInProgress != NoOperation)
+ return false;
+ if (Options::gcMaxHeapSize())
+ return m_bytesAllocatedThisCycle > Options::gcMaxHeapSize();
+ return m_bytesAllocatedThisCycle > m_maxEdenSize;
+}
+
+inline bool Heap::isBusy()
+{
+ return m_operationInProgress != NoOperation;
+}
+
+inline bool Heap::isCollecting()
+{
+ return m_operationInProgress == FullCollection || m_operationInProgress == EdenCollection;
+}
+
+inline Heap* Heap::heap(const JSCell* cell)
+{
+ return MarkedBlock::blockFor(cell)->heap();
+}
+
+inline Heap* Heap::heap(const JSValue v)
+{
+ if (!v.isCell())
+ return 0;
+ return heap(v.asCell());
+}
+
+inline bool Heap::isLive(const void* cell)
+{
+ return MarkedBlock::blockFor(cell)->isLiveCell(cell);
+}
+
+inline bool Heap::isMarked(const void* cell)
+{
+ return MarkedBlock::blockFor(cell)->isMarked(cell);
+}
+
+inline bool Heap::testAndSetMarked(const void* cell)
+{
+ return MarkedBlock::blockFor(cell)->testAndSetMarked(cell);
+}
+
+inline void Heap::setMarked(const void* cell)
+{
+ MarkedBlock::blockFor(cell)->setMarked(cell);
+}
+
+inline void Heap::writeBarrier(const JSCell* from, JSValue to)
+{
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ WriteBarrierCounters::countWriteBarrier();
+#endif
+ if (!to.isCell())
+ return;
+ writeBarrier(from, to.asCell());
+}
+
+inline void Heap::writeBarrier(const JSCell* from, JSCell* to)
+{
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ WriteBarrierCounters::countWriteBarrier();
+#endif
+ if (!from || from->cellState() != CellState::OldBlack)
+ return;
+ if (!to || to->cellState() != CellState::NewWhite)
+ return;
+ addToRememberedSet(from);
+}
+
+inline void Heap::writeBarrier(const JSCell* from)
+{
+ ASSERT_GC_OBJECT_LOOKS_VALID(const_cast<JSCell*>(from));
+ if (!from || from->cellState() != CellState::OldBlack)
+ return;
+ addToRememberedSet(from);
+}
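The three overloads above implement the generational barrier: only a store that creates an edge from an already-marked (OldBlack) owner to a not-yet-marked (NewWhite) child needs to re-expose the owner to the collector. A hedged usage sketch (names are illustrative; real stores normally go through WriteBarrier<> slots that end up in these functions):

// Illustrative only: run the barrier after writing a reference into a GC cell.
void storeChild(JSC::Heap& heap, JSC::JSCell* owner, JSC::JSCell* newChild)
{
    // ... hypothetical raw field write: owner->child = newChild ...
    heap.writeBarrier(owner, newChild); // adds owner to the remembered set only for an old->new edge
}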
+
+inline void Heap::reportExtraMemoryAllocated(size_t size)
+{
+ if (size > minExtraMemory)
+ reportExtraMemoryAllocatedSlowCase(size);
+}
+
+inline void Heap::reportExtraMemoryVisited(CellState dataBeforeVisiting, size_t size)
+{
+ // We don't want to double-count the extra memory that was reported in previous collections.
+ if (operationInProgress() == EdenCollection && dataBeforeVisiting == CellState::OldGrey)
+ return;
+
+ size_t* counter = &m_extraMemorySize;
+
+ for (;;) {
+ size_t oldSize = *counter;
+ if (WTF::weakCompareAndSwap(counter, oldSize, oldSize + size))
+ return;
+ }
+}
+
+inline void Heap::deprecatedReportExtraMemory(size_t size)
+{
+ if (size > minExtraMemory)
+ deprecatedReportExtraMemorySlowCase(size);
+}
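As a usage sketch of the extra-memory API above (the function and sizes are hypothetical; what matters is that the allocation-time report is paired with a visit-time report, per the warning in Heap.h earlier in this patch):

// Illustrative only: a cell that owns a 1 MB malloc'd buffer tells the GC about
// it when the buffer is created, so collections are scheduled as if that memory
// lived in the GC heap; the owning cell's visiting code must report the same
// memory again so the accounting survives each collection.
void noteBufferAllocated(JSC::Heap& heap)
{
    heap.reportExtraMemoryAllocated(1024 * 1024);
}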
+
+template<typename Functor> inline void Heap::forEachCodeBlock(Functor& functor)
+{
+ // We don't know the full set of CodeBlocks until compilation has terminated.
+ completeAllDFGPlans();
+
+ return m_codeBlocks.iterate<Functor>(functor);
+}
+
+template<typename Functor> inline typename Functor::ReturnType Heap::forEachProtectedCell(Functor& functor)
+{
+ for (auto& pair : m_protectedValues)
+ functor(pair.key);
+ m_handleSet.forEachStrongHandle(functor, m_protectedValues);
+
+ return functor.returnValue();
+}
+
+template<typename Functor> inline typename Functor::ReturnType Heap::forEachProtectedCell()
+{
+ Functor functor;
+ return forEachProtectedCell(functor);
+}
+
+inline void* Heap::allocateWithDestructor(size_t bytes)
+{
+#if ENABLE(ALLOCATION_LOGGING)
+ dataLogF("JSC GC allocating %lu bytes with normal destructor.\n", bytes);
+#endif
+ ASSERT(isValidAllocation(bytes));
+ return m_objectSpace.allocateWithDestructor(bytes);
+}
+
+inline void* Heap::allocateWithoutDestructor(size_t bytes)
+{
+#if ENABLE(ALLOCATION_LOGGING)
+ dataLogF("JSC GC allocating %lu bytes without destructor.\n", bytes);
+#endif
+ ASSERT(isValidAllocation(bytes));
+ return m_objectSpace.allocateWithoutDestructor(bytes);
+}
+
+template<typename ClassType>
+void* Heap::allocateObjectOfType(size_t bytes)
+{
+    // JSCell::classInfo() expects objects allocated with a normal destructor to derive from JSDestructibleObject.
+ ASSERT((!ClassType::needsDestruction || (ClassType::StructureFlags & StructureIsImmortal) || std::is_convertible<ClassType, JSDestructibleObject>::value));
+
+ if (ClassType::needsDestruction)
+ return allocateWithDestructor(bytes);
+ return allocateWithoutDestructor(bytes);
+}
+
+template<typename ClassType>
+MarkedSpace::Subspace& Heap::subspaceForObjectOfType()
+{
+    // JSCell::classInfo() expects objects allocated with a normal destructor to derive from JSDestructibleObject.
+ ASSERT((!ClassType::needsDestruction || (ClassType::StructureFlags & StructureIsImmortal) || std::is_convertible<ClassType, JSDestructibleObject>::value));
+
+ if (ClassType::needsDestruction)
+ return subspaceForObjectDestructor();
+ return subspaceForObjectWithoutDestructor();
+}
+
+template<typename ClassType>
+MarkedAllocator& Heap::allocatorForObjectOfType(size_t bytes)
+{
+    // JSCell::classInfo() expects objects allocated with a normal destructor to derive from JSDestructibleObject.
+ ASSERT((!ClassType::needsDestruction || (ClassType::StructureFlags & StructureIsImmortal) || std::is_convertible<ClassType, JSDestructibleObject>::value));
+
+ if (ClassType::needsDestruction)
+ return allocatorForObjectWithDestructor(bytes);
+ return allocatorForObjectWithoutDestructor(bytes);
+}
+
+inline CheckedBoolean Heap::tryAllocateStorage(JSCell* intendedOwner, size_t bytes, void** outPtr)
+{
+ CheckedBoolean result = m_storageSpace.tryAllocate(bytes, outPtr);
+#if ENABLE(ALLOCATION_LOGGING)
+ dataLogF("JSC GC allocating %lu bytes of storage for %p: %p.\n", bytes, intendedOwner, *outPtr);
+#else
+ UNUSED_PARAM(intendedOwner);
+#endif
+ return result;
+}
+
+inline CheckedBoolean Heap::tryReallocateStorage(JSCell* intendedOwner, void** ptr, size_t oldSize, size_t newSize)
+{
+#if ENABLE(ALLOCATION_LOGGING)
+ void* oldPtr = *ptr;
+#endif
+ CheckedBoolean result = m_storageSpace.tryReallocate(ptr, oldSize, newSize);
+#if ENABLE(ALLOCATION_LOGGING)
+ dataLogF("JSC GC reallocating %lu -> %lu bytes of storage for %p: %p -> %p.\n", oldSize, newSize, intendedOwner, oldPtr, *ptr);
+#else
+ UNUSED_PARAM(intendedOwner);
+#endif
+ return result;
+}
+
+inline void Heap::ascribeOwner(JSCell* intendedOwner, void* storage)
+{
+#if ENABLE(ALLOCATION_LOGGING)
+ dataLogF("JSC GC ascribing %p as owner of storage %p.\n", intendedOwner, storage);
+#else
+ UNUSED_PARAM(intendedOwner);
+ UNUSED_PARAM(storage);
+#endif
+}
+
+#if USE(CF)
+template <typename T>
+inline void Heap::releaseSoon(RetainPtr<T>&& object)
+{
+ m_delayedReleaseObjects.append(WTFMove(object));
+}
+#endif
+
+inline void Heap::incrementDeferralDepth()
+{
+ RELEASE_ASSERT(m_deferralDepth < 100); // Sanity check to make sure this doesn't get ridiculous.
+ m_deferralDepth++;
+}
+
+inline void Heap::decrementDeferralDepth()
+{
+ RELEASE_ASSERT(m_deferralDepth >= 1);
+ m_deferralDepth--;
+}
+
+inline bool Heap::collectIfNecessaryOrDefer()
+{
+ if (!shouldCollect())
+ return false;
+
+ collect();
+ return true;
+}
+
+inline void Heap::decrementDeferralDepthAndGCIfNeeded()
+{
+ decrementDeferralDepth();
+ collectIfNecessaryOrDefer();
+}
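These counters are normally driven by the RAII DeferGC helpers (declared as friends of Heap earlier in this header). A hedged usage sketch, assuming DeferGC's constructor takes the Heap:

// Illustrative only: while the DeferGC is alive, isDeferred() is true and
// shouldCollect() returns false; leaving the scope decrements the depth and
// runs collectIfNecessaryOrDefer(), so a deferred collection can happen there.
{
    JSC::DeferGC deferGC(vm.heap); // assumes a VM& named `vm` is in scope
    // ... allocate several cells without a collection interleaving ...
}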
+
+inline HashSet<MarkedArgumentBuffer*>& Heap::markListSet()
+{
+ if (!m_markListSet)
+ m_markListSet = std::make_unique<HashSet<MarkedArgumentBuffer*>>();
+ return *m_markListSet;
+}
+
+inline void Heap::registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback)
+{
+ m_weakGCMaps.add(weakGCMap, WTFMove(pruningCallback));
+}
+
+inline void Heap::unregisterWeakGCMap(void* weakGCMap)
+{
+ m_weakGCMaps.remove(weakGCMap);
+}
+
+inline void Heap::didAllocateBlock(size_t capacity)
+{
+#if ENABLE(RESOURCE_USAGE)
+ m_blockBytesAllocated += capacity;
+#else
+ UNUSED_PARAM(capacity);
+#endif
+}
+
+inline void Heap::didFreeBlock(size_t capacity)
+{
+#if ENABLE(RESOURCE_USAGE)
+ m_blockBytesAllocated -= capacity;
+#else
+ UNUSED_PARAM(capacity);
+#endif
+}
+
+inline bool Heap::isPointerGCObject(TinyBloomFilter filter, MarkedBlockSet& markedBlockSet, void* pointer)
+{
+ MarkedBlock* candidate = MarkedBlock::blockFor(pointer);
+ if (filter.ruleOut(bitwise_cast<Bits>(candidate))) {
+ ASSERT(!candidate || !markedBlockSet.set().contains(candidate));
+ return false;
+ }
+
+ if (!MarkedBlock::isAtomAligned(pointer))
+ return false;
+
+ if (!markedBlockSet.set().contains(candidate))
+ return false;
+
+ if (!candidate->isLiveCell(pointer))
+ return false;
+
+ return true;
+}
+
+inline bool Heap::isValueGCObject(TinyBloomFilter filter, MarkedBlockSet& markedBlockSet, JSValue value)
+{
+ if (!value.isCell())
+ return false;
+ return isPointerGCObject(filter, markedBlockSet, static_cast<void*>(value.asCell()));
+}
+
+} // namespace JSC
+
+#endif // HeapInlines_h
diff --git a/Source/JavaScriptCore/heap/SuperRegion.h b/Source/JavaScriptCore/heap/HeapIterationScope.h
index b659510f8..382661c60 100644
--- a/Source/JavaScriptCore/heap/SuperRegion.h
+++ b/Source/JavaScriptCore/heap/HeapIterationScope.h
@@ -23,36 +23,36 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef SuperRegion_h
-#define SuperRegion_h
+#ifndef HeapIterationScope_h
+#define HeapIterationScope_h
-#include <wtf/MetaAllocator.h>
-#include <wtf/PageBlock.h>
-#include <wtf/PageReservation.h>
+#include "Heap.h"
+#include <wtf/Noncopyable.h>
namespace JSC {
-class VM;
-
-class SuperRegion : public WTF::MetaAllocator {
+class HeapIterationScope {
+ WTF_MAKE_NONCOPYABLE(HeapIterationScope);
public:
- SuperRegion();
- virtual ~SuperRegion();
-
-protected:
- virtual void* allocateNewSpace(size_t&);
- virtual void notifyNeedPage(void*);
- virtual void notifyPageIsFree(void*);
+ HeapIterationScope(Heap&);
+ ~HeapIterationScope();
private:
- static const uint64_t s_fixedHeapMemoryPoolSize;
+ Heap& m_heap;
+};
- static void* getAlignedBase(PageReservation&);
+inline HeapIterationScope::HeapIterationScope(Heap& heap)
+ : m_heap(heap)
+{
+ m_heap.willStartIterating();
+}
- PageReservation m_reservation;
- void* m_reservationBase;
-};
+inline HeapIterationScope::~HeapIterationScope()
+{
+ m_heap.didFinishIterating();
+}
} // namespace JSC
-#endif // SuperRegion_h
+
+#endif // HeapIterationScope_h
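HeapIterationScope is the guard that the new forEachLiveCell()/forEachDeadCell() overloads in this patch require. A minimal usage sketch (heap and functor are assumed to be in scope):

// Illustrative only: whole-heap iteration is bracketed by the scope so the heap
// can prepare via willStartIterating() and clean up via didFinishIterating().
{
    JSC::HeapIterationScope iterationScope(heap);                // heap: JSC::Heap&
    heap.objectSpace().forEachLiveCell(iterationScope, functor); // functor: a MarkedBlock::VoidFunctor
}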
diff --git a/Source/JavaScriptCore/heap/HeapObserver.h b/Source/JavaScriptCore/heap/HeapObserver.h
new file mode 100644
index 000000000..c4c282c79
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapObserver.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HeapObserver_h
+#define HeapObserver_h
+
+#include "HeapOperation.h"
+
+namespace JSC {
+
+class HeapObserver {
+public:
+ virtual ~HeapObserver() { }
+ virtual void willGarbageCollect() = 0;
+ virtual void didGarbageCollect(HeapOperation) = 0;
+};
+
+} // namespace JSC
+
+#endif // HeapObserver_h
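A minimal observer, to illustrate the interface (the class is hypothetical; registration goes through Heap::addObserver()/removeObserver() added earlier in this patch):

#include "HeapObserver.h"
#include <wtf/DataLog.h>

// Illustrative only: logs both collection boundaries.
class LoggingHeapObserver final : public JSC::HeapObserver {
public:
    void willGarbageCollect() override { WTF::dataLog("GC starting\n"); }
    void didGarbageCollect(JSC::HeapOperation operation) override
    {
        WTF::dataLog("GC finished, operation=", static_cast<int>(operation), "\n");
    }
};
// Typical registration (hypothetical call site): vm.heap.addObserver(&observer);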
diff --git a/Source/JavaScriptCore/heap/HeapOperation.h b/Source/JavaScriptCore/heap/HeapOperation.h
new file mode 100644
index 000000000..272e3c068
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapOperation.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HeapOperation_h
+#define HeapOperation_h
+
+namespace JSC {
+
+enum HeapOperation { NoOperation, Allocation, FullCollection, EdenCollection, AnyCollection };
+
+} // namespace JSC
+
+#endif // HeapOperation_h
diff --git a/Source/JavaScriptCore/heap/HeapRootVisitor.h b/Source/JavaScriptCore/heap/HeapRootVisitor.h
index 5b11a5ead..cac0b3746 100644
--- a/Source/JavaScriptCore/heap/HeapRootVisitor.h
+++ b/Source/JavaScriptCore/heap/HeapRootVisitor.h
@@ -58,22 +58,23 @@ namespace JSC {
inline void HeapRootVisitor::visit(JSValue* slot)
{
- m_visitor.append(slot);
+ m_visitor.appendUnbarrieredValue(slot);
}
inline void HeapRootVisitor::visit(JSValue* slot, size_t count)
{
- m_visitor.append(slot, count);
+ for (size_t i = 0; i < count; ++i)
+ m_visitor.appendUnbarrieredValue(&slot[i]);
}
inline void HeapRootVisitor::visit(JSString** slot)
{
- m_visitor.append(reinterpret_cast<JSCell**>(slot));
+ m_visitor.appendUnbarrieredPointer(slot);
}
inline void HeapRootVisitor::visit(JSCell** slot)
{
- m_visitor.append(slot);
+ m_visitor.appendUnbarrieredPointer(slot);
}
inline SlotVisitor& HeapRootVisitor::visitor()
diff --git a/Source/JavaScriptCore/heap/HeapStatistics.cpp b/Source/JavaScriptCore/heap/HeapStatistics.cpp
index b63c316ce..12d12ce92 100644
--- a/Source/JavaScriptCore/heap/HeapStatistics.cpp
+++ b/Source/JavaScriptCore/heap/HeapStatistics.cpp
@@ -27,16 +27,18 @@
#include "HeapStatistics.h"
#include "Heap.h"
+#include "HeapIterationScope.h"
+#include "JSCInlines.h"
#include "JSObject.h"
-#include "Operations.h"
#include "Options.h"
#include <stdlib.h>
+#include <wtf/CurrentTime.h>
+#include <wtf/DataLog.h>
+#include <wtf/StdLibExtras.h>
+
#if OS(UNIX)
#include <sys/resource.h>
#endif
-#include <wtf/CurrentTime.h>
-#include <wtf/DataLog.h>
-#include <wtf/Deque.h>
namespace JSC {
@@ -131,6 +133,7 @@ void HeapStatistics::logStatistics()
void HeapStatistics::exitWithFailure()
{
+ exit(-1);
}
void HeapStatistics::reportSuccess()
@@ -139,33 +142,11 @@ void HeapStatistics::reportSuccess()
#endif // OS(UNIX)
-size_t HeapStatistics::parseMemoryAmount(char* s)
-{
- size_t multiplier = 1;
- char* afterS;
- size_t value = strtol(s, &afterS, 10);
- char next = afterS[0];
- switch (next) {
- case 'K':
- multiplier = KB;
- break;
- case 'M':
- multiplier = MB;
- break;
- case 'G':
- multiplier = GB;
- break;
- default:
- break;
- }
- return value * multiplier;
-}
-
class StorageStatistics : public MarkedBlock::VoidFunctor {
public:
StorageStatistics();
- void operator()(JSCell*);
+ IterationStatus operator()(JSCell*);
size_t objectWithOutOfLineStorageCount();
size_t objectCount();
@@ -174,6 +155,8 @@ public:
size_t storageCapacity();
private:
+ void visit(JSCell*);
+
size_t m_objectWithOutOfLineStorageCount;
size_t m_objectCount;
size_t m_storageSize;
@@ -188,13 +171,13 @@ inline StorageStatistics::StorageStatistics()
{
}
-inline void StorageStatistics::operator()(JSCell* cell)
+inline void StorageStatistics::visit(JSCell* cell)
{
if (!cell->isObject())
return;
JSObject* object = jsCast<JSObject*>(cell);
- if (hasIndexedProperties(object->structure()->indexingType()))
+ if (hasIndexedProperties(object->indexingType()))
return;
if (object->structure()->isUncacheableDictionary())
@@ -207,6 +190,12 @@ inline void StorageStatistics::operator()(JSCell* cell)
m_storageCapacity += object->structure()->totalStorageCapacity() * sizeof(WriteBarrierBase<Unknown>);
}
+inline IterationStatus StorageStatistics::operator()(JSCell* cell)
+{
+ visit(cell);
+ return IterationStatus::Continue;
+}
+
inline size_t StorageStatistics::objectWithOutOfLineStorageCount()
{
return m_objectWithOutOfLineStorageCount;
@@ -227,15 +216,18 @@ inline size_t StorageStatistics::storageCapacity()
return m_storageCapacity;
}
-void HeapStatistics::showObjectStatistics(Heap* heap)
+void HeapStatistics::dumpObjectStatistics(Heap* heap)
{
dataLogF("\n=== Heap Statistics: ===\n");
dataLogF("size: %ldkB\n", static_cast<long>(heap->m_sizeAfterLastCollect / KB));
dataLogF("capacity: %ldkB\n", static_cast<long>(heap->capacity() / KB));
- dataLogF("pause time: %lfs\n\n", heap->m_lastGCLength);
+ dataLogF("pause time: %lfs\n\n", heap->m_lastFullGCLength);
StorageStatistics storageStatistics;
- heap->m_objectSpace.forEachLiveCell(storageStatistics);
+ {
+ HeapIterationScope iterationScope(*heap);
+ heap->m_objectSpace.forEachLiveCell(iterationScope, storageStatistics);
+ }
long wastedPropertyStorageBytes = 0;
long wastedPropertyStoragePercent = 0;
long objectWithOutOfLineStorageCount = 0;
diff --git a/Source/JavaScriptCore/heap/HeapStatistics.h b/Source/JavaScriptCore/heap/HeapStatistics.h
index 13a29efbe..1ffda6e23 100644
--- a/Source/JavaScriptCore/heap/HeapStatistics.h
+++ b/Source/JavaScriptCore/heap/HeapStatistics.h
@@ -27,7 +27,7 @@
#define HeapStatistics_h
#include "JSExportMacros.h"
-#include <wtf/Deque.h>
+#include <wtf/Vector.h>
namespace JSC {
@@ -40,13 +40,8 @@ public:
static void initialize();
static void recordGCPauseTime(double start, double end);
- static size_t parseMemoryAmount(char*);
- static void showObjectStatistics(Heap*);
-
- static const size_t KB = 1024;
- static const size_t MB = 1024 * KB;
- static const size_t GB = 1024 * MB;
+ static void dumpObjectStatistics(Heap*);
private:
static void logStatistics();
diff --git a/Source/JavaScriptCore/heap/HeapTimer.cpp b/Source/JavaScriptCore/heap/HeapTimer.cpp
index a30a28b45..1ab4dd568 100644
--- a/Source/JavaScriptCore/heap/HeapTimer.cpp
+++ b/Source/JavaScriptCore/heap/HeapTimer.cpp
@@ -26,10 +26,11 @@
#include "config.h"
#include "HeapTimer.h"
-#include "APIShims.h"
+#include "GCActivityCallback.h"
+#include "IncrementalSweeper.h"
#include "JSObject.h"
#include "JSString.h"
-
+#include "JSCInlines.h"
#include <wtf/MainThread.h>
#include <wtf/Threading.h>
@@ -40,6 +41,8 @@
#include <QTimerEvent>
#elif PLATFORM(EFL)
#include <Ecore.h>
+#elif USE(GLIB)
+#include <glib.h>
#endif
namespace JSC {
@@ -67,7 +70,7 @@ HeapTimer::HeapTimer(VM* vm, CFRunLoopRef runLoop)
m_context.info = &vm->apiLock();
m_context.retain = retainAPILock;
m_context.release = releaseAPILock;
- m_timer = adoptCF(CFRunLoopTimerCreate(0, s_decade, s_decade, 0, 0, HeapTimer::timerDidFire, &m_context));
+ m_timer = adoptCF(CFRunLoopTimerCreate(kCFAllocatorDefault, s_decade, s_decade, 0, 0, HeapTimer::timerDidFire, &m_context));
CFRunLoopAddTimer(m_runLoop.get(), m_timer.get(), kCFRunLoopCommonModes);
}
@@ -90,45 +93,23 @@ void HeapTimer::timerDidFire(CFRunLoopTimerRef timer, void* context)
}
HeapTimer* heapTimer = 0;
- if (vm->heap.activityCallback()->m_timer.get() == timer)
- heapTimer = vm->heap.activityCallback();
+ if (vm->heap.fullActivityCallback() && vm->heap.fullActivityCallback()->m_timer.get() == timer)
+ heapTimer = vm->heap.fullActivityCallback();
+ else if (vm->heap.edenActivityCallback() && vm->heap.edenActivityCallback()->m_timer.get() == timer)
+ heapTimer = vm->heap.edenActivityCallback();
else if (vm->heap.sweeper()->m_timer.get() == timer)
heapTimer = vm->heap.sweeper();
else
RELEASE_ASSERT_NOT_REACHED();
{
- APIEntryShim shim(vm);
+ JSLockHolder locker(vm);
heapTimer->doWork();
}
apiLock->unlock();
}
-#elif PLATFORM(BLACKBERRY)
-
-HeapTimer::HeapTimer(VM* vm)
- : m_vm(vm)
- , m_timer(this, &HeapTimer::timerDidFire)
-{
- // FIXME: Implement HeapTimer for other threads.
- if (WTF::isMainThread() && !m_timer.tryCreateClient())
- CRASH();
-}
-
-HeapTimer::~HeapTimer()
-{
-}
-
-void HeapTimer::timerDidFire()
-{
- doWork();
-}
-
-void HeapTimer::invalidate()
-{
-}
-
#elif PLATFORM(QT)
HeapTimer::HeapTimer(VM* vm)
@@ -155,7 +136,7 @@ void HeapTimer::timerEvent(QTimerEvent*)
return;
}
- APIEntryShim shim(m_vm);
+ JSLockHolder locker(m_vm);
doWork();
}
@@ -198,13 +179,67 @@ bool HeapTimer::timerEvent(void* info)
{
HeapTimer* agent = static_cast<HeapTimer*>(info);
- APIEntryShim shim(agent->m_vm);
+ JSLockHolder locker(agent->m_vm);
agent->doWork();
agent->m_timer = 0;
return ECORE_CALLBACK_CANCEL;
}
+#elif USE(GLIB)
+
+static GSourceFuncs heapTimerSourceFunctions = {
+ nullptr, // prepare
+ nullptr, // check
+ // dispatch
+ [](GSource* source, GSourceFunc callback, gpointer userData) -> gboolean
+ {
+ if (g_source_get_ready_time(source) == -1)
+ return G_SOURCE_CONTINUE;
+ g_source_set_ready_time(source, -1);
+ return callback(userData);
+ },
+ nullptr, // finalize
+ nullptr, // closure_callback
+ nullptr, // closure_marshall
+};
+
+HeapTimer::HeapTimer(VM* vm)
+ : m_vm(vm)
+ , m_apiLock(&vm->apiLock())
+ , m_timer(adoptGRef(g_source_new(&heapTimerSourceFunctions, sizeof(GSource))))
+{
+ g_source_set_name(m_timer.get(), "[JavaScriptCore] HeapTimer");
+ g_source_set_callback(m_timer.get(), [](gpointer userData) -> gboolean {
+ static_cast<HeapTimer*>(userData)->timerDidFire();
+ return G_SOURCE_CONTINUE;
+ }, this, nullptr);
+ g_source_attach(m_timer.get(), g_main_context_get_thread_default());
+}
+
+HeapTimer::~HeapTimer()
+{
+ g_source_destroy(m_timer.get());
+}
+
+void HeapTimer::timerDidFire()
+{
+ m_apiLock->lock();
+
+ if (!m_apiLock->vm()) {
+ // The VM has been destroyed, so we should just give up.
+ m_apiLock->unlock();
+ return;
+ }
+
+ {
+ JSLockHolder locker(m_vm);
+ doWork();
+ }
+
+ m_apiLock->unlock();
+}
+
#else
HeapTimer::HeapTimer(VM* vm)
: m_vm(vm)
diff --git a/Source/JavaScriptCore/heap/HeapTimer.h b/Source/JavaScriptCore/heap/HeapTimer.h
index f7576edd9..740a71f57 100644
--- a/Source/JavaScriptCore/heap/HeapTimer.h
+++ b/Source/JavaScriptCore/heap/HeapTimer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,24 +26,26 @@
#ifndef HeapTimer_h
#define HeapTimer_h
+#include <wtf/Lock.h>
#include <wtf/RetainPtr.h>
#include <wtf/Threading.h>
#if USE(CF)
#include <CoreFoundation/CoreFoundation.h>
-#elif PLATFORM(BLACKBERRY)
-#include <BlackBerryPlatformTimer.h>
+#endif
+
+#if USE(GLIB) && !PLATFORM(EFL) && !PLATFORM(QT)
+#include <wtf/glib/GRefPtr.h>
#elif PLATFORM(QT)
#include <QBasicTimer>
#include <QMutex>
#include <QObject>
#include <QThread>
-#elif PLATFORM(EFL)
-typedef struct _Ecore_Timer Ecore_Timer;
#endif
namespace JSC {
+class JSLock;
class VM;
#if PLATFORM(QT) && !USE(CF)
@@ -59,7 +61,7 @@ public:
HeapTimer(VM*);
#endif
- virtual ~HeapTimer();
+ JS_EXPORT_PRIVATE virtual ~HeapTimer();
virtual void doWork() = 0;
protected:
@@ -72,11 +74,7 @@ protected:
RetainPtr<CFRunLoopRef> m_runLoop;
CFRunLoopTimerContext m_context;
- Mutex m_shutdownMutex;
-#elif PLATFORM(BLACKBERRY)
- void timerDidFire();
-
- BlackBerry::Platform::Timer<HeapTimer> m_timer;
+ Lock m_shutdownMutex;
#elif PLATFORM(QT)
void timerEvent(QTimerEvent*);
void customEvent(QEvent*);
@@ -88,6 +86,10 @@ protected:
Ecore_Timer* add(double delay, void* agent);
void stop();
Ecore_Timer* m_timer;
+#elif USE(GLIB)
+ void timerDidFire();
+ RefPtr<JSLock> m_apiLock;
+ GRefPtr<GSource> m_timer;
#endif
private:
diff --git a/Source/JavaScriptCore/heap/HeapVerifier.cpp b/Source/JavaScriptCore/heap/HeapVerifier.cpp
new file mode 100644
index 000000000..0f4e28277
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapVerifier.cpp
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "HeapVerifier.h"
+
+#include "ButterflyInlines.h"
+#include "CopiedSpaceInlines.h"
+#include "HeapIterationScope.h"
+#include "JSCInlines.h"
+#include "JSObject.h"
+
+namespace JSC {
+
+HeapVerifier::HeapVerifier(Heap* heap, unsigned numberOfGCCyclesToRecord)
+ : m_heap(heap)
+ , m_currentCycle(0)
+ , m_numberOfCycles(numberOfGCCyclesToRecord)
+{
+ RELEASE_ASSERT(m_numberOfCycles > 0);
+ m_cycles = std::make_unique<GCCycle[]>(m_numberOfCycles);
+}
+
+const char* HeapVerifier::collectionTypeName(HeapOperation type)
+{
+ switch (type) {
+ case NoOperation:
+ return "NoOperation";
+ case AnyCollection:
+ return "AnyCollection";
+ case Allocation:
+ return "Allocation";
+ case EdenCollection:
+ return "EdenCollection";
+ case FullCollection:
+ return "FullCollection";
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr; // Silencing a compiler warning.
+}
+
+const char* HeapVerifier::phaseName(HeapVerifier::Phase phase)
+{
+ switch (phase) {
+ case Phase::BeforeGC:
+ return "BeforeGC";
+ case Phase::BeforeMarking:
+ return "BeforeMarking";
+ case Phase::AfterMarking:
+ return "AfterMarking";
+ case Phase::AfterGC:
+ return "AfterGC";
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr; // Silencing a compiler warning.
+}
+
+static void getButterflyDetails(JSObject* obj, void*& butterflyBase, size_t& butterflyCapacityInBytes, CopiedBlock*& butterflyBlock)
+{
+ Structure* structure = obj->structure();
+ Butterfly* butterfly = obj->butterfly();
+ butterflyBase = butterfly->base(structure);
+ butterflyBlock = CopiedSpace::blockFor(butterflyBase);
+
+ size_t propertyCapacity = structure->outOfLineCapacity();
+ size_t preCapacity;
+ size_t indexingPayloadSizeInBytes;
+ bool hasIndexingHeader = obj->hasIndexingHeader();
+ if (UNLIKELY(hasIndexingHeader)) {
+ preCapacity = butterfly->indexingHeader()->preCapacity(structure);
+ indexingPayloadSizeInBytes = butterfly->indexingHeader()->indexingPayloadSizeInBytes(structure);
+ } else {
+ preCapacity = 0;
+ indexingPayloadSizeInBytes = 0;
+ }
+ butterflyCapacityInBytes = Butterfly::totalSize(preCapacity, propertyCapacity, hasIndexingHeader, indexingPayloadSizeInBytes);
+}
+
+void HeapVerifier::initializeGCCycle()
+{
+ Heap* heap = m_heap;
+ incrementCycle();
+ currentCycle().collectionType = heap->operationInProgress();
+}
+
+struct GatherLiveObjFunctor : MarkedBlock::CountFunctor {
+ GatherLiveObjFunctor(LiveObjectList& list)
+ : m_list(list)
+ {
+ ASSERT(!list.liveObjects.size());
+ }
+
+ inline void visit(JSCell* cell)
+ {
+ if (!cell->isObject())
+ return;
+ LiveObjectData data(asObject(cell));
+ m_list.liveObjects.append(data);
+ }
+
+ IterationStatus operator()(JSCell* cell)
+ {
+ visit(cell);
+ return IterationStatus::Continue;
+ }
+
+ LiveObjectList& m_list;
+};
+
+void HeapVerifier::gatherLiveObjects(HeapVerifier::Phase phase)
+{
+ Heap* heap = m_heap;
+ LiveObjectList& list = *liveObjectListForGathering(phase);
+
+ HeapIterationScope iterationScope(*heap);
+ list.reset();
+ GatherLiveObjFunctor functor(list);
+ heap->m_objectSpace.forEachLiveCell(iterationScope, functor);
+}
+
+LiveObjectList* HeapVerifier::liveObjectListForGathering(HeapVerifier::Phase phase)
+{
+ switch (phase) {
+ case Phase::BeforeMarking:
+ return &currentCycle().before;
+ case Phase::AfterMarking:
+ return &currentCycle().after;
+ case Phase::BeforeGC:
+ case Phase::AfterGC:
+ // We should not be gathering live objects during these phases.
+ break;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr; // Silencing a compiler warning.
+}
+
+static void trimDeadObjectsFromList(HashSet<JSObject*>& knownLiveSet, LiveObjectList& list)
+{
+ if (!list.hasLiveObjects)
+ return;
+
+ size_t liveObjectsFound = 0;
+ for (size_t i = 0; i < list.liveObjects.size(); i++) {
+ LiveObjectData& objData = list.liveObjects[i];
+ if (objData.isConfirmedDead)
+ continue; // Don't "resurrect" known dead objects.
+ if (!knownLiveSet.contains(objData.obj)) {
+ objData.isConfirmedDead = true;
+ continue;
+ }
+ liveObjectsFound++;
+ }
+ list.hasLiveObjects = !!liveObjectsFound;
+}
+
+void HeapVerifier::trimDeadObjects()
+{
+ HashSet<JSObject*> knownLiveSet;
+
+ LiveObjectList& after = currentCycle().after;
+ for (size_t i = 0; i < after.liveObjects.size(); i++) {
+ LiveObjectData& objData = after.liveObjects[i];
+ knownLiveSet.add(objData.obj);
+ }
+
+ trimDeadObjectsFromList(knownLiveSet, currentCycle().before);
+
+ for (int i = -1; i > -m_numberOfCycles; i--) {
+ trimDeadObjectsFromList(knownLiveSet, cycleForIndex(i).before);
+ trimDeadObjectsFromList(knownLiveSet, cycleForIndex(i).after);
+ }
+}
+
+bool HeapVerifier::verifyButterflyIsInStorageSpace(Phase phase, LiveObjectList& list)
+{
+ auto& liveObjects = list.liveObjects;
+
+ CopiedSpace& storageSpace = m_heap->m_storageSpace;
+ bool listNamePrinted = false;
+ bool success = true;
+ for (size_t i = 0; i < liveObjects.size(); i++) {
+ LiveObjectData& objectData = liveObjects[i];
+ if (objectData.isConfirmedDead)
+ continue;
+
+ JSObject* obj = objectData.obj;
+ Butterfly* butterfly = obj->butterfly();
+ if (butterfly) {
+ void* butterflyBase;
+ size_t butterflyCapacityInBytes;
+ CopiedBlock* butterflyBlock;
+ getButterflyDetails(obj, butterflyBase, butterflyCapacityInBytes, butterflyBlock);
+
+ if (!storageSpace.contains(butterflyBlock)) {
+ if (!listNamePrinted) {
+ dataLogF("Verification @ phase %s FAILED in object list '%s' (size %zu)\n",
+ phaseName(phase), list.name, liveObjects.size());
+ listNamePrinted = true;
+ }
+
+ Structure* structure = obj->structure();
+ const char* structureClassName = structure->classInfo()->className;
+ dataLogF(" butterfly %p (base %p size %zu block %p) NOT in StorageSpace | obj %p type '%s'\n",
+ butterfly, butterflyBase, butterflyCapacityInBytes, butterflyBlock, obj, structureClassName);
+ success = false;
+ }
+ }
+ }
+ return success;
+}
+
+void HeapVerifier::verify(HeapVerifier::Phase phase)
+{
+ bool beforeVerified = verifyButterflyIsInStorageSpace(phase, currentCycle().before);
+ bool afterVerified = verifyButterflyIsInStorageSpace(phase, currentCycle().after);
+ RELEASE_ASSERT(beforeVerified && afterVerified);
+}
+
+void HeapVerifier::reportObject(LiveObjectData& objData, int cycleIndex, HeapVerifier::GCCycle& cycle, LiveObjectList& list)
+{
+ JSObject* obj = objData.obj;
+
+ if (objData.isConfirmedDead) {
+ dataLogF("FOUND dead obj %p in GC[%d] %s list '%s'\n",
+ obj, cycleIndex, cycle.collectionTypeName(), list.name);
+ return;
+ }
+
+ Structure* structure = obj->structure();
+ Butterfly* butterfly = obj->butterfly();
+ void* butterflyBase;
+ size_t butterflyCapacityInBytes;
+ CopiedBlock* butterflyBlock;
+ getButterflyDetails(obj, butterflyBase, butterflyCapacityInBytes, butterflyBlock);
+
+ dataLogF("FOUND obj %p type '%s' butterfly %p (base %p size %zu block %p) in GC[%d] %s list '%s'\n",
+ obj, structure->classInfo()->className,
+ butterfly, butterflyBase, butterflyCapacityInBytes, butterflyBlock,
+ cycleIndex, cycle.collectionTypeName(), list.name);
+}
+
+void HeapVerifier::checkIfRecorded(JSObject* obj)
+{
+ bool found = false;
+
+ for (int cycleIndex = 0; cycleIndex > -m_numberOfCycles; cycleIndex--) {
+ GCCycle& cycle = cycleForIndex(cycleIndex);
+ LiveObjectList& beforeList = cycle.before;
+ LiveObjectList& afterList = cycle.after;
+
+ LiveObjectData* objData;
+ objData = beforeList.findObject(obj);
+ if (objData) {
+ reportObject(*objData, cycleIndex, cycle, beforeList);
+ found = true;
+ }
+ objData = afterList.findObject(obj);
+ if (objData) {
+ reportObject(*objData, cycleIndex, cycle, afterList);
+ found = true;
+ }
+ }
+
+ if (!found)
+ dataLogF("obj %p NOT FOUND\n", obj);
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapVerifier.h b/Source/JavaScriptCore/heap/HeapVerifier.h
new file mode 100644
index 000000000..d55ec4a89
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapVerifier.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2014-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HeapVerifier_h
+#define HeapVerifier_h
+
+#include "Heap.h"
+#include "LiveObjectList.h"
+
+namespace JSC {
+
+class JSObject;
+class MarkedBlock;
+
+class HeapVerifier {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ enum class Phase {
+ BeforeGC,
+ BeforeMarking,
+ AfterMarking,
+ AfterGC
+ };
+
+ HeapVerifier(Heap*, unsigned numberOfGCCyclesToRecord);
+
+ void initializeGCCycle();
+ void gatherLiveObjects(Phase);
+ void trimDeadObjects();
+ void verify(Phase);
+
+ // Scans all previously recorded LiveObjectLists and checks if the specified
+ // object was in any of those lists.
+ JS_EXPORT_PRIVATE void checkIfRecorded(JSObject*);
+
+ static const char* collectionTypeName(HeapOperation);
+ static const char* phaseName(Phase);
+
+private:
+ struct GCCycle {
+ GCCycle()
+ : before("Before Marking")
+ , after("After Marking")
+ {
+ }
+
+ HeapOperation collectionType;
+ LiveObjectList before;
+ LiveObjectList after;
+
+ const char* collectionTypeName() const
+ {
+ return HeapVerifier::collectionTypeName(collectionType);
+ }
+ };
+
+ void incrementCycle() { m_currentCycle = (m_currentCycle + 1) % m_numberOfCycles; }
+ GCCycle& currentCycle() { return m_cycles[m_currentCycle]; }
+ GCCycle& cycleForIndex(int cycleIndex)
+ {
+ ASSERT(cycleIndex <= 0 && cycleIndex > -m_numberOfCycles);
+ cycleIndex += m_currentCycle;
+ if (cycleIndex < 0)
+ cycleIndex += m_numberOfCycles;
+ ASSERT(cycleIndex < m_numberOfCycles);
+ return m_cycles[cycleIndex];
+ }
+
+ LiveObjectList* liveObjectListForGathering(Phase);
+ bool verifyButterflyIsInStorageSpace(Phase, LiveObjectList&);
+
+ static void reportObject(LiveObjectData&, int cycleIndex, HeapVerifier::GCCycle&, LiveObjectList&);
+
+ Heap* m_heap;
+ int m_currentCycle;
+ int m_numberOfCycles;
+ std::unique_ptr<GCCycle[]> m_cycles;
+};
+
+} // namespace JSC
+
+#endif // HeapVerifier_h
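
HeapVerifier keeps the last N GC cycles in a fixed-size ring buffer; cycleForIndex() maps a non-positive relative index (0 for the current cycle, -1 for the previous one, and so on) onto a slot in that buffer. A standalone sketch of the same wrap-around arithmetic (function and variable names are illustrative only):

    #include <cassert>

    // Same arithmetic as HeapVerifier::cycleForIndex: relativeIndex is 0 for the
    // current cycle, -1 for the previous one, down to -(numberOfCycles - 1).
    int ringIndex(int relativeIndex, int currentCycle, int numberOfCycles)
    {
        assert(relativeIndex <= 0 && relativeIndex > -numberOfCycles);
        int index = currentCycle + relativeIndex;
        if (index < 0)
            index += numberOfCycles; // wrap around to the tail of the buffer
        assert(index < numberOfCycles);
        return index;
    }

    // Example: with 3 recorded cycles and currentCycle == 0,
    // the previous cycle (-1) lives in slot 2: ringIndex(-1, 0, 3) == 2.
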
diff --git a/Source/JavaScriptCore/heap/IncrementalSweeper.cpp b/Source/JavaScriptCore/heap/IncrementalSweeper.cpp
index 038432a5d..548a8bcf4 100644
--- a/Source/JavaScriptCore/heap/IncrementalSweeper.cpp
+++ b/Source/JavaScriptCore/heap/IncrementalSweeper.cpp
@@ -26,37 +26,37 @@
#include "config.h"
#include "IncrementalSweeper.h"
-#include "APIShims.h"
#include "Heap.h"
#include "JSObject.h"
#include "JSString.h"
#include "MarkedBlock.h"
+#include "JSCInlines.h"
#include <wtf/HashSet.h>
#include <wtf/WTFThreadData.h>
+#if PLATFORM(EFL)
+#include <Ecore.h>
+#include <wtf/CurrentTime.h>
+#elif USE(GLIB) && !PLATFORM(QT)
+#include <glib.h>
+#endif
+
namespace JSC {
-#if USE(CF) || PLATFORM(BLACKBERRY) || PLATFORM(QT)
+#if USE(CF) || PLATFORM(EFL) || USE(GLIB) || PLATFORM(QT)
static const double sweepTimeSlice = .01; // seconds
static const double sweepTimeTotal = .10;
static const double sweepTimeMultiplier = 1.0 / sweepTimeTotal;
#if USE(CF)
-
IncrementalSweeper::IncrementalSweeper(Heap* heap, CFRunLoopRef runLoop)
: HeapTimer(heap->vm(), runLoop)
- , m_currentBlockToSweepIndex(0)
, m_blocksToSweep(heap->m_blockSnapshot)
{
}
-PassOwnPtr<IncrementalSweeper> IncrementalSweeper::create(Heap* heap)
-{
- return adoptPtr(new IncrementalSweeper(heap, CFRunLoopGetCurrent()));
-}
-
void IncrementalSweeper::scheduleTimer()
{
CFRunLoopTimerSetNextFireDate(m_timer.get(), CFAbsoluteTimeGetCurrent() + (sweepTimeSlice * sweepTimeMultiplier));
@@ -66,35 +66,62 @@ void IncrementalSweeper::cancelTimer()
{
CFRunLoopTimerSetNextFireDate(m_timer.get(), CFAbsoluteTimeGetCurrent() + s_decade);
}
-
-#elif PLATFORM(BLACKBERRY) || PLATFORM(QT)
-
+#elif PLATFORM(EFL)
IncrementalSweeper::IncrementalSweeper(Heap* heap)
: HeapTimer(heap->vm())
- , m_currentBlockToSweepIndex(0)
, m_blocksToSweep(heap->m_blockSnapshot)
{
}
-PassOwnPtr<IncrementalSweeper> IncrementalSweeper::create(Heap* heap)
+void IncrementalSweeper::scheduleTimer()
+{
+ if (ecore_timer_freeze_get(m_timer))
+ ecore_timer_thaw(m_timer);
+
+ double targetTime = currentTime() + (sweepTimeSlice * sweepTimeMultiplier);
+ ecore_timer_interval_set(m_timer, targetTime);
+}
+
+void IncrementalSweeper::cancelTimer()
+{
+ ecore_timer_freeze(m_timer);
+}
+#elif PLATFORM(QT)
+IncrementalSweeper::IncrementalSweeper(Heap* heap)
+ : HeapTimer(heap->vm())
+ , m_blocksToSweep(heap->m_blockSnapshot)
{
- return adoptPtr(new IncrementalSweeper(heap));
}
void IncrementalSweeper::scheduleTimer()
{
-#if PLATFORM(QT)
m_timer.start(sweepTimeSlice * sweepTimeMultiplier * 1000, this);
-#else
- m_timer.start(sweepTimeSlice * sweepTimeMultiplier);
-#endif
}
void IncrementalSweeper::cancelTimer()
{
m_timer.stop();
}
+#elif USE(GLIB)
+IncrementalSweeper::IncrementalSweeper(Heap* heap)
+ : HeapTimer(heap->vm())
+ , m_blocksToSweep(heap->m_blockSnapshot)
+{
+}
+
+void IncrementalSweeper::scheduleTimer()
+{
+ auto delayDuration = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::duration<double>(sweepTimeSlice * sweepTimeMultiplier));
+ gint64 currentTime = g_get_monotonic_time();
+ gint64 targetTime = currentTime + std::min<gint64>(G_MAXINT64 - currentTime, delayDuration.count());
+ ASSERT(targetTime >= currentTime);
+ g_source_set_ready_time(m_timer.get(), targetTime);
+}
+void IncrementalSweeper::cancelTimer()
+{
+ g_source_set_ready_time(m_timer.get(), -1);
+}
#endif
void IncrementalSweeper::doWork()
@@ -104,9 +131,7 @@ void IncrementalSweeper::doWork()
void IncrementalSweeper::doSweep(double sweepBeginTime)
{
- while (m_currentBlockToSweepIndex < m_blocksToSweep.size()) {
- sweepNextBlock();
-
+ while (sweepNextBlock()) {
double elapsedTime = WTF::monotonicallyIncreasingTime() - sweepBeginTime;
if (elapsedTime < sweepTimeSlice)
continue;
@@ -119,30 +144,30 @@ void IncrementalSweeper::doSweep(double sweepBeginTime)
cancelTimer();
}
-void IncrementalSweeper::sweepNextBlock()
+bool IncrementalSweeper::sweepNextBlock()
{
- while (m_currentBlockToSweepIndex < m_blocksToSweep.size()) {
- MarkedBlock* block = m_blocksToSweep[m_currentBlockToSweepIndex++];
+ while (!m_blocksToSweep.isEmpty()) {
+ MarkedBlock* block = m_blocksToSweep.takeLast();
if (!block->needsSweeping())
continue;
+ DeferGCForAWhile deferGC(m_vm->heap);
block->sweep();
m_vm->heap.objectSpace().freeOrShrinkBlock(block);
- return;
+ return true;
}
+
+ return m_vm->heap.sweepNextLogicallyEmptyWeakBlock();
}
-void IncrementalSweeper::startSweeping(Vector<MarkedBlock*>& blockSnapshot)
+void IncrementalSweeper::startSweeping()
{
- m_blocksToSweep = blockSnapshot;
- m_currentBlockToSweepIndex = 0;
scheduleTimer();
}
void IncrementalSweeper::willFinishSweeping()
{
- m_currentBlockToSweepIndex = 0;
m_blocksToSweep.clear();
if (m_vm)
cancelTimer();
@@ -150,8 +175,8 @@ void IncrementalSweeper::willFinishSweeping()
#else
-IncrementalSweeper::IncrementalSweeper(VM* vm)
- : HeapTimer(vm)
+IncrementalSweeper::IncrementalSweeper(Heap* heap)
+ : HeapTimer(heap->vm())
{
}
@@ -159,12 +184,7 @@ void IncrementalSweeper::doWork()
{
}
-PassOwnPtr<IncrementalSweeper> IncrementalSweeper::create(Heap* heap)
-{
- return adoptPtr(new IncrementalSweeper(heap->vm()));
-}
-
-void IncrementalSweeper::startSweeping(Vector<MarkedBlock*>&)
+void IncrementalSweeper::startSweeping()
{
}
@@ -172,8 +192,9 @@ void IncrementalSweeper::willFinishSweeping()
{
}
-void IncrementalSweeper::sweepNextBlock()
+bool IncrementalSweeper::sweepNextBlock()
{
+ return false;
}
#endif
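
The reworked doSweep() loop above sweeps blocks until either the work runs out or the sweepTimeSlice budget is spent, then re-arms the timer to continue later. A self-contained sketch of that time-slicing pattern, using std::chrono in place of WTF's monotonic clock (sweepOneUnit, scheduleTimer and cancelTimer are stand-ins for the real sweeper hooks and are illustrative only):

    #include <chrono>

    // Stand-ins for the real sweeper hooks; illustrative only.
    static int remainingBlocks = 100;
    static bool sweepOneUnit() { return remainingBlocks-- > 0; } // mirrors sweepNextBlock()
    static void scheduleTimer() { /* re-arm the timer for the next slice */ }
    static void cancelTimer()   { /* nothing left to sweep */ }

    void doSweepSlice()
    {
        constexpr double sweepTimeSlice = 0.01; // seconds, as in the patch
        auto sliceBegin = std::chrono::steady_clock::now();

        while (sweepOneUnit()) {
            std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - sliceBegin;
            if (elapsed.count() < sweepTimeSlice)
                continue;      // still within the slice budget; keep sweeping
            scheduleTimer();   // budget exhausted; resume on the next timer fire
            return;
        }
        cancelTimer();         // all work done
    }
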
diff --git a/Source/JavaScriptCore/heap/IncrementalSweeper.h b/Source/JavaScriptCore/heap/IncrementalSweeper.h
index 1c6a3786d..87d265ddb 100644
--- a/Source/JavaScriptCore/heap/IncrementalSweeper.h
+++ b/Source/JavaScriptCore/heap/IncrementalSweeper.h
@@ -27,42 +27,35 @@
#define IncrementalSweeper_h
#include "HeapTimer.h"
-#include "MarkedBlock.h"
-#include <wtf/HashSet.h>
-#include <wtf/PassOwnPtr.h>
-#include <wtf/RetainPtr.h>
#include <wtf/Vector.h>
namespace JSC {
class Heap;
+class MarkedBlock;
class IncrementalSweeper : public HeapTimer {
+ WTF_MAKE_FAST_ALLOCATED;
public:
- static PassOwnPtr<IncrementalSweeper> create(Heap*);
- void startSweeping(Vector<MarkedBlock*>&);
- virtual void doWork();
- void sweepNextBlock();
- void willFinishSweeping();
-
-private:
-#if USE(CF) || PLATFORM(BLACKBERRY) || PLATFORM(QT)
#if USE(CF)
- IncrementalSweeper(Heap*, CFRunLoopRef);
+ JS_EXPORT_PRIVATE IncrementalSweeper(Heap*, CFRunLoopRef);
#else
- IncrementalSweeper(Heap*);
+ explicit IncrementalSweeper(Heap*);
#endif
-
+
+ void startSweeping();
+
+ JS_EXPORT_PRIVATE virtual void doWork() override;
+ bool sweepNextBlock();
+ void willFinishSweeping();
+
+#if USE(CF) || PLATFORM(EFL) || USE(GLIB) || PLATFORM(QT)
+private:
void doSweep(double startTime);
void scheduleTimer();
void cancelTimer();
- unsigned m_currentBlockToSweepIndex;
Vector<MarkedBlock*>& m_blocksToSweep;
-#else
-
- IncrementalSweeper(VM*);
-
#endif
};
diff --git a/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp b/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp
index a37dc6f5c..ae8059532 100644
--- a/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp
+++ b/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp
@@ -29,7 +29,7 @@
#if ENABLE(JIT)
#include "GCAwareJITStubRoutine.h"
-
+#include "JSCInlines.h"
#include "SlotVisitor.h"
namespace JSC {
diff --git a/Source/JavaScriptCore/heap/JITStubRoutineSet.h b/Source/JavaScriptCore/heap/JITStubRoutineSet.h
index 2c2e9fa86..25ec44c13 100644
--- a/Source/JavaScriptCore/heap/JITStubRoutineSet.h
+++ b/Source/JavaScriptCore/heap/JITStubRoutineSet.h
@@ -26,10 +26,8 @@
#ifndef JITStubRoutineSet_h
#define JITStubRoutineSet_h
-#include <wtf/Platform.h>
-
#include "JITStubRoutine.h"
-#include <wtf/FastAllocBase.h>
+#include <wtf/FastMalloc.h>
#include <wtf/HashMap.h>
#include <wtf/Vector.h>
diff --git a/Source/JavaScriptCore/heap/ListableHandler.h b/Source/JavaScriptCore/heap/ListableHandler.h
index 16c34146c..2e58ee8a6 100644
--- a/Source/JavaScriptCore/heap/ListableHandler.h
+++ b/Source/JavaScriptCore/heap/ListableHandler.h
@@ -21,15 +21,14 @@
#define ListableHandler_h
#include <stdint.h>
+#include <wtf/Lock.h>
#include <wtf/Locker.h>
#include <wtf/Noncopyable.h>
#include <wtf/ThreadingPrimitives.h>
-#include <wtf/TCSpinLock.h>
namespace JSC {
-class MarkStack;
-class MarkStackThreadSharedData;
+class Heap;
class SlotVisitor;
template<typename T>
@@ -51,8 +50,7 @@ protected:
private:
// Allow these classes to use ListableHandler::List.
- friend class MarkStack;
- friend class GCThreadSharedData;
+ friend class Heap;
friend class SlotVisitor;
class List {
@@ -61,12 +59,11 @@ private:
List()
: m_first(0)
{
- m_lock.Init();
}
void addThreadSafe(T* handler)
{
- SpinLockHolder locker(&m_lock);
+ LockHolder locker(&m_lock);
addNotThreadSafe(handler);
}
@@ -104,7 +101,7 @@ private:
m_first = handler;
}
- SpinLock m_lock;
+ Lock m_lock;
T* m_first;
};
diff --git a/Source/JavaScriptCore/heap/LiveObjectData.h b/Source/JavaScriptCore/heap/LiveObjectData.h
new file mode 100644
index 000000000..6953266e0
--- /dev/null
+++ b/Source/JavaScriptCore/heap/LiveObjectData.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LiveObjectData_h
+#define LiveObjectData_h
+
+namespace JSC {
+
+class JSObject;
+
+struct LiveObjectData {
+ LiveObjectData(JSObject* obj, bool isConfirmedDead = false)
+ : obj(obj)
+ , isConfirmedDead(isConfirmedDead)
+ {
+ }
+
+ JSObject* obj;
+ bool isConfirmedDead;
+};
+
+} // namespace JSC
+
+#endif // LiveObjectData_h
+
diff --git a/Source/JavaScriptCore/heap/LiveObjectList.cpp b/Source/JavaScriptCore/heap/LiveObjectList.cpp
new file mode 100644
index 000000000..af0367dc3
--- /dev/null
+++ b/Source/JavaScriptCore/heap/LiveObjectList.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LiveObjectList.h"
+
+namespace JSC {
+
+LiveObjectData* LiveObjectList::findObject(JSObject* obj)
+{
+ for (size_t i = 0; i < liveObjects.size(); i++) {
+ LiveObjectData& data = liveObjects[i];
+ if (obj == data.obj)
+ return &data;
+ }
+ return nullptr;
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/LiveObjectList.h b/Source/JavaScriptCore/heap/LiveObjectList.h
new file mode 100644
index 000000000..4d2874570
--- /dev/null
+++ b/Source/JavaScriptCore/heap/LiveObjectList.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LiveObjectList_h
+#define LiveObjectList_h
+
+#include "LiveObjectData.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+struct LiveObjectList {
+ LiveObjectList(const char* name)
+ : name(name)
+ , hasLiveObjects(true)
+ {
+ }
+
+ void reset()
+ {
+ liveObjects.clear();
+ hasLiveObjects = true; // Presume to have live objects until the list is trimmed.
+ }
+
+ LiveObjectData* findObject(JSObject*);
+
+ const char* name;
+ Vector<LiveObjectData> liveObjects;
+ bool hasLiveObjects;
+};
+
+} // namespace JSC
+
+#endif // LiveObjectList_h
+
diff --git a/Source/JavaScriptCore/heap/Local.h b/Source/JavaScriptCore/heap/Local.h
index d23435989..14c4dee26 100644
--- a/Source/JavaScriptCore/heap/Local.h
+++ b/Source/JavaScriptCore/heap/Local.h
@@ -141,7 +141,7 @@ private:
namespace WTF {
-template<typename T> struct VectorTraits<JSC::Local<T> > : SimpleClassVectorTraits {
+template<typename T> struct VectorTraits<JSC::Local<T>> : SimpleClassVectorTraits {
static const bool needsDestruction = false;
static const bool canInitializeWithMemset = false;
static const bool canCompareWithMemcmp = false;
diff --git a/Source/JavaScriptCore/heap/MachineStackMarker.cpp b/Source/JavaScriptCore/heap/MachineStackMarker.cpp
index 78fdfa496..883914fd3 100644
--- a/Source/JavaScriptCore/heap/MachineStackMarker.cpp
+++ b/Source/JavaScriptCore/heap/MachineStackMarker.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2009, 2015 Apple Inc. All rights reserved.
* Copyright (C) 2007 Eric Seidel <eric@webkit.org>
* Copyright (C) 2009 Acision BV. All rights reserved.
*
@@ -23,8 +23,12 @@
#include "MachineStackMarker.h"
#include "ConservativeRoots.h"
+#include "GPRInfo.h"
#include "Heap.h"
#include "JSArray.h"
+#include "JSCInlines.h"
+#include "LLIntPCRanges.h"
+#include "MacroAssembler.h"
#include "VM.h"
#include <setjmp.h>
#include <stdlib.h>
@@ -58,97 +62,154 @@
#include <pthread_np.h>
#endif
-#if OS(QNX)
-#include <fcntl.h>
-#include <sys/procfs.h>
-#include <stdio.h>
-#include <errno.h>
-#endif
-
#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
#include <signal.h>
-#endif
-#endif
-
-using namespace WTF;
-
-namespace JSC {
+// We use SIGUSR2 to suspend and resume machine threads in JavaScriptCore.
+static const int SigThreadSuspendResume = SIGUSR2;
+static StaticLock globalSignalLock;
+thread_local static std::atomic<JSC::MachineThreads::Thread*> threadLocalCurrentThread;
-static inline void swapIfBackwards(void*& begin, void*& end)
+static void pthreadSignalHandlerSuspendResume(int, siginfo_t*, void* ucontext)
{
-#if OS(WINCE)
- if (begin <= end)
+ // Touching thread local atomic types from signal handlers is allowed.
+ JSC::MachineThreads::Thread* thread = threadLocalCurrentThread.load();
+
+ if (thread->suspended.load(std::memory_order_acquire)) {
+ // This signal handler invocation is only meant to resume the sigsuspend below,
+ // so the handler itself should not do any processing in that case.
+ //
+ // When the signal arrives, the system calls the signal handler first, and only afterwards does sigsuspend resume; the handler invocation always precedes.
+ // So the problematic case where suspended.store(true, ...) executes before the handler is called can never happen.
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/sigsuspend.html
return;
- std::swap(begin, end);
+ }
+
+ ucontext_t* userContext = static_cast<ucontext_t*>(ucontext);
+#if CPU(PPC)
+ thread->suspendedMachineContext = *userContext->uc_mcontext.uc_regs;
#else
-UNUSED_PARAM(begin);
-UNUSED_PARAM(end);
+ thread->suspendedMachineContext = userContext->uc_mcontext;
#endif
-}
-
-#if OS(DARWIN)
-typedef mach_port_t PlatformThread;
-#elif OS(WINDOWS)
-typedef HANDLE PlatformThread;
-#elif USE(PTHREADS)
-typedef pthread_t PlatformThread;
-static const int SigThreadSuspendResume = SIGUSR2;
-#if defined(SA_RESTART)
-static void pthreadSignalHandlerSuspendResume(int)
-{
- sigset_t signalSet;
- sigemptyset(&signalSet);
- sigaddset(&signalSet, SigThreadSuspendResume);
- sigsuspend(&signalSet);
+ // Allow suspend caller to see that this thread is suspended.
+ // sem_post is an async-signal-safe function, which means we can call it from a signal handler.
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html#tag_02_04_03
+ //
+ // And sem_post issues a memory barrier, which ensures that suspendedMachineContext is saved correctly.
+ // http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_11
+ sem_post(&thread->semaphoreForSuspendResume);
+
+ // By the time we reach here, SigThreadSuspendResume is blocked in this handler (configured via sigaction's sa_mask).
+ // So, until sigsuspend is called, delivery of SigThreadSuspendResume to this thread is deferred, which ensures the handler is not executed recursively.
+ sigset_t blockedSignalSet;
+ sigfillset(&blockedSignalSet);
+ sigdelset(&blockedSignalSet, SigThreadSuspendResume);
+ sigsuspend(&blockedSignalSet);
+
+ // Allow resume caller to see that this thread is resumed.
+ sem_post(&thread->semaphoreForSuspendResume);
}
+#endif // USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
+
#endif
-#endif
-class MachineThreads::Thread {
- WTF_MAKE_FAST_ALLOCATED;
+using namespace WTF;
+
+namespace JSC {
+
+using Thread = MachineThreads::Thread;
+
+class ActiveMachineThreadsManager;
+static ActiveMachineThreadsManager& activeMachineThreadsManager();
+
+class ActiveMachineThreadsManager {
+ WTF_MAKE_NONCOPYABLE(ActiveMachineThreadsManager);
public:
- Thread(const PlatformThread& platThread, void* base)
- : platformThread(platThread)
- , stackBase(base)
+
+ class Locker {
+ public:
+ Locker(ActiveMachineThreadsManager& manager)
+ : m_locker(manager.m_lock)
+ {
+ }
+
+ private:
+ LockHolder m_locker;
+ };
+
+ void add(MachineThreads* machineThreads)
{
-#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN) && defined(SA_RESTART)
- // if we have SA_RESTART, enable SIGUSR2 debugging mechanism
- struct sigaction action;
- action.sa_handler = pthreadSignalHandlerSuspendResume;
- sigemptyset(&action.sa_mask);
- action.sa_flags = SA_RESTART;
- sigaction(SigThreadSuspendResume, &action, 0);
+ LockHolder managerLock(m_lock);
+ m_set.add(machineThreads);
+ }
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, SigThreadSuspendResume);
- pthread_sigmask(SIG_UNBLOCK, &mask, 0);
-#endif
+ void remove(MachineThreads* machineThreads)
+ {
+ LockHolder managerLock(m_lock);
+ auto recordedMachineThreads = m_set.take(machineThreads);
+ RELEASE_ASSERT(recordedMachineThreads == machineThreads);
+ }
+
+ bool contains(MachineThreads* machineThreads)
+ {
+ return m_set.contains(machineThreads);
}
- Thread* next;
- PlatformThread platformThread;
- void* stackBase;
+private:
+ typedef HashSet<MachineThreads*> MachineThreadsSet;
+
+ ActiveMachineThreadsManager() { }
+
+ Lock m_lock;
+ MachineThreadsSet m_set;
+
+ friend ActiveMachineThreadsManager& activeMachineThreadsManager();
};
+static ActiveMachineThreadsManager& activeMachineThreadsManager()
+{
+ static std::once_flag initializeManagerOnceFlag;
+ static ActiveMachineThreadsManager* manager = nullptr;
+
+ std::call_once(initializeManagerOnceFlag, [] {
+ manager = new ActiveMachineThreadsManager();
+ });
+ return *manager;
+}
+
+static inline PlatformThread getCurrentPlatformThread()
+{
+#if OS(DARWIN)
+ return pthread_mach_thread_np(pthread_self());
+#elif OS(WINDOWS)
+ return GetCurrentThreadId();
+#elif USE(PTHREADS)
+ return pthread_self();
+#endif
+}
+
MachineThreads::MachineThreads(Heap* heap)
: m_registeredThreads(0)
- , m_threadSpecific(0)
+ , m_threadSpecificForMachineThreads(0)
+ , m_threadSpecificForThread(0)
#if !ASSERT_DISABLED
, m_heap(heap)
#endif
{
UNUSED_PARAM(heap);
+ threadSpecificKeyCreate(&m_threadSpecificForMachineThreads, removeThread);
+ threadSpecificKeyCreate(&m_threadSpecificForThread, nullptr);
+ activeMachineThreadsManager().add(this);
}
MachineThreads::~MachineThreads()
{
- if (m_threadSpecific)
- threadSpecificKeyDelete(m_threadSpecific);
+ activeMachineThreadsManager().remove(this);
+ threadSpecificKeyDelete(m_threadSpecificForMachineThreads);
+ threadSpecificKeyDelete(m_threadSpecificForThread);
- MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
+ LockHolder registeredThreadsLock(m_registeredThreadsMutex);
for (Thread* t = m_registeredThreads; t;) {
Thread* next = t->next;
delete t;
@@ -156,171 +217,235 @@ MachineThreads::~MachineThreads()
}
}
-static inline PlatformThread getCurrentPlatformThread()
+Thread* MachineThreads::Thread::createForCurrentThread()
{
-#if OS(DARWIN)
- return pthread_mach_thread_np(pthread_self());
-#elif OS(WINDOWS)
- return GetCurrentThread();
-#elif USE(PTHREADS)
- return pthread_self();
-#endif
+ auto stackBounds = wtfThreadData().stack();
+ return new Thread(getCurrentPlatformThread(), stackBounds.origin(), stackBounds.end());
}
-static inline bool equalThread(const PlatformThread& first, const PlatformThread& second)
+bool MachineThreads::Thread::operator==(const PlatformThread& other) const
{
#if OS(DARWIN) || OS(WINDOWS)
- return first == second;
+ return platformThread == other;
#elif USE(PTHREADS)
- return !!pthread_equal(first, second);
+ return !!pthread_equal(platformThread, other);
#else
#error Need a way to compare threads on this platform
#endif
}
-void MachineThreads::makeUsableFromMultipleThreads()
+#ifndef NDEBUG
+static bool isThreadInList(Thread* listHead, Thread* target)
{
- if (m_threadSpecific)
- return;
+ for (Thread* thread = listHead; thread; thread = thread->next) {
+ if (thread == target)
+ return true;
+ }
- threadSpecificKeyCreate(&m_threadSpecific, removeThread);
+ return false;
}
+#endif
void MachineThreads::addCurrentThread()
{
- ASSERT(!m_heap->vm()->exclusiveThread || m_heap->vm()->exclusiveThread == currentThread());
+ ASSERT(!m_heap->vm()->hasExclusiveThread() || m_heap->vm()->exclusiveThread() == std::this_thread::get_id());
- if (!m_threadSpecific || threadSpecificGet(m_threadSpecific))
+ if (threadSpecificGet(m_threadSpecificForMachineThreads)) {
+#ifndef NDEBUG
+ LockHolder lock(m_registeredThreadsMutex);
+ ASSERT(threadSpecificGet(m_threadSpecificForMachineThreads) == this);
+ ASSERT(threadSpecificGet(m_threadSpecificForThread));
+ ASSERT(isThreadInList(m_registeredThreads, static_cast<Thread*>(threadSpecificGet(m_threadSpecificForThread))));
+#endif
return;
+ }
- threadSpecificSet(m_threadSpecific, this);
- Thread* thread = new Thread(getCurrentPlatformThread(), wtfThreadData().stack().origin());
+ Thread* thread = Thread::createForCurrentThread();
+ threadSpecificSet(m_threadSpecificForMachineThreads, this);
+ threadSpecificSet(m_threadSpecificForThread, thread);
- MutexLocker lock(m_registeredThreadsMutex);
+ LockHolder lock(m_registeredThreadsMutex);
thread->next = m_registeredThreads;
m_registeredThreads = thread;
}
-void MachineThreads::removeThread(void* p)
+Thread* MachineThreads::machineThreadForCurrentThread()
{
- if (p)
- static_cast<MachineThreads*>(p)->removeCurrentThread();
+ Thread* result = static_cast<Thread*>(threadSpecificGet(m_threadSpecificForThread));
+ RELEASE_ASSERT(result);
+#ifndef NDEBUG
+ LockHolder lock(m_registeredThreadsMutex);
+ ASSERT(isThreadInList(m_registeredThreads, result));
+#endif
+
+ return result;
}
-void MachineThreads::removeCurrentThread()
+void MachineThreads::removeThread(void* p)
{
- PlatformThread currentPlatformThread = getCurrentPlatformThread();
-
- MutexLocker lock(m_registeredThreadsMutex);
+ auto& manager = activeMachineThreadsManager();
+ ActiveMachineThreadsManager::Locker lock(manager);
+ auto machineThreads = static_cast<MachineThreads*>(p);
+ if (manager.contains(machineThreads)) {
+ // There's a chance that the MachineThreads registry that this thread
+ // was registered with was already destructed, and another one happened
+ // to be instantiated at the same address. Hence, this thread may or
+ // may not be found in this MachineThreads registry. We only need to
+ // do a removal if this thread is found in it.
+ machineThreads->removeThreadIfFound(getCurrentPlatformThread());
+ }
+}
- if (equalThread(currentPlatformThread, m_registeredThreads->platformThread)) {
- Thread* t = m_registeredThreads;
+template<typename PlatformThread>
+void MachineThreads::removeThreadIfFound(PlatformThread platformThread)
+{
+ LockHolder lock(m_registeredThreadsMutex);
+ Thread* t = m_registeredThreads;
+ if (*t == platformThread) {
m_registeredThreads = m_registeredThreads->next;
delete t;
} else {
Thread* last = m_registeredThreads;
- Thread* t;
for (t = m_registeredThreads->next; t; t = t->next) {
- if (equalThread(t->platformThread, currentPlatformThread)) {
+ if (*t == platformThread) {
last->next = t->next;
break;
}
last = t;
}
- ASSERT(t); // If t is NULL, we never found ourselves in the list.
delete t;
}
}
-#if COMPILER(GCC)
-#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
-#else
-#define REGISTER_BUFFER_ALIGNMENT
+SUPPRESS_ASAN
+void MachineThreads::gatherFromCurrentThread(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks, void* stackOrigin, void* stackTop, RegisterState& calleeSavedRegisters)
+{
+ void* registersBegin = &calleeSavedRegisters;
+ void* registersEnd = reinterpret_cast<void*>(roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(&calleeSavedRegisters + 1)));
+ conservativeRoots.add(registersBegin, registersEnd, jitStubRoutines, codeBlocks);
+
+ conservativeRoots.add(stackTop, stackOrigin, jitStubRoutines, codeBlocks);
+}
+
+MachineThreads::Thread::Thread(const PlatformThread& platThread, void* base, void* end)
+ : platformThread(platThread)
+ , stackBase(base)
+ , stackEnd(end)
+{
+#if OS(WINDOWS)
+ ASSERT(platformThread == GetCurrentThreadId());
+ bool isSuccessful =
+ DuplicateHandle(GetCurrentProcess(), GetCurrentThread(), GetCurrentProcess(),
+ &platformThreadHandle, 0, FALSE, DUPLICATE_SAME_ACCESS);
+ RELEASE_ASSERT(isSuccessful);
+#elif USE(PTHREADS) && !OS(DARWIN)
+ threadLocalCurrentThread.store(this);
+
+ // Signal handlers are process global configuration.
+ static std::once_flag initializeSignalHandler;
+ std::call_once(initializeSignalHandler, [] {
+ // Intentionally block SigThreadSuspendResume in the handler.
+ // SigThreadSuspendResume will be allowed in the handler by sigsuspend.
+ struct sigaction action;
+ sigemptyset(&action.sa_mask);
+ sigaddset(&action.sa_mask, SigThreadSuspendResume);
+
+ action.sa_sigaction = pthreadSignalHandlerSuspendResume;
+ action.sa_flags = SA_RESTART | SA_SIGINFO;
+ sigaction(SigThreadSuspendResume, &action, 0);
+ });
+
+ sigset_t mask;
+ sigemptyset(&mask);
+ sigaddset(&mask, SigThreadSuspendResume);
+ pthread_sigmask(SIG_UNBLOCK, &mask, 0);
+
+ sem_init(&semaphoreForSuspendResume, /* Only available in this process. */ 0, /* Initial value for the semaphore. */ 0);
#endif
+}
-void MachineThreads::gatherFromCurrentThread(ConservativeRoots& conservativeRoots, void* stackCurrent)
+MachineThreads::Thread::~Thread()
{
- // setjmp forces volatile registers onto the stack
- jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
-#if COMPILER(MSVC)
-#pragma warning(push)
-#pragma warning(disable: 4611)
-#endif
- setjmp(registers);
-#if COMPILER(MSVC)
-#pragma warning(pop)
-#endif
-
- void* registersBegin = &registers;
- void* registersEnd = reinterpret_cast<void*>(roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(&registers + 1)));
- swapIfBackwards(registersBegin, registersEnd);
- conservativeRoots.add(registersBegin, registersEnd);
-
- void* stackBegin = stackCurrent;
- void* stackEnd = wtfThreadData().stack().origin();
- swapIfBackwards(stackBegin, stackEnd);
- conservativeRoots.add(stackBegin, stackEnd);
+#if OS(WINDOWS)
+ CloseHandle(platformThreadHandle);
+#elif USE(PTHREADS) && !OS(DARWIN)
+ sem_destroy(&semaphoreForSuspendResume);
+#endif
}
-static inline void suspendThread(const PlatformThread& platformThread)
+bool MachineThreads::Thread::suspend()
{
#if OS(DARWIN)
- thread_suspend(platformThread);
+ kern_return_t result = thread_suspend(platformThread);
+ return result == KERN_SUCCESS;
#elif OS(WINDOWS)
- SuspendThread(platformThread);
+ bool threadIsSuspended = (SuspendThread(platformThreadHandle) != (DWORD)-1);
+ ASSERT(threadIsSuspended);
+ return threadIsSuspended;
#elif USE(PTHREADS)
- pthread_kill(platformThread, SigThreadSuspendResume);
+ ASSERT_WITH_MESSAGE(getCurrentPlatformThread() != platformThread, "Currently we don't support suspending the current thread itself.");
+ {
+ // While suspending a thread, neither suspend nor resume may be executed from other threads;
+ // we use a global lock instead of a per-thread lock for this.
+ // Consider the case of two threads, A and B, where A attempts to suspend B while B attempts to suspend A.
+ // Both send their signals, and later both signals are delivered; in that case both threads end up suspended.
+ LockHolder lock(globalSignalLock);
+ if (!suspendCount) {
+ // Ideally we would use pthread_sigqueue, which lets us pass an argument to the signal handler,
+ // but it is only available on a few platforms, such as Linux.
+ // Instead, we pass the Thread* to the signal handler via thread-local storage.
+ if (pthread_kill(platformThread, SigThreadSuspendResume) == ESRCH)
+ return false;
+ sem_wait(&semaphoreForSuspendResume);
+ // Release barrier ensures that this operation is always executed after all the above processing is done.
+ suspended.store(true, std::memory_order_release);
+ }
+ ++suspendCount;
+ }
+ return true;
#else
#error Need a way to suspend threads on this platform
#endif
}
-static inline void resumeThread(const PlatformThread& platformThread)
+void MachineThreads::Thread::resume()
{
#if OS(DARWIN)
thread_resume(platformThread);
#elif OS(WINDOWS)
- ResumeThread(platformThread);
+ ResumeThread(platformThreadHandle);
#elif USE(PTHREADS)
- pthread_kill(platformThread, SigThreadSuspendResume);
+ {
+ // While resuming a thread, neither suspend nor resume may be executed from other threads.
+ LockHolder lock(globalSignalLock);
+ if (suspendCount == 1) {
+ // When the handler unblocks SigThreadSuspendResume via sigsuspend and SigThreadSuspendResume is actually issued,
+ // the signal handler itself will be invoked once again.
+ // There are several ways to distinguish a suspend handler invocation from a resume one:
+ // 1. Use different signal numbers, and check the signal number in the handler.
+ // 2. Pass an argument that distinguishes suspend from resume to the handler (possible where pthread_sigqueue is available).
+ // 3. Use thread-local storage with atomic variables in the signal handler.
+ // In this implementation, we take (3); the suspended flag is used to distinguish the two.
+ if (pthread_kill(platformThread, SigThreadSuspendResume) == ESRCH)
+ return;
+ sem_wait(&semaphoreForSuspendResume);
+ // Release barrier ensures that this operation is always executed after all the above processing is done.
+ suspended.store(false, std::memory_order_release);
+ }
+ --suspendCount;
+ }
#else
#error Need a way to resume threads on this platform
#endif
}
-typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit
-
-#if OS(DARWIN)
-
-#if CPU(X86)
-typedef i386_thread_state_t PlatformThreadRegisters;
-#elif CPU(X86_64)
-typedef x86_thread_state64_t PlatformThreadRegisters;
-#elif CPU(PPC)
-typedef ppc_thread_state_t PlatformThreadRegisters;
-#elif CPU(PPC64)
-typedef ppc_thread_state64_t PlatformThreadRegisters;
-#elif CPU(ARM)
-typedef arm_thread_state_t PlatformThreadRegisters;
-#else
-#error Unknown Architecture
-#endif
-
-#elif OS(WINDOWS)
-typedef CONTEXT PlatformThreadRegisters;
-#elif OS(QNX)
-typedef struct _debug_thread_info PlatformThreadRegisters;
-#elif USE(PTHREADS)
-typedef pthread_attr_t PlatformThreadRegisters;
-#else
-#error Need a thread register struct for this platform
-#endif
-
-static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
+size_t MachineThreads::Thread::getRegisters(Thread::Registers& registers)
{
+ Thread::Registers::PlatformRegisters& regs = registers.regs;
#if OS(DARWIN)
-
#if CPU(X86)
unsigned user_count = sizeof(regs)/sizeof(int);
thread_state_flavor_t flavor = i386_THREAD_STATE;
@@ -336,6 +461,9 @@ static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, P
#elif CPU(ARM)
unsigned user_count = ARM_THREAD_STATE_COUNT;
thread_state_flavor_t flavor = ARM_THREAD_STATE;
+#elif CPU(ARM64)
+ unsigned user_count = ARM_THREAD_STATE64_COUNT;
+ thread_state_flavor_t flavor = ARM_THREAD_STATE64;
#else
#error Unknown Architecture
#endif
@@ -346,45 +474,32 @@ static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, P
"JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
CRASH();
}
- return user_count * sizeof(usword_t);
+ return user_count * sizeof(uintptr_t);
// end OS(DARWIN)
#elif OS(WINDOWS)
regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
- GetThreadContext(platformThread, &regs);
+ GetThreadContext(platformThreadHandle, &regs);
return sizeof(CONTEXT);
-#elif OS(QNX)
- memset(&regs, 0, sizeof(regs));
- regs.tid = platformThread;
- // FIXME: If we find this hurts performance, we can consider caching the fd and keeping it open.
- int fd = open("/proc/self/as", O_RDONLY);
- if (fd == -1) {
- LOG_ERROR("Unable to open /proc/self/as (errno: %d)", errno);
- CRASH();
- }
- int rc = devctl(fd, DCMD_PROC_TIDSTATUS, &regs, sizeof(regs), 0);
- if (rc != EOK) {
- LOG_ERROR("devctl(DCMD_PROC_TIDSTATUS) failed (error: %d)", rc);
- CRASH();
- }
- close(fd);
- return sizeof(struct _debug_thread_info);
#elif USE(PTHREADS)
- pthread_attr_init(&regs);
+ pthread_attr_init(&regs.attribute);
#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
+#if !OS(OPENBSD)
// e.g. on FreeBSD 5.4, neundorf@kde.org
- pthread_attr_get_np(platformThread, &regs);
+ pthread_attr_get_np(platformThread, &regs.attribute);
+#endif
#else
// FIXME: this function is non-portable; other POSIX systems may have different np alternatives
- pthread_getattr_np(platformThread, &regs);
+ pthread_getattr_np(platformThread, &regs.attribute);
#endif
+ regs.machineContext = suspendedMachineContext;
return 0;
#else
#error Need a way to get thread registers on this platform
#endif
}
-static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
+void* MachineThreads::Thread::Registers::stackPointer() const
{
#if OS(DARWIN)
@@ -398,6 +513,8 @@ static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
return reinterpret_cast<void*>(regs.__r1);
#elif CPU(ARM)
return reinterpret_cast<void*>(regs.__sp);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif
@@ -431,81 +548,509 @@ static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
#error Unknown Architecture
#endif
-#elif OS(QNX)
- return reinterpret_cast<void*>((uintptr_t) regs.sp);
-
#elif USE(PTHREADS)
+
+#if OS(FREEBSD) && ENABLE(JIT)
+
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_esp);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_rsp);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_SP]);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_sp);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_regs[29]);
+#else
+#error Unknown Architecture
+#endif
+
+#elif defined(__GLIBC__) && ENABLE(JIT)
+
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_ESP]);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_RSP]);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_sp);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.sp);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[29]);
+#else
+#error Unknown Architecture
+#endif
+
+#else
void* stackBase = 0;
size_t stackSize = 0;
- int rc = pthread_attr_getstack(&regs, &stackBase, &stackSize);
+#if OS(OPENBSD)
+ stack_t ss;
+ int rc = pthread_stackseg_np(pthread_self(), &ss);
+ stackBase = (void*)((size_t) ss.ss_sp - ss.ss_size);
+ stackSize = ss.ss_size;
+#else
+ int rc = pthread_attr_getstack(&regs.attribute, &stackBase, &stackSize);
+#endif
(void)rc; // FIXME: Deal with error code somehow? Seems fatal.
ASSERT(stackBase);
return static_cast<char*>(stackBase) + stackSize;
+#endif
+
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}
-static void freePlatformThreadRegisters(PlatformThreadRegisters& regs)
+#if ENABLE(SAMPLING_PROFILER)
+void* MachineThreads::Thread::Registers::framePointer() const
{
-#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN) && !OS(QNX)
- pthread_attr_destroy(&regs);
+#if OS(DARWIN)
+
+#if __DARWIN_UNIX03
+
+#if CPU(X86)
+ return reinterpret_cast<void*>(regs.__ebp);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>(regs.__rbp);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>(regs.__r[11]);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>(regs.__x[29]);
+#else
+#error Unknown Architecture
+#endif
+
+#else // !__DARWIN_UNIX03
+
+#if CPU(X86)
+ return reinterpret_cast<void*>(regs.esp);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>(regs.rsp);
+#else
+#error Unknown Architecture
+#endif
+
+#endif // __DARWIN_UNIX03
+
+// end OS(DARWIN)
+#elif OS(WINDOWS)
+
+#if CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.R11);
+#elif CPU(MIPS)
+#error Don't know what to do with MIPS. Do we even need this?
+#elif CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.Ebp);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.Rbp);
+#else
+#error Unknown Architecture
+#endif
+
+#elif OS(FREEBSD)
+
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_ebp);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_rbp);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_FP]);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_x[29]);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_regs[30]);
+#else
+#error Unknown Architecture
+#endif
+
+#elif defined(__GLIBC__)
+
+// The following sequence depends on glibc's sys/ucontext.h.
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_EBP]);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_RBP]);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_fp);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.regs[29]);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[30]);
+#else
+#error Unknown Architecture
+#endif
+
+#else
+#error Need a way to get the frame pointer for another thread on this platform
+#endif
+}
+
+void* MachineThreads::Thread::Registers::instructionPointer() const
+{
+#if OS(DARWIN)
+
+#if __DARWIN_UNIX03
+
+#if CPU(X86)
+ return reinterpret_cast<void*>(regs.__eip);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>(regs.__rip);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>(regs.__pc);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>(regs.__pc);
+#else
+#error Unknown Architecture
+#endif
+
+#else // !__DARWIN_UNIX03
+#if CPU(X86)
+ return reinterpret_cast<void*>(regs.eip);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>(regs.rip);
+#else
+#error Unknown Architecture
+#endif
+
+#endif // __DARWIN_UNIX03
+
+// end OS(DARWIN)
+#elif OS(WINDOWS)
+
+#if CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.Pc);
+#elif CPU(MIPS)
+#error Don't know what to do with MIPS. Do we even need this?
+#elif CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.Eip);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.Rip);
+#else
+#error Unknown Architecture
+#endif
+
+#elif OS(FREEBSD)
+
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_eip);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_rip);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_PC]);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_elr);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_pc);
+#else
+#error Unknown Architecture
+#endif
+
+#elif defined(__GLIBC__)
+
+// The following sequence depends on glibc's sys/ucontext.h.
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_EIP]);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_RIP]);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_pc);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.pc);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.pc);
+#else
+#error Unknown Architecture
+#endif
+
+#else
+#error Need a way to get the instruction pointer for another thread on this platform
+#endif
+}
+void* MachineThreads::Thread::Registers::llintPC() const
+{
+ // LLInt uses regT4 as PC.
+#if OS(DARWIN)
+
+#if __DARWIN_UNIX03
+
+#if CPU(X86)
+ static_assert(LLInt::LLIntPC == X86Registers::esi, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>(regs.__esi);
+#elif CPU(X86_64)
+ static_assert(LLInt::LLIntPC == X86Registers::r8, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>(regs.__r8);
+#elif CPU(ARM)
+ static_assert(LLInt::LLIntPC == ARMRegisters::r8, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>(regs.__r[8]);
+#elif CPU(ARM64)
+ static_assert(LLInt::LLIntPC == ARM64Registers::x4, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>(regs.__x[4]);
+#else
+#error Unknown Architecture
+#endif
+
+#else // !__DARWIN_UNIX03
+#if CPU(X86)
+ static_assert(LLInt::LLIntPC == X86Registers::esi, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>(regs.esi);
+#elif CPU(X86_64)
+ static_assert(LLInt::LLIntPC == X86Registers::r8, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>(regs.r8);
+#else
+#error Unknown Architecture
+#endif
+
+#endif // __DARWIN_UNIX03
+
+// end OS(DARWIN)
+#elif OS(WINDOWS)
+
+#if CPU(ARM)
+ static_assert(LLInt::LLIntPC == ARMRegisters::r8, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>((uintptr_t) regs.R8);
+#elif CPU(MIPS)
+#error Don't know what to do with MIPS. Do we even need this?
+#elif CPU(X86)
+ static_assert(LLInt::LLIntPC == X86Registers::esi, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>((uintptr_t) regs.Esi);
+#elif CPU(X86_64)
+ static_assert(LLInt::LLIntPC == X86Registers::r10, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>((uintptr_t) regs.R10);
+#else
+#error Unknown Architecture
+#endif
+
+#elif OS(FREEBSD)
+
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_esi);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_r8);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_R8]);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_x[4]);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_regs[12]);
+#else
+#error Unknown Architecture
+#endif
+
+#elif defined(__GLIBC__)
+
+// The following sequence depends on glibc's sys/ucontext.h.
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_ESI]);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_R8]);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_r8);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.regs[4]);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[12]);
+#else
+#error Unknown Architecture
+#endif
+
+#else
+#error Need a way to get the LLIntPC for another thread on this platform
+#endif
+}
+#endif // ENABLE(SAMPLING_PROFILER)
+
+void MachineThreads::Thread::freeRegisters(Thread::Registers& registers)
+{
+ Thread::Registers::PlatformRegisters& regs = registers.regs;
+#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
+ pthread_attr_destroy(&regs.attribute);
#else
UNUSED_PARAM(regs);
#endif
}
-void MachineThreads::gatherFromOtherThread(ConservativeRoots& conservativeRoots, Thread* thread)
+static inline int osRedZoneAdjustment()
+{
+ int redZoneAdjustment = 0;
+#if !OS(WINDOWS)
+#if CPU(X86_64)
+ // See http://people.freebsd.org/~obrien/amd64-elf-abi.pdf Section 3.2.2.
+ redZoneAdjustment = -128;
+#elif CPU(ARM64)
+ // See https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html#//apple_ref/doc/uid/TP40013702-SW7
+ redZoneAdjustment = -128;
+#endif
+#endif // !OS(WINDOWS)
+ return redZoneAdjustment;
+}
+
+std::pair<void*, size_t> MachineThreads::Thread::captureStack(void* stackTop)
{
- PlatformThreadRegisters regs;
- size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);
+ char* begin = reinterpret_cast_ptr<char*>(stackBase);
+ char* end = bitwise_cast<char*>(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(stackTop)));
+ ASSERT(begin >= end);
- conservativeRoots.add(static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));
+ char* endWithRedZone = end + osRedZoneAdjustment();
+ ASSERT(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(endWithRedZone)) == reinterpret_cast<uintptr_t>(endWithRedZone));
- void* stackPointer = otherThreadStackPointer(regs);
- void* stackBase = thread->stackBase;
- swapIfBackwards(stackPointer, stackBase);
- conservativeRoots.add(stackPointer, stackBase);
+ if (endWithRedZone < stackEnd)
+ endWithRedZone = reinterpret_cast_ptr<char*>(stackEnd);
- freePlatformThreadRegisters(regs);
+ std::swap(begin, endWithRedZone);
+ return std::make_pair(begin, endWithRedZone - begin);
}
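For concreteness, here is the arithmetic captureStack() performs for an x86_64 thread, with invented addresses; the 128-byte figure comes from osRedZoneAdjustment() above.

// Purely illustrative numbers (x86_64, red zone = 128 bytes, stack grows down):
//   stackBase = 0x7fff00010000   // highest address of the thread's stack
//   stackTop  = 0x7fff00008008   // the suspended thread's stack pointer (8-byte aligned)
//   stackEnd  = 0x7fff00000000   // lowest address of the thread's stack
//
//   begin          = stackBase                    = 0x7fff00010000
//   end            = roundUp(stackTop, 8)         = 0x7fff00008008
//   endWithRedZone = end + osRedZoneAdjustment()  = 0x7fff00007f88   // 128 bytes lower
//
// After std::swap(begin, endWithRedZone), the returned pair is
//   { 0x7fff00007f88, 0x7fff00010000 - 0x7fff00007f88 } = { low address, 0x8078 bytes },
// so the conservative scan also covers the red zone below the stack pointer, where leaf
// functions may keep live locals without adjusting the stack pointer.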
-void MachineThreads::gatherConservativeRoots(ConservativeRoots& conservativeRoots, void* stackCurrent)
+SUPPRESS_ASAN
+static void copyMemory(void* dst, const void* src, size_t size)
{
- gatherFromCurrentThread(conservativeRoots, stackCurrent);
+ size_t dstAsSize = reinterpret_cast<size_t>(dst);
+ size_t srcAsSize = reinterpret_cast<size_t>(src);
+ RELEASE_ASSERT(dstAsSize == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(dstAsSize));
+ RELEASE_ASSERT(srcAsSize == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(srcAsSize));
+ RELEASE_ASSERT(size == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(size));
+
+ intptr_t* dstPtr = reinterpret_cast<intptr_t*>(dst);
+ const intptr_t* srcPtr = reinterpret_cast<const intptr_t*>(src);
+ size /= sizeof(intptr_t);
+ while (size--)
+ *dstPtr++ = *srcPtr++;
+}
+
+// This function must not call malloc(), free(), or any other function that might
+// acquire a lock. Since 'thread' is suspended, trying to acquire a lock
+// will deadlock if 'thread' holds that lock.
+// This function, specifically the memory copying, was causing problems with Address Sanitizer in
+// apps. Since we cannot blacklist the system memcpy, we must use our own naive implementation,
+// copyMemory, for ASan to work on either instrumented or non-instrumented builds. This is not a
+// significant performance loss as tryCopyOtherThreadStack is only called as part of an O(heapsize)
+// operation. As the heap is generally much larger than the stack the performance hit is minimal.
+// See: https://bugs.webkit.org/show_bug.cgi?id=146297
+void MachineThreads::tryCopyOtherThreadStack(Thread* thread, void* buffer, size_t capacity, size_t* size)
+{
+ Thread::Registers registers;
+ size_t registersSize = thread->getRegisters(registers);
+ std::pair<void*, size_t> stack = thread->captureStack(registers.stackPointer());
- if (m_threadSpecific) {
- PlatformThread currentPlatformThread = getCurrentPlatformThread();
+ bool canCopy = *size + registersSize + stack.second <= capacity;
- MutexLocker lock(m_registeredThreadsMutex);
+ if (canCopy)
+ copyMemory(static_cast<char*>(buffer) + *size, &registers, registersSize);
+ *size += registersSize;
-#ifndef NDEBUG
- // Forbid malloc during the gather phase. The gather phase suspends
- // threads, so a malloc during gather would risk a deadlock with a
- // thread that had been suspended while holding the malloc lock.
- fastMallocForbid();
-#endif
- for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
- if (!equalThread(thread->platformThread, currentPlatformThread))
- suspendThread(thread->platformThread);
- }
+ if (canCopy)
+ copyMemory(static_cast<char*>(buffer) + *size, stack.first, stack.second);
+ *size += stack.second;
- // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
- // and since this is a shared heap, they are real locks.
- for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
- if (!equalThread(thread->platformThread, currentPlatformThread))
- gatherFromOtherThread(conservativeRoots, thread);
- }
+ thread->freeRegisters(registers);
+}
- for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
- if (!equalThread(thread->platformThread, currentPlatformThread))
- resumeThread(thread->platformThread);
- }
+bool MachineThreads::tryCopyOtherThreadStacks(LockHolder&, void* buffer, size_t capacity, size_t* size)
+{
+ // Prevent two VMs from suspending each other's threads at the same time,
+ // which can cause deadlock: <rdar://problem/20300842>.
+ static StaticLock mutex;
+ std::lock_guard<StaticLock> lock(mutex);
-#ifndef NDEBUG
- fastMallocAllow();
+ *size = 0;
+
+ PlatformThread currentPlatformThread = getCurrentPlatformThread();
+ int numberOfThreads = 0; // Using 0 to denote that we haven't counted the number of threads yet.
+ int index = 1;
+ Thread* threadsToBeDeleted = nullptr;
+
+ Thread* previousThread = nullptr;
+ for (Thread* thread = m_registeredThreads; thread; index++) {
+ if (*thread != currentPlatformThread) {
+ bool success = thread->suspend();
+#if OS(DARWIN)
+ if (!success) {
+ if (!numberOfThreads) {
+ for (Thread* countedThread = m_registeredThreads; countedThread; countedThread = countedThread->next)
+ numberOfThreads++;
+ }
+
+ // Re-do the suspension to get the actual failure result for logging.
+ kern_return_t error = thread_suspend(thread->platformThread);
+ ASSERT(error != KERN_SUCCESS);
+
+ WTFReportError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
+ "JavaScript garbage collection encountered an invalid thread (err 0x%x): Thread [%d/%d: %p] platformThread %p.",
+ error, index, numberOfThreads, thread, reinterpret_cast<void*>(thread->platformThread));
+
+ // Put the invalid thread on the threadsToBeDeleted list.
+ // We can't just delete it here because we have suspended other
+ // threads, and they may still be holding the C heap lock which
+ // we need for deleting the invalid thread. Hence, we need to
+ // defer the deletion till after we have resumed all threads.
+ Thread* nextThread = thread->next;
+ thread->next = threadsToBeDeleted;
+ threadsToBeDeleted = thread;
+
+ if (previousThread)
+ previousThread->next = nextThread;
+ else
+ m_registeredThreads = nextThread;
+ thread = nextThread;
+ continue;
+ }
+#else
+ UNUSED_PARAM(numberOfThreads);
+ UNUSED_PARAM(previousThread);
+ ASSERT_UNUSED(success, success);
#endif
+ }
+ previousThread = thread;
+ thread = thread->next;
+ }
+
+ for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
+ if (*thread != currentPlatformThread)
+ tryCopyOtherThreadStack(thread, buffer, capacity, size);
}
+
+ for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
+ if (*thread != currentPlatformThread)
+ thread->resume();
+ }
+
+ for (Thread* thread = threadsToBeDeleted; thread; ) {
+ Thread* nextThread = thread->next;
+ delete thread;
+ thread = nextThread;
+ }
+
+ return *size <= capacity;
+}
+
+static void growBuffer(size_t size, void** buffer, size_t* capacity)
+{
+ if (*buffer)
+ fastFree(*buffer);
+
+ *capacity = WTF::roundUpToMultipleOf(WTF::pageSize(), size * 2);
+ *buffer = fastMalloc(*capacity);
+}
+
+void MachineThreads::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks, void* stackOrigin, void* stackTop, RegisterState& calleeSavedRegisters)
+{
+ gatherFromCurrentThread(conservativeRoots, jitStubRoutines, codeBlocks, stackOrigin, stackTop, calleeSavedRegisters);
+
+ size_t size;
+ size_t capacity = 0;
+ void* buffer = nullptr;
+ LockHolder lock(m_registeredThreadsMutex);
+ while (!tryCopyOtherThreadStacks(lock, buffer, capacity, &size))
+ growBuffer(size, &buffer, &capacity);
+
+ if (!buffer)
+ return;
+
+ conservativeRoots.add(buffer, static_cast<char*>(buffer) + size, jitStubRoutines, codeBlocks);
+ fastFree(buffer);
}
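To make the grow-and-retry protocol above concrete, one plausible sequence (the byte counts are invented for illustration):

// Pass 1: buffer == nullptr, capacity == 0.
//   tryCopyOtherThreadStacks() suspends the other threads, sums their register and stack
//   sizes into *size (say 300 KB), copies nothing because nothing fits in a 0-byte buffer,
//   resumes the threads, and returns false (300 KB > 0).
// growBuffer(300 KB, ...):
//   capacity = roundUpToMultipleOf(pageSize(), 2 * 300 KB) = 600 KB (614400 bytes, already
//   a multiple of a 4 KB page), buffer = fastMalloc(600 KB).
// Pass 2: everything fits, the registers and stacks are copied into the buffer, and the call
//   returns true, so the loop exits; the single buffer is then handed to
//   conservativeRoots.add() and freed.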
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/MachineStackMarker.h b/Source/JavaScriptCore/heap/MachineStackMarker.h
index cece29cd3..a16f0dae1 100644
--- a/Source/JavaScriptCore/heap/MachineStackMarker.h
+++ b/Source/JavaScriptCore/heap/MachineStackMarker.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
* Copyright (C) 2001 Peter Kelly (pmk@post.com)
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2015 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -22,44 +22,169 @@
#ifndef MachineThreads_h
#define MachineThreads_h
+#include <setjmp.h>
+#include <wtf/Lock.h>
#include <wtf/Noncopyable.h>
#include <wtf/ThreadSpecific.h>
-#include <wtf/ThreadingPrimitives.h>
+
+#if OS(DARWIN)
+#include <mach/thread_act.h>
+#endif
+
+#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
+#include <semaphore.h>
+#include <signal.h>
+// Using signal.h didn't make mcontext_t and ucontext_t available on FreeBSD.
+// This bug has been fixed in FreeBSD 11.0-CURRENT, so this workaround can be
+// removed after FreeBSD 10.x goes EOL.
+// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=207079
+#if OS(FREEBSD)
+#include <ucontext.h>
+#endif
+#endif
+
+#if OS(DARWIN)
+typedef mach_port_t PlatformThread;
+#elif OS(WINDOWS)
+typedef DWORD PlatformThread;
+#elif USE(PTHREADS)
+typedef pthread_t PlatformThread;
+#endif // OS(DARWIN)
namespace JSC {
- class ConservativeRoots;
- class Heap;
+class CodeBlockSet;
+class ConservativeRoots;
+class Heap;
+class JITStubRoutineSet;
+
+class MachineThreads {
+ WTF_MAKE_NONCOPYABLE(MachineThreads);
+public:
+ typedef jmp_buf RegisterState;
+
+ MachineThreads(Heap*);
+ ~MachineThreads();
+
+ void gatherConservativeRoots(ConservativeRoots&, JITStubRoutineSet&, CodeBlockSet&, void* stackOrigin, void* stackTop, RegisterState& calleeSavedRegisters);
+
+ JS_EXPORT_PRIVATE void addCurrentThread(); // Only needs to be called by clients that can use the same heap from multiple threads.
+
+ class Thread {
+ WTF_MAKE_FAST_ALLOCATED;
+ Thread(const PlatformThread& platThread, void* base, void* end);
- class MachineThreads {
- WTF_MAKE_NONCOPYABLE(MachineThreads);
public:
- MachineThreads(Heap*);
- ~MachineThreads();
+ ~Thread();
- void gatherConservativeRoots(ConservativeRoots&, void* stackCurrent);
+ static Thread* createForCurrentThread();
- void makeUsableFromMultipleThreads();
- JS_EXPORT_PRIVATE void addCurrentThread(); // Only needs to be called by clients that can use the same heap from multiple threads.
+ struct Registers {
+ void* stackPointer() const;
+#if ENABLE(SAMPLING_PROFILER)
+ void* framePointer() const;
+ void* instructionPointer() const;
+ void* llintPC() const;
+#endif // ENABLE(SAMPLING_PROFILER)
+
+#if OS(DARWIN)
+#if CPU(X86)
+ typedef i386_thread_state_t PlatformRegisters;
+#elif CPU(X86_64)
+ typedef x86_thread_state64_t PlatformRegisters;
+#elif CPU(PPC)
+ typedef ppc_thread_state_t PlatformRegisters;
+#elif CPU(PPC64)
+ typedef ppc_thread_state64_t PlatformRegisters;
+#elif CPU(ARM)
+ typedef arm_thread_state_t PlatformRegisters;
+#elif CPU(ARM64)
+ typedef arm_thread_state64_t PlatformRegisters;
+#else
+#error Unknown Architecture
+#endif
+
+#elif OS(WINDOWS)
+ typedef CONTEXT PlatformRegisters;
+#elif USE(PTHREADS)
+ struct PlatformRegisters {
+ pthread_attr_t attribute;
+ mcontext_t machineContext;
+ };
+#else
+#error Need a thread register struct for this platform
+#endif
+
+ PlatformRegisters regs;
+ };
+
+ bool operator==(const PlatformThread& other) const;
+ bool operator!=(const PlatformThread& other) const { return !(*this == other); }
+
+ bool suspend();
+ void resume();
+ size_t getRegisters(Registers&);
+ void freeRegisters(Registers&);
+ std::pair<void*, size_t> captureStack(void* stackTop);
+
+ Thread* next;
+ PlatformThread platformThread;
+ void* stackBase;
+ void* stackEnd;
+#if OS(WINDOWS)
+ HANDLE platformThreadHandle;
+#elif USE(PTHREADS) && !OS(DARWIN)
+ sem_t semaphoreForSuspendResume;
+ mcontext_t suspendedMachineContext;
+ int suspendCount { 0 };
+ std::atomic<bool> suspended { false };
+#endif
+ };
+
+ Lock& getLock() { return m_registeredThreadsMutex; }
+ Thread* threadsListHead(const LockHolder&) const { ASSERT(m_registeredThreadsMutex.isLocked()); return m_registeredThreads; }
+ Thread* machineThreadForCurrentThread();
- private:
- void gatherFromCurrentThread(ConservativeRoots&, void* stackCurrent);
+private:
+ void gatherFromCurrentThread(ConservativeRoots&, JITStubRoutineSet&, CodeBlockSet&, void* stackOrigin, void* stackTop, RegisterState& calleeSavedRegisters);
- class Thread;
+ void tryCopyOtherThreadStack(Thread*, void*, size_t capacity, size_t*);
+ bool tryCopyOtherThreadStacks(LockHolder&, void*, size_t capacity, size_t*);
- static void removeThread(void*);
- void removeCurrentThread();
+ static void removeThread(void*);
- void gatherFromOtherThread(ConservativeRoots&, Thread*);
+ template<typename PlatformThread>
+ void removeThreadIfFound(PlatformThread);
- Mutex m_registeredThreadsMutex;
- Thread* m_registeredThreads;
- WTF::ThreadSpecificKey m_threadSpecific;
+ Lock m_registeredThreadsMutex;
+ Thread* m_registeredThreads;
+ WTF::ThreadSpecificKey m_threadSpecificForMachineThreads;
+ WTF::ThreadSpecificKey m_threadSpecificForThread;
#if !ASSERT_DISABLED
- Heap* m_heap;
+ Heap* m_heap;
#endif
- };
+};
} // namespace JSC
+#if COMPILER(GCC_OR_CLANG)
+#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
+#else
+#define REGISTER_BUFFER_ALIGNMENT
+#endif
+
+// ALLOCATE_AND_GET_REGISTER_STATE() is a macro so that it is always "inlined" even in debug builds.
+#if COMPILER(MSVC)
+#pragma warning(push)
+#pragma warning(disable: 4611)
+#define ALLOCATE_AND_GET_REGISTER_STATE(registers) \
+ MachineThreads::RegisterState registers REGISTER_BUFFER_ALIGNMENT; \
+ setjmp(registers)
+#pragma warning(pop)
+#else
+#define ALLOCATE_AND_GET_REGISTER_STATE(registers) \
+ MachineThreads::RegisterState registers REGISTER_BUFFER_ALIGNMENT; \
+ setjmp(registers)
+#endif
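As a usage illustration only (the real call site lives in the Heap's collection code, not in this header; the function name gatherStackRootsSketch, the m_machineThreads member, and the wtfThreadData().stack().origin() expression are assumptions of this sketch):

void Heap::gatherStackRootsSketch(ConservativeRoots& roots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks)
{
    // Assumption: stackOrigin is the highest address of the current thread's stack.
    void* stackOrigin = wtfThreadData().stack().origin();

    // setjmp() spills the callee-saved registers into 'registers', so pointers that
    // currently live only in registers are still visible to the conservative scan.
    ALLOCATE_AND_GET_REGISTER_STATE(registers);

    // Everything between this frame and stackOrigin is live stack for the current thread.
    void* stackTop = &registers;
    m_machineThreads.gatherConservativeRoots(roots, jitStubRoutines, codeBlocks, stackOrigin, stackTop, registers);
}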
+
#endif // MachineThreads_h
diff --git a/Source/JavaScriptCore/heap/MarkStack.cpp b/Source/JavaScriptCore/heap/MarkStack.cpp
index 39907c715..318183a80 100644
--- a/Source/JavaScriptCore/heap/MarkStack.cpp
+++ b/Source/JavaScriptCore/heap/MarkStack.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2011, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -25,69 +25,14 @@
#include "config.h"
#include "MarkStack.h"
-#include "MarkStackInlines.h"
-
-#include "ConservativeRoots.h"
-#include "CopiedSpace.h"
-#include "CopiedSpaceInlines.h"
-#include "Heap.h"
-#include "JSArray.h"
-#include "JSCell.h"
-#include "JSObject.h"
-
-#include "SlotVisitorInlines.h"
-#include "Structure.h"
-#include "WriteBarrier.h"
-#include <wtf/Atomics.h>
-#include <wtf/DataLog.h>
-#include <wtf/MainThread.h>
-namespace JSC {
-
-COMPILE_ASSERT(MarkStackSegment::blockSize == WeakBlock::blockSize, blockSizeMatch);
-
-MarkStackArray::MarkStackArray(BlockAllocator& blockAllocator)
- : m_blockAllocator(blockAllocator)
- , m_top(0)
- , m_numberOfSegments(0)
-{
- m_segments.push(MarkStackSegment::create(m_blockAllocator.allocate<MarkStackSegment>()));
- m_numberOfSegments++;
-}
+#include "JSCInlines.h"
-MarkStackArray::~MarkStackArray()
-{
- ASSERT(m_numberOfSegments == 1 && m_segments.size() == 1);
- m_blockAllocator.deallocate(MarkStackSegment::destroy(m_segments.removeHead()));
-}
-
-void MarkStackArray::expand()
-{
- ASSERT(m_segments.head()->m_top == s_segmentCapacity);
-
- MarkStackSegment* nextSegment = MarkStackSegment::create(m_blockAllocator.allocate<MarkStackSegment>());
- m_numberOfSegments++;
-
-#if !ASSERT_DISABLED
- nextSegment->m_top = 0;
-#endif
-
- m_segments.push(nextSegment);
- setTopForEmptySegment();
- validatePrevious();
-}
+namespace JSC {
-bool MarkStackArray::refill()
+MarkStackArray::MarkStackArray()
+ : GCSegmentedArray<const JSCell*>()
{
- validatePrevious();
- if (top())
- return true;
- m_blockAllocator.deallocate(MarkStackSegment::destroy(m_segments.removeHead()));
- ASSERT(m_numberOfSegments > 1);
- m_numberOfSegments--;
- setTopForFullSegment();
- validatePrevious();
- return true;
}
void MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
@@ -112,11 +57,11 @@ void MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
// Remove our head and the head of the other list before we start moving segments around.
// We'll add them back on once we're done donating.
- MarkStackSegment* myHead = m_segments.removeHead();
- MarkStackSegment* otherHead = other.m_segments.removeHead();
+ GCArraySegment<const JSCell*>* myHead = m_segments.removeHead();
+ GCArraySegment<const JSCell*>* otherHead = other.m_segments.removeHead();
while (segmentsToDonate--) {
- MarkStackSegment* current = m_segments.removeHead();
+ GCArraySegment<const JSCell*>* current = m_segments.removeHead();
ASSERT(current);
ASSERT(m_numberOfSegments > 1);
other.m_segments.push(current);
@@ -144,8 +89,8 @@ void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other, size_t idleThread
// If other has an entire segment, steal it and return.
if (other.m_numberOfSegments > 1) {
// Move the heads of the lists aside. We'll push them back on after.
- MarkStackSegment* otherHead = other.m_segments.removeHead();
- MarkStackSegment* myHead = m_segments.removeHead();
+ GCArraySegment<const JSCell*>* otherHead = other.m_segments.removeHead();
+ GCArraySegment<const JSCell*>* myHead = m_segments.removeHead();
ASSERT(other.m_segments.head()->m_top == s_segmentCapacity);
@@ -162,7 +107,8 @@ void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other, size_t idleThread
return;
}
- size_t numberOfCellsToSteal = (other.size() + idleThreadCount - 1) / idleThreadCount; // Round up to steal 1 / 1.
+ // Steal ceil(other.size() / idleThreadCount) things.
+ size_t numberOfCellsToSteal = (other.size() + idleThreadCount - 1) / idleThreadCount;
while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
append(other.removeLast());
}
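As a quick check of the ceiling division above (numbers invented): with 10 cells in the donor and 3 idle threads, each steal takes at most 4 cells.

// Worked example (invented numbers):
//   other.size() == 10, idleThreadCount == 3
//   numberOfCellsToSteal = (10 + 3 - 1) / 3 = 12 / 3 = 4   // == ceil(10 / 3)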
diff --git a/Source/JavaScriptCore/heap/MarkStack.h b/Source/JavaScriptCore/heap/MarkStack.h
index c97b6a735..04f19c62c 100644
--- a/Source/JavaScriptCore/heap/MarkStack.h
+++ b/Source/JavaScriptCore/heap/MarkStack.h
@@ -26,102 +26,18 @@
#ifndef MarkStack_h
#define MarkStack_h
-#if ENABLE(OBJECT_MARK_LOGGING)
-#define MARK_LOG_MESSAGE0(message) dataLogF(message)
-#define MARK_LOG_MESSAGE1(message, arg1) dataLogF(message, arg1)
-#define MARK_LOG_MESSAGE2(message, arg1, arg2) dataLogF(message, arg1, arg2)
-#define MARK_LOG_ROOT(visitor, rootName) \
- dataLogF("\n%s: ", rootName); \
- (visitor).resetChildCount()
-#define MARK_LOG_PARENT(visitor, parent) \
- dataLogF("\n%p (%s): ", parent, parent->className() ? parent->className() : "unknown"); \
- (visitor).resetChildCount()
-#define MARK_LOG_CHILD(visitor, child) \
- if ((visitor).childCount()) \
- dataLogFString(", "); \
- dataLogF("%p", child); \
- (visitor).incrementChildCount()
-#else
-#define MARK_LOG_MESSAGE0(message) do { } while (false)
-#define MARK_LOG_MESSAGE1(message, arg1) do { } while (false)
-#define MARK_LOG_MESSAGE2(message, arg1, arg2) do { } while (false)
-#define MARK_LOG_ROOT(visitor, rootName) do { } while (false)
-#define MARK_LOG_PARENT(visitor, parent) do { } while (false)
-#define MARK_LOG_CHILD(visitor, child) do { } while (false)
-#endif
-
-#include "HeapBlock.h"
-#include <wtf/StdLibExtras.h>
+#include "GCSegmentedArrayInlines.h"
namespace JSC {
-class BlockAllocator;
-class DeadBlock;
class JSCell;
-class MarkStackSegment : public HeapBlock<MarkStackSegment> {
+class MarkStackArray : public GCSegmentedArray<const JSCell*> {
public:
- MarkStackSegment(Region* region)
- : HeapBlock<MarkStackSegment>(region)
-#if !ASSERT_DISABLED
- , m_top(0)
-#endif
- {
- }
-
- static MarkStackSegment* create(DeadBlock*);
-
- const JSCell** data()
- {
- return bitwise_cast<const JSCell**>(this + 1);
- }
-
- static const size_t blockSize = 4 * KB;
-
-#if !ASSERT_DISABLED
- size_t m_top;
-#endif
-};
+ MarkStackArray();
-class MarkStackArray {
-public:
- MarkStackArray(BlockAllocator&);
- ~MarkStackArray();
-
- void append(const JSCell*);
-
- bool canRemoveLast();
- const JSCell* removeLast();
- bool refill();
-
void donateSomeCellsTo(MarkStackArray& other);
void stealSomeCellsFrom(MarkStackArray& other, size_t idleThreadCount);
-
- size_t size();
- bool isEmpty();
-
-private:
- template <size_t size> struct CapacityFromSize {
- static const size_t value = (size - sizeof(MarkStackSegment)) / sizeof(const JSCell*);
- };
-
- JS_EXPORT_PRIVATE void expand();
-
- size_t postIncTop();
- size_t preDecTop();
- void setTopForFullSegment();
- void setTopForEmptySegment();
- size_t top();
-
- void validatePrevious();
-
- DoublyLinkedList<MarkStackSegment> m_segments;
- BlockAllocator& m_blockAllocator;
-
- JS_EXPORT_PRIVATE static const size_t s_segmentCapacity = CapacityFromSize<MarkStackSegment::blockSize>::value;
- size_t m_top;
- size_t m_numberOfSegments;
-
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/MarkStackInlines.h b/Source/JavaScriptCore/heap/MarkStackInlines.h
deleted file mode 100644
index c577de602..000000000
--- a/Source/JavaScriptCore/heap/MarkStackInlines.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef MarkStackInlines_h
-#define MarkStackInlines_h
-
-#include "GCThreadSharedData.h"
-#include "MarkStack.h"
-
-namespace JSC {
-
-inline MarkStackSegment* MarkStackSegment::create(DeadBlock* block)
-{
- return new (NotNull, block) MarkStackSegment(block->region());
-}
-
-inline size_t MarkStackArray::postIncTop()
-{
- size_t result = m_top++;
- ASSERT(result == m_segments.head()->m_top++);
- return result;
-}
-
-inline size_t MarkStackArray::preDecTop()
-{
- size_t result = --m_top;
- ASSERT(result == --m_segments.head()->m_top);
- return result;
-}
-
-inline void MarkStackArray::setTopForFullSegment()
-{
- ASSERT(m_segments.head()->m_top == s_segmentCapacity);
- m_top = s_segmentCapacity;
-}
-
-inline void MarkStackArray::setTopForEmptySegment()
-{
- ASSERT(!m_segments.head()->m_top);
- m_top = 0;
-}
-
-inline size_t MarkStackArray::top()
-{
- ASSERT(m_top == m_segments.head()->m_top);
- return m_top;
-}
-
-#if ASSERT_DISABLED
-inline void MarkStackArray::validatePrevious() { }
-#else
-inline void MarkStackArray::validatePrevious()
-{
- unsigned count = 0;
- for (MarkStackSegment* current = m_segments.head(); current; current = current->next())
- count++;
- ASSERT(m_segments.size() == m_numberOfSegments);
-}
-#endif
-
-inline void MarkStackArray::append(const JSCell* cell)
-{
- if (m_top == s_segmentCapacity)
- expand();
- m_segments.head()->data()[postIncTop()] = cell;
-}
-
-inline bool MarkStackArray::canRemoveLast()
-{
- return !!m_top;
-}
-
-inline const JSCell* MarkStackArray::removeLast()
-{
- return m_segments.head()->data()[preDecTop()];
-}
-
-inline bool MarkStackArray::isEmpty()
-{
- if (m_top)
- return false;
- if (m_segments.head()->next()) {
- ASSERT(m_segments.head()->next()->m_top == s_segmentCapacity);
- return false;
- }
- return true;
-}
-
-inline size_t MarkStackArray::size()
-{
- return m_top + s_segmentCapacity * (m_numberOfSegments - 1);
-}
-
-} // namespace JSC
-
-#endif // MarkStackInlines_h
-
diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.cpp b/Source/JavaScriptCore/heap/MarkedAllocator.cpp
index cbdbfd532..9e3f8bae8 100644
--- a/Source/JavaScriptCore/heap/MarkedAllocator.cpp
+++ b/Source/JavaScriptCore/heap/MarkedAllocator.cpp
@@ -1,18 +1,44 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#include "config.h"
#include "MarkedAllocator.h"
#include "GCActivityCallback.h"
#include "Heap.h"
#include "IncrementalSweeper.h"
+#include "JSCInlines.h"
#include "VM.h"
#include <wtf/CurrentTime.h>
namespace JSC {
-bool MarkedAllocator::isPagedOut(double deadline)
+static bool isListPagedOut(double deadline, DoublyLinkedList<MarkedBlock>& list)
{
unsigned itersSinceLastTimeCheck = 0;
- MarkedBlock* block = m_blockList.head();
+ MarkedBlock* block = list.head();
while (block) {
block = block->next();
++itersSinceLastTimeCheck;
@@ -23,59 +49,104 @@ bool MarkedAllocator::isPagedOut(double deadline)
itersSinceLastTimeCheck = 0;
}
}
+ return false;
+}
+bool MarkedAllocator::isPagedOut(double deadline)
+{
+ if (isListPagedOut(deadline, m_blockList))
+ return true;
return false;
}
inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
{
- if (!m_freeList.head) {
- for (MarkedBlock*& block = m_blocksToSweep; block; block = block->next()) {
- MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
- if (!freeList.head) {
- block->didConsumeFreeList();
- continue;
- }
-
- if (bytes > block->cellSize()) {
- block->canonicalizeCellLivenessData(freeList);
- continue;
- }
-
- m_currentBlock = block;
- m_freeList = freeList;
- break;
- }
+ if (m_currentBlock) {
+ ASSERT(m_currentBlock == m_nextBlockToSweep);
+ m_currentBlock->didConsumeFreeList();
+ m_nextBlockToSweep = m_currentBlock->next();
+ }
+
+ MarkedBlock* next;
+ for (MarkedBlock*& block = m_nextBlockToSweep; block; block = next) {
+ next = block->next();
+
+ MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
- if (!m_freeList.head) {
- m_currentBlock = 0;
- return 0;
+ double utilization = ((double)MarkedBlock::blockSize - (double)freeList.bytes) / (double)MarkedBlock::blockSize;
+ if (utilization >= Options::minMarkedBlockUtilization()) {
+ ASSERT(freeList.bytes || !freeList.head);
+ m_blockList.remove(block);
+ m_retiredBlocks.push(block);
+ block->didRetireBlock(freeList);
+ continue;
}
+
+ if (bytes > block->cellSize()) {
+ block->stopAllocating(freeList);
+ continue;
+ }
+
+ m_currentBlock = block;
+ m_freeList = freeList;
+ break;
}
+ if (!m_freeList.head) {
+ m_currentBlock = 0;
+ return 0;
+ }
+
+ ASSERT(m_freeList.head);
+ void* head = tryPopFreeList(bytes);
+ ASSERT(head);
+ m_markedSpace->didAllocateInBlock(m_currentBlock);
+ return head;
+}
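A numeric illustration of the retirement test above, assuming MarkedBlock::blockSize is 16 KB and Options::minMarkedBlockUtilization() is 0.90 (both values are assumptions of this example):

// A sweep that finds 1024 free bytes in a 16384-byte block gives
//   utilization = (16384 - 1024) / 16384 = 0.9375 >= 0.90,
// so the block is nearly full: it is moved from m_blockList to m_retiredBlocks, its free
// cells are zapped via didRetireBlock(), and it is not re-swept until the next full collection.
//
// A sweep that finds 4096 free bytes gives
//   utilization = (16384 - 4096) / 16384 = 0.75 < 0.90,
// so the block stays on m_blockList and its free list can be allocated from.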
+
+inline void* MarkedAllocator::tryPopFreeList(size_t bytes)
+{
+ ASSERT(m_currentBlock);
+ if (bytes > m_currentBlock->cellSize())
+ return 0;
+
MarkedBlock::FreeCell* head = m_freeList.head;
m_freeList.head = head->next;
- ASSERT(head);
return head;
}
-
+
inline void* MarkedAllocator::tryAllocate(size_t bytes)
{
ASSERT(!m_heap->isBusy());
m_heap->m_operationInProgress = Allocation;
void* result = tryAllocateHelper(bytes);
+
m_heap->m_operationInProgress = NoOperation;
+ ASSERT(result || !m_currentBlock);
return result;
}
-
+
+ALWAYS_INLINE void MarkedAllocator::doTestCollectionsIfNeeded()
+{
+ if (!Options::slowPathAllocsBetweenGCs())
+ return;
+
+ static unsigned allocationCount = 0;
+ if (!allocationCount) {
+ if (!m_heap->isDeferred())
+ m_heap->collectAllGarbage();
+ ASSERT(m_heap->m_operationInProgress == NoOperation);
+ }
+ if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
+ allocationCount = 0;
+}
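For example (option value invented): with Options::slowPathAllocsBetweenGCs() set to 100, the static counter above forces collectAllGarbage() on the 1st, 101st, 201st, ... slow-path allocation, a debugging aid for shaking out GC-timing bugs.

// Worked example with slowPathAllocsBetweenGCs() == 100:
//   call 1:   allocationCount == 0  -> collect, then allocationCount becomes 1
//   calls 2-100: no collection, allocationCount counts up to 100 and is reset to 0
//   call 101: allocationCount == 0  -> collect again, and so on.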
+
void* MarkedAllocator::allocateSlowCase(size_t bytes)
{
- ASSERT(m_heap->vm()->apiLock().currentThreadIsHoldingLock());
-#if COLLECT_ON_EVERY_ALLOCATION
- m_heap->collectAllGarbage();
- ASSERT(m_heap->m_operationInProgress == NoOperation);
-#endif
-
+ ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
+ doTestCollectionsIfNeeded();
+
+ ASSERT(!m_markedSpace->isIterating());
ASSERT(!m_freeList.head);
m_heap->didAllocate(m_freeList.bytes);
@@ -84,9 +155,7 @@ void* MarkedAllocator::allocateSlowCase(size_t bytes)
if (LIKELY(result != 0))
return result;
- if (m_heap->shouldCollect()) {
- m_heap->collect(Heap::DoNotSweep);
-
+ if (m_heap->collectIfNecessaryOrDefer()) {
result = tryAllocate(bytes);
if (result)
return result;
@@ -106,14 +175,13 @@ void* MarkedAllocator::allocateSlowCase(size_t bytes)
MarkedBlock* MarkedAllocator::allocateBlock(size_t bytes)
{
size_t minBlockSize = MarkedBlock::blockSize;
- size_t minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(MarkedBlock) + bytes);
+ size_t minAllocationSize = WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(sizeof(MarkedBlock)) + WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);
+ minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), minAllocationSize);
size_t blockSize = std::max(minBlockSize, minAllocationSize);
size_t cellSize = m_cellSize ? m_cellSize : WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);
- if (blockSize == MarkedBlock::blockSize)
- return MarkedBlock::create(m_heap->blockAllocator().allocate<MarkedBlock>(), this, cellSize, m_destructorType);
- return MarkedBlock::create(m_heap->blockAllocator().allocateCustomSize(blockSize, MarkedBlock::blockSize), this, cellSize, m_destructorType);
+ return MarkedBlock::create(*m_heap, this, blockSize, cellSize, m_needsDestruction);
}
void MarkedAllocator::addBlock(MarkedBlock* block)
@@ -122,8 +190,7 @@ void MarkedAllocator::addBlock(MarkedBlock* block)
ASSERT(!m_freeList.head);
m_blockList.append(block);
- m_blocksToSweep = m_currentBlock = block;
- m_freeList = block->sweep(MarkedBlock::SweepToFreeList);
+ m_nextBlockToSweep = block;
m_markedSpace->didAddBlock(block);
}
@@ -133,9 +200,33 @@ void MarkedAllocator::removeBlock(MarkedBlock* block)
m_currentBlock = m_currentBlock->next();
m_freeList = MarkedBlock::FreeList();
}
- if (m_blocksToSweep == block)
- m_blocksToSweep = m_blocksToSweep->next();
+ if (m_nextBlockToSweep == block)
+ m_nextBlockToSweep = m_nextBlockToSweep->next();
+
+ block->willRemoveBlock();
m_blockList.remove(block);
}
+void MarkedAllocator::reset()
+{
+ m_lastActiveBlock = 0;
+ m_currentBlock = 0;
+ m_freeList = MarkedBlock::FreeList();
+ if (m_heap->operationInProgress() == FullCollection)
+ m_blockList.append(m_retiredBlocks);
+
+ m_nextBlockToSweep = m_blockList.head();
+}
+
+struct LastChanceToFinalize : MarkedBlock::VoidFunctor {
+ void operator()(MarkedBlock* block) { block->lastChanceToFinalize(); }
+};
+
+void MarkedAllocator::lastChanceToFinalize()
+{
+ m_blockList.append(m_retiredBlocks);
+ LastChanceToFinalize functor;
+ forEachBlock(functor);
+}
+
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.h b/Source/JavaScriptCore/heap/MarkedAllocator.h
index 686691433..261500029 100644
--- a/Source/JavaScriptCore/heap/MarkedAllocator.h
+++ b/Source/JavaScriptCore/heap/MarkedAllocator.h
@@ -1,3 +1,28 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#ifndef MarkedAllocator_h
#define MarkedAllocator_h
@@ -10,10 +35,6 @@ class Heap;
class MarkedSpace;
class LLIntOffsetsExtractor;
-namespace DFG {
-class SpeculativeJIT;
-}
-
class MarkedAllocator {
friend class LLIntOffsetsExtractor;
@@ -21,18 +42,26 @@ public:
static ptrdiff_t offsetOfFreeListHead();
MarkedAllocator();
+ void lastChanceToFinalize();
void reset();
- void canonicalizeCellLivenessData();
+ void stopAllocating();
+ void resumeAllocating();
size_t cellSize() { return m_cellSize; }
- MarkedBlock::DestructorType destructorType() { return m_destructorType; }
+ bool needsDestruction() { return m_needsDestruction; }
void* allocate(size_t);
Heap* heap() { return m_heap; }
+ MarkedBlock* takeLastActiveBlock()
+ {
+ MarkedBlock* block = m_lastActiveBlock;
+ m_lastActiveBlock = 0;
+ return block;
+ }
template<typename Functor> void forEachBlock(Functor&);
void addBlock(MarkedBlock*);
void removeBlock(MarkedBlock*);
- void init(Heap*, MarkedSpace*, size_t cellSize, MarkedBlock::DestructorType);
+ void init(Heap*, MarkedSpace*, size_t cellSize, bool needsDestruction);
bool isPagedOut(double deadline);
@@ -40,14 +69,18 @@ private:
JS_EXPORT_PRIVATE void* allocateSlowCase(size_t);
void* tryAllocate(size_t);
void* tryAllocateHelper(size_t);
+ void* tryPopFreeList(size_t);
MarkedBlock* allocateBlock(size_t);
+ ALWAYS_INLINE void doTestCollectionsIfNeeded();
MarkedBlock::FreeList m_freeList;
MarkedBlock* m_currentBlock;
- MarkedBlock* m_blocksToSweep;
+ MarkedBlock* m_lastActiveBlock;
+ MarkedBlock* m_nextBlockToSweep;
DoublyLinkedList<MarkedBlock> m_blockList;
+ DoublyLinkedList<MarkedBlock> m_retiredBlocks;
size_t m_cellSize;
- MarkedBlock::DestructorType m_destructorType;
+ bool m_needsDestruction { false };
Heap* m_heap;
MarkedSpace* m_markedSpace;
};
@@ -59,20 +92,20 @@ inline ptrdiff_t MarkedAllocator::offsetOfFreeListHead()
inline MarkedAllocator::MarkedAllocator()
: m_currentBlock(0)
- , m_blocksToSweep(0)
+ , m_lastActiveBlock(0)
+ , m_nextBlockToSweep(0)
, m_cellSize(0)
- , m_destructorType(MarkedBlock::None)
, m_heap(0)
, m_markedSpace(0)
{
}
-inline void MarkedAllocator::init(Heap* heap, MarkedSpace* markedSpace, size_t cellSize, MarkedBlock::DestructorType destructorType)
+inline void MarkedAllocator::init(Heap* heap, MarkedSpace* markedSpace, size_t cellSize, bool needsDestruction)
{
m_heap = heap;
m_markedSpace = markedSpace;
m_cellSize = cellSize;
- m_destructorType = destructorType;
+ m_needsDestruction = needsDestruction;
}
inline void* MarkedAllocator::allocate(size_t bytes)
@@ -93,25 +126,30 @@ inline void* MarkedAllocator::allocate(size_t bytes)
return head;
}
-inline void MarkedAllocator::reset()
-{
- m_currentBlock = 0;
- m_freeList = MarkedBlock::FreeList();
- m_blocksToSweep = m_blockList.head();
-}
-
-inline void MarkedAllocator::canonicalizeCellLivenessData()
+inline void MarkedAllocator::stopAllocating()
{
+ ASSERT(!m_lastActiveBlock);
if (!m_currentBlock) {
ASSERT(!m_freeList.head);
return;
}
- m_currentBlock->canonicalizeCellLivenessData(m_freeList);
+ m_currentBlock->stopAllocating(m_freeList);
+ m_lastActiveBlock = m_currentBlock;
m_currentBlock = 0;
m_freeList = MarkedBlock::FreeList();
}
+inline void MarkedAllocator::resumeAllocating()
+{
+ if (!m_lastActiveBlock)
+ return;
+
+ m_freeList = m_lastActiveBlock->resumeAllocating();
+ m_currentBlock = m_lastActiveBlock;
+ m_lastActiveBlock = 0;
+}
+
template <typename Functor> inline void MarkedAllocator::forEachBlock(Functor& functor)
{
MarkedBlock* next;
@@ -119,8 +157,13 @@ template <typename Functor> inline void MarkedAllocator::forEachBlock(Functor& f
next = block->next();
functor(block);
}
+
+ for (MarkedBlock* block = m_retiredBlocks.head(); block; block = next) {
+ next = block->next();
+ functor(block);
+ }
}
-
+
} // namespace JSC
-#endif
+#endif // MarkedAllocator_h
diff --git a/Source/JavaScriptCore/heap/MarkedBlock.cpp b/Source/JavaScriptCore/heap/MarkedBlock.cpp
index 0df2e1fb8..9123f4e2c 100644
--- a/Source/JavaScriptCore/heap/MarkedBlock.cpp
+++ b/Source/JavaScriptCore/heap/MarkedBlock.cpp
@@ -26,28 +26,49 @@
#include "config.h"
#include "MarkedBlock.h"
-#include "IncrementalSweeper.h"
#include "JSCell.h"
#include "JSDestructibleObject.h"
-#include "Operations.h"
+#include "JSCInlines.h"
namespace JSC {
-MarkedBlock* MarkedBlock::create(DeadBlock* block, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
+static const bool computeBalance = false;
+static size_t balance;
+
+MarkedBlock* MarkedBlock::create(Heap& heap, MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
+{
+ if (computeBalance) {
+ balance++;
+ if (!(balance % 10))
+ dataLog("MarkedBlock Balance: ", balance, "\n");
+ }
+ MarkedBlock* block = new (NotNull, fastAlignedMalloc(blockSize, capacity)) MarkedBlock(allocator, capacity, cellSize, needsDestruction);
+ heap.didAllocateBlock(capacity);
+ return block;
+}
+
+void MarkedBlock::destroy(Heap& heap, MarkedBlock* block)
{
- ASSERT(reinterpret_cast<size_t>(block) == (reinterpret_cast<size_t>(block) & blockMask));
- Region* region = block->region();
- return new (NotNull, block) MarkedBlock(region, allocator, cellSize, destructorType);
+ if (computeBalance) {
+ balance--;
+ if (!(balance % 10))
+ dataLog("MarkedBlock Balance: ", balance, "\n");
+ }
+ size_t capacity = block->capacity();
+ block->~MarkedBlock();
+ fastAlignedFree(block);
+ heap.didFreeBlock(capacity);
}
-MarkedBlock::MarkedBlock(Region* region, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
- : HeapBlock<MarkedBlock>(region)
+MarkedBlock::MarkedBlock(MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
+ : DoublyLinkedListNode<MarkedBlock>()
, m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
- , m_endAtom((allocator->cellSize() ? atomsPerBlock : region->blockSize() / atomSize) - m_atomsPerCell + 1)
- , m_destructorType(destructorType)
+ , m_endAtom((allocator->cellSize() ? atomsPerBlock - m_atomsPerCell : firstAtom()) + 1)
+ , m_capacity(capacity)
+ , m_needsDestruction(needsDestruction)
, m_allocator(allocator)
, m_state(New) // All cells start out unmarked.
- , m_weakSet(allocator->heap()->vm())
+ , m_weakSet(allocator->heap()->vm(), *this)
{
ASSERT(allocator);
HEAP_LOG_BLOCK_STATE_TRANSITION(this);
@@ -59,20 +80,22 @@ inline void MarkedBlock::callDestructor(JSCell* cell)
if (cell->isZapped())
return;
-#if ENABLE(SIMPLE_HEAP_PROFILING)
- m_heap->m_destroyedTypeCounts.countVPtr(vptr);
-#endif
-
- cell->methodTableForDestruction()->destroy(cell);
+ ASSERT(cell->structureID());
+ if (cell->inlineTypeFlags() & StructureIsImmortal)
+ cell->structure(*vm())->classInfo()->methodTable.destroy(cell);
+ else
+ jsCast<JSDestructibleObject*>(cell)->classInfo()->methodTable.destroy(cell);
cell->zap();
}
-template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, MarkedBlock::DestructorType dtorType>
+template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool callDestructors>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
ASSERT(blockState != Allocated && blockState != FreeListed);
- ASSERT(!(dtorType == MarkedBlock::None && sweepMode == SweepOnly));
+ ASSERT(!(!callDestructors && sweepMode == SweepOnly));
+ SamplingRegion samplingRegion((!callDestructors && blockState != New) ? "Calling destructors" : "sweeping");
+
// This produces a free list that is ordered in reverse through the block.
// This is fine, since the allocation code makes no assumptions about the
// order of the free list.
@@ -84,7 +107,7 @@ MarkedBlock::FreeList MarkedBlock::specializedSweep()
JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
- if (dtorType != MarkedBlock::None && blockState != New)
+ if (callDestructors && blockState != New)
callDestructor(cell);
if (sweepMode == SweepToFreeList) {
@@ -98,7 +121,7 @@ MarkedBlock::FreeList MarkedBlock::specializedSweep()
// We only want to discard the newlyAllocated bits if we're creating a FreeList,
// otherwise we would lose information on what's currently alive.
if (sweepMode == SweepToFreeList && m_newlyAllocated)
- m_newlyAllocated.clear();
+ m_newlyAllocated = nullptr;
m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
return FreeList(head, count * cellSize());
@@ -110,34 +133,33 @@ MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
m_weakSet.sweep();
- if (sweepMode == SweepOnly && m_destructorType == MarkedBlock::None)
+ if (sweepMode == SweepOnly && !m_needsDestruction)
return FreeList();
- if (m_destructorType == MarkedBlock::ImmortalStructure)
- return sweepHelper<MarkedBlock::ImmortalStructure>(sweepMode);
- if (m_destructorType == MarkedBlock::Normal)
- return sweepHelper<MarkedBlock::Normal>(sweepMode);
- return sweepHelper<MarkedBlock::None>(sweepMode);
+ if (m_needsDestruction)
+ return sweepHelper<true>(sweepMode);
+ return sweepHelper<false>(sweepMode);
}
-template<MarkedBlock::DestructorType dtorType>
+template<bool callDestructors>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
switch (m_state) {
case New:
ASSERT(sweepMode == SweepToFreeList);
- return specializedSweep<New, SweepToFreeList, dtorType>();
+ return specializedSweep<New, SweepToFreeList, callDestructors>();
case FreeListed:
// Happens when a block transitions to fully allocated.
ASSERT(sweepMode == SweepToFreeList);
return FreeList();
+ case Retired:
case Allocated:
RELEASE_ASSERT_NOT_REACHED();
return FreeList();
case Marked:
return sweepMode == SweepToFreeList
- ? specializedSweep<Marked, SweepToFreeList, dtorType>()
- : specializedSweep<Marked, SweepOnly, dtorType>();
+ ? specializedSweep<Marked, SweepToFreeList, callDestructors>()
+ : specializedSweep<Marked, SweepOnly, callDestructors>();
}
RELEASE_ASSERT_NOT_REACHED();
@@ -151,17 +173,18 @@ public:
{
}
- void operator()(JSCell* cell)
+ IterationStatus operator()(JSCell* cell)
{
ASSERT(MarkedBlock::blockFor(cell) == m_block);
m_block->setNewlyAllocated(cell);
+ return IterationStatus::Continue;
}
private:
MarkedBlock* m_block;
};
-void MarkedBlock::canonicalizeCellLivenessData(const FreeList& freeList)
+void MarkedBlock::stopAllocating(const FreeList& freeList)
{
HEAP_LOG_BLOCK_STATE_TRANSITION(this);
FreeCell* head = freeList.head;
@@ -184,7 +207,7 @@ void MarkedBlock::canonicalizeCellLivenessData(const FreeList& freeList)
// way to tell what's live vs dead.
ASSERT(!m_newlyAllocated);
- m_newlyAllocated = adoptPtr(new WTF::Bitmap<atomsPerBlock>());
+ m_newlyAllocated = std::make_unique<WTF::Bitmap<atomsPerBlock>>();
SetNewlyAllocatedFunctor functor(this);
forEachCell(functor);
@@ -199,4 +222,81 @@ void MarkedBlock::canonicalizeCellLivenessData(const FreeList& freeList)
m_state = Marked;
}
+void MarkedBlock::clearMarks()
+{
+ if (heap()->operationInProgress() == JSC::EdenCollection)
+ this->clearMarksWithCollectionType<EdenCollection>();
+ else
+ this->clearMarksWithCollectionType<FullCollection>();
+}
+
+template <HeapOperation collectionType>
+void MarkedBlock::clearMarksWithCollectionType()
+{
+ ASSERT(collectionType == FullCollection || collectionType == EdenCollection);
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+ ASSERT(m_state != New && m_state != FreeListed);
+ if (collectionType == FullCollection) {
+ m_marks.clearAll();
+ // This will become true at the end of the mark phase. We set it now to
+ // avoid an extra pass to do so later.
+ m_state = Marked;
+ return;
+ }
+
+ ASSERT(collectionType == EdenCollection);
+ // If a block was retired then there's no way an EdenCollection can un-retire it.
+ if (m_state != Retired)
+ m_state = Marked;
+}
+
+void MarkedBlock::lastChanceToFinalize()
+{
+ m_weakSet.lastChanceToFinalize();
+
+ clearNewlyAllocated();
+ clearMarksWithCollectionType<FullCollection>();
+ sweep();
+}
+
+MarkedBlock::FreeList MarkedBlock::resumeAllocating()
+{
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+ ASSERT(m_state == Marked);
+
+ if (!m_newlyAllocated) {
+ // We didn't have to create a "newly allocated" bitmap. That means we were already Marked
+ // when we last stopped allocation, so return an empty free list and stay in the Marked state.
+ return FreeList();
+ }
+
+ // Re-create our free list from before stopping allocation.
+ return sweep(SweepToFreeList);
+}
+
+void MarkedBlock::didRetireBlock(const FreeList& freeList)
+{
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+ FreeCell* head = freeList.head;
+
+ // Currently we don't notify the Heap that we're giving up on this block.
+ // The Heap might be able to make a better decision about how many bytes should
+ // be allocated before the next collection if it knew about this retired block.
+ // On the other hand we'll waste at most 10% of our Heap space between FullCollections
+ // and only under heavy fragmentation.
+
+ // We need to zap the free list when retiring a block so that we don't try to destroy
+ // previously destroyed objects when we re-sweep the block in the future.
+ FreeCell* next;
+ for (FreeCell* current = head; current; current = next) {
+ next = current->next;
+ reinterpret_cast<JSCell*>(current)->zap();
+ }
+
+ ASSERT(m_state == FreeListed);
+ m_state = Retired;
+}
+
} // namespace JSC
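
The hunk above replaces the old three-way DestructorType with a single needsDestruction bit per block and decides the immortal-structure case per cell from its inline type flags. A minimal standalone model of that dispatch (simplified types and stand-in destroy functions, not JSC's actual classes):

#include <cstdio>

enum TypeFlags { StructureIsImmortal = 1 << 0 };

struct Cell {
    unsigned inlineTypeFlags;
    bool zapped;
    void zap() { zapped = true; }
};

// Stand-ins for the two methodTable.destroy() paths taken in the real code.
static void destroyViaStructureClassInfo(Cell*) { std::puts("destroy via Structure's ClassInfo"); }
static void destroyAsDestructibleObject(Cell*) { std::puts("destroy via JSDestructibleObject's ClassInfo"); }

static void callDestructor(Cell* cell)
{
    if (cell->zapped)
        return; // already swept on an earlier pass
    if (cell->inlineTypeFlags & StructureIsImmortal)
        destroyViaStructureClassInfo(cell);
    else
        destroyAsDestructibleObject(cell);
    cell->zap(); // make a later sweep of the same cell a no-op
}

int main()
{
    Cell immortal { StructureIsImmortal, false };
    Cell ordinary { 0, false };
    callDestructor(&immortal);
    callDestructor(&ordinary);
    callDestructor(&ordinary); // no-op: the cell was zapped
}
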
diff --git a/Source/JavaScriptCore/heap/MarkedBlock.h b/Source/JavaScriptCore/heap/MarkedBlock.h
index fcc3016d9..546971ce8 100644
--- a/Source/JavaScriptCore/heap/MarkedBlock.h
+++ b/Source/JavaScriptCore/heap/MarkedBlock.h
@@ -22,17 +22,14 @@
#ifndef MarkedBlock_h
#define MarkedBlock_h
-#include "BlockAllocator.h"
-#include "HeapBlock.h"
-
+#include "HeapOperation.h"
+#include "IterationStatus.h"
#include "WeakSet.h"
#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
-#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>
-#include <wtf/Vector.h>
// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0
@@ -56,8 +53,6 @@ namespace JSC {
typedef uintptr_t Bits;
- static const size_t MB = 1024 * 1024;
-
bool isZapped(const JSCell*);
// A marked block is a page-aligned container for heap-allocated objects.
@@ -68,14 +63,19 @@ namespace JSC {
// size is equal to the difference between the cell size and the object
// size.
- class MarkedBlock : public HeapBlock<MarkedBlock> {
+ class MarkedBlock : public DoublyLinkedListNode<MarkedBlock> {
+ friend class WTF::DoublyLinkedListNode<MarkedBlock>;
+ friend class LLIntOffsetsExtractor;
+ friend struct VerifyMarkedOrRetired;
public:
- static const size_t atomSize = 8; // bytes
- static const size_t blockSize = 64 * KB;
+ static const size_t atomSize = 16; // bytes
+ static const size_t blockSize = 16 * KB;
static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.
static const size_t atomsPerBlock = blockSize / atomSize;
- static const size_t atomMask = atomsPerBlock - 1;
+
+ static_assert(!(MarkedBlock::atomSize & (MarkedBlock::atomSize - 1)), "MarkedBlock::atomSize must be a power of two.");
+ static_assert(!(MarkedBlock::blockSize & (MarkedBlock::blockSize - 1)), "MarkedBlock::blockSize must be a power of two.");
struct FreeCell {
FreeCell* next;
@@ -106,8 +106,8 @@ namespace JSC {
ReturnType m_count;
};
- enum DestructorType { None, ImmortalStructure, Normal };
- static MarkedBlock* create(DeadBlock*, MarkedAllocator*, size_t cellSize, DestructorType);
+ static MarkedBlock* create(Heap&, MarkedAllocator*, size_t capacity, size_t cellSize, bool needsDestruction);
+ static void destroy(Heap&, MarkedBlock*);
static bool isAtomAligned(const void*);
static MarkedBlock* blockFor(const void*);
@@ -132,14 +132,21 @@ namespace JSC {
// cell liveness data. To restore accurate cell liveness data, call one
// of these functions:
void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
- void canonicalizeCellLivenessData(const FreeList&);
+ void stopAllocating(const FreeList&);
+        FreeList resumeAllocating(); // Call this if you canonicalized a block for some non-collection-related purpose.
+        // Returns true if the "newly allocated" bitmap was non-null
+        // and was successfully cleared, and false otherwise.
+ bool clearNewlyAllocated();
void clearMarks();
+ template <HeapOperation collectionType>
+ void clearMarksWithCollectionType();
+
size_t markCount();
bool isEmpty();
size_t cellSize();
- DestructorType destructorType();
+ bool needsDestruction() const;
size_t size();
size_t capacity();
@@ -148,6 +155,8 @@ namespace JSC {
bool testAndSetMarked(const void*);
bool isLive(const JSCell*);
bool isLiveCell(const void*);
+ bool isAtom(const void*);
+ bool isMarkedOrNewlyAllocated(const JSCell*);
void setMarked(const void*);
void clearMarked(const void*);
@@ -155,36 +164,40 @@ namespace JSC {
void setNewlyAllocated(const void*);
void clearNewlyAllocated(const void*);
- bool needsSweeping();
+ bool isAllocated() const;
+ bool isMarkedOrRetired() const;
+ bool needsSweeping() const;
+ void didRetireBlock(const FreeList&);
+ void willRemoveBlock();
- template <typename Functor> void forEachCell(Functor&);
- template <typename Functor> void forEachLiveCell(Functor&);
- template <typename Functor> void forEachDeadCell(Functor&);
+ template <typename Functor> IterationStatus forEachCell(Functor&);
+ template <typename Functor> IterationStatus forEachLiveCell(Functor&);
+ template <typename Functor> IterationStatus forEachDeadCell(Functor&);
private:
- static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.
+ static const size_t atomAlignmentMask = atomSize - 1;
- enum BlockState { New, FreeListed, Allocated, Marked };
- template<DestructorType> FreeList sweepHelper(SweepMode = SweepOnly);
+ enum BlockState { New, FreeListed, Allocated, Marked, Retired };
+ template<bool callDestructors> FreeList sweepHelper(SweepMode = SweepOnly);
typedef char Atom[atomSize];
- MarkedBlock(Region*, MarkedAllocator*, size_t cellSize, DestructorType);
+ MarkedBlock(MarkedAllocator*, size_t capacity, size_t cellSize, bool needsDestruction);
Atom* atoms();
size_t atomNumber(const void*);
void callDestructor(JSCell*);
- template<BlockState, SweepMode, DestructorType> FreeList specializedSweep();
+ template<BlockState, SweepMode, bool callDestructors> FreeList specializedSweep();
+ MarkedBlock* m_prev;
+ MarkedBlock* m_next;
+
size_t m_atomsPerCell;
size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
-#if ENABLE(PARALLEL_GC)
- WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic> m_marks;
-#else
- WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
-#endif
- OwnPtr<WTF::Bitmap<atomsPerBlock> > m_newlyAllocated;
+ WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic, uint8_t> m_marks;
+ std::unique_ptr<WTF::Bitmap<atomsPerBlock>> m_newlyAllocated;
- DestructorType m_destructorType;
+ size_t m_capacity;
+ bool m_needsDestruction;
MarkedAllocator* m_allocator;
BlockState m_state;
WeakSet m_weakSet;
@@ -222,14 +235,6 @@ namespace JSC {
return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
}
- inline void MarkedBlock::lastChanceToFinalize()
- {
- m_weakSet.lastChanceToFinalize();
-
- clearMarks();
- sweep();
- }
-
inline MarkedAllocator* MarkedBlock::allocator() const
{
return m_allocator;
@@ -265,25 +270,17 @@ namespace JSC {
m_weakSet.reap();
}
- inline void MarkedBlock::didConsumeFreeList()
+ inline void MarkedBlock::willRemoveBlock()
{
- HEAP_LOG_BLOCK_STATE_TRANSITION(this);
-
- ASSERT(m_state == FreeListed);
- m_state = Allocated;
+ ASSERT(m_state != Retired);
}
- inline void MarkedBlock::clearMarks()
+ inline void MarkedBlock::didConsumeFreeList()
{
HEAP_LOG_BLOCK_STATE_TRANSITION(this);
- ASSERT(m_state != New && m_state != FreeListed);
- m_marks.clearAll();
- m_newlyAllocated.clear();
-
- // This will become true at the end of the mark phase. We set it now to
- // avoid an extra pass to do so later.
- m_state = Marked;
+ ASSERT(m_state == FreeListed);
+ m_state = Allocated;
}
inline size_t MarkedBlock::markCount()
@@ -301,9 +298,9 @@ namespace JSC {
return m_atomsPerCell * atomSize;
}
- inline MarkedBlock::DestructorType MarkedBlock::destructorType()
+ inline bool MarkedBlock::needsDestruction() const
{
- return m_destructorType;
+ return m_needsDestruction;
}
inline size_t MarkedBlock::size()
@@ -313,7 +310,7 @@ namespace JSC {
inline size_t MarkedBlock::capacity()
{
- return region()->blockSize();
+ return m_capacity;
}
inline size_t MarkedBlock::atomNumber(const void* p)
@@ -357,14 +354,30 @@ namespace JSC {
m_newlyAllocated->clear(atomNumber(p));
}
+ inline bool MarkedBlock::clearNewlyAllocated()
+ {
+ if (m_newlyAllocated) {
+ m_newlyAllocated = nullptr;
+ return true;
+ }
+ return false;
+ }
+
+ inline bool MarkedBlock::isMarkedOrNewlyAllocated(const JSCell* cell)
+ {
+ ASSERT(m_state == Retired || m_state == Marked);
+ return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));
+ }
+
inline bool MarkedBlock::isLive(const JSCell* cell)
{
switch (m_state) {
case Allocated:
return true;
+ case Retired:
case Marked:
- return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));
+ return isMarkedOrNewlyAllocated(cell);
case New:
case FreeListed:
@@ -376,7 +389,7 @@ namespace JSC {
return false;
}
- inline bool MarkedBlock::isLiveCell(const void* p)
+ inline bool MarkedBlock::isAtom(const void* p)
{
ASSERT(MarkedBlock::isAtomAligned(p));
size_t atomNumber = this->atomNumber(p);
@@ -387,45 +400,67 @@ namespace JSC {
return false;
if (atomNumber >= m_endAtom) // Filters pointers into invalid cells out of the range.
return false;
+ return true;
+ }
+ inline bool MarkedBlock::isLiveCell(const void* p)
+ {
+ if (!isAtom(p))
+ return false;
return isLive(static_cast<const JSCell*>(p));
}
- template <typename Functor> inline void MarkedBlock::forEachCell(Functor& functor)
+ template <typename Functor> inline IterationStatus MarkedBlock::forEachCell(Functor& functor)
{
for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
- functor(cell);
+ if (functor(cell) == IterationStatus::Done)
+ return IterationStatus::Done;
}
+ return IterationStatus::Continue;
}
- template <typename Functor> inline void MarkedBlock::forEachLiveCell(Functor& functor)
+ template <typename Functor> inline IterationStatus MarkedBlock::forEachLiveCell(Functor& functor)
{
for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
if (!isLive(cell))
continue;
- functor(cell);
+ if (functor(cell) == IterationStatus::Done)
+ return IterationStatus::Done;
}
+ return IterationStatus::Continue;
}
- template <typename Functor> inline void MarkedBlock::forEachDeadCell(Functor& functor)
+ template <typename Functor> inline IterationStatus MarkedBlock::forEachDeadCell(Functor& functor)
{
for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
if (isLive(cell))
continue;
- functor(cell);
+ if (functor(cell) == IterationStatus::Done)
+ return IterationStatus::Done;
}
+ return IterationStatus::Continue;
}
- inline bool MarkedBlock::needsSweeping()
+ inline bool MarkedBlock::needsSweeping() const
{
return m_state == Marked;
}
+ inline bool MarkedBlock::isAllocated() const
+ {
+ return m_state == Allocated;
+ }
+
+ inline bool MarkedBlock::isMarkedOrRetired() const
+ {
+ return m_state == Marked || m_state == Retired;
+ }
+
} // namespace JSC
namespace WTF {
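
With the header change above, forEachCell/forEachLiveCell/forEachDeadCell now take functors that return IterationStatus, so a walk can stop as soon as a functor returns Done. A self-contained illustration of that contract, using plain std types rather than MarkedBlock's atom layout:

#include <array>
#include <cstdio>

enum class IterationStatus { Continue, Done };

struct Cell { int value; };

template<typename Functor>
IterationStatus forEachCell(std::array<Cell, 4>& cells, Functor& functor)
{
    for (Cell& cell : cells) {
        if (functor(&cell) == IterationStatus::Done)
            return IterationStatus::Done; // propagate the early exit to the caller
    }
    return IterationStatus::Continue;
}

struct FindNegative {
    Cell* found = nullptr;
    IterationStatus operator()(Cell* cell)
    {
        if (cell->value < 0) {
            found = cell;
            return IterationStatus::Done; // stop the walk at the first match
        }
        return IterationStatus::Continue;
    }
};

int main()
{
    std::array<Cell, 4> cells { { { 3 }, { -1 }, { 7 }, { -5 } } };
    FindNegative finder;
    forEachCell(cells, finder);
    std::printf("first negative: %d\n", finder.found ? finder.found->value : 0);
}
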
diff --git a/Source/JavaScriptCore/heap/MarkedBlockSet.h b/Source/JavaScriptCore/heap/MarkedBlockSet.h
index 022a17389..9cf19088c 100644
--- a/Source/JavaScriptCore/heap/MarkedBlockSet.h
+++ b/Source/JavaScriptCore/heap/MarkedBlockSet.h
@@ -57,7 +57,7 @@ inline void MarkedBlockSet::add(MarkedBlock* block)
inline void MarkedBlockSet::remove(MarkedBlock* block)
{
- int oldCapacity = m_set.capacity();
+ unsigned oldCapacity = m_set.capacity();
m_set.remove(block);
if (m_set.capacity() != oldCapacity) // Indicates we've removed a lot of blocks.
recomputeFilter();
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.cpp b/Source/JavaScriptCore/heap/MarkedSpace.cpp
index 2bef60843..c6d84f059 100644
--- a/Source/JavaScriptCore/heap/MarkedSpace.cpp
+++ b/Source/JavaScriptCore/heap/MarkedSpace.cpp
@@ -22,49 +22,24 @@
#include "MarkedSpace.h"
#include "IncrementalSweeper.h"
-#include "JSGlobalObject.h"
-#include "JSLock.h"
#include "JSObject.h"
-
+#include "JSCInlines.h"
namespace JSC {
-class Structure;
-
-class Free {
-public:
- typedef MarkedBlock* ReturnType;
-
- enum FreeMode { FreeOrShrink, FreeAll };
-
- Free(FreeMode, MarkedSpace*);
- void operator()(MarkedBlock*);
- ReturnType returnValue();
-
+struct Free : MarkedBlock::VoidFunctor {
+ Free(MarkedSpace& space) : m_markedSpace(space) { }
+ void operator()(MarkedBlock* block) { m_markedSpace.freeBlock(block); }
private:
- FreeMode m_freeMode;
- MarkedSpace* m_markedSpace;
- DoublyLinkedList<MarkedBlock> m_blocks;
+ MarkedSpace& m_markedSpace;
};
-inline Free::Free(FreeMode freeMode, MarkedSpace* newSpace)
- : m_freeMode(freeMode)
- , m_markedSpace(newSpace)
-{
-}
-
-inline void Free::operator()(MarkedBlock* block)
-{
- if (m_freeMode == FreeOrShrink)
- m_markedSpace->freeOrShrinkBlock(block);
- else
- m_markedSpace->freeBlock(block);
-}
-
-inline Free::ReturnType Free::returnValue()
-{
- return m_blocks.head();
-}
+struct FreeOrShrink : MarkedBlock::VoidFunctor {
+ FreeOrShrink(MarkedSpace& space) : m_markedSpace(space) { }
+ void operator()(MarkedBlock* block) { m_markedSpace.freeOrShrinkBlock(block); }
+private:
+ MarkedSpace& m_markedSpace;
+};
struct VisitWeakSet : MarkedBlock::VoidFunctor {
VisitWeakSet(HeapRootVisitor& heapRootVisitor) : m_heapRootVisitor(heapRootVisitor) { }
@@ -79,38 +54,38 @@ struct ReapWeakSet : MarkedBlock::VoidFunctor {
MarkedSpace::MarkedSpace(Heap* heap)
: m_heap(heap)
+ , m_capacity(0)
+ , m_isIterating(false)
{
for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
- allocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::None);
- normalDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::Normal);
- immortalStructureDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::ImmortalStructure);
+ allocatorFor(cellSize).init(heap, this, cellSize, false);
+ destructorAllocatorFor(cellSize).init(heap, this, cellSize, true);
}
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
- allocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::None);
- normalDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::Normal);
- immortalStructureDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::ImmortalStructure);
+ for (size_t cellSize = impreciseStart; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
+ allocatorFor(cellSize).init(heap, this, cellSize, false);
+ destructorAllocatorFor(cellSize).init(heap, this, cellSize, true);
}
- m_normalSpace.largeAllocator.init(heap, this, 0, MarkedBlock::None);
- m_normalDestructorSpace.largeAllocator.init(heap, this, 0, MarkedBlock::Normal);
- m_immortalStructureDestructorSpace.largeAllocator.init(heap, this, 0, MarkedBlock::ImmortalStructure);
+ m_normalSpace.largeAllocator.init(heap, this, 0, false);
+ m_destructorSpace.largeAllocator.init(heap, this, 0, true);
}
MarkedSpace::~MarkedSpace()
{
- Free free(Free::FreeAll, this);
+ Free free(*this);
forEachBlock(free);
+ ASSERT(!m_blocks.set().size());
}
-struct LastChanceToFinalize : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->lastChanceToFinalize(); }
+struct LastChanceToFinalize {
+ void operator()(MarkedAllocator& allocator) { allocator.lastChanceToFinalize(); }
};
void MarkedSpace::lastChanceToFinalize()
{
- canonicalizeCellLivenessData();
- forEachBlock<LastChanceToFinalize>();
+ stopAllocating();
+ forEachAllocator<LastChanceToFinalize>();
}
void MarkedSpace::sweep()
@@ -119,74 +94,111 @@ void MarkedSpace::sweep()
forEachBlock<Sweep>();
}
+void MarkedSpace::zombifySweep()
+{
+ if (Options::logGC())
+ dataLog("Zombifying sweep...");
+ m_heap->sweeper()->willFinishSweeping();
+ forEachBlock<ZombifySweep>();
+}
+
void MarkedSpace::resetAllocators()
{
for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
allocatorFor(cellSize).reset();
- normalDestructorAllocatorFor(cellSize).reset();
- immortalStructureDestructorAllocatorFor(cellSize).reset();
+ destructorAllocatorFor(cellSize).reset();
}
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
+ for (size_t cellSize = impreciseStart; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
allocatorFor(cellSize).reset();
- normalDestructorAllocatorFor(cellSize).reset();
- immortalStructureDestructorAllocatorFor(cellSize).reset();
+ destructorAllocatorFor(cellSize).reset();
}
m_normalSpace.largeAllocator.reset();
- m_normalDestructorSpace.largeAllocator.reset();
- m_immortalStructureDestructorSpace.largeAllocator.reset();
+ m_destructorSpace.largeAllocator.reset();
+
+ m_blocksWithNewObjects.clear();
}
void MarkedSpace::visitWeakSets(HeapRootVisitor& heapRootVisitor)
{
VisitWeakSet visitWeakSet(heapRootVisitor);
- forEachBlock(visitWeakSet);
+ if (m_heap->operationInProgress() == EdenCollection) {
+ for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
+ visitWeakSet(m_blocksWithNewObjects[i]);
+ } else
+ forEachBlock(visitWeakSet);
}
void MarkedSpace::reapWeakSets()
{
- forEachBlock<ReapWeakSet>();
+ if (m_heap->operationInProgress() == EdenCollection) {
+ for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
+ m_blocksWithNewObjects[i]->reapWeakSet();
+ } else
+ forEachBlock<ReapWeakSet>();
+}
+
+template <typename Functor>
+void MarkedSpace::forEachAllocator()
+{
+ Functor functor;
+ forEachAllocator(functor);
}
-void MarkedSpace::canonicalizeCellLivenessData()
+template <typename Functor>
+void MarkedSpace::forEachAllocator(Functor& functor)
{
for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
- allocatorFor(cellSize).canonicalizeCellLivenessData();
- normalDestructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
- immortalStructureDestructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
+ functor(allocatorFor(cellSize));
+ functor(destructorAllocatorFor(cellSize));
}
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
- allocatorFor(cellSize).canonicalizeCellLivenessData();
- normalDestructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
- immortalStructureDestructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
+ for (size_t cellSize = impreciseStart; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
+ functor(allocatorFor(cellSize));
+ functor(destructorAllocatorFor(cellSize));
}
- m_normalSpace.largeAllocator.canonicalizeCellLivenessData();
- m_normalDestructorSpace.largeAllocator.canonicalizeCellLivenessData();
- m_immortalStructureDestructorSpace.largeAllocator.canonicalizeCellLivenessData();
+ functor(m_normalSpace.largeAllocator);
+ functor(m_destructorSpace.largeAllocator);
+}
+
+struct StopAllocatingFunctor {
+ void operator()(MarkedAllocator& allocator) { allocator.stopAllocating(); }
+};
+
+void MarkedSpace::stopAllocating()
+{
+ ASSERT(!isIterating());
+ forEachAllocator<StopAllocatingFunctor>();
+}
+
+struct ResumeAllocatingFunctor {
+ void operator()(MarkedAllocator& allocator) { allocator.resumeAllocating(); }
+};
+
+void MarkedSpace::resumeAllocating()
+{
+ ASSERT(isIterating());
+ forEachAllocator<ResumeAllocatingFunctor>();
}
bool MarkedSpace::isPagedOut(double deadline)
{
for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
if (allocatorFor(cellSize).isPagedOut(deadline)
- || normalDestructorAllocatorFor(cellSize).isPagedOut(deadline)
- || immortalStructureDestructorAllocatorFor(cellSize).isPagedOut(deadline))
+ || destructorAllocatorFor(cellSize).isPagedOut(deadline))
return true;
}
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
+ for (size_t cellSize = impreciseStart; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
if (allocatorFor(cellSize).isPagedOut(deadline)
- || normalDestructorAllocatorFor(cellSize).isPagedOut(deadline)
- || immortalStructureDestructorAllocatorFor(cellSize).isPagedOut(deadline))
+ || destructorAllocatorFor(cellSize).isPagedOut(deadline))
return true;
}
if (m_normalSpace.largeAllocator.isPagedOut(deadline)
- || m_normalDestructorSpace.largeAllocator.isPagedOut(deadline)
- || m_immortalStructureDestructorSpace.largeAllocator.isPagedOut(deadline))
+ || m_destructorSpace.largeAllocator.isPagedOut(deadline))
return true;
return false;
@@ -195,12 +207,9 @@ bool MarkedSpace::isPagedOut(double deadline)
void MarkedSpace::freeBlock(MarkedBlock* block)
{
block->allocator()->removeBlock(block);
+ m_capacity -= block->capacity();
m_blocks.remove(block);
- if (block->capacity() == MarkedBlock::blockSize) {
- m_heap->blockAllocator().deallocate(MarkedBlock::destroy(block));
- return;
- }
- m_heap->blockAllocator().deallocateCustomSize(MarkedBlock::destroy(block));
+ MarkedBlock::destroy(*m_heap, block);
}
void MarkedSpace::freeOrShrinkBlock(MarkedBlock* block)
@@ -213,14 +222,95 @@ void MarkedSpace::freeOrShrinkBlock(MarkedBlock* block)
freeBlock(block);
}
-struct Shrink : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->shrink(); }
-};
-
void MarkedSpace::shrink()
{
- Free freeOrShrink(Free::FreeOrShrink, this);
+ FreeOrShrink freeOrShrink(*this);
forEachBlock(freeOrShrink);
}
+static void clearNewlyAllocatedInBlock(MarkedBlock* block)
+{
+ if (!block)
+ return;
+ block->clearNewlyAllocated();
+}
+
+struct ClearNewlyAllocated : MarkedBlock::VoidFunctor {
+ void operator()(MarkedBlock* block) { block->clearNewlyAllocated(); }
+};
+
+#ifndef NDEBUG
+struct VerifyNewlyAllocated : MarkedBlock::VoidFunctor {
+ void operator()(MarkedBlock* block) { ASSERT(!block->clearNewlyAllocated()); }
+};
+#endif
+
+void MarkedSpace::clearNewlyAllocated()
+{
+ for (size_t i = 0; i < preciseCount; ++i) {
+ clearNewlyAllocatedInBlock(m_normalSpace.preciseAllocators[i].takeLastActiveBlock());
+ clearNewlyAllocatedInBlock(m_destructorSpace.preciseAllocators[i].takeLastActiveBlock());
+ }
+
+ for (size_t i = 0; i < impreciseCount; ++i) {
+ clearNewlyAllocatedInBlock(m_normalSpace.impreciseAllocators[i].takeLastActiveBlock());
+ clearNewlyAllocatedInBlock(m_destructorSpace.impreciseAllocators[i].takeLastActiveBlock());
+ }
+
+ // We have to iterate all of the blocks in the large allocators because they are
+ // canonicalized as they are used up (see MarkedAllocator::tryAllocateHelper)
+ // which creates the m_newlyAllocated bitmap.
+ ClearNewlyAllocated functor;
+ m_normalSpace.largeAllocator.forEachBlock(functor);
+ m_destructorSpace.largeAllocator.forEachBlock(functor);
+
+#ifndef NDEBUG
+ VerifyNewlyAllocated verifyFunctor;
+ forEachBlock(verifyFunctor);
+#endif
+}
+
+#ifndef NDEBUG
+struct VerifyMarkedOrRetired : MarkedBlock::VoidFunctor {
+ void operator()(MarkedBlock* block)
+ {
+ switch (block->m_state) {
+ case MarkedBlock::Marked:
+ case MarkedBlock::Retired:
+ return;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+};
+#endif
+
+void MarkedSpace::clearMarks()
+{
+ if (m_heap->operationInProgress() == EdenCollection) {
+ for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
+ m_blocksWithNewObjects[i]->clearMarks();
+ } else
+ forEachBlock<ClearMarks>();
+
+#ifndef NDEBUG
+ VerifyMarkedOrRetired verifyFunctor;
+ forEachBlock(verifyFunctor);
+#endif
+}
+
+void MarkedSpace::willStartIterating()
+{
+ ASSERT(!isIterating());
+ stopAllocating();
+ m_isIterating = true;
+}
+
+void MarkedSpace::didFinishIterating()
+{
+ ASSERT(isIterating());
+ resumeAllocating();
+ m_isIterating = false;
+}
+
} // namespace JSC
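
clearMarks(), visitWeakSets() and reapWeakSets() above now branch on the collection type: an EdenCollection only touches the blocks recorded in m_blocksWithNewObjects, while a FullCollection still walks every block. A rough, self-contained sketch of that dispatch (illustrative names, not the real MarkedSpace API):

#include <cstdio>
#include <vector>

enum class HeapOperation { EdenCollection, FullCollection };

struct Block { bool marksCleared = false; };

struct Space {
    std::vector<Block*> allBlocks;
    std::vector<Block*> blocksWithNewObjects; // appended to as blocks hand out new cells

    void clearMarks(HeapOperation operation)
    {
        const std::vector<Block*>& targets =
            operation == HeapOperation::EdenCollection ? blocksWithNewObjects : allBlocks;
        for (Block* block : targets)
            block->marksCleared = true;
    }
};

int main()
{
    Block a, b, c;
    Space space { { &a, &b, &c }, { &b } };
    space.clearMarks(HeapOperation::EdenCollection);
    std::printf("after eden: a=%d b=%d c=%d\n", a.marksCleared, b.marksCleared, c.marksCleared);
}
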
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.h b/Source/JavaScriptCore/heap/MarkedSpace.h
index 278f1cc98..c1571e7ac 100644
--- a/Source/JavaScriptCore/heap/MarkedSpace.h
+++ b/Source/JavaScriptCore/heap/MarkedSpace.h
@@ -22,35 +22,40 @@
#ifndef MarkedSpace_h
#define MarkedSpace_h
-#include "MachineStackMarker.h"
#include "MarkedAllocator.h"
#include "MarkedBlock.h"
#include "MarkedBlockSet.h"
-#include <wtf/PageAllocationAligned.h>
-#include <wtf/Bitmap.h>
-#include <wtf/DoublyLinkedList.h>
-#include <wtf/FixedArray.h>
+#include <array>
#include <wtf/HashSet.h>
#include <wtf/Noncopyable.h>
+#include <wtf/RetainPtr.h>
#include <wtf/Vector.h>
namespace JSC {
class Heap;
-class JSCell;
-class LiveObjectIterator;
+class HeapIterationScope;
class LLIntOffsetsExtractor;
-class WeakGCHandle;
-class SlotVisitor;
struct ClearMarks : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->clearMarks(); }
+ void operator()(MarkedBlock* block)
+ {
+ block->clearMarks();
+ }
};
struct Sweep : MarkedBlock::VoidFunctor {
void operator()(MarkedBlock* block) { block->sweep(); }
};
+struct ZombifySweep : MarkedBlock::VoidFunctor {
+ void operator()(MarkedBlock* block)
+ {
+ if (block->needsSweeping())
+ block->sweep();
+ }
+};
+
struct MarkCount : MarkedBlock::CountFunctor {
void operator()(MarkedBlock* block) { count(block->markCount()); }
};
@@ -59,115 +64,130 @@ struct Size : MarkedBlock::CountFunctor {
void operator()(MarkedBlock* block) { count(block->markCount() * block->cellSize()); }
};
-struct Capacity : MarkedBlock::CountFunctor {
- void operator()(MarkedBlock* block) { count(block->capacity()); }
-};
-
class MarkedSpace {
WTF_MAKE_NONCOPYABLE(MarkedSpace);
public:
+ // [ 16 ... 768 ]
+ static const size_t preciseStep = MarkedBlock::atomSize;
+ static const size_t preciseCutoff = 768;
+ static const size_t preciseCount = preciseCutoff / preciseStep;
+
+ // [ 1024 ... blockSize/2 ]
+ static const size_t impreciseStart = 1024;
+ static const size_t impreciseStep = 256;
+ static const size_t impreciseCutoff = MarkedBlock::blockSize / 2;
+ static const size_t impreciseCount = impreciseCutoff / impreciseStep;
+
+ struct Subspace {
+ std::array<MarkedAllocator, preciseCount> preciseAllocators;
+ std::array<MarkedAllocator, impreciseCount> impreciseAllocators;
+ MarkedAllocator largeAllocator;
+ };
+
MarkedSpace(Heap*);
~MarkedSpace();
void lastChanceToFinalize();
- MarkedAllocator& firstAllocator();
MarkedAllocator& allocatorFor(size_t);
- MarkedAllocator& immortalStructureDestructorAllocatorFor(size_t);
- MarkedAllocator& normalDestructorAllocatorFor(size_t);
- void* allocateWithNormalDestructor(size_t);
- void* allocateWithImmortalStructureDestructor(size_t);
+ MarkedAllocator& destructorAllocatorFor(size_t);
+ void* allocateWithDestructor(size_t);
void* allocateWithoutDestructor(size_t);
-
+
+ Subspace& subspaceForObjectsWithDestructor() { return m_destructorSpace; }
+ Subspace& subspaceForObjectsWithoutDestructor() { return m_normalSpace; }
+
void resetAllocators();
void visitWeakSets(HeapRootVisitor&);
void reapWeakSets();
MarkedBlockSet& blocks() { return m_blocks; }
-
- void canonicalizeCellLivenessData();
+
+ void willStartIterating();
+ bool isIterating() const { return m_isIterating; }
+ void didFinishIterating();
+
+ void stopAllocating();
+ void resumeAllocating(); // If we just stopped allocation but we didn't do a collection, we need to resume allocation.
typedef HashSet<MarkedBlock*>::iterator BlockIterator;
-
- template<typename Functor> typename Functor::ReturnType forEachLiveCell(Functor&);
- template<typename Functor> typename Functor::ReturnType forEachLiveCell();
- template<typename Functor> typename Functor::ReturnType forEachDeadCell(Functor&);
- template<typename Functor> typename Functor::ReturnType forEachDeadCell();
+
+ template<typename Functor> typename Functor::ReturnType forEachLiveCell(HeapIterationScope&, Functor&);
+ template<typename Functor> typename Functor::ReturnType forEachLiveCell(HeapIterationScope&);
+ template<typename Functor> typename Functor::ReturnType forEachDeadCell(HeapIterationScope&, Functor&);
+ template<typename Functor> typename Functor::ReturnType forEachDeadCell(HeapIterationScope&);
template<typename Functor> typename Functor::ReturnType forEachBlock(Functor&);
template<typename Functor> typename Functor::ReturnType forEachBlock();
-
+
void shrink();
void freeBlock(MarkedBlock*);
void freeOrShrinkBlock(MarkedBlock*);
void didAddBlock(MarkedBlock*);
void didConsumeFreeList(MarkedBlock*);
+ void didAllocateInBlock(MarkedBlock*);
void clearMarks();
+ void clearNewlyAllocated();
void sweep();
+ void zombifySweep();
size_t objectCount();
size_t size();
size_t capacity();
bool isPagedOut(double deadline);
+ const Vector<MarkedBlock*>& blocksWithNewObjects() const { return m_blocksWithNewObjects; }
+
private:
friend class LLIntOffsetsExtractor;
+ friend class JIT;
- // [ 32... 128 ]
- static const size_t preciseStep = MarkedBlock::atomSize;
- static const size_t preciseCutoff = 128;
- static const size_t preciseCount = preciseCutoff / preciseStep;
-
- // [ 1024... blockSize ]
- static const size_t impreciseStep = 2 * preciseCutoff;
- static const size_t impreciseCutoff = MarkedBlock::blockSize / 2;
- static const size_t impreciseCount = impreciseCutoff / impreciseStep;
-
- struct Subspace {
- FixedArray<MarkedAllocator, preciseCount> preciseAllocators;
- FixedArray<MarkedAllocator, impreciseCount> impreciseAllocators;
- MarkedAllocator largeAllocator;
- };
+ template<typename Functor> void forEachAllocator(Functor&);
+ template<typename Functor> void forEachAllocator();
- Subspace m_normalDestructorSpace;
- Subspace m_immortalStructureDestructorSpace;
+ Subspace m_destructorSpace;
Subspace m_normalSpace;
Heap* m_heap;
+ size_t m_capacity;
+ bool m_isIterating;
MarkedBlockSet m_blocks;
+ Vector<MarkedBlock*> m_blocksWithNewObjects;
};
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachLiveCell(Functor& functor)
+template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachLiveCell(HeapIterationScope&, Functor& functor)
{
- canonicalizeCellLivenessData();
-
+ ASSERT(isIterating());
BlockIterator end = m_blocks.set().end();
- for (BlockIterator it = m_blocks.set().begin(); it != end; ++it)
- (*it)->forEachLiveCell(functor);
+ for (BlockIterator it = m_blocks.set().begin(); it != end; ++it) {
+ if ((*it)->forEachLiveCell(functor) == IterationStatus::Done)
+ break;
+ }
return functor.returnValue();
}
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachLiveCell()
+template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachLiveCell(HeapIterationScope& scope)
{
Functor functor;
- return forEachLiveCell(functor);
+ return forEachLiveCell(scope, functor);
}
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachDeadCell(Functor& functor)
+template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachDeadCell(HeapIterationScope&, Functor& functor)
{
- canonicalizeCellLivenessData();
-
+ ASSERT(isIterating());
BlockIterator end = m_blocks.set().end();
- for (BlockIterator it = m_blocks.set().begin(); it != end; ++it)
- (*it)->forEachDeadCell(functor);
+ for (BlockIterator it = m_blocks.set().begin(); it != end; ++it) {
+ if ((*it)->forEachDeadCell(functor) == IterationStatus::Done)
+ break;
+ }
return functor.returnValue();
}
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachDeadCell()
+template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachDeadCell(HeapIterationScope& scope)
{
Functor functor;
- return forEachDeadCell(functor);
+ return forEachDeadCell(scope, functor);
}
inline MarkedAllocator& MarkedSpace::allocatorFor(size_t bytes)
@@ -180,24 +200,14 @@ inline MarkedAllocator& MarkedSpace::allocatorFor(size_t bytes)
return m_normalSpace.largeAllocator;
}
-inline MarkedAllocator& MarkedSpace::immortalStructureDestructorAllocatorFor(size_t bytes)
+inline MarkedAllocator& MarkedSpace::destructorAllocatorFor(size_t bytes)
{
ASSERT(bytes);
if (bytes <= preciseCutoff)
- return m_immortalStructureDestructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
+ return m_destructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
if (bytes <= impreciseCutoff)
- return m_immortalStructureDestructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
- return m_immortalStructureDestructorSpace.largeAllocator;
-}
-
-inline MarkedAllocator& MarkedSpace::normalDestructorAllocatorFor(size_t bytes)
-{
- ASSERT(bytes);
- if (bytes <= preciseCutoff)
- return m_normalDestructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
- if (bytes <= impreciseCutoff)
- return m_normalDestructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
- return m_normalDestructorSpace.largeAllocator;
+ return m_destructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
+ return m_destructorSpace.largeAllocator;
}
inline void* MarkedSpace::allocateWithoutDestructor(size_t bytes)
@@ -205,33 +215,24 @@ inline void* MarkedSpace::allocateWithoutDestructor(size_t bytes)
return allocatorFor(bytes).allocate(bytes);
}
-inline void* MarkedSpace::allocateWithImmortalStructureDestructor(size_t bytes)
-{
- return immortalStructureDestructorAllocatorFor(bytes).allocate(bytes);
-}
-
-inline void* MarkedSpace::allocateWithNormalDestructor(size_t bytes)
+inline void* MarkedSpace::allocateWithDestructor(size_t bytes)
{
- return normalDestructorAllocatorFor(bytes).allocate(bytes);
+ return destructorAllocatorFor(bytes).allocate(bytes);
}
template <typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachBlock(Functor& functor)
{
- for (size_t i = 0; i < preciseCount; ++i) {
+ for (size_t i = 0; i < preciseCount; ++i)
m_normalSpace.preciseAllocators[i].forEachBlock(functor);
- m_normalDestructorSpace.preciseAllocators[i].forEachBlock(functor);
- m_immortalStructureDestructorSpace.preciseAllocators[i].forEachBlock(functor);
- }
-
- for (size_t i = 0; i < impreciseCount; ++i) {
+ for (size_t i = 0; i < impreciseCount; ++i)
m_normalSpace.impreciseAllocators[i].forEachBlock(functor);
- m_normalDestructorSpace.impreciseAllocators[i].forEachBlock(functor);
- m_immortalStructureDestructorSpace.impreciseAllocators[i].forEachBlock(functor);
- }
-
m_normalSpace.largeAllocator.forEachBlock(functor);
- m_normalDestructorSpace.largeAllocator.forEachBlock(functor);
- m_immortalStructureDestructorSpace.largeAllocator.forEachBlock(functor);
+
+ for (size_t i = 0; i < preciseCount; ++i)
+ m_destructorSpace.preciseAllocators[i].forEachBlock(functor);
+ for (size_t i = 0; i < impreciseCount; ++i)
+ m_destructorSpace.impreciseAllocators[i].forEachBlock(functor);
+ m_destructorSpace.largeAllocator.forEachBlock(functor);
return functor.returnValue();
}
@@ -244,12 +245,13 @@ template <typename Functor> inline typename Functor::ReturnType MarkedSpace::for
inline void MarkedSpace::didAddBlock(MarkedBlock* block)
{
+ m_capacity += block->capacity();
m_blocks.add(block);
}
-inline void MarkedSpace::clearMarks()
+inline void MarkedSpace::didAllocateInBlock(MarkedBlock* block)
{
- forEachBlock<ClearMarks>();
+ m_blocksWithNewObjects.append(block);
}
inline size_t MarkedSpace::objectCount()
@@ -264,7 +266,7 @@ inline size_t MarkedSpace::size()
inline size_t MarkedSpace::capacity()
{
- return forEachBlock<Capacity>();
+ return m_capacity;
}
} // namespace JSC
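
The size classes declared above changed shape: precise classes now run from 16 to 768 bytes in atom-sized steps, imprecise classes from 1024 bytes to half a block in 256-byte steps, and anything larger goes to the large allocator. A small standalone program showing how a request size maps onto an allocator index under those constants (indexing copied from the inline allocatorFor functions, printing added purely for illustration):

#include <cstddef>
#include <cstdio>

static const std::size_t atomSize = 16;
static const std::size_t blockSize = 16 * 1024;
static const std::size_t preciseStep = atomSize;
static const std::size_t preciseCutoff = 768;
static const std::size_t impreciseStep = 256;
static const std::size_t impreciseCutoff = blockSize / 2;

static void describeAllocatorFor(std::size_t bytes)
{
    if (bytes <= preciseCutoff)
        std::printf("%zu bytes -> precise allocator #%zu\n", bytes, (bytes - 1) / preciseStep);
    else if (bytes <= impreciseCutoff)
        std::printf("%zu bytes -> imprecise allocator #%zu\n", bytes, (bytes - 1) / impreciseStep);
    else
        std::printf("%zu bytes -> large allocator\n", bytes);
}

int main()
{
    describeAllocatorFor(24);   // (24 - 1) / 16 = 1: second precise class, i.e. 32-byte cells
    describeAllocatorFor(768);  // last precise class
    describeAllocatorFor(1024); // first imprecise request size
    describeAllocatorFor(9000); // larger than blockSize / 2, so the large allocator
}
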
diff --git a/Source/JavaScriptCore/heap/OpaqueRootSet.h b/Source/JavaScriptCore/heap/OpaqueRootSet.h
new file mode 100644
index 000000000..a08bdec04
--- /dev/null
+++ b/Source/JavaScriptCore/heap/OpaqueRootSet.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef OpaqueRootSet_h
+#define OpaqueRootSet_h
+
+#include <wtf/HashSet.h>
+
+namespace JSC {
+
+class OpaqueRootSet {
+ WTF_MAKE_NONCOPYABLE(OpaqueRootSet);
+public:
+ OpaqueRootSet()
+ : m_lastQueriedRoot(nullptr)
+ , m_containsLastQueriedRoot(false)
+ {
+ }
+
+ bool contains(void* root) const
+ {
+ if (root != m_lastQueriedRoot) {
+ m_lastQueriedRoot = root;
+ m_containsLastQueriedRoot = m_roots.contains(root);
+ }
+ return m_containsLastQueriedRoot;
+ }
+
+ bool isEmpty() const
+ {
+ return m_roots.isEmpty();
+ }
+
+ void clear()
+ {
+ m_roots.clear();
+ m_lastQueriedRoot = nullptr;
+ m_containsLastQueriedRoot = false;
+ }
+
+ void add(void* root)
+ {
+ if (root == m_lastQueriedRoot)
+ m_containsLastQueriedRoot = true;
+ m_roots.add(root);
+ }
+
+ int size() const
+ {
+ return m_roots.size();
+ }
+
+ HashSet<void*>::const_iterator begin() const
+ {
+ return m_roots.begin();
+ }
+
+ HashSet<void*>::const_iterator end() const
+ {
+ return m_roots.end();
+ }
+
+
+private:
+ HashSet<void*> m_roots;
+ mutable void* m_lastQueriedRoot;
+ mutable bool m_containsLastQueriedRoot;
+};
+
+} // namespace JSC
+
+#endif // OpaqueRootSet_h
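
The new OpaqueRootSet wraps a HashSet with a one-entry query cache: contains() remembers the last root asked about, and add() keeps that cached answer coherent. A standalone approximation using std::unordered_set (the lookup counter is added here only to make the caching observable; it is not part of the class above):

#include <cstdio>
#include <unordered_set>

class OpaqueRootSetModel {
public:
    void add(void* root)
    {
        if (root == m_lastQueriedRoot)
            m_containsLastQueriedRoot = true; // keep the cached answer coherent
        m_roots.insert(root);
    }

    bool contains(void* root) const
    {
        if (root != m_lastQueriedRoot) {
            ++m_hashLookups; // only a cache miss pays for a hash lookup
            m_lastQueriedRoot = root;
            m_containsLastQueriedRoot = m_roots.count(root) != 0;
        }
        return m_containsLastQueriedRoot;
    }

    unsigned hashLookups() const { return m_hashLookups; }

private:
    std::unordered_set<void*> m_roots;
    mutable void* m_lastQueriedRoot = nullptr;
    mutable bool m_containsLastQueriedRoot = false;
    mutable unsigned m_hashLookups = 0;
};

int main()
{
    OpaqueRootSetModel set;
    int root = 0;
    set.add(&root);
    for (int i = 0; i < 5; ++i)
        set.contains(&root);
    std::printf("5 queries answered with %u hash lookups\n", set.hashLookups()); // prints 1
}
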
diff --git a/Source/JavaScriptCore/heap/PassWeak.h b/Source/JavaScriptCore/heap/PassWeak.h
deleted file mode 100644
index 506a63970..000000000
--- a/Source/JavaScriptCore/heap/PassWeak.h
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PassWeak_h
-#define PassWeak_h
-
-#include "JSCell.h"
-#include "WeakSetInlines.h"
-#include <wtf/Assertions.h>
-#include <wtf/NullPtr.h>
-#include <wtf/TypeTraits.h>
-
-namespace JSC {
-
-template<typename T> class PassWeak;
-template<typename T> PassWeak<T> adoptWeak(WeakImpl*);
-
-template<typename T> class PassWeak {
-public:
- PassWeak();
- PassWeak(std::nullptr_t);
- PassWeak(T*, WeakHandleOwner* = 0, void* context = 0);
-
- // It somewhat breaks the type system to allow transfer of ownership out of
- // a const PassWeak. However, it makes it much easier to work with PassWeak
- // temporaries, and we don't have a need to use real const PassWeaks anyway.
- PassWeak(const PassWeak&);
- template<typename U> PassWeak(const PassWeak<U>&);
-
- ~PassWeak();
-
- T* operator->() const;
- T& operator*() const;
- T* get() const;
-
- bool operator!() const;
-
- // This conversion operator allows implicit conversion to bool but not to other integer types.
- typedef void* (PassWeak::*UnspecifiedBoolType);
- operator UnspecifiedBoolType*() const;
-
- WeakImpl* leakImpl() const WARN_UNUSED_RETURN;
-
-private:
- friend PassWeak adoptWeak<T>(WeakImpl*);
- explicit PassWeak(WeakImpl*);
-
- WeakImpl* m_impl;
-};
-
-template<typename T> inline PassWeak<T>::PassWeak()
- : m_impl(0)
-{
-}
-
-template<typename T> inline PassWeak<T>::PassWeak(std::nullptr_t)
- : m_impl(0)
-{
-}
-
-template<typename T> inline PassWeak<T>::PassWeak(T* cell, WeakHandleOwner* weakOwner, void* context)
- : m_impl(cell ? WeakSet::allocate(cell, weakOwner, context) : 0)
-{
-}
-
-template<typename T> inline PassWeak<T>::PassWeak(const PassWeak& o)
- : m_impl(o.leakImpl())
-{
-}
-
-template<typename T> template<typename U> inline PassWeak<T>::PassWeak(const PassWeak<U>& o)
- : m_impl(o.leakImpl())
-{
-}
-
-template<typename T> inline PassWeak<T>::~PassWeak()
-{
- if (!m_impl)
- return;
- WeakSet::deallocate(m_impl);
-}
-
-template<typename T> inline T* PassWeak<T>::operator->() const
-{
- ASSERT(m_impl && m_impl->state() == WeakImpl::Live);
- return jsCast<T*>(m_impl->jsValue().asCell());
-}
-
-template<typename T> inline T& PassWeak<T>::operator*() const
-{
- ASSERT(m_impl && m_impl->state() == WeakImpl::Live);
- return *jsCast<T*>(m_impl->jsValue().asCell());
-}
-
-template<typename T> inline T* PassWeak<T>::get() const
-{
- if (!m_impl || m_impl->state() != WeakImpl::Live)
- return 0;
- return jsCast<T*>(m_impl->jsValue().asCell());
-}
-
-template<typename T> inline bool PassWeak<T>::operator!() const
-{
- return !m_impl || m_impl->state() != WeakImpl::Live || !m_impl->jsValue();
-}
-
-template<typename T> inline PassWeak<T>::operator UnspecifiedBoolType*() const
-{
- return reinterpret_cast<UnspecifiedBoolType*>(!!*this);
-}
-
-template<typename T> inline PassWeak<T>::PassWeak(WeakImpl* impl)
-: m_impl(impl)
-{
-}
-
-template<typename T> inline WeakImpl* PassWeak<T>::leakImpl() const
-{
- WeakImpl* tmp = 0;
- std::swap(tmp, const_cast<WeakImpl*&>(m_impl));
- return tmp;
-}
-
-template<typename T> PassWeak<T> inline adoptWeak(WeakImpl* impl)
-{
- return PassWeak<T>(impl);
-}
-
-template<typename T, typename U> inline bool operator==(const PassWeak<T>& a, const PassWeak<U>& b)
-{
- return a.get() == b.get();
-}
-
-template<typename T, typename U> inline bool operator==(const PassWeak<T>& a, const Weak<U>& b)
-{
- return a.get() == b.get();
-}
-
-template<typename T, typename U> inline bool operator==(const Weak<T>& a, const PassWeak<U>& b)
-{
- return a.get() == b.get();
-}
-
-template<typename T, typename U> inline bool operator==(const PassWeak<T>& a, U* b)
-{
- return a.get() == b;
-}
-
-template<typename T, typename U> inline bool operator==(T* a, const PassWeak<U>& b)
-{
- return a == b.get();
-}
-
-template<typename T, typename U> inline bool operator!=(const PassWeak<T>& a, const PassWeak<U>& b)
-{
- return a.get() != b.get();
-}
-
-template<typename T, typename U> inline bool operator!=(const PassWeak<T>& a, const Weak<U>& b)
-{
- return a.get() != b.get();
-}
-
-template<typename T, typename U> inline bool operator!=(const Weak<T>& a, const PassWeak<U>& b)
-{
- return a.get() != b.get();
-}
-
-template<typename T, typename U> inline bool operator!=(const PassWeak<T>& a, U* b)
-{
- return a.get() != b;
-}
-
-template<typename T, typename U> inline bool operator!=(T* a, const PassWeak<U>& b)
-{
- return a != b.get();
-}
-
-} // namespace JSC
-
-#endif // PassWeak_h
diff --git a/Source/JavaScriptCore/heap/Region.h b/Source/JavaScriptCore/heap/Region.h
deleted file mode 100644
index 366f25643..000000000
--- a/Source/JavaScriptCore/heap/Region.h
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JSC_Region_h
-#define JSC_Region_h
-
-#include "HeapBlock.h"
-#include "SuperRegion.h"
-#include <wtf/DoublyLinkedList.h>
-#include <wtf/MetaAllocatorHandle.h>
-#include <wtf/PageAllocationAligned.h>
-
-#define HEAP_MEMORY_ID reinterpret_cast<void*>(static_cast<intptr_t>(-3))
-
-#define ENABLE_SUPER_REGION 0
-
-#ifndef ENABLE_SUPER_REGION
-#if USE(JSVALUE64)
-#define ENABLE_SUPER_REGION 1
-#else
-#define ENABLE_SUPER_REGION 0
-#endif
-#endif
-
-namespace JSC {
-
-class DeadBlock : public HeapBlock<DeadBlock> {
-public:
- DeadBlock(Region*);
-};
-
-inline DeadBlock::DeadBlock(Region* region)
- : HeapBlock<DeadBlock>(region)
-{
-}
-
-class Region : public DoublyLinkedListNode<Region> {
- friend CLASS_IF_GCC DoublyLinkedListNode<Region>;
- friend class BlockAllocator;
-public:
- ~Region();
- static Region* create(SuperRegion*, size_t blockSize);
- static Region* createCustomSize(SuperRegion*, size_t blockSize, size_t blockAlignment);
- Region* reset(size_t blockSize);
- void destroy();
-
- size_t blockSize() const { return m_blockSize; }
- bool isFull() const { return m_blocksInUse == m_totalBlocks; }
- bool isEmpty() const { return !m_blocksInUse; }
- bool isCustomSize() const { return m_isCustomSize; }
-
- DeadBlock* allocate();
- void deallocate(void*);
-
- static const size_t s_regionSize = 64 * KB;
- static const size_t s_regionMask = ~(s_regionSize - 1);
-
-protected:
- Region(size_t blockSize, size_t totalBlocks, bool isExcess);
- void initializeBlockList();
-
- bool m_isExcess;
-
-private:
- void* base();
- size_t size();
-
- size_t m_totalBlocks;
- size_t m_blocksInUse;
- size_t m_blockSize;
- bool m_isCustomSize;
- Region* m_prev;
- Region* m_next;
- DoublyLinkedList<DeadBlock> m_deadBlocks;
-};
-
-
-class NormalRegion : public Region {
- friend class Region;
-private:
- NormalRegion(PassRefPtr<WTF::MetaAllocatorHandle>, size_t blockSize, size_t totalBlocks);
-
- static NormalRegion* tryCreate(SuperRegion*, size_t blockSize);
- static NormalRegion* tryCreateCustomSize(SuperRegion*, size_t blockSize, size_t blockAlignment);
-
- void* base() { return m_allocation->start(); }
- size_t size() { return m_allocation->sizeInBytes(); }
-
- NormalRegion* reset(size_t blockSize);
-
- RefPtr<WTF::MetaAllocatorHandle> m_allocation;
-};
-
-class ExcessRegion : public Region {
- friend class Region;
-private:
- ExcessRegion(PageAllocationAligned&, size_t blockSize, size_t totalBlocks);
-
- ~ExcessRegion();
-
- static ExcessRegion* create(size_t blockSize);
- static ExcessRegion* createCustomSize(size_t blockSize, size_t blockAlignment);
-
- void* base() { return m_allocation.base(); }
- size_t size() { return m_allocation.size(); }
-
- ExcessRegion* reset(size_t blockSize);
-
- PageAllocationAligned m_allocation;
-};
-
-inline NormalRegion::NormalRegion(PassRefPtr<WTF::MetaAllocatorHandle> allocation, size_t blockSize, size_t totalBlocks)
- : Region(blockSize, totalBlocks, false)
- , m_allocation(allocation)
-{
- initializeBlockList();
-}
-
-inline NormalRegion* NormalRegion::tryCreate(SuperRegion* superRegion, size_t blockSize)
-{
- RefPtr<WTF::MetaAllocatorHandle> allocation = superRegion->allocate(s_regionSize, HEAP_MEMORY_ID);
- if (!allocation)
- return 0;
- return new NormalRegion(allocation, blockSize, s_regionSize / blockSize);
-}
-
-inline NormalRegion* NormalRegion::tryCreateCustomSize(SuperRegion* superRegion, size_t blockSize, size_t blockAlignment)
-{
- ASSERT_UNUSED(blockAlignment, blockAlignment <= s_regionSize);
- RefPtr<WTF::MetaAllocatorHandle> allocation = superRegion->allocate(blockSize, HEAP_MEMORY_ID);
- if (!allocation)
- return 0;
- return new NormalRegion(allocation, blockSize, 1);
-}
-
-inline NormalRegion* NormalRegion::reset(size_t blockSize)
-{
- ASSERT(!m_isExcess);
- RefPtr<WTF::MetaAllocatorHandle> allocation = m_allocation.release();
- return new (NotNull, this) NormalRegion(allocation.release(), blockSize, s_regionSize / blockSize);
-}
-
-inline ExcessRegion::ExcessRegion(PageAllocationAligned& allocation, size_t blockSize, size_t totalBlocks)
- : Region(blockSize, totalBlocks, true)
- , m_allocation(allocation)
-{
- initializeBlockList();
-}
-
-inline ExcessRegion::~ExcessRegion()
-{
- m_allocation.deallocate();
-}
-
-inline ExcessRegion* ExcessRegion::create(size_t blockSize)
-{
- PageAllocationAligned allocation = PageAllocationAligned::allocate(s_regionSize, s_regionSize, OSAllocator::JSGCHeapPages);
- ASSERT(static_cast<bool>(allocation));
- return new ExcessRegion(allocation, blockSize, s_regionSize / blockSize);
-}
-
-inline ExcessRegion* ExcessRegion::createCustomSize(size_t blockSize, size_t blockAlignment)
-{
- PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockAlignment, OSAllocator::JSGCHeapPages);
- ASSERT(static_cast<bool>(allocation));
- return new ExcessRegion(allocation, blockSize, 1);
-}
-
-inline ExcessRegion* ExcessRegion::reset(size_t blockSize)
-{
- ASSERT(m_isExcess);
- PageAllocationAligned allocation = m_allocation;
- return new (NotNull, this) ExcessRegion(allocation, blockSize, s_regionSize / blockSize);
-}
-
-inline Region::Region(size_t blockSize, size_t totalBlocks, bool isExcess)
- : DoublyLinkedListNode<Region>()
- , m_isExcess(isExcess)
- , m_totalBlocks(totalBlocks)
- , m_blocksInUse(0)
- , m_blockSize(blockSize)
- , m_isCustomSize(false)
- , m_prev(0)
- , m_next(0)
-{
-}
-
-inline void Region::initializeBlockList()
-{
- char* start = static_cast<char*>(base());
- char* current = start;
- for (size_t i = 0; i < m_totalBlocks; i++) {
- ASSERT(current < start + size());
- m_deadBlocks.append(new (NotNull, current) DeadBlock(this));
- current += m_blockSize;
- }
-}
-
-inline Region* Region::create(SuperRegion* superRegion, size_t blockSize)
-{
-#if ENABLE(SUPER_REGION)
- ASSERT(blockSize <= s_regionSize);
- ASSERT(!(s_regionSize % blockSize));
- Region* region = NormalRegion::tryCreate(superRegion, blockSize);
- if (LIKELY(!!region))
- return region;
-#else
- UNUSED_PARAM(superRegion);
-#endif
- return ExcessRegion::create(blockSize);
-}
-
-inline Region* Region::createCustomSize(SuperRegion* superRegion, size_t blockSize, size_t blockAlignment)
-{
-#if ENABLE(SUPER_REGION)
- Region* region = NormalRegion::tryCreateCustomSize(superRegion, blockSize, blockAlignment);
- if (UNLIKELY(!region))
- region = ExcessRegion::createCustomSize(blockSize, blockAlignment);
-#else
- UNUSED_PARAM(superRegion);
- Region* region = ExcessRegion::createCustomSize(blockSize, blockAlignment);
-#endif
- region->m_isCustomSize = true;
- return region;
-}
-
-inline Region::~Region()
-{
- ASSERT(isEmpty());
-}
-
-inline void Region::destroy()
-{
-#if ENABLE(SUPER_REGION)
- if (UNLIKELY(m_isExcess))
- delete static_cast<ExcessRegion*>(this);
- else
- delete static_cast<NormalRegion*>(this);
-#else
- delete static_cast<ExcessRegion*>(this);
-#endif
-}
-
-inline Region* Region::reset(size_t blockSize)
-{
-#if ENABLE(SUPER_REGION)
- ASSERT(isEmpty());
- if (UNLIKELY(m_isExcess))
- return static_cast<ExcessRegion*>(this)->reset(blockSize);
- return static_cast<NormalRegion*>(this)->reset(blockSize);
-#else
- return static_cast<ExcessRegion*>(this)->reset(blockSize);
-#endif
-}
-
-inline DeadBlock* Region::allocate()
-{
- ASSERT(!isFull());
- m_blocksInUse++;
- return m_deadBlocks.removeHead();
-}
-
-inline void Region::deallocate(void* base)
-{
- ASSERT(base);
- ASSERT(m_blocksInUse);
- ASSERT(base >= this->base() && base < static_cast<char*>(this->base()) + size());
- DeadBlock* block = new (NotNull, base) DeadBlock(this);
- m_deadBlocks.push(block);
- m_blocksInUse--;
-}
-
-inline void* Region::base()
-{
-#if ENABLE(SUPER_REGION)
- if (UNLIKELY(m_isExcess))
- return static_cast<ExcessRegion*>(this)->ExcessRegion::base();
- return static_cast<NormalRegion*>(this)->NormalRegion::base();
-#else
- return static_cast<ExcessRegion*>(this)->ExcessRegion::base();
-#endif
-}
-
-inline size_t Region::size()
-{
-#if ENABLE(SUPER_REGION)
- if (UNLIKELY(m_isExcess))
- return static_cast<ExcessRegion*>(this)->ExcessRegion::size();
- return static_cast<NormalRegion*>(this)->NormalRegion::size();
-#else
- return static_cast<ExcessRegion*>(this)->ExcessRegion::size();
-#endif
-}
-
-} // namespace JSC
-
-#endif // JSC_Region_h
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.cpp b/Source/JavaScriptCore/heap/SlotVisitor.cpp
index 6c2ded09d..bb20aaea9 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.cpp
+++ b/Source/JavaScriptCore/heap/SlotVisitor.cpp
@@ -1,27 +1,83 @@
+/*
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#include "config.h"
#include "SlotVisitor.h"
#include "SlotVisitorInlines.h"
#include "ConservativeRoots.h"
+#include "CopiedBlockInlines.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
-#include "GCThread.h"
#include "JSArray.h"
#include "JSDestructibleObject.h"
#include "VM.h"
#include "JSObject.h"
#include "JSString.h"
-#include "Operations.h"
-#include <wtf/StackStats.h>
+#include "JSCInlines.h"
+#include <wtf/Lock.h>
namespace JSC {
-SlotVisitor::SlotVisitor(GCThreadSharedData& shared)
- : m_stack(shared.m_vm->heap.blockAllocator())
+#if ENABLE(GC_VALIDATION)
+static void validate(JSCell* cell)
+{
+ RELEASE_ASSERT(cell);
+
+ if (!cell->structure()) {
+ dataLogF("cell at %p has a null structure\n" , cell);
+ CRASH();
+ }
+
+ // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
+ // I hate this sentence.
+ if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
+ const char* parentClassName = 0;
+ const char* ourClassName = 0;
+ if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
+ parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
+ if (cell->structure()->JSCell::classInfo())
+ ourClassName = cell->structure()->JSCell::classInfo()->className;
+ dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
+ cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
+ CRASH();
+ }
+
+ // Make sure we can walk the ClassInfo chain
+ const ClassInfo* info = cell->classInfo();
+ do { } while ((info = info->parentClass));
+}
+#endif
+
+SlotVisitor::SlotVisitor(Heap& heap)
+ : m_stack()
+ , m_bytesVisited(0)
+ , m_bytesCopied(0)
, m_visitCount(0)
, m_isInParallelMode(false)
- , m_shared(shared)
- , m_shouldHashCons(false)
+ , m_heap(heap)
#if !ASSERT_DISABLED
, m_isCheckingForDefaultMarkViolation(false)
, m_isDraining(false)
@@ -31,73 +87,103 @@ SlotVisitor::SlotVisitor(GCThreadSharedData& shared)
SlotVisitor::~SlotVisitor()
{
- ASSERT(m_stack.isEmpty());
+ clearMarkStack();
}
-void SlotVisitor::setup()
+void SlotVisitor::didStartMarking()
{
- m_shared.m_shouldHashCons = m_shared.m_vm->haveEnoughNewStringsToHashCons();
- m_shouldHashCons = m_shared.m_shouldHashCons;
-#if ENABLE(PARALLEL_GC)
- for (unsigned i = 0; i < m_shared.m_gcThreads.size(); ++i)
- m_shared.m_gcThreads[i]->slotVisitor()->m_shouldHashCons = m_shared.m_shouldHashCons;
-#endif
+ if (heap()->operationInProgress() == FullCollection)
+ ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
}
void SlotVisitor::reset()
{
+ m_bytesVisited = 0;
+ m_bytesCopied = 0;
m_visitCount = 0;
ASSERT(m_stack.isEmpty());
-#if ENABLE(PARALLEL_GC)
- ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
-#else
- m_opaqueRoots.clear();
-#endif
- if (m_shouldHashCons) {
- m_uniqueStrings.clear();
- m_shouldHashCons = false;
- }
+}
+
+void SlotVisitor::clearMarkStack()
+{
+ m_stack.clear();
}
void SlotVisitor::append(ConservativeRoots& conservativeRoots)
{
- StackStats::probe();
JSCell** roots = conservativeRoots.roots();
size_t size = conservativeRoots.size();
for (size_t i = 0; i < size; ++i)
- internalAppend(roots[i]);
+ append(roots[i]);
+}
+
+void SlotVisitor::append(JSValue value)
+{
+ if (!value || !value.isCell())
+ return;
+ setMarkedAndAppendToMarkStack(value.asCell());
}
-ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
+void SlotVisitor::setMarkedAndAppendToMarkStack(JSCell* cell)
{
- StackStats::probe();
-#if ENABLE(SIMPLE_HEAP_PROFILING)
- m_visitedTypeCounts.count(cell);
+ ASSERT(!m_isCheckingForDefaultMarkViolation);
+ if (!cell)
+ return;
+
+#if ENABLE(GC_VALIDATION)
+ validate(cell);
#endif
+ if (Heap::testAndSetMarked(cell) || !cell->structure()) {
+ ASSERT(cell->structure());
+ return;
+ }
+
+ // Indicate that the object is grey and that:
+ // In case of concurrent GC: it's the first time it is grey in this GC cycle.
+ // In case of eden collection: it's a new object that became grey rather than an old remembered object.
+ cell->setCellState(CellState::NewGrey);
+
+ appendToMarkStack(cell);
+}
+
+void SlotVisitor::appendToMarkStack(JSCell* cell)
+{
ASSERT(Heap::isMarked(cell));
+ ASSERT(!cell->isZapped());
+
+ m_visitCount++;
+ m_bytesVisited += MarkedBlock::blockFor(cell)->cellSize();
+ m_stack.append(cell);
+}
+
+ALWAYS_INLINE void SlotVisitor::visitChildren(const JSCell* cell)
+{
+ ASSERT(Heap::isMarked(cell));
+
+ m_currentObjectCellStateBeforeVisiting = cell->cellState();
+ cell->setCellState(CellState::OldBlack);
if (isJSString(cell)) {
- JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
+ JSString::visitChildren(const_cast<JSCell*>(cell), *this);
return;
}
if (isJSFinalObject(cell)) {
- JSFinalObject::visitChildren(const_cast<JSCell*>(cell), visitor);
+ JSFinalObject::visitChildren(const_cast<JSCell*>(cell), *this);
return;
}
if (isJSArray(cell)) {
- JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
+ JSArray::visitChildren(const_cast<JSCell*>(cell), *this);
return;
}
- cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
+ cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), *this);
}
void SlotVisitor::donateKnownParallel()
{
- StackStats::probe();
// NOTE: Because we re-try often, we can afford to be conservative, and
// assume that donating is not profitable.
@@ -107,81 +193,50 @@ void SlotVisitor::donateKnownParallel()
// If there's already some shared work queued up, be conservative and assume
// that donating more is not profitable.
- if (m_shared.m_sharedMarkStack.size())
+ if (m_heap.m_sharedMarkStack.size())
return;
// If we're contending on the lock, be conservative and assume that another
// thread is already donating.
- MutexTryLocker locker(m_shared.m_markingLock);
- if (!locker.locked())
+ std::unique_lock<Lock> lock(m_heap.m_markingMutex, std::try_to_lock);
+ if (!lock.owns_lock())
return;
// Otherwise, assume that a thread will go idle soon, and donate.
- m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);
+ m_stack.donateSomeCellsTo(m_heap.m_sharedMarkStack);
- if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers())
- m_shared.m_markingCondition.broadcast();
+ m_heap.m_markingConditionVariable.notifyAll();
}
void SlotVisitor::drain()
{
- StackStats::probe();
ASSERT(m_isInParallelMode);
-#if ENABLE(PARALLEL_GC)
- if (Options::numberOfGCMarkers() > 1) {
- while (!m_stack.isEmpty()) {
- m_stack.refill();
- for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;)
- visitChildren(*this, m_stack.removeLast());
- donateKnownParallel();
- }
-
- mergeOpaqueRootsIfNecessary();
- return;
- }
-#endif
-
while (!m_stack.isEmpty()) {
m_stack.refill();
- while (m_stack.canRemoveLast())
- visitChildren(*this, m_stack.removeLast());
+ for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;)
+ visitChildren(m_stack.removeLast());
+ donateKnownParallel();
}
+
+ mergeOpaqueRootsIfNecessary();
}
void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
{
- StackStats::probe();
ASSERT(m_isInParallelMode);
ASSERT(Options::numberOfGCMarkers());
- bool shouldBeParallel;
-
-#if ENABLE(PARALLEL_GC)
- shouldBeParallel = Options::numberOfGCMarkers() > 1;
-#else
- ASSERT(Options::numberOfGCMarkers() == 1);
- shouldBeParallel = false;
-#endif
-
- if (!shouldBeParallel) {
- // This call should be a no-op.
- ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
- ASSERT(m_stack.isEmpty());
- ASSERT(m_shared.m_sharedMarkStack.isEmpty());
- return;
- }
-
-#if ENABLE(PARALLEL_GC)
{
- MutexLocker locker(m_shared.m_markingLock);
- m_shared.m_numberOfActiveParallelMarkers++;
+ std::lock_guard<Lock> lock(m_heap.m_markingMutex);
+ m_heap.m_numberOfActiveParallelMarkers++;
}
while (true) {
{
- MutexLocker locker(m_shared.m_markingLock);
- m_shared.m_numberOfActiveParallelMarkers--;
+ std::unique_lock<Lock> lock(m_heap.m_markingMutex);
+ m_heap.m_numberOfActiveParallelMarkers--;
+ m_heap.m_numberOfWaitingParallelMarkers++;
// How we wait differs depending on drain mode.
if (sharedDrainMode == MasterDrain) {
@@ -189,182 +244,173 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
// for us to do.
while (true) {
// Did we reach termination?
- if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
+ if (!m_heap.m_numberOfActiveParallelMarkers
+ && m_heap.m_sharedMarkStack.isEmpty()) {
// Let any sleeping slaves know it's time for them to return;
- m_shared.m_markingCondition.broadcast();
+ m_heap.m_markingConditionVariable.notifyAll();
return;
}
// Is there work to be done?
- if (!m_shared.m_sharedMarkStack.isEmpty())
+ if (!m_heap.m_sharedMarkStack.isEmpty())
break;
// Otherwise wait.
- m_shared.m_markingCondition.wait(m_shared.m_markingLock);
+ m_heap.m_markingConditionVariable.wait(lock);
}
} else {
ASSERT(sharedDrainMode == SlaveDrain);
// Did we detect termination? If so, let the master know.
- if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
- m_shared.m_markingCondition.broadcast();
-
- while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit)
- m_shared.m_markingCondition.wait(m_shared.m_markingLock);
+ if (!m_heap.m_numberOfActiveParallelMarkers
+ && m_heap.m_sharedMarkStack.isEmpty())
+ m_heap.m_markingConditionVariable.notifyAll();
+
+ m_heap.m_markingConditionVariable.wait(
+ lock,
+ [this] {
+ return !m_heap.m_sharedMarkStack.isEmpty()
+ || m_heap.m_parallelMarkersShouldExit;
+ });
// Is the current phase done? If so, return from this function.
- if (m_shared.m_parallelMarkersShouldExit)
+ if (m_heap.m_parallelMarkersShouldExit)
return;
}
-
- size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers;
- m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount);
- m_shared.m_numberOfActiveParallelMarkers++;
+
+ m_stack.stealSomeCellsFrom(
+ m_heap.m_sharedMarkStack, m_heap.m_numberOfWaitingParallelMarkers);
+ m_heap.m_numberOfActiveParallelMarkers++;
+ m_heap.m_numberOfWaitingParallelMarkers--;
}
drain();
}
-#endif
}
-void SlotVisitor::mergeOpaqueRoots()
+void SlotVisitor::addOpaqueRoot(void* root)
{
- StackStats::probe();
- ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
- {
- MutexLocker locker(m_shared.m_opaqueRootsLock);
- HashSet<void*>::iterator begin = m_opaqueRoots.begin();
- HashSet<void*>::iterator end = m_opaqueRoots.end();
- for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
- m_shared.m_opaqueRoots.add(*iter);
+ if (Options::numberOfGCMarkers() == 1) {
+ // Put directly into the shared HashSet.
+ m_heap.m_opaqueRoots.add(root);
+ return;
}
- m_opaqueRoots.clear();
+ // Put into the local set, but merge with the shared one every once in
+ // a while to make sure that the local sets don't grow too large.
+ mergeOpaqueRootsIfProfitable();
+ m_opaqueRoots.add(root);
}
-ALWAYS_INLINE bool JSString::tryHashConsLock()
+bool SlotVisitor::containsOpaqueRoot(void* root) const
{
-#if ENABLE(PARALLEL_GC)
- unsigned currentFlags = m_flags;
-
- if (currentFlags & HashConsLock)
- return false;
-
- unsigned newFlags = currentFlags | HashConsLock;
-
- if (!WTF::weakCompareAndSwap(&m_flags, currentFlags, newFlags))
- return false;
-
- WTF::memoryBarrierAfterLock();
- return true;
-#else
- if (isHashConsSingleton())
- return false;
-
- m_flags |= HashConsLock;
-
- return true;
-#endif
+ ASSERT(!m_isInParallelMode);
+ ASSERT(m_opaqueRoots.isEmpty());
+ return m_heap.m_opaqueRoots.contains(root);
}
-ALWAYS_INLINE void JSString::releaseHashConsLock()
+TriState SlotVisitor::containsOpaqueRootTriState(void* root) const
{
-#if ENABLE(PARALLEL_GC)
- WTF::memoryBarrierBeforeUnlock();
-#endif
- m_flags &= ~HashConsLock;
+ if (m_opaqueRoots.contains(root))
+ return TrueTriState;
+ std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
+ if (m_heap.m_opaqueRoots.contains(root))
+ return TrueTriState;
+ return MixedTriState;
}
-ALWAYS_INLINE bool JSString::shouldTryHashCons()
+int SlotVisitor::opaqueRootCount()
{
- return ((length() > 1) && !isRope() && !isHashConsSingleton());
+ ASSERT(!m_isInParallelMode);
+ ASSERT(m_opaqueRoots.isEmpty());
+ return m_heap.m_opaqueRoots.size();
}
-ALWAYS_INLINE void SlotVisitor::internalAppend(JSValue* slot)
+void SlotVisitor::mergeOpaqueRootsIfNecessary()
{
- // This internalAppend is only intended for visits to object and array backing stores.
- // as it can change the JSValue pointed to be the argument when the original JSValue
- // is a string that contains the same contents as another string.
-
- StackStats::probe();
- ASSERT(slot);
- JSValue value = *slot;
- ASSERT(value);
- if (!value.isCell())
+ if (m_opaqueRoots.isEmpty())
return;
-
- JSCell* cell = value.asCell();
- if (!cell)
+ mergeOpaqueRoots();
+}
+
+void SlotVisitor::mergeOpaqueRootsIfProfitable()
+{
+ if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold())
+ return;
+ mergeOpaqueRoots();
+}
+
+void SlotVisitor::donate()
+{
+ ASSERT(m_isInParallelMode);
+ if (Options::numberOfGCMarkers() == 1)
return;
+
+ donateKnownParallel();
+}
- validate(cell);
+void SlotVisitor::donateAndDrain()
+{
+ donate();
+ drain();
+}
- if (m_shouldHashCons && cell->isString()) {
- JSString* string = jsCast<JSString*>(cell);
- if (string->shouldTryHashCons() && string->tryHashConsLock()) {
- UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value);
- if (addResult.isNewEntry)
- string->setHashConsSingleton();
- else {
- JSValue existingJSValue = addResult.iterator->value;
- if (value != existingJSValue)
- jsCast<JSString*>(existingJSValue.asCell())->clearHashConsSingleton();
- *slot = existingJSValue;
- string->releaseHashConsLock();
- return;
- }
- string->releaseHashConsLock();
- }
+void SlotVisitor::copyLater(JSCell* owner, CopyToken token, void* ptr, size_t bytes)
+{
+ ASSERT(bytes);
+ CopiedBlock* block = CopiedSpace::blockFor(ptr);
+ if (block->isOversize()) {
+ ASSERT(bytes <= block->size());
+ // FIXME: We should be able to shrink the allocation if bytes went below the block size.
+ // For now, we just make sure that our accounting of how much memory we are actually using
+ // is correct.
+ // https://bugs.webkit.org/show_bug.cgi?id=144749
+ bytes = block->size();
+ m_heap.m_storageSpace.pin(block);
}
- internalAppend(cell);
+ ASSERT(heap()->m_storageSpace.contains(block));
+
+ LockHolder locker(&block->workListLock());
+ // We always report live bytes, except if during an eden collection we see an old object pointing to an
+ // old backing store and the old object is being marked because of the remembered set. Note that if we
+ // ask the object itself, it will always tell us that it's an old black object - because even during an
+ // eden collection we have already indicated that the object is old. That's why we use the
+ // SlotVisitor's cache of the object's old state.
+ if (heap()->operationInProgress() == FullCollection
+ || !block->isOld()
+ || m_currentObjectCellStateBeforeVisiting != CellState::OldGrey) {
+ m_bytesCopied += bytes;
+ block->reportLiveBytes(locker, owner, token, bytes);
+ }
+}
+
+void SlotVisitor::mergeOpaqueRoots()
+{
+ ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
+ {
+ std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
+ for (auto* root : m_opaqueRoots)
+ m_heap.m_opaqueRoots.add(root);
+ }
+ m_opaqueRoots.clear();
}
void SlotVisitor::harvestWeakReferences()
{
- StackStats::probe();
- for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
+ for (WeakReferenceHarvester* current = m_heap.m_weakReferenceHarvesters.head(); current; current = current->next())
current->visitWeakReferences(*this);
}
void SlotVisitor::finalizeUnconditionalFinalizers()
{
- StackStats::probe();
- while (m_shared.m_unconditionalFinalizers.hasNext())
- m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
+ while (m_heap.m_unconditionalFinalizers.hasNext())
+ m_heap.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
}
-#if ENABLE(GC_VALIDATION)
-void SlotVisitor::validate(JSCell* cell)
-{
- RELEASE_ASSERT(cell);
-
- if (!cell->structure()) {
- dataLogF("cell at %p has a null structure\n" , cell);
- CRASH();
- }
-
- // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
- // I hate this sentence.
- if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
- const char* parentClassName = 0;
- const char* ourClassName = 0;
- if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
- parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
- if (cell->structure()->JSCell::classInfo())
- ourClassName = cell->structure()->JSCell::classInfo()->className;
- dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
- cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
- CRASH();
- }
-
- // Make sure we can walk the ClassInfo chain
- const ClassInfo* info = cell->classInfo();
- do { } while ((info = info->parentClass));
-}
-#else
-void SlotVisitor::validate(JSCell*)
+void SlotVisitor::dump(PrintStream&) const
{
+ for (const JSCell* cell : markStack())
+ dataLog(*cell, "\n");
}
-#endif
} // namespace JSC
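
The rewritten drainFromShared() above replaces WTF's MutexLocker and Condition with std::lock_guard/std::unique_lock over a Lock and a predicate-taking wait. A stripped-down sketch of the same coordination pattern with standard primitives only (a shared work deque, an active-marker counter, and termination detection); the names and the batch-stealing step are simplified stand-ins, not JSC's API:

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    struct SharedMarkingState {
        std::mutex markingMutex;
        std::condition_variable markingCondition;
        std::deque<int> sharedWork;          // stand-in for the shared mark stack
        unsigned numberOfActiveMarkers = 0;
        bool markersShouldExit = false;
    };

    // One slave marker's drain loop: wait for work or exit, grab a batch, process it.
    void slaveDrain(SharedMarkingState& shared)
    {
        {
            std::lock_guard<std::mutex> lock(shared.markingMutex);
            shared.numberOfActiveMarkers++;
        }
        while (true) {
            std::deque<int> localWork;
            {
                std::unique_lock<std::mutex> lock(shared.markingMutex);
                shared.numberOfActiveMarkers--;

                // If everyone is idle and there is no work, wake the master so it can finish.
                if (!shared.numberOfActiveMarkers && shared.sharedWork.empty())
                    shared.markingCondition.notify_all();

                // Sleep until there is work to take or the phase is over.
                shared.markingCondition.wait(lock, [&] {
                    return !shared.sharedWork.empty() || shared.markersShouldExit;
                });
                if (shared.markersShouldExit)
                    return;

                localWork.swap(shared.sharedWork); // crude stand-in for stealSomeCellsFrom()
                shared.numberOfActiveMarkers++;
            }
            for (int item : localWork) {
                (void)item; // visit the cell's children here
            }
        }
    }
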
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.h b/Source/JavaScriptCore/heap/SlotVisitor.h
index e1808faf0..add29658f 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.h
+++ b/Source/JavaScriptCore/heap/SlotVisitor.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,32 +26,46 @@
#ifndef SlotVisitor_h
#define SlotVisitor_h
+#include "CellState.h"
+#include "CopyToken.h"
#include "HandleTypes.h"
-#include "MarkStackInlines.h"
-
-#include <wtf/text/StringHash.h>
+#include "MarkStack.h"
+#include "OpaqueRootSet.h"
namespace JSC {
class ConservativeRoots;
class GCThreadSharedData;
class Heap;
+template<typename T> class JITWriteBarrier;
+class UnconditionalFinalizer;
template<typename T> class Weak;
+class WeakReferenceHarvester;
template<typename T> class WriteBarrierBase;
-template<typename T> class JITWriteBarrier;
class SlotVisitor {
WTF_MAKE_NONCOPYABLE(SlotVisitor);
+ WTF_MAKE_FAST_ALLOCATED;
+
friend class HeapRootVisitor; // Allowed to mark a JSValue* or JSCell** directly.
+ friend class Heap;
public:
- SlotVisitor(GCThreadSharedData&);
+ SlotVisitor(Heap&);
~SlotVisitor();
+ MarkStackArray& markStack() { return m_stack; }
+ const MarkStackArray& markStack() const { return m_stack; }
+
+ VM& vm();
+ const VM& vm() const;
+ Heap* heap() const;
+
void append(ConservativeRoots&);
template<typename T> void append(JITWriteBarrier<T>*);
template<typename T> void append(WriteBarrierBase<T>*);
+ template<typename Iterator> void append(Iterator begin , Iterator end);
void appendValues(WriteBarrierBase<Unknown>*, size_t count);
template<typename T>
@@ -59,18 +73,23 @@ public:
void appendUnbarrieredValue(JSValue*);
template<typename T>
void appendUnbarrieredWeak(Weak<T>*);
+ template<typename T>
+ void appendUnbarrieredReadOnlyPointer(T*);
+ void appendUnbarrieredReadOnlyValue(JSValue);
- void addOpaqueRoot(void*);
- bool containsOpaqueRoot(void*);
- TriState containsOpaqueRootTriState(void*);
+ JS_EXPORT_PRIVATE void addOpaqueRoot(void*);
+ JS_EXPORT_PRIVATE bool containsOpaqueRoot(void*) const;
+ TriState containsOpaqueRootTriState(void*) const;
int opaqueRootCount();
- GCThreadSharedData& sharedData() { return m_shared; }
bool isEmpty() { return m_stack.isEmpty(); }
- void setup();
+ void didStartMarking();
void reset();
+ void clearMarkStack();
+ size_t bytesVisited() const { return m_bytesVisited; }
+ size_t bytesCopied() const { return m_bytesCopied; }
size_t visitCount() const { return m_visitCount; }
void donate();
@@ -83,55 +102,42 @@ public:
void harvestWeakReferences();
void finalizeUnconditionalFinalizers();
- void copyLater(JSCell*, void*, size_t);
+ void copyLater(JSCell*, CopyToken, void*, size_t);
+
+ void reportExtraMemoryVisited(size_t);
-#if ENABLE(SIMPLE_HEAP_PROFILING)
- VTableSpectrum m_visitedTypeCounts;
-#endif
-
void addWeakReferenceHarvester(WeakReferenceHarvester*);
void addUnconditionalFinalizer(UnconditionalFinalizer*);
-#if ENABLE(OBJECT_MARK_LOGGING)
- inline void resetChildCount() { m_logChildCount = 0; }
- inline unsigned childCount() { return m_logChildCount; }
- inline void incrementChildCount() { m_logChildCount++; }
-#endif
+ void dump(PrintStream&) const;
private:
friend class ParallelModeEnabler;
- JS_EXPORT_PRIVATE static void validate(JSCell*);
-
- void append(JSValue*);
- void append(JSValue*, size_t count);
- void append(JSCell**);
+ JS_EXPORT_PRIVATE void append(JSValue); // This is private to encourage clients to use WriteBarrier<T>.
- void internalAppend(JSCell*);
- void internalAppend(JSValue);
- void internalAppend(JSValue*);
+ JS_EXPORT_PRIVATE void setMarkedAndAppendToMarkStack(JSCell*);
+ void appendToMarkStack(JSCell*);
JS_EXPORT_PRIVATE void mergeOpaqueRoots();
void mergeOpaqueRootsIfNecessary();
void mergeOpaqueRootsIfProfitable();
+
+ void visitChildren(const JSCell*);
void donateKnownParallel();
MarkStackArray m_stack;
- HashSet<void*> m_opaqueRoots; // Handle-owning data structures not visible to the garbage collector.
+ OpaqueRootSet m_opaqueRoots; // Handle-owning data structures not visible to the garbage collector.
+ size_t m_bytesVisited;
+ size_t m_bytesCopied;
size_t m_visitCount;
bool m_isInParallelMode;
- GCThreadSharedData& m_shared;
-
- bool m_shouldHashCons; // Local per-thread copy of shared flag for performance reasons
- typedef HashMap<StringImpl*, JSValue> UniqueStringMap;
- UniqueStringMap m_uniqueStrings;
+ Heap& m_heap;
-#if ENABLE(OBJECT_MARK_LOGGING)
- unsigned m_logChildCount;
-#endif
+ CellState m_currentObjectCellStateBeforeVisiting { CellState::NewWhite };
public:
#if !ASSERT_DISABLED
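
SlotVisitor now caches the cell's state from before visiting (m_currentObjectCellStateBeforeVisiting) so copyLater() can tell a remembered old object apart from a freshly marked one. A rough sketch of how a visitor might use such a cached state, assuming the CellState values that appear in this patch (NewWhite, NewGrey, OldGrey, OldBlack); the surrounding types are invented for illustration:

    enum class CellState : unsigned char { NewWhite, NewGrey, OldGrey, OldBlack };

    struct ToyCell {
        CellState state = CellState::NewWhite;
    };

    class ToyVisitor {
    public:
        void visit(ToyCell& cell)
        {
            // Cache the pre-visit colour, then blacken the object before walking it.
            m_stateBeforeVisiting = cell.state;
            cell.state = CellState::OldBlack;
            // ... visit children here ...
        }

        bool cameFromRememberedSet() const
        {
            // An old grey object was only re-greyed by the write barrier (remembered set);
            // its backing stores were already accounted for in a previous collection.
            return m_stateBeforeVisiting == CellState::OldGrey;
        }

    private:
        CellState m_stateBeforeVisiting = CellState::NewWhite;
    };
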
diff --git a/Source/JavaScriptCore/heap/SlotVisitorInlines.h b/Source/JavaScriptCore/heap/SlotVisitorInlines.h
index da338ce11..033872c58 100644
--- a/Source/JavaScriptCore/heap/SlotVisitorInlines.h
+++ b/Source/JavaScriptCore/heap/SlotVisitorInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,167 +26,92 @@
#ifndef SlotVisitorInlines_h
#define SlotVisitorInlines_h
-#include "CopiedBlockInlines.h"
-#include "CopiedSpaceInlines.h"
-#include "Options.h"
#include "SlotVisitor.h"
#include "Weak.h"
#include "WeakInlines.h"
namespace JSC {
-ALWAYS_INLINE void SlotVisitor::append(JSValue* slot, size_t count)
-{
- for (size_t i = 0; i < count; ++i) {
- JSValue& value = slot[i];
- internalAppend(value);
- }
-}
-
template<typename T>
inline void SlotVisitor::appendUnbarrieredPointer(T** slot)
{
ASSERT(slot);
- JSCell* cell = *slot;
- internalAppend(cell);
+ append(*slot);
}
-ALWAYS_INLINE void SlotVisitor::append(JSValue* slot)
+template<typename T>
+inline void SlotVisitor::appendUnbarrieredReadOnlyPointer(T* cell)
{
- ASSERT(slot);
- internalAppend(*slot);
+ append(cell);
}
-ALWAYS_INLINE void SlotVisitor::appendUnbarrieredValue(JSValue* slot)
+inline void SlotVisitor::appendUnbarrieredValue(JSValue* slot)
{
ASSERT(slot);
- internalAppend(*slot);
+ append(*slot);
}
-ALWAYS_INLINE void SlotVisitor::append(JSCell** slot)
+inline void SlotVisitor::appendUnbarrieredReadOnlyValue(JSValue value)
{
- ASSERT(slot);
- internalAppend(*slot);
+ append(value);
}
template<typename T>
-ALWAYS_INLINE void SlotVisitor::appendUnbarrieredWeak(Weak<T>* weak)
+inline void SlotVisitor::appendUnbarrieredWeak(Weak<T>* weak)
{
ASSERT(weak);
- if (weak->get())
- internalAppend(weak->get());
+ append(weak->get());
}
-ALWAYS_INLINE void SlotVisitor::internalAppend(JSValue value)
+template<typename T>
+inline void SlotVisitor::append(WriteBarrierBase<T>* slot)
{
- if (!value || !value.isCell())
- return;
- internalAppend(value.asCell());
+ append(slot->get());
}
-inline void SlotVisitor::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
+template<typename Iterator>
+inline void SlotVisitor::append(Iterator begin, Iterator end)
{
- m_shared.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
+ for (auto it = begin; it != end; ++it)
+ append(&*it);
}
-inline void SlotVisitor::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
+inline void SlotVisitor::appendValues(WriteBarrierBase<Unknown>* barriers, size_t count)
{
- m_shared.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
+ for (size_t i = 0; i < count; ++i)
+ append(&barriers[i]);
}
-inline void SlotVisitor::addOpaqueRoot(void* root)
+inline void SlotVisitor::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
{
-#if ENABLE(PARALLEL_GC)
- if (Options::numberOfGCMarkers() == 1) {
- // Put directly into the shared HashSet.
- m_shared.m_opaqueRoots.add(root);
- return;
- }
- // Put into the local set, but merge with the shared one every once in
- // a while to make sure that the local sets don't grow too large.
- mergeOpaqueRootsIfProfitable();
- m_opaqueRoots.add(root);
-#else
- m_opaqueRoots.add(root);
-#endif
+ m_heap.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
}
-inline bool SlotVisitor::containsOpaqueRoot(void* root)
+inline void SlotVisitor::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
{
- ASSERT(!m_isInParallelMode);
-#if ENABLE(PARALLEL_GC)
- ASSERT(m_opaqueRoots.isEmpty());
- return m_shared.m_opaqueRoots.contains(root);
-#else
- return m_opaqueRoots.contains(root);
-#endif
+ m_heap.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
}
-inline TriState SlotVisitor::containsOpaqueRootTriState(void* root)
+inline void SlotVisitor::reportExtraMemoryVisited(size_t size)
{
- if (m_opaqueRoots.contains(root))
- return TrueTriState;
- MutexLocker locker(m_shared.m_opaqueRootsLock);
- if (m_shared.m_opaqueRoots.contains(root))
- return TrueTriState;
- return MixedTriState;
+ heap()->reportExtraMemoryVisited(m_currentObjectCellStateBeforeVisiting, size);
}
-inline int SlotVisitor::opaqueRootCount()
+inline Heap* SlotVisitor::heap() const
{
- ASSERT(!m_isInParallelMode);
-#if ENABLE(PARALLEL_GC)
- ASSERT(m_opaqueRoots.isEmpty());
- return m_shared.m_opaqueRoots.size();
-#else
- return m_opaqueRoots.size();
-#endif
+ return &m_heap;
}
-inline void SlotVisitor::mergeOpaqueRootsIfNecessary()
-{
- if (m_opaqueRoots.isEmpty())
- return;
- mergeOpaqueRoots();
-}
-
-inline void SlotVisitor::mergeOpaqueRootsIfProfitable()
-{
- if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold())
- return;
- mergeOpaqueRoots();
-}
-
-inline void SlotVisitor::donate()
+inline VM& SlotVisitor::vm()
{
- ASSERT(m_isInParallelMode);
- if (Options::numberOfGCMarkers() == 1)
- return;
-
- donateKnownParallel();
+ return *m_heap.m_vm;
}
-inline void SlotVisitor::donateAndDrain()
+inline const VM& SlotVisitor::vm() const
{
- donate();
- drain();
+ return *m_heap.m_vm;
}
-inline void SlotVisitor::copyLater(JSCell* owner, void* ptr, size_t bytes)
-{
- ASSERT(bytes);
- CopiedBlock* block = CopiedSpace::blockFor(ptr);
- if (block->isOversize()) {
- m_shared.m_copiedSpace->pin(block);
- return;
- }
-
- if (block->isPinned())
- return;
-
- block->reportLiveBytes(owner, bytes);
-}
-
} // namespace JSC
#endif // SlotVisitorInlines_h
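
addOpaqueRoot() and mergeOpaqueRootsIfProfitable() above keep a per-thread set and only fold it into the shared set under a lock once it crosses a threshold, limiting both lock contention and per-thread memory. A generic sketch of that batching pattern using the standard library; the threshold constant and names are placeholders, not JSC's Options::opaqueRootMergeThreshold():

    #include <mutex>
    #include <unordered_set>

    class OpaqueRootCollector {
    public:
        OpaqueRootCollector(std::unordered_set<void*>& shared, std::mutex& sharedLock)
            : m_shared(shared), m_sharedLock(sharedLock) { }

        void add(void* root)
        {
            // Merge the local set into the shared one before it grows too large.
            if (m_local.size() >= mergeThreshold)
                merge();
            m_local.insert(root);
        }

        void merge()
        {
            if (m_local.empty())
                return;
            std::lock_guard<std::mutex> lock(m_sharedLock);
            m_shared.insert(m_local.begin(), m_local.end());
            m_local.clear();
        }

    private:
        static constexpr size_t mergeThreshold = 1000; // placeholder for a tunable option
        std::unordered_set<void*> m_local;
        std::unordered_set<void*>& m_shared;
        std::mutex& m_sharedLock;
    };
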
diff --git a/Source/JavaScriptCore/heap/Strong.h b/Source/JavaScriptCore/heap/Strong.h
index e00e92061..5c0f83267 100644
--- a/Source/JavaScriptCore/heap/Strong.h
+++ b/Source/JavaScriptCore/heap/Strong.h
@@ -84,9 +84,7 @@ public:
bool operator!() const { return !slot() || !*slot(); }
- // This conversion operator allows implicit conversion to bool but not to other integer types.
- typedef JSValue (HandleBase::*UnspecifiedBoolType);
- operator UnspecifiedBoolType*() const { return !!*this ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0; }
+ explicit operator bool() const { return !!*this; }
void swap(Strong& other)
{
@@ -148,11 +146,11 @@ template<class T> inline void swap(Strong<T>& a, Strong<T>& b)
namespace WTF {
-template<typename T> struct VectorTraits<JSC::Strong<T> > : SimpleClassVectorTraits {
+template<typename T> struct VectorTraits<JSC::Strong<T>> : SimpleClassVectorTraits {
static const bool canCompareWithMemcmp = false;
};
-template<typename P> struct HashTraits<JSC::Strong<P> > : SimpleClassHashTraits<JSC::Strong<P> > { };
+template<typename P> struct HashTraits<JSC::Strong<P>> : SimpleClassHashTraits<JSC::Strong<P>> { };
}
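
The Strong.h hunk above swaps the old "safe bool" idiom (a pointer-to-member conversion operator) for C++11's explicit operator bool, which still allows `if (strong)` but rejects accidental integer conversions. A minimal illustration of the difference; the type here is invented:

    struct Handle {
        void* m_slot = nullptr;

        // C++11 replacement for the UnspecifiedBoolType trick: usable in boolean
        // contexts, but not implicitly convertible to int or other handles.
        explicit operator bool() const { return m_slot != nullptr; }
    };

    void example()
    {
        Handle h;
        if (h) { /* ok: contextual conversion to bool */ }
        // int n = h;     // error: no implicit conversion
        // bool b = h;    // error: conversion is explicit
        bool b = static_cast<bool>(h);
        (void)b;
    }
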
diff --git a/Source/JavaScriptCore/heap/TinyBloomFilter.h b/Source/JavaScriptCore/heap/TinyBloomFilter.h
index a75ce8ce5..15a419de8 100644
--- a/Source/JavaScriptCore/heap/TinyBloomFilter.h
+++ b/Source/JavaScriptCore/heap/TinyBloomFilter.h
@@ -35,6 +35,7 @@ public:
TinyBloomFilter();
void add(Bits);
+ void add(TinyBloomFilter&);
bool ruleOut(Bits) const; // True for 0.
void reset();
@@ -52,6 +53,11 @@ inline void TinyBloomFilter::add(Bits bits)
m_bits |= bits;
}
+inline void TinyBloomFilter::add(TinyBloomFilter& other)
+{
+ m_bits |= other.m_bits;
+}
+
inline bool TinyBloomFilter::ruleOut(Bits bits) const
{
if (!bits)
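
The new TinyBloomFilter::add(TinyBloomFilter&) simply ORs another filter's bits into this one, so a per-thread filter can be folded into a shared filter cheaply. A self-contained sketch of a single-word Bloom filter with that merge; JSC feeds it precomputed Bits, so no hashing is shown:

    #include <cstdint>

    class ToyBloomFilter {
    public:
        typedef uintptr_t Bits;

        void add(Bits bits) { m_bits |= bits; }
        void add(const ToyBloomFilter& other) { m_bits |= other.m_bits; } // merge = bitwise OR

        // True means "definitely not present"; false means "maybe present".
        bool ruleOut(Bits bits) const
        {
            if (!bits)
                return true;
            return (bits & m_bits) != bits;
        }

        void reset() { m_bits = 0; }

    private:
        Bits m_bits = 0;
    };
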
diff --git a/Source/JavaScriptCore/heap/UnconditionalFinalizer.h b/Source/JavaScriptCore/heap/UnconditionalFinalizer.h
index 26029d046..1cd7c7bfb 100644
--- a/Source/JavaScriptCore/heap/UnconditionalFinalizer.h
+++ b/Source/JavaScriptCore/heap/UnconditionalFinalizer.h
@@ -35,7 +35,8 @@ namespace JSC {
// associated with each CodeBlock.
class UnconditionalFinalizer : public ListableHandler<UnconditionalFinalizer> {
-public:
+ WTF_MAKE_FAST_ALLOCATED;
+public:
virtual void finalizeUnconditionally() = 0;
protected:
diff --git a/Source/JavaScriptCore/heap/VTableSpectrum.cpp b/Source/JavaScriptCore/heap/VTableSpectrum.cpp
deleted file mode 100644
index a6f8e54bd..000000000
--- a/Source/JavaScriptCore/heap/VTableSpectrum.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "VTableSpectrum.h"
-
-#include "JSObject.h"
-#include "Structure.h"
-#include <algorithm>
-#include <stdio.h>
-#include <wtf/Vector.h>
-
-#if PLATFORM(MAC)
-#include <dlfcn.h>
-#endif
-
-namespace JSC {
-
-VTableSpectrum::VTableSpectrum()
-{
-}
-
-VTableSpectrum::~VTableSpectrum()
-{
-}
-
-void VTableSpectrum::countVPtr(void* vTablePointer)
-{
- add(vTablePointer);
-}
-
-void VTableSpectrum::count(JSCell* cell)
-{
- // FIXME: we need to change this class to count ClassInfos rather than vptrs
- UNUSED_PARAM(cell);
-}
-
-void VTableSpectrum::dump(FILE* output, const char* comment)
-{
- fprintf(output, "%s:\n", comment);
-
- Vector<KeyAndCount> list = buildList();
-
- for (size_t index = list.size(); index-- > 0;) {
- KeyAndCount item = list.at(index);
-#if PLATFORM(MAC)
- Dl_info info;
- if (dladdr(item.key, &info)) {
- char* findResult = strrchr(info.dli_fname, '/');
- const char* strippedFileName;
-
- if (findResult)
- strippedFileName = findResult + 1;
- else
- strippedFileName = info.dli_fname;
-
- fprintf(output, " %s:%s(%p): %lu\n", strippedFileName, info.dli_sname, item.key, item.count);
- continue;
- }
-#endif
- fprintf(output, " %p: %lu\n", item.key, item.count);
- }
-
- fflush(output);
-}
-
-} // namespace JSC
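
The deleted VTableSpectrum::dump() above resolved each counted vtable pointer back to a library and symbol name with dladdr() on Mac. That lookup is plain POSIX dlfcn usage; a standalone sketch of the same idea:

    #include <cstdio>
    #include <cstring>
    #include <dlfcn.h>

    // Print "file:symbol(address)" for a code or vtable pointer, falling back to the
    // raw address when the symbol cannot be resolved, much like the removed dump() loop.
    static void printSymbolicated(void* pointer)
    {
        Dl_info info;
        if (dladdr(pointer, &info) && info.dli_fname) {
            const char* slash = strrchr(info.dli_fname, '/');
            const char* fileName = slash ? slash + 1 : info.dli_fname;
            const char* symbol = info.dli_sname ? info.dli_sname : "?";
            printf("%s:%s(%p)\n", fileName, symbol, pointer);
            return;
        }
        printf("%p\n", pointer);
    }
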
diff --git a/Source/JavaScriptCore/heap/Weak.cpp b/Source/JavaScriptCore/heap/Weak.cpp
index 3857b60d2..a30b7c085 100644
--- a/Source/JavaScriptCore/heap/Weak.cpp
+++ b/Source/JavaScriptCore/heap/Weak.cpp
@@ -26,6 +26,7 @@
#include "config.h"
#include "Weak.h"
+#include "JSCInlines.h"
#include "WeakSetInlines.h"
namespace JSC {
diff --git a/Source/JavaScriptCore/heap/Weak.h b/Source/JavaScriptCore/heap/Weak.h
index 5c901df22..2d82f67ae 100644
--- a/Source/JavaScriptCore/heap/Weak.h
+++ b/Source/JavaScriptCore/heap/Weak.h
@@ -26,12 +26,12 @@
#ifndef Weak_h
#define Weak_h
+#include "JSExportMacros.h"
+#include <cstddef>
#include <wtf/Noncopyable.h>
-#include <wtf/NullPtr.h>
namespace JSC {
-template<typename T> class PassWeak;
class WeakImpl;
class WeakHandleOwner;
@@ -46,18 +46,18 @@ public:
{
}
- explicit Weak(std::nullptr_t)
+ Weak(std::nullptr_t)
: m_impl(0)
{
}
- explicit Weak(T*, WeakHandleOwner* = 0, void* context = 0);
+ Weak(T*, WeakHandleOwner* = 0, void* context = 0);
enum HashTableDeletedValueTag { HashTableDeletedValue };
bool isHashTableDeletedValue() const;
Weak(HashTableDeletedValueTag);
- template<typename U> Weak(const PassWeak<U>&);
+ Weak(Weak&&);
~Weak()
{
@@ -65,8 +65,9 @@ public:
}
void swap(Weak&);
- Weak& operator=(const PassWeak<T>&);
-
+
+ Weak& operator=(Weak&&);
+
bool operator!() const;
T* operator->() const;
T& operator*() const;
@@ -74,11 +75,9 @@ public:
bool was(T*) const;
- // This conversion operator allows implicit conversion to bool but not to other integer types.
- typedef void* (Weak::*UnspecifiedBoolType);
- operator UnspecifiedBoolType*() const;
+ explicit operator bool() const;
- PassWeak<T> release();
+ WeakImpl* leakImpl() WARN_UNUSED_RETURN;
void clear()
{
if (!m_impl)
diff --git a/Source/JavaScriptCore/heap/WeakBlock.cpp b/Source/JavaScriptCore/heap/WeakBlock.cpp
index 957090569..ddbbc8c84 100644
--- a/Source/JavaScriptCore/heap/WeakBlock.cpp
+++ b/Source/JavaScriptCore/heap/WeakBlock.cpp
@@ -28,20 +28,28 @@
#include "Heap.h"
#include "HeapRootVisitor.h"
+#include "JSCInlines.h"
#include "JSObject.h"
-#include "Operations.h"
-#include "Structure.h"
+#include "WeakHandleOwner.h"
namespace JSC {
-WeakBlock* WeakBlock::create(DeadBlock* block)
+WeakBlock* WeakBlock::create(Heap& heap, MarkedBlock& markedBlock)
{
- Region* region = block->region();
- return new (NotNull, block) WeakBlock(region);
+ heap.didAllocateBlock(WeakBlock::blockSize);
+ return new (NotNull, fastMalloc(blockSize)) WeakBlock(markedBlock);
}
-WeakBlock::WeakBlock(Region* region)
- : HeapBlock<WeakBlock>(region)
+void WeakBlock::destroy(Heap& heap, WeakBlock* block)
+{
+ block->~WeakBlock();
+ fastFree(block);
+ heap.didFreeBlock(WeakBlock::blockSize);
+}
+
+WeakBlock::WeakBlock(MarkedBlock& markedBlock)
+ : DoublyLinkedListNode<WeakBlock>()
+ , m_markedBlock(&markedBlock)
{
for (size_t i = 0; i < weakImplCount(); ++i) {
WeakImpl* weakImpl = &weakImpls()[i];
@@ -76,8 +84,11 @@ void WeakBlock::sweep()
finalize(weakImpl);
if (weakImpl->state() == WeakImpl::Deallocated)
addToFreeList(&sweepResult.freeList, weakImpl);
- else
+ else {
sweepResult.blockIsFree = false;
+ if (weakImpl->state() == WeakImpl::Live)
+ sweepResult.blockIsLogicallyEmpty = false;
+ }
}
m_sweepResult = sweepResult;
@@ -90,6 +101,12 @@ void WeakBlock::visit(HeapRootVisitor& heapRootVisitor)
if (isEmpty())
return;
+ // If this WeakBlock doesn't belong to a MarkedBlock, we won't even be here.
+ ASSERT(m_markedBlock);
+
+ // We only visit after marking.
+ ASSERT(m_markedBlock->isMarkedOrRetired());
+
SlotVisitor& visitor = heapRootVisitor.visitor();
for (size_t i = 0; i < weakImplCount(); ++i) {
@@ -97,14 +114,14 @@ void WeakBlock::visit(HeapRootVisitor& heapRootVisitor)
if (weakImpl->state() != WeakImpl::Live)
continue;
- const JSValue& jsValue = weakImpl->jsValue();
- if (Heap::isLive(jsValue.asCell()))
- continue;
-
WeakHandleOwner* weakHandleOwner = weakImpl->weakHandleOwner();
if (!weakHandleOwner)
continue;
+ const JSValue& jsValue = weakImpl->jsValue();
+ if (m_markedBlock->isMarkedOrNewlyAllocated(jsValue.asCell()))
+ continue;
+
if (!weakHandleOwner->isReachableFromOpaqueRoots(Handle<Unknown>::wrapSlot(&const_cast<JSValue&>(jsValue)), weakImpl->context(), visitor))
continue;
@@ -118,12 +135,18 @@ void WeakBlock::reap()
if (isEmpty())
return;
+ // If this WeakBlock doesn't belong to a MarkedBlock, we won't even be here.
+ ASSERT(m_markedBlock);
+
+ // We only reap after marking.
+ ASSERT(m_markedBlock->isMarkedOrRetired());
+
for (size_t i = 0; i < weakImplCount(); ++i) {
WeakImpl* weakImpl = &weakImpls()[i];
if (weakImpl->state() > WeakImpl::Dead)
continue;
- if (Heap::isLive(weakImpl->jsValue().asCell())) {
+ if (m_markedBlock->isMarkedOrNewlyAllocated(weakImpl->jsValue().asCell())) {
ASSERT(weakImpl->state() == WeakImpl::Live);
continue;
}
diff --git a/Source/JavaScriptCore/heap/WeakBlock.h b/Source/JavaScriptCore/heap/WeakBlock.h
index b6b631e27..f5fbfdc87 100644
--- a/Source/JavaScriptCore/heap/WeakBlock.h
+++ b/Source/JavaScriptCore/heap/WeakBlock.h
@@ -26,41 +26,40 @@
#ifndef WeakBlock_h
#define WeakBlock_h
-#include "HeapBlock.h"
-#include "WeakHandleOwner.h"
#include "WeakImpl.h"
#include <wtf/DoublyLinkedList.h>
#include <wtf/StdLibExtras.h>
namespace JSC {
-class DeadBlock;
+class Heap;
class HeapRootVisitor;
-class JSValue;
-class WeakHandleOwner;
+class MarkedBlock;
-class WeakBlock : public HeapBlock<WeakBlock> {
+class WeakBlock : public DoublyLinkedListNode<WeakBlock> {
public:
friend class WTF::DoublyLinkedListNode<WeakBlock>;
- static const size_t blockSize = 4 * KB; // 5% of MarkedBlock size
+ static const size_t blockSize = 1 * KB; // 1/16 of MarkedBlock size
struct FreeCell {
FreeCell* next;
};
struct SweepResult {
- SweepResult();
bool isNull() const;
- bool blockIsFree;
- FreeCell* freeList;
+ bool blockIsFree { true };
+ bool blockIsLogicallyEmpty { true };
+ FreeCell* freeList { nullptr };
};
- static WeakBlock* create(DeadBlock*);
+ static WeakBlock* create(Heap&, MarkedBlock&);
+ static void destroy(Heap&, WeakBlock*);
static WeakImpl* asWeakImpl(FreeCell*);
bool isEmpty();
+ bool isLogicallyEmptyButNotFree() const;
void sweep();
SweepResult takeSweepResult();
@@ -69,27 +68,23 @@ public:
void reap();
void lastChanceToFinalize();
+ void disconnectMarkedBlock() { m_markedBlock = nullptr; }
private:
static FreeCell* asFreeCell(WeakImpl*);
- WeakBlock(Region*);
- WeakImpl* firstWeakImpl();
+ explicit WeakBlock(MarkedBlock&);
void finalize(WeakImpl*);
WeakImpl* weakImpls();
size_t weakImplCount();
void addToFreeList(FreeCell**, WeakImpl*);
+ MarkedBlock* m_markedBlock;
+ WeakBlock* m_prev;
+ WeakBlock* m_next;
SweepResult m_sweepResult;
};
-inline WeakBlock::SweepResult::SweepResult()
- : blockIsFree(true)
- , freeList(0)
-{
- ASSERT(isNull());
-}
-
inline bool WeakBlock::SweepResult::isNull() const
{
return blockIsFree && !freeList; // This state is impossible, so we can use it to mean null.
@@ -138,6 +133,11 @@ inline bool WeakBlock::isEmpty()
return !m_sweepResult.isNull() && m_sweepResult.blockIsFree;
}
+inline bool WeakBlock::isLogicallyEmptyButNotFree() const
+{
+ return !m_sweepResult.isNull() && !m_sweepResult.blockIsFree && m_sweepResult.blockIsLogicallyEmpty;
+}
+
} // namespace JSC
#endif // WeakBlock_h
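
SweepResult above drops its hand-written constructor in favour of C++11 default member initializers and gains a blockIsLogicallyEmpty flag, so a block with no live entries left (but not yet fully freed) can be handed off instead of pinning its MarkedBlock. A small sketch of the same initializer style; field names follow the hunk, but the struct is otherwise simplified:

    struct FreeCell { FreeCell* next; };

    struct SweepResult {
        // The default state (free, but no free list) cannot result from a real sweep,
        // so it doubles as the "not swept yet" marker.
        bool isNull() const { return blockIsFree && !freeList; }

        // Default member initializers replace the old SweepResult() constructor.
        bool blockIsFree { true };
        bool blockIsLogicallyEmpty { true };
        FreeCell* freeList { nullptr };
    };
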
diff --git a/Source/JavaScriptCore/heap/WeakHandleOwner.cpp b/Source/JavaScriptCore/heap/WeakHandleOwner.cpp
index 67e1774df..044518f7a 100644
--- a/Source/JavaScriptCore/heap/WeakHandleOwner.cpp
+++ b/Source/JavaScriptCore/heap/WeakHandleOwner.cpp
@@ -26,6 +26,8 @@
#include "config.h"
#include "WeakHandleOwner.h"
+#include "JSCInlines.h"
+
namespace JSC {
class SlotVisitor;
diff --git a/Source/JavaScriptCore/heap/WeakInlines.h b/Source/JavaScriptCore/heap/WeakInlines.h
index 221b6c11a..4653a9f8c 100644
--- a/Source/JavaScriptCore/heap/WeakInlines.h
+++ b/Source/JavaScriptCore/heap/WeakInlines.h
@@ -26,7 +26,7 @@
#ifndef WeakInlines_h
#define WeakInlines_h
-#include "PassWeak.h"
+#include "JSCell.h"
#include "WeakSetInlines.h"
#include <wtf/Assertions.h>
#include <wtf/HashTraits.h>
@@ -48,7 +48,7 @@ template<typename T> inline Weak<T>::Weak(typename Weak<T>::HashTableDeletedValu
{
}
-template<typename T> template<typename U> inline Weak<T>::Weak(const PassWeak<U>& other)
+template<typename T> inline Weak<T>::Weak(Weak&& other)
: m_impl(other.leakImpl())
{
}
@@ -63,10 +63,10 @@ template<typename T> inline void Weak<T>::swap(Weak& other)
std::swap(m_impl, other.m_impl);
}
-template<typename T> inline Weak<T>& Weak<T>::operator=(const PassWeak<T>& o)
+template<typename T> inline auto Weak<T>::operator=(Weak&& other) -> Weak&
{
- clear();
- m_impl = o.leakImpl();
+ Weak weak = WTFMove(other);
+ swap(weak);
return *this;
}
@@ -91,7 +91,7 @@ template<typename T> inline T* Weak<T>::get() const
template<typename T> inline bool Weak<T>::was(T* other) const
{
- return jsCast<T*>(m_impl->jsValue().asCell()) == other;
+ return static_cast<T*>(m_impl->jsValue().asCell()) == other;
}
template<typename T> inline bool Weak<T>::operator!() const
@@ -99,16 +99,16 @@ template<typename T> inline bool Weak<T>::operator!() const
return !m_impl || !m_impl->jsValue() || m_impl->state() != WeakImpl::Live;
}
-template<typename T> inline Weak<T>::operator UnspecifiedBoolType*() const
+template<typename T> inline Weak<T>::operator bool() const
{
- return reinterpret_cast<UnspecifiedBoolType*>(!!*this);
+ return !!*this;
}
-template<typename T> inline PassWeak<T> Weak<T>::release()
+template<typename T> inline WeakImpl* Weak<T>::leakImpl()
{
- PassWeak<T> tmp = adoptWeak<T>(m_impl);
- m_impl = 0;
- return tmp;
+ WeakImpl* impl = m_impl;
+ m_impl = nullptr;
+ return impl;
}
template<typename T> inline WeakImpl* Weak<T>::hashTableDeletedValue()
@@ -123,10 +123,10 @@ template <typename T> inline bool operator==(const Weak<T>& lhs, const Weak<T>&
// This function helps avoid modifying a weak table while holding an iterator into it. (Object allocation
// can run a finalizer that modifies the table. We avoid that by requiring a pre-constructed object as our value.)
-template<typename Map, typename Key, typename Value> inline void weakAdd(Map& map, const Key& key, Value value)
+template<typename Map, typename Key, typename Value> inline void weakAdd(Map& map, const Key& key, Value&& value)
{
ASSERT(!map.get(key));
- map.set(key, value); // The table may still have a zombie for value.
+ map.set(key, std::forward<Value>(value)); // The table may still have a zombie for value.
}
template<typename Map, typename Key, typename Value> inline void weakRemove(Map& map, const Key& key, Value value)
@@ -151,23 +151,16 @@ template<typename T> inline void weakClear(Weak<T>& weak, T* cell)
namespace WTF {
-template<typename T> struct VectorTraits<JSC::Weak<T> > : SimpleClassVectorTraits {
+template<typename T> struct VectorTraits<JSC::Weak<T>> : SimpleClassVectorTraits {
static const bool canCompareWithMemcmp = false;
};
-template<typename T> struct HashTraits<JSC::Weak<T> > : SimpleClassHashTraits<JSC::Weak<T> > {
+template<typename T> struct HashTraits<JSC::Weak<T>> : SimpleClassHashTraits<JSC::Weak<T>> {
typedef JSC::Weak<T> StorageType;
typedef std::nullptr_t EmptyValueType;
static EmptyValueType emptyValue() { return nullptr; }
- typedef JSC::PassWeak<T> PassInType;
- static void store(PassInType value, StorageType& storage) { storage = value; }
-
- typedef JSC::PassWeak<T> PassOutType;
- static PassOutType passOut(StorageType& value) { return value.release(); }
- static PassOutType passOut(EmptyValueType) { return PassOutType(); }
-
typedef T* PeekType;
static PeekType peek(const StorageType& value) { return value.get(); }
static PeekType peek(EmptyValueType) { return PeekType(); }
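
The WeakInlines.h changes above replace PassWeak transfer with a move constructor and a move assignment implemented as "move into a temporary, then swap", which releases the old impl when the temporary dies. A minimal sketch of that move-and-swap idiom for a handle owning a raw impl pointer; the types are illustrative, and the sketch deletes the impl where JSC would return it to a WeakSet:

    #include <utility>

    struct Impl { /* ... */ };

    class ToyWeak {
    public:
        ToyWeak() = default;
        explicit ToyWeak(Impl* impl) : m_impl(impl) { }

        ToyWeak(ToyWeak&& other) : m_impl(other.leakImpl()) { }

        ToyWeak& operator=(ToyWeak&& other)
        {
            ToyWeak temporary = std::move(other); // steal other's impl
            swap(temporary);                      // temporary now owns our old impl...
            return *this;                         // ...and releases it on destruction
        }

        ~ToyWeak() { delete m_impl; }

        void swap(ToyWeak& other) { std::swap(m_impl, other.m_impl); }

        Impl* leakImpl()
        {
            Impl* impl = m_impl;
            m_impl = nullptr;
            return impl;
        }

    private:
        Impl* m_impl { nullptr };
    };
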
diff --git a/Source/JavaScriptCore/heap/WeakReferenceHarvester.h b/Source/JavaScriptCore/heap/WeakReferenceHarvester.h
index 90b4deed0..59bd62f74 100644
--- a/Source/JavaScriptCore/heap/WeakReferenceHarvester.h
+++ b/Source/JavaScriptCore/heap/WeakReferenceHarvester.h
@@ -24,8 +24,6 @@
namespace JSC {
-class MarkStack;
-class MarkStackSharedData;
class SlotVisitor;
class WeakReferenceHarvester : public ListableHandler<WeakReferenceHarvester> {
diff --git a/Source/JavaScriptCore/heap/WeakSet.cpp b/Source/JavaScriptCore/heap/WeakSet.cpp
index e62e66eae..8624993ca 100644
--- a/Source/JavaScriptCore/heap/WeakSet.cpp
+++ b/Source/JavaScriptCore/heap/WeakSet.cpp
@@ -27,24 +27,37 @@
#include "WeakSet.h"
#include "Heap.h"
+#include "JSCInlines.h"
#include "VM.h"
namespace JSC {
WeakSet::~WeakSet()
{
+ Heap& heap = *this->heap();
WeakBlock* next = 0;
for (WeakBlock* block = m_blocks.head(); block; block = next) {
next = block->next();
- heap()->blockAllocator().deallocate(WeakBlock::destroy(block));
+ WeakBlock::destroy(heap, block);
}
m_blocks.clear();
}
void WeakSet::sweep()
{
- for (WeakBlock* block = m_blocks.head(); block; block = block->next())
+ for (WeakBlock* block = m_blocks.head(); block;) {
+ WeakBlock* nextBlock = block->next();
block->sweep();
+ if (block->isLogicallyEmptyButNotFree()) {
+ // If this WeakBlock is logically empty, but still has Weaks pointing into it,
+ // we can't destroy it just yet. Detach it from the WeakSet and hand ownership
+ // to the Heap so we don't pin down the entire 64kB MarkedBlock.
+ m_blocks.remove(block);
+ heap()->addLogicallyEmptyWeakBlock(block);
+ block->disconnectMarkedBlock();
+ }
+ block = nextBlock;
+ }
resetAllocator();
}
@@ -73,7 +86,7 @@ WeakBlock::FreeCell* WeakSet::tryFindAllocator()
WeakBlock::FreeCell* WeakSet::addAllocator()
{
- WeakBlock* block = WeakBlock::create(heap()->blockAllocator().allocate<WeakBlock>());
+ WeakBlock* block = WeakBlock::create(*heap(), m_markedBlock);
heap()->didAllocate(WeakBlock::blockSize);
m_blocks.append(block);
WeakBlock::SweepResult sweepResult = block->takeSweepResult();
@@ -84,7 +97,7 @@ WeakBlock::FreeCell* WeakSet::addAllocator()
void WeakSet::removeAllocator(WeakBlock* block)
{
m_blocks.remove(block);
- heap()->blockAllocator().deallocate(WeakBlock::destroy(block));
+ WeakBlock::destroy(*heap(), block);
}
} // namespace JSC
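
The new WeakSet::sweep() above may unlink a block from m_blocks while walking the list, so it fetches block->next() before deciding whether to detach the block and hand it to the Heap. A generic sketch of that "grab the successor before maybe unlinking" pattern; the node and list types are stand-ins, not WTF::DoublyLinkedList:

    #include <list>

    struct ToyBlock {
        bool logicallyEmpty = false;
        void sweep() { /* mark dead entries, compute logicallyEmpty */ }
    };

    // Sweep every block; move logically-empty ones to a separate owner so they no
    // longer pin their parent allocation, mirroring WeakSet::sweep().
    void sweepAll(std::list<ToyBlock*>& blocks, std::list<ToyBlock*>& detached)
    {
        for (auto it = blocks.begin(); it != blocks.end();) {
            auto next = std::next(it);   // capture the successor before possibly erasing
            (*it)->sweep();
            if ((*it)->logicallyEmpty) {
                detached.push_back(*it);
                blocks.erase(it);        // safe: 'it' is not used again
            }
            it = next;
        }
    }
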
diff --git a/Source/JavaScriptCore/heap/WeakSet.h b/Source/JavaScriptCore/heap/WeakSet.h
index 580cbe7a9..dbde5108b 100644
--- a/Source/JavaScriptCore/heap/WeakSet.h
+++ b/Source/JavaScriptCore/heap/WeakSet.h
@@ -31,14 +31,17 @@
namespace JSC {
class Heap;
+class MarkedBlock;
class WeakImpl;
class WeakSet {
+ friend class LLIntOffsetsExtractor;
+
public:
static WeakImpl* allocate(JSValue, WeakHandleOwner* = 0, void* context = 0);
static void deallocate(WeakImpl*);
- WeakSet(VM*);
+ WeakSet(VM*, MarkedBlock&);
~WeakSet();
void lastChanceToFinalize();
@@ -63,12 +66,14 @@ private:
WeakBlock* m_nextAllocator;
DoublyLinkedList<WeakBlock> m_blocks;
VM* m_vm;
+ MarkedBlock& m_markedBlock;
};
-inline WeakSet::WeakSet(VM* vm)
+inline WeakSet::WeakSet(VM* vm, MarkedBlock& markedBlock)
: m_allocator(0)
, m_nextAllocator(0)
, m_vm(vm)
+ , m_markedBlock(markedBlock)
{
}
diff --git a/Source/JavaScriptCore/heap/SuperRegion.cpp b/Source/JavaScriptCore/heap/WriteBarrierBuffer.cpp
index d58f600b5..10b7430ec 100644
--- a/Source/JavaScriptCore/heap/SuperRegion.cpp
+++ b/Source/JavaScriptCore/heap/WriteBarrierBuffer.cpp
@@ -24,59 +24,46 @@
*/
#include "config.h"
-#include "SuperRegion.h"
+#include "WriteBarrierBuffer.h"
-#include "Region.h"
+#include "GCAssertions.h"
+#include "Heap.h"
+#include "JSCell.h"
+#include "JSCInlines.h"
+#include "Structure.h"
namespace JSC {
-const uint64_t SuperRegion::s_fixedHeapMemoryPoolSize = 4 * 1024 * static_cast<uint64_t>(MB);
-
-SuperRegion::SuperRegion()
- : MetaAllocator(Region::s_regionSize, Region::s_regionSize)
- , m_reservationBase(0)
-{
-#if ENABLE(SUPER_REGION)
- // Over-allocate so that we can make sure that we're aligned to the size of Regions.
- m_reservation = PageReservation::reserve(s_fixedHeapMemoryPoolSize + Region::s_regionSize, OSAllocator::JSGCHeapPages);
- m_reservationBase = getAlignedBase(m_reservation);
- addFreshFreeSpace(m_reservationBase, s_fixedHeapMemoryPoolSize);
-#else
- UNUSED_PARAM(m_reservation);
- UNUSED_PARAM(m_reservationBase);
-#endif
-}
-
-SuperRegion::~SuperRegion()
+WriteBarrierBuffer::WriteBarrierBuffer(unsigned capacity)
+ : m_currentIndex(0)
+ , m_capacity(capacity)
+ , m_buffer(static_cast<JSCell**>(fastMalloc(sizeof(JSCell*) * capacity)))
{
-#if ENABLE(SUPER_REGION)
- m_reservation.deallocate();
-#endif
}
-void* SuperRegion::getAlignedBase(PageReservation& reservation)
+WriteBarrierBuffer::~WriteBarrierBuffer()
{
- for (char* current = static_cast<char*>(reservation.base()); current < static_cast<char*>(reservation.base()) + Region::s_regionSize; current += pageSize()) {
- if (!(reinterpret_cast<size_t>(current) & ~Region::s_regionMask))
- return current;
- }
- ASSERT_NOT_REACHED();
- return 0;
+ fastFree(m_buffer);
}
-void* SuperRegion::allocateNewSpace(size_t&)
+void WriteBarrierBuffer::flush(Heap& heap)
{
- return 0;
+ ASSERT(m_currentIndex <= m_capacity);
+ for (size_t i = 0; i < m_currentIndex; ++i)
+ heap.writeBarrier(m_buffer[i]);
+ m_currentIndex = 0;
}
-void SuperRegion::notifyNeedPage(void* page)
+void WriteBarrierBuffer::reset()
{
- m_reservation.commit(page, Region::s_regionSize);
+ m_currentIndex = 0;
}
-void SuperRegion::notifyPageIsFree(void* page)
+void WriteBarrierBuffer::add(JSCell* cell)
{
- m_reservation.decommit(page, Region::s_regionSize);
+ ASSERT_GC_OBJECT_LOOKS_VALID(cell);
+ ASSERT(m_currentIndex < m_capacity);
+ m_buffer[m_currentIndex++] = cell;
}
} // namespace JSC
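
WriteBarrierBuffer above batches cells that need a write barrier into a fixed-capacity array that is later flushed to Heap::writeBarrier(); the header's currentIndexAddress()/buffer() accessors suggest it is also filled in place by generated code. A rough usage sketch against a simplified buffer, with the Heap interaction reduced to a callback; only the shape of add/flush follows the patch:

    #include <cassert>
    #include <cstdlib>
    #include <functional>

    class ToyWriteBarrierBuffer {
    public:
        explicit ToyWriteBarrierBuffer(unsigned capacity)
            : m_currentIndex(0)
            , m_capacity(capacity)
            , m_buffer(static_cast<void**>(std::malloc(sizeof(void*) * capacity)))
        {
        }
        ~ToyWriteBarrierBuffer() { std::free(m_buffer); }

        bool add(void* cell)
        {
            if (m_currentIndex == m_capacity)
                return false;              // caller must flush first
            m_buffer[m_currentIndex++] = cell;
            return true;
        }

        void flush(const std::function<void(void*)>& writeBarrier)
        {
            assert(m_currentIndex <= m_capacity);
            for (unsigned i = 0; i < m_currentIndex; ++i)
                writeBarrier(m_buffer[i]); // e.g. Heap::writeBarrier(cell) in JSC
            m_currentIndex = 0;
        }

    private:
        unsigned m_currentIndex;
        const unsigned m_capacity;
        void** const m_buffer;
    };
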
diff --git a/Source/JavaScriptCore/heap/GCThread.h b/Source/JavaScriptCore/heap/WriteBarrierBuffer.h
index 0d218f975..7359083cd 100644
--- a/Source/JavaScriptCore/heap/GCThread.h
+++ b/Source/JavaScriptCore/heap/WriteBarrierBuffer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,41 +23,46 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef GCThread_h
-#define GCThread_h
+#ifndef WriteBarrierBuffer_h
+#define WriteBarrierBuffer_h
-#include <GCThreadSharedData.h>
-#include <wtf/Deque.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/Threading.h>
+#include <wtf/FastMalloc.h>
namespace JSC {
-class CopyVisitor;
-class GCThreadSharedData;
-class SlotVisitor;
+class Heap;
+class JSCell;
-class GCThread {
+class WriteBarrierBuffer {
public:
- GCThread(GCThreadSharedData&, SlotVisitor*, CopyVisitor*);
+ WriteBarrierBuffer(unsigned capacity);
+ ~WriteBarrierBuffer();
- SlotVisitor* slotVisitor();
- CopyVisitor* copyVisitor();
- ThreadIdentifier threadID();
- void initializeThreadID(ThreadIdentifier);
+ void add(JSCell*);
+ void flush(Heap&);
+ void reset();
- static void gcThreadStartFunc(void*);
+ unsigned* currentIndexAddress()
+ {
+ return &m_currentIndex;
+ }
-private:
- void gcThreadMain();
- GCPhase waitForNextPhase();
+ unsigned capacity() const
+ {
+ return m_capacity;
+ }
+
+ JSCell** buffer()
+ {
+ return m_buffer;
+ }
- ThreadIdentifier m_threadID;
- GCThreadSharedData& m_shared;
- OwnPtr<SlotVisitor> m_slotVisitor;
- OwnPtr<CopyVisitor> m_copyVisitor;
+private:
+ unsigned m_currentIndex;
+ const unsigned m_capacity;
+ JSCell** const m_buffer;
};
} // namespace JSC
-#endif
+#endif // WriteBarrierBuffer_h
diff --git a/Source/JavaScriptCore/heap/WriteBarrierSupport.cpp b/Source/JavaScriptCore/heap/WriteBarrierSupport.cpp
index 5ca33c861..984f0044b 100644
--- a/Source/JavaScriptCore/heap/WriteBarrierSupport.cpp
+++ b/Source/JavaScriptCore/heap/WriteBarrierSupport.cpp
@@ -26,6 +26,8 @@
#include "config.h"
#include "WriteBarrierSupport.h"
+#include "JSCInlines.h"
+
namespace JSC {
#if ENABLE(WRITE_BARRIER_PROFILING)