summary | refs | log | tree | commit | diff
path: root/Source/JavaScriptCore/heap/Heap.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'Source/JavaScriptCore/heap/Heap.cpp')
-rw-r--r--  Source/JavaScriptCore/heap/Heap.cpp  111
1 files changed, 58 insertions, 53 deletions
diff --git a/Source/JavaScriptCore/heap/Heap.cpp b/Source/JavaScriptCore/heap/Heap.cpp
index 2254b5b01..9b68fb10a 100644
--- a/Source/JavaScriptCore/heap/Heap.cpp
+++ b/Source/JavaScriptCore/heap/Heap.cpp
@@ -35,9 +35,9 @@
#include "Tracing.h"
#include "WeakSetInlines.h"
#include <algorithm>
+#include <wtf/RAMSize.h>
#include <wtf/CurrentTime.h>
-
using namespace std;
using namespace JSC;
@@ -45,14 +45,8 @@ namespace JSC {
namespace {
-#if CPU(X86) || CPU(X86_64)
-static const size_t largeHeapSize = 16 * 1024 * 1024;
-#elif PLATFORM(IOS)
-static const size_t largeHeapSize = 8 * 1024 * 1024;
-#else
-static const size_t largeHeapSize = 512 * 1024;
-#endif
-static const size_t smallHeapSize = 512 * 1024;
+static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
+static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
@@ -148,14 +142,23 @@ struct GCCounter {
#define GCCOUNTER(name, value) do { } while (false)
#endif
-static size_t heapSizeForHint(HeapSize heapSize)
+static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
{
- if (heapSize == LargeHeap)
- return largeHeapSize;
- ASSERT(heapSize == SmallHeap);
+ if (heapType == LargeHeap)
+ return min(largeHeapSize, ramSize / 4);
return smallHeapSize;
}
+static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
+{
+ // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
+ if (heapSize < ramSize / 4)
+ return 2 * heapSize;
+ if (heapSize < ramSize / 2)
+ return 1.5 * heapSize;
+ return 1.25 * heapSize;
+}
+
static inline bool isValidSharedInstanceThreadState()
{
if (!JSLock::lockCount())
@@ -230,9 +233,10 @@ inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
} // anonymous namespace
-Heap::Heap(JSGlobalData* globalData, HeapSize heapSize)
- : m_heapSize(heapSize)
- , m_minBytesPerCycle(heapSizeForHint(heapSize))
+Heap::Heap(JSGlobalData* globalData, HeapType heapType)
+ : m_heapType(heapType)
+ , m_ramSize(ramSize())
+ , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
, m_sizeAfterLastCollect(0)
, m_bytesAllocatedLimit(m_minBytesPerCycle)
, m_bytesAllocated(0)
@@ -240,12 +244,10 @@ Heap::Heap(JSGlobalData* globalData, HeapSize heapSize)
, m_operationInProgress(NoOperation)
, m_objectSpace(this)
, m_storageSpace(this)
- , m_markListSet(0)
, m_activityCallback(DefaultGCActivityCallback::create(this))
, m_machineThreads(this)
, m_sharedData(globalData)
, m_slotVisitor(m_sharedData)
- , m_weakSet(this)
, m_handleSet(globalData)
, m_isSafeToCollect(false)
, m_globalData(globalData)
@@ -257,13 +259,6 @@ Heap::Heap(JSGlobalData* globalData, HeapSize heapSize)
Heap::~Heap()
{
- delete m_markListSet;
-
- m_objectSpace.shrink();
- m_storageSpace.freeAllBlocks();
-
- ASSERT(!size());
- ASSERT(!capacity());
}
bool Heap::isPagedOut(double deadline)
@@ -282,11 +277,7 @@ void Heap::lastChanceToFinalize()
if (size_t size = m_protectedValues.size())
WTFLogAlways("ERROR: JavaScriptCore heap deallocated while %ld values were still protected", static_cast<unsigned long>(size));
- m_weakSet.finalizeAll();
- m_objectSpace.canonicalizeCellLivenessData();
- m_objectSpace.clearMarks();
- m_objectSpace.sweep();
- m_globalData->smallStrings.finalizeSmallStrings();
+ m_objectSpace.lastChanceToFinalize();
#if ENABLE(SIMPLE_HEAP_PROFILING)
m_slotVisitor.m_visitedTypeCounts.dump(WTF::dataFile(), "Visited Type Counts");
@@ -451,6 +442,15 @@ void Heap::markRoots(bool fullGC)
GCPHASE(GatherRegisterFileRoots);
registerFile().gatherConservativeRoots(registerFileRoots, m_dfgCodeBlocks);
}
+
+#if ENABLE(DFG_JIT)
+ ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
+ {
+ GCPHASE(GatherScratchBufferRoots);
+ m_globalData->gatherConservativeRoots(scratchBufferRoots);
+ }
+#endif
+
#if ENABLE(GGC)
MarkedBlock::DirtyCellVector dirtyCells;
if (!fullGC) {
@@ -497,6 +497,13 @@ void Heap::markRoots(bool fullGC)
visitor.append(registerFileRoots);
visitor.donateAndDrain();
}
+#if ENABLE(DFG_JIT)
+ {
+ GCPHASE(VisitScratchBufferRoots);
+ visitor.append(scratchBufferRoots);
+ visitor.donateAndDrain();
+ }
+#endif
{
GCPHASE(VisitProtectedObjects);
markProtectedObjects(heapRootVisitor);
@@ -552,7 +559,7 @@ void Heap::markRoots(bool fullGC)
{
GCPHASE(VisitingLiveWeakHandles);
while (true) {
- m_weakSet.visitLiveWeakImpls(heapRootVisitor);
+ m_objectSpace.visitWeakSets(heapRootVisitor);
harvestWeakReferences();
if (visitor.isEmpty())
break;
@@ -566,11 +573,6 @@ void Heap::markRoots(bool fullGC)
}
}
- {
- GCPHASE(VisitingDeadWeakHandles);
- m_weakSet.visitDeadWeakImpls(heapRootVisitor);
- }
-
GCCOUNTER(VisitedValueCount, visitor.visitCount());
visitor.doneCopying();
@@ -674,25 +676,24 @@ void Heap::collect(SweepToggle sweepToggle)
markRoots(fullGC);
{
+ GCPHASE(ReapingWeakHandles);
+ m_objectSpace.reapWeakSets();
+ }
+
+ {
GCPHASE(FinalizeUnconditionalFinalizers);
finalizeUnconditionalFinalizers();
}
-
+
{
GCPHASE(FinalizeWeakHandles);
- m_weakSet.sweep();
+ m_objectSpace.sweepWeakSets();
m_globalData->smallStrings.finalizeSmallStrings();
}
JAVASCRIPTCORE_GC_MARKED();
{
- GCPHASE(ResetAllocators);
- m_objectSpace.resetAllocators();
- m_weakSet.resetAllocator();
- }
-
- {
GCPHASE(DeleteCodeBlocks);
m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
}
@@ -702,19 +703,23 @@ void Heap::collect(SweepToggle sweepToggle)
GCPHASE(Sweeping);
m_objectSpace.sweep();
m_objectSpace.shrink();
- m_weakSet.shrink();
m_bytesAbandoned = 0;
}
- // To avoid pathological GC churn in large heaps, we set the new allocation
- // limit to be the current size of the heap. This heuristic
- // is a bit arbitrary. Using the current size of the heap after this
- // collection gives us a 2X multiplier, which is a 1:1 (heap size :
- // new bytes allocated) proportion, and seems to work well in benchmarks.
- size_t newSize = size();
+ {
+ GCPHASE(ResetAllocators);
+ m_objectSpace.resetAllocators();
+ }
+
+ size_t currentHeapSize = size();
if (fullGC) {
- m_sizeAfterLastCollect = newSize;
- m_bytesAllocatedLimit = max(newSize, m_minBytesPerCycle);
+ m_sizeAfterLastCollect = currentHeapSize;
+
+ // To avoid pathological GC churn in very small and very large heaps, we set
+ // the new allocation limit based on the current size of the heap, with a
+ // fixed minimum.
+ size_t maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
+ m_bytesAllocatedLimit = maxHeapSize - currentHeapSize;
}
m_bytesAllocated = 0;
double lastGCEndTime = WTF::currentTime();