path: root/Source/JavaScriptCore/heap/MarkStack.cpp
author    Simon Hausmann <simon.hausmann@nokia.com>    2012-06-20 13:01:08 +0200
committer Simon Hausmann <simon.hausmann@nokia.com>    2012-06-20 13:01:08 +0200
commit    49233e234e5c787396cadb2cea33b31ae0cd65c1 (patch)
tree      5410cb9a8fd53168bb60d62c54b654d86f03c38d /Source/JavaScriptCore/heap/MarkStack.cpp
parent    b211c645d8ab690f713515dfdc84d80b11c27d2c (diff)
download  qtwebkit-49233e234e5c787396cadb2cea33b31ae0cd65c1.tar.gz
Imported WebKit commit 3a8c29f35d00659d2ce7a0ccdfa8304f14e82327 (http://svn.webkit.org/repository/webkit/trunk@120813)
New snapshot with Windows build fixes
Diffstat (limited to 'Source/JavaScriptCore/heap/MarkStack.cpp')
-rw-r--r--  Source/JavaScriptCore/heap/MarkStack.cpp | 142
1 file changed, 87 insertions(+), 55 deletions(-)
diff --git a/Source/JavaScriptCore/heap/MarkStack.cpp b/Source/JavaScriptCore/heap/MarkStack.cpp
index 678f1cb45..3eb02c4e8 100644
--- a/Source/JavaScriptCore/heap/MarkStack.cpp
+++ b/Source/JavaScriptCore/heap/MarkStack.cpp
@@ -36,6 +36,7 @@
#include "JSObject.h"
#include "ScopeChain.h"
#include "Structure.h"
+#include "UString.h"
#include "WriteBarrier.h"
#include <wtf/DataLog.h>
#include <wtf/MainThread.h>
@@ -45,6 +46,7 @@ namespace JSC {
MarkStackSegmentAllocator::MarkStackSegmentAllocator()
: m_nextFreeSegment(0)
{
+ m_lock.Init();
}
MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
@@ -55,7 +57,7 @@ MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
MarkStackSegment* MarkStackSegmentAllocator::allocate()
{
{
- MutexLocker locker(m_lock);
+ SpinLockHolder locker(&m_lock);
if (m_nextFreeSegment) {
MarkStackSegment* result = m_nextFreeSegment;
m_nextFreeSegment = result->m_previous;
@@ -68,7 +70,7 @@ MarkStackSegment* MarkStackSegmentAllocator::allocate()
void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
{
- MutexLocker locker(m_lock);
+ SpinLockHolder locker(&m_lock);
segment->m_previous = m_nextFreeSegment;
m_nextFreeSegment = segment;
}
@@ -77,7 +79,7 @@ void MarkStackSegmentAllocator::shrinkReserve()
{
MarkStackSegment* segments;
{
- MutexLocker locker(m_lock);
+ SpinLockHolder locker(&m_lock);
segments = m_nextFreeSegment;
m_nextFreeSegment = 0;
}
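
Note: the three hunks above swap the allocator's blocking MutexLocker for WTF's SpinLockHolder. Each critical section is only a couple of pointer updates on the free-segment list, so briefly spinning beats a potential context switch; the explicit m_lock.Init() added to the constructor is needed because this spinlock (adopted from TCMalloc) is a plain struct with no constructor of its own. A minimal sketch of the same RAII pattern, using std::atomic_flag instead of the real WTF types:

    // Illustrative only -- WTF's SpinLock/SpinLockHolder are the real types.
    #include <atomic>

    class SpinLock {
    public:
        void lock() { while (m_flag.test_and_set(std::memory_order_acquire)) { } }
        void unlock() { m_flag.clear(std::memory_order_release); }
    private:
        std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
    };

    class SpinLockHolder {
    public:
        explicit SpinLockHolder(SpinLock* lock) : m_lock(lock) { m_lock->lock(); }
        ~SpinLockHolder() { m_lock->unlock(); }
    private:
        SpinLock* m_lock;
    };
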
@@ -141,23 +143,31 @@ bool MarkStackArray::refill()
return true;
}
-bool MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
+void MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
{
+ // Try to donate about 1 / 2 of our cells. To reduce copying costs,
+ // we prefer donating whole segments over donating individual cells,
+ // even if this skews away from our 1 / 2 target.
+
ASSERT(m_segmentCapacity == other.m_segmentCapacity);
+
+ size_t segmentsToDonate = (m_numberOfPreviousSegments + 2 - 1) / 2; // Round up to donate 1 / 1 previous segments.
+
+ if (!segmentsToDonate) {
+ size_t cellsToDonate = m_top / 2; // Round down to donate 0 / 1 cells.
+ while (cellsToDonate--) {
+ ASSERT(m_top);
+ other.append(removeLast());
+ }
+ return;
+ }
+
validatePrevious();
other.validatePrevious();
-
- // Fast check: see if the other mark stack already has enough segments.
- if (other.m_numberOfPreviousSegments + 1 >= Options::maximumNumberOfSharedSegments)
- return false;
-
- size_t numberOfCellsToKeep = Options::minimumNumberOfCellsToKeep;
- ASSERT(m_top > numberOfCellsToKeep || m_topSegment->m_previous);
-
- // Looks like we should donate! Give the other mark stack all of our
- // previous segments, and then top it off.
+
MarkStackSegment* previous = m_topSegment->m_previous;
- while (previous) {
+ while (segmentsToDonate--) {
+ ASSERT(previous);
ASSERT(m_numberOfPreviousSegments);
MarkStackSegment* current = previous;
@@ -169,23 +179,18 @@ bool MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
m_numberOfPreviousSegments--;
other.m_numberOfPreviousSegments++;
}
- ASSERT(!m_numberOfPreviousSegments);
- m_topSegment->m_previous = 0;
+ m_topSegment->m_previous = previous;
+
validatePrevious();
other.validatePrevious();
-
- // Now top off. We want to keep at a minimum numberOfCellsToKeep, but if
- // we really have a lot of work, we give up half.
- if (m_top > numberOfCellsToKeep * 2)
- numberOfCellsToKeep = m_top / 2;
- while (m_top > numberOfCellsToKeep)
- other.append(removeLast());
-
- return true;
}
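
Note: the rewritten donateSomeCellsTo() always donates when called, and the old caller-side thresholds (Options::minimumNumberOfCellsToKeep, Options::maximumNumberOfSharedSegments) disappear: it hands over ceil(m_numberOfPreviousSegments / 2) whole segments when any exist, and only falls back to floor(m_top / 2) individual cells when the top segment is all there is. A small self-checking sketch of that arithmetic, with hypothetical helper names:

    #include <cassert>
    #include <cstddef>

    // ceil(n / 2): whole segments donated when previous segments exist.
    static size_t segmentsToDonate(size_t numberOfPreviousSegments)
    {
        return (numberOfPreviousSegments + 2 - 1) / 2;
    }

    // floor(n / 2): loose cells donated from the top segment otherwise.
    static size_t cellsToDonate(size_t top)
    {
        return top / 2;
    }

    int main()
    {
        assert(segmentsToDonate(0) == 0); // no previous segments: donate cells
        assert(segmentsToDonate(1) == 1); // one segment donates itself whole
        assert(segmentsToDonate(3) == 2);
        assert(cellsToDonate(9) == 4);
        return 0;
    }
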
-void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other)
+void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other, size_t idleThreadCount)
{
+ // Try to steal 1 / Nth of the shared array, where N is the number of idle threads.
+ // To reduce copying costs, we prefer stealing a whole segment over stealing
+ // individual cells, even if this skews away from our 1 / N target.
+
ASSERT(m_segmentCapacity == other.m_segmentCapacity);
validatePrevious();
other.validatePrevious();
@@ -210,28 +215,42 @@ void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other)
other.validatePrevious();
return;
}
-
- // Otherwise drain 1/Nth of the shared array where N is the number of
- // workers, or Options::minimumNumberOfCellsToKeep, whichever is bigger.
- size_t numberOfCellsToSteal = std::max((size_t)Options::minimumNumberOfCellsToKeep, other.size() / Options::numberOfGCMarkers);
+
+ size_t numberOfCellsToSteal = (other.size() + idleThreadCount - 1) / idleThreadCount; // Round up to steal 1 / 1.
while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
append(other.removeLast());
}
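
Note: stealing is now proportional to contention. Each idle thread takes ceil(other.size() / idleThreadCount) cells, splitting the shared stack roughly evenly among the threads that are waiting; with 30 shared cells and 4 idle threads, one steal takes 8. The divisor cannot be zero because the caller counts itself among the idle threads (see the drainFromShared() hunk below). A hypothetical one-liner for the target:

    // idleThreadCount >= 1: the stealing thread has already marked itself idle.
    static size_t cellsToSteal(size_t sharedSize, size_t idleThreadCount)
    {
        return (sharedSize + idleThreadCount - 1) / idleThreadCount; // ceil
    }
    // cellsToSteal(30, 4) == 8; cellsToSteal(30, 1) == 30.
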
#if ENABLE(PARALLEL_GC)
-void MarkStackThreadSharedData::markingThreadMain()
+void MarkStackThreadSharedData::resetChildren()
+{
+ for (unsigned i = 0; i < m_markingThreadsMarkStack.size(); ++i)
+ m_markingThreadsMarkStack[i]->reset();
+}
+
+size_t MarkStackThreadSharedData::childVisitCount()
+{
+ unsigned long result = 0;
+ for (unsigned i = 0; i < m_markingThreadsMarkStack.size(); ++i)
+ result += m_markingThreadsMarkStack[i]->visitCount();
+ return result;
+}
+
+void MarkStackThreadSharedData::markingThreadMain(SlotVisitor* slotVisitor)
{
WTF::registerGCThread();
{
- SlotVisitor slotVisitor(*this);
- ParallelModeEnabler enabler(slotVisitor);
- slotVisitor.drainFromShared(SlotVisitor::SlaveDrain);
+ ParallelModeEnabler enabler(*slotVisitor);
+ slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
}
+ delete slotVisitor;
}
-void MarkStackThreadSharedData::markingThreadStartFunc(void* shared)
-{
- static_cast<MarkStackThreadSharedData*>(shared)->markingThreadMain();
+void MarkStackThreadSharedData::markingThreadStartFunc(void* myVisitor)
+{
+ SlotVisitor* slotVisitor = static_cast<SlotVisitor*>(myVisitor);
+
+ slotVisitor->sharedData().markingThreadMain(slotVisitor);
}
#endif
@@ -244,7 +263,9 @@ MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData)
{
#if ENABLE(PARALLEL_GC)
for (unsigned i = 1; i < Options::numberOfGCMarkers; ++i) {
- m_markingThreads.append(createThread(markingThreadStartFunc, this, "JavaScriptCore::Marking"));
+ SlotVisitor* slotVisitor = new SlotVisitor(*this);
+ m_markingThreadsMarkStack.append(slotVisitor);
+ m_markingThreads.append(createThread(markingThreadStartFunc, slotVisitor, "JavaScriptCore::Marking"));
ASSERT(m_markingThreads.last());
}
#endif
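
Note: each marking thread previously built its SlotVisitor on its own stack, invisible to everyone else. Now the constructor heap-allocates one visitor per thread, records it in m_markingThreadsMarkStack (which is what lets the new resetChildren() and childVisitCount() reach every thread's state from the main thread), and passes it through createThread()'s void* argument; markingThreadStartFunc() casts it back and markingThreadMain() deletes it when the thread exits. A condensed sketch of that ownership handoff, with illustrative names on std::thread:

    #include <thread>
    #include <vector>

    struct Visitor {
        void drainLoop() { /* mark until the shared stack is exhausted */ }
    };

    static void threadEntry(void* arg) // stands in for markingThreadStartFunc
    {
        Visitor* visitor = static_cast<Visitor*>(arg);
        visitor->drainLoop();
        delete visitor; // the marking thread owns its visitor to the end
    }

    int main()
    {
        std::vector<Visitor*> children; // consulted only while threads run
        std::vector<std::thread> threads;
        for (int i = 0; i < 3; ++i) {
            Visitor* visitor = new Visitor;
            children.push_back(visitor);
            threads.emplace_back(threadEntry, static_cast<void*>(visitor));
        }
        for (auto& thread : threads)
            thread.join();
        return 0; // children now holds stale pointers; never dereferenced
    }
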
@@ -276,7 +297,6 @@ void MarkStackThreadSharedData::reset()
#else
ASSERT(m_opaqueRoots.isEmpty());
#endif
-
m_weakReferenceHarvesters.removeAll();
}
@@ -325,18 +345,31 @@ ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell
cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
}
-void SlotVisitor::donateSlow()
+void SlotVisitor::donateKnownParallel()
{
- // Refuse to donate if shared has more entries than I do.
- if (m_shared.m_sharedMarkStack.size() > m_stack.size())
+ // NOTE: Because we re-try often, we can afford to be conservative, and
+ // assume that donating is not profitable.
+
+ // Avoid locking when a thread reaches a dead end in the object graph.
+ if (m_stack.size() < 2)
return;
- MutexLocker locker(m_shared.m_markingLock);
- if (m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack)) {
- // Only wake up threads if the shared stack is big enough; otherwise assume that
- // it's more profitable for us to just scan this ourselves later.
- if (m_shared.m_sharedMarkStack.size() >= Options::sharedStackWakeupThreshold)
- m_shared.m_markingCondition.broadcast();
- }
+
+ // If there's already some shared work queued up, be conservative and assume
+ // that donating more is not profitable.
+ if (m_shared.m_sharedMarkStack.size())
+ return;
+
+ // If we're contending on the lock, be conservative and assume that another
+ // thread is already donating.
+ MutexTryLocker locker(m_shared.m_markingLock);
+ if (!locker.locked())
+ return;
+
+ // Otherwise, assume that a thread will go idle soon, and donate.
+ m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);
+
+ if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers)
+ m_shared.m_markingCondition.broadcast();
}
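
Note: donateSlow() took m_markingLock unconditionally and decided afterwards; donateKnownParallel() inverts that with three cheap bail-outs before touching shared state: fewer than two local cells (a dead end is not worth a lock), shared work already queued (donating more adds little), and lock contention (MutexTryLocker; someone else is presumably donating). Only then does it donate, and the wakeup is keyed on idle markers existing rather than on the old sharedStackWakeupThreshold. A minimal sketch of the try-lock-or-skip guard, using std::mutex in place of WTF's MutexTryLocker:

    #include <cstddef>
    #include <mutex>

    std::mutex markingLock;

    bool tryDonate(size_t localSize, size_t sharedSize)
    {
        if (localSize < 2)
            return false; // dead end in the object graph; nothing to spare
        if (sharedSize)
            return false; // shared work is already queued; be conservative
        std::unique_lock<std::mutex> locker(markingLock, std::try_to_lock);
        if (!locker.owns_lock())
            return false; // contended: assume another thread is donating
        // ... donate roughly half of the local stack here ...
        return true;
    }
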
void SlotVisitor::drain()
@@ -436,7 +469,8 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
}
}
- m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack);
+ size_t idleThreadCount = Options::numberOfGCMarkers - m_shared.m_numberOfActiveParallelMarkers;
+ m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount);
m_shared.m_numberOfActiveParallelMarkers++;
}
@@ -461,8 +495,7 @@ void MarkStack::mergeOpaqueRoots()
void SlotVisitor::startCopying()
{
ASSERT(!m_copyBlock);
- if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
- CRASH();
+ m_copyBlock = m_shared.m_copiedSpace->allocateBlockForCopyingPhase();
}
void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes)
@@ -483,8 +516,7 @@ void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes)
// We don't need to lock across these two calls because the master thread won't
// call doneCopying() because this thread is considered active.
m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
- if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
- CRASH();
+ m_copyBlock = m_shared.m_copiedSpace->allocateBlockForCopyingPhase();
}
return CopiedSpace::allocateFromBlock(m_copyBlock, bytes);
}
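
Note: both copying-phase call sites drop the borrowBlock()-or-CRASH() pattern for CopiedSpace::allocateBlockForCopyingPhase(), which (as the name suggests, and assuming it allocates fresh blocks rather than failing) means exhausting the loanable-block pool no longer kills the VM. The surrounding fast path is a bump allocator over the current block; a condensed sketch with illustrative names, not the real CopiedSpace API:

    #include <cstddef>

    struct CopyBlock {
        char* payload;
        size_t offset;
        size_t capacity;
    };

    // Bump-allocate from the current block; nullptr tells the caller to
    // swap in a fresh block (doneFillingBlock + allocateBlockForCopyingPhase)
    // and retry.
    static void* allocateFromBlock(CopyBlock& block, size_t bytes)
    {
        if (block.offset + bytes > block.capacity)
            return nullptr;
        void* result = block.payload + block.offset;
        block.offset += bytes;
        return result;
    }
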