author    Konstantin Tokarev <annulen@yandex.ru>  2016-08-25 19:20:41 +0300
committer Konstantin Tokarev <annulen@yandex.ru>  2017-02-02 12:30:55 +0000
commit    6882a04fb36642862b11efe514251d32070c3d65 (patch)
tree      b7959826000b061fd5ccc7512035c7478742f7b0 /Source/JavaScriptCore/interpreter/JSStack.cpp
parent    ab6df191029eeeb0b0f16f127d553265659f739e (diff)
download  qtwebkit-6882a04fb36642862b11efe514251d32070c3d65.tar.gz
Imported QtWebKit TP3 (git b57bc6801f1876c3220d5a4bfea33d620d477443)
Change-Id: I3b1d8a2808782c9f34d50240000e20cb38d3680f
Reviewed-by: Konstantin Tokarev <annulen@yandex.ru>
Diffstat (limited to 'Source/JavaScriptCore/interpreter/JSStack.cpp')
-rw-r--r--  Source/JavaScriptCore/interpreter/JSStack.cpp | 149
1 file changed, 91 insertions(+), 58 deletions(-)
diff --git a/Source/JavaScriptCore/interpreter/JSStack.cpp b/Source/JavaScriptCore/interpreter/JSStack.cpp
index ec2962a92..6e441dfd5 100644
--- a/Source/JavaScriptCore/interpreter/JSStack.cpp
+++ b/Source/JavaScriptCore/interpreter/JSStack.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013, 2014, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -27,125 +27,158 @@
*/
#include "config.h"
-#include "JSStack.h"
#include "JSStackInlines.h"
#include "ConservativeRoots.h"
#include "Interpreter.h"
+#include "JSCInlines.h"
+#include "Options.h"
+#include <wtf/Lock.h>
namespace JSC {
+#if !ENABLE(JIT)
static size_t committedBytesCount = 0;
-static Mutex& stackStatisticsMutex()
+static size_t commitSize()
{
- DEFINE_STATIC_LOCAL(Mutex, staticMutex, ());
- return staticMutex;
-}
+ static size_t size = std::max<size_t>(16 * 1024, pageSize());
+ return size;
+}
+
+static StaticLock stackStatisticsMutex;
+#endif // !ENABLE(JIT)
-JSStack::JSStack(VM& vm, size_t capacity)
- : m_end(0)
+JSStack::JSStack(VM& vm)
+ : m_vm(vm)
, m_topCallFrame(vm.topCallFrame)
+#if !ENABLE(JIT)
+ , m_end(0)
+ , m_reservedZoneSizeInRegisters(0)
+#endif
{
+#if !ENABLE(JIT)
+ size_t capacity = Options::maxPerThreadStackUsage();
ASSERT(capacity && isPageAligned(capacity));
- m_reservation = PageReservation::reserve(roundUpAllocationSize(capacity * sizeof(Register), commitSize), OSAllocator::JSVMStackPages);
- m_end = static_cast<Register*>(m_reservation.base());
- m_commitEnd = static_cast<Register*>(m_reservation.base());
-
- disableErrorStackReserve();
+ m_reservation = PageReservation::reserve(WTF::roundUpToMultipleOf(commitSize(), capacity), OSAllocator::JSVMStackPages);
+ setStackLimit(highAddress());
+ m_commitTop = highAddress();
+
+ m_lastStackTop = baseOfStack();
+#endif // !ENABLE(JIT)
m_topCallFrame = 0;
}
+#if !ENABLE(JIT)
JSStack::~JSStack()
{
- void* base = m_reservation.base();
- m_reservation.decommit(base, reinterpret_cast<intptr_t>(m_commitEnd) - reinterpret_cast<intptr_t>(base));
- addToCommittedByteCount(-(reinterpret_cast<intptr_t>(m_commitEnd) - reinterpret_cast<intptr_t>(base)));
+ ptrdiff_t sizeToDecommit = reinterpret_cast<char*>(highAddress()) - reinterpret_cast<char*>(m_commitTop);
+ m_reservation.decommit(reinterpret_cast<void*>(m_commitTop), sizeToDecommit);
+ addToCommittedByteCount(-sizeToDecommit);
m_reservation.deallocate();
}
-bool JSStack::growSlowCase(Register* newEnd)
+bool JSStack::growSlowCase(Register* newTopOfStack)
{
+ Register* newTopOfStackWithReservedZone = newTopOfStack - m_reservedZoneSizeInRegisters;
+
// If we have already committed enough memory to satisfy this request,
// just update the end pointer and return.
- if (newEnd <= m_commitEnd) {
- m_end = newEnd;
+ if (newTopOfStackWithReservedZone >= m_commitTop) {
+ setStackLimit(newTopOfStack);
return true;
}
// Compute the chunk size of additional memory to commit, and see if
// it is still within our budget. If not, we'll fail to grow and
// return false.
- long delta = roundUpAllocationSize(reinterpret_cast<char*>(newEnd) - reinterpret_cast<char*>(m_commitEnd), commitSize);
- if (reinterpret_cast<char*>(m_commitEnd) + delta > reinterpret_cast<char*>(m_useableEnd))
+ ptrdiff_t delta = reinterpret_cast<char*>(m_commitTop) - reinterpret_cast<char*>(newTopOfStackWithReservedZone);
+ delta = WTF::roundUpToMultipleOf(commitSize(), delta);
+ Register* newCommitTop = m_commitTop - (delta / sizeof(Register));
+ if (newCommitTop < reservationTop())
return false;
- // Otherwise, the growth is still within our budget. Go ahead and commit
- // it and return true.
- m_reservation.commit(m_commitEnd, delta);
+ // Otherwise, the growth is still within our budget. Commit it and return true.
+ m_reservation.commit(newCommitTop, delta);
addToCommittedByteCount(delta);
- m_commitEnd = reinterpret_cast_ptr<Register*>(reinterpret_cast<char*>(m_commitEnd) + delta);
- m_end = newEnd;
+ m_commitTop = newCommitTop;
+ setStackLimit(newTopOfStack);
return true;
}
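
Aside on the arithmetic above: the new growSlowCase() rounds its delta up so that memory is always committed in whole chunks on the downward-growing register stack. A minimal standalone sketch of that rounding, assuming the 16 KB chunk from commitSize() and 8-byte Registers on a 64-bit build; roundUpToMultipleOf here is a local model written for this sketch, not the real WTF template:

    #include <cstddef>
    #include <cstdio>

    // Local model of WTF::roundUpToMultipleOf, for illustration only.
    static size_t roundUpToMultipleOf(size_t divisor, size_t x)
    {
        return ((x + divisor - 1) / divisor) * divisor;
    }

    int main()
    {
        const size_t commitChunk = 16 * 1024; // commitSize() when pageSize() <= 16 KB
        const size_t registerBytes = 8;       // sizeof(Register) on a typical 64-bit build

        // Growing the stack by 100 registers requests 800 bytes, but a whole
        // 16 KB chunk is committed, mirroring how growSlowCase() rounds delta.
        size_t requested = 100 * registerBytes;
        printf("requested %zu bytes, committed %zu bytes\n",
               requested, roundUpToMultipleOf(commitChunk, requested));
        return 0;
    }
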
-void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots)
+void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks)
{
- conservativeRoots.add(begin(), getTopOfStack());
+ conservativeRoots.add(topOfStack() + 1, highAddress(), jitStubRoutines, codeBlocks);
}
-void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, DFGCodeBlocks& dfgCodeBlocks)
+void JSStack::sanitizeStack()
{
- conservativeRoots.add(begin(), getTopOfStack(), jitStubRoutines, dfgCodeBlocks);
+#if !ASAN_ENABLED
+ ASSERT(topOfStack() <= baseOfStack());
+
+ if (m_lastStackTop < topOfStack()) {
+ char* begin = reinterpret_cast<char*>(m_lastStackTop + 1);
+ char* end = reinterpret_cast<char*>(topOfStack() + 1);
+ memset(begin, 0, end - begin);
+ }
+
+ m_lastStackTop = topOfStack();
+#endif
}
void JSStack::releaseExcessCapacity()
{
- ptrdiff_t delta = reinterpret_cast<uintptr_t>(m_commitEnd) - reinterpret_cast<uintptr_t>(m_reservation.base());
- m_reservation.decommit(m_reservation.base(), delta);
+ Register* highAddressWithReservedZone = highAddress() - m_reservedZoneSizeInRegisters;
+ ptrdiff_t delta = reinterpret_cast<char*>(highAddressWithReservedZone) - reinterpret_cast<char*>(m_commitTop);
+ m_reservation.decommit(m_commitTop, delta);
addToCommittedByteCount(-delta);
- m_commitEnd = static_cast<Register*>(m_reservation.base());
+ m_commitTop = highAddressWithReservedZone;
}
-void JSStack::initializeThreading()
+void JSStack::addToCommittedByteCount(long byteCount)
{
- stackStatisticsMutex();
+ LockHolder locker(stackStatisticsMutex);
+ ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1);
+ committedBytesCount += byteCount;
}
-size_t JSStack::committedByteCount()
+void JSStack::setReservedZoneSize(size_t reservedZoneSize)
{
- MutexLocker locker(stackStatisticsMutex());
- return committedBytesCount;
+ m_reservedZoneSizeInRegisters = reservedZoneSize / sizeof(Register);
+ if (m_commitTop >= (m_end + 1) - m_reservedZoneSizeInRegisters)
+ growSlowCase(m_end + 1);
}
+#endif // !ENABLE(JIT)
-void JSStack::addToCommittedByteCount(long byteCount)
+#if ENABLE(JIT)
+Register* JSStack::lowAddress() const
{
- MutexLocker locker(stackStatisticsMutex());
- ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1);
- committedBytesCount += byteCount;
+ ASSERT(wtfThreadData().stack().isGrowingDownward());
+ return reinterpret_cast<Register*>(m_vm.stackLimit());
}
-void JSStack::enableErrorStackReserve()
+Register* JSStack::highAddress() const
{
- m_useableEnd = reservationEnd();
+ ASSERT(wtfThreadData().stack().isGrowingDownward());
+ return reinterpret_cast<Register*>(wtfThreadData().stack().origin());
}
+#endif // ENABLE(JIT)
-void JSStack::disableErrorStackReserve()
+size_t JSStack::committedByteCount()
{
- char* useableEnd = reinterpret_cast<char*>(reservationEnd()) - commitSize;
- m_useableEnd = reinterpret_cast_ptr<Register*>(useableEnd);
-
- // By the time we get here, we are guaranteed to be destructing the last
- // Interpreter::ErrorHandlingMode that enabled this reserve in the first
- // place. That means the stack space beyond m_useableEnd before we
- // enabled the reserve was not previously in use. Hence, it is safe to
- // shrink back to that m_useableEnd.
- if (m_end > m_useableEnd) {
- ASSERT(m_topCallFrame->frameExtent() <= m_useableEnd);
- shrink(m_useableEnd);
- }
+#if !ENABLE(JIT)
+ LockHolder locker(stackStatisticsMutex);
+ return committedBytesCount;
+#else
+ // When using the C stack, we don't know how many stack pages are actually
+ // committed. So, we use the current stack usage as an estimate.
+ ASSERT(wtfThreadData().stack().isGrowingDownward());
+ int8_t* current = reinterpret_cast<int8_t*>(&current);
+ int8_t* high = reinterpret_cast<int8_t*>(wtfThreadData().stack().origin());
+ return high - current;
+#endif
}
} // namespace JSC
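
The JIT branch of committedByteCount() above estimates usage from the address of a fresh local. That trick can be reproduced outside WebKit; a minimal sketch, assuming a downward-growing stack and using a locally captured origin in place of wtfThreadData().stack().origin() (both names below are assumptions of this sketch, not WebKit API):

    #include <cstdint>
    #include <cstdio>

    // Stands in for wtfThreadData().stack().origin(); captured near the
    // top of the thread for this sketch (an assumption, not WebKit's API).
    static uintptr_t stackOrigin;

    static size_t estimateStackUsage()
    {
        // Same trick as the JIT branch of JSStack::committedByteCount():
        // on a downward-growing stack the address of a fresh local lies
        // below the origin, so the difference approximates bytes in use.
        int8_t local;
        return static_cast<size_t>(stackOrigin - reinterpret_cast<uintptr_t>(&local));
    }

    int main()
    {
        int8_t marker;
        stackOrigin = reinterpret_cast<uintptr_t>(&marker); // demo origin
        printf("~%zu bytes of stack in use below main()\n", estimateStackUsage());
        return 0;
    }
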