Diffstat (limited to 'Source/JavaScriptCore/interpreter/JSStack.cpp')
-rw-r--r--  Source/JavaScriptCore/interpreter/JSStack.cpp | 150
1 file changed, 63 insertions(+), 87 deletions(-)
diff --git a/Source/JavaScriptCore/interpreter/JSStack.cpp b/Source/JavaScriptCore/interpreter/JSStack.cpp
index 6e441dfd5..8b73d3cc9 100644
--- a/Source/JavaScriptCore/interpreter/JSStack.cpp
+++ b/Source/JavaScriptCore/interpreter/JSStack.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2013, 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,154 +31,130 @@
#include "ConservativeRoots.h"
#include "Interpreter.h"
-#include "JSCInlines.h"
-#include "Options.h"
-#include <wtf/Lock.h>
+#include <wtf/PageBlock.h>
namespace JSC {
-#if !ENABLE(JIT)
static size_t committedBytesCount = 0;
+static Mutex& stackStatisticsMutex()
+{
+ DEFINE_STATIC_LOCAL(Mutex, staticMutex, ());
+ return staticMutex;
+}
+
static size_t commitSize()
{
- static size_t size = std::max<size_t>(16 * 1024, pageSize());
+ static size_t size = 0;
+ if (!size)
+ size = std::max(16 * 1024, static_cast<int>(WTF::pageSize()));
return size;
}
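
Both the removed and the restored versions of commitSize() clamp the commit granule to at least 16KB, falling back to the OS page size when that is larger. A minimal standalone sketch of that behavior, assuming nothing from WTF:

    #include <algorithm>
    #include <cstddef>

    // Commit granule: the larger of 16KB and the OS page size.
    static size_t commitSizeFor(size_t osPageSize)
    {
        return std::max<size_t>(16 * 1024, osPageSize);
    }
    // e.g. commitSizeFor(4096) == 16384; commitSizeFor(65536) == 65536.
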
-static StaticLock stackStatisticsMutex;
-#endif // !ENABLE(JIT)
-
-JSStack::JSStack(VM& vm)
+JSStack::JSStack(VM& vm, size_t capacity)
: m_vm(vm)
- , m_topCallFrame(vm.topCallFrame)
-#if !ENABLE(JIT)
, m_end(0)
- , m_reservedZoneSizeInRegisters(0)
-#endif
+ , m_topCallFrame(vm.topCallFrame)
{
-#if !ENABLE(JIT)
- size_t capacity = Options::maxPerThreadStackUsage();
ASSERT(capacity && isPageAligned(capacity));
- m_reservation = PageReservation::reserve(WTF::roundUpToMultipleOf(commitSize(), capacity), OSAllocator::JSVMStackPages);
- setStackLimit(highAddress());
- m_commitTop = highAddress();
-
- m_lastStackTop = baseOfStack();
-#endif // !ENABLE(JIT)
+ m_reservation = PageReservation::reserve(roundUpAllocationSize(capacity * sizeof(Register), commitSize()), OSAllocator::JSVMStackPages);
+ updateStackLimit(highAddress());
+ m_commitEnd = highAddress();
+
+ disableErrorStackReserve();
m_topCallFrame = 0;
}
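
The restored constructor sizes the reservation by rounding the requested register capacity, in bytes, up to the commit granule. A hedged sketch of that arithmetic, with roundUpAllocationSize approximated by a generic round-up helper (the names here are illustrative, not WebKit API):

    #include <cstddef>

    // Round size up to the next multiple of granule (assumes granule > 0).
    static size_t roundUpAllocation(size_t size, size_t granule)
    {
        return ((size + granule - 1) / granule) * granule;
    }

    // e.g. 100000 Registers of 8 bytes with a 16KB granule:
    // roundUpAllocation(100000 * 8, 16384) == 802816 (49 granules).
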
-#if !ENABLE(JIT)
JSStack::~JSStack()
{
- ptrdiff_t sizeToDecommit = reinterpret_cast<char*>(highAddress()) - reinterpret_cast<char*>(m_commitTop);
- m_reservation.decommit(reinterpret_cast<void*>(m_commitTop), sizeToDecommit);
- addToCommittedByteCount(-sizeToDecommit);
+ void* highAddress = reinterpret_cast<void*>(static_cast<char*>(m_reservation.base()) + m_reservation.size());
+ m_reservation.decommit(reinterpret_cast<void*>(m_commitEnd), reinterpret_cast<intptr_t>(highAddress) - reinterpret_cast<intptr_t>(m_commitEnd));
+ addToCommittedByteCount(-(reinterpret_cast<intptr_t>(highAddress) - reinterpret_cast<intptr_t>(m_commitEnd)));
m_reservation.deallocate();
}
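
The destructor, like releaseExcessCapacity() below, decommits the span between m_commitEnd and the top of the region and subtracts it from the statistics. The span computation, sketched with the same explicit integer casts the code above uses:

    #include <cstddef>
    #include <cstdint>

    // Bytes currently committed between commitEnd and the region's high end
    // (the stack grows down, so committed memory sits at the top).
    static std::ptrdiff_t committedSpan(char* highAddress, char* commitEnd)
    {
        return reinterpret_cast<std::intptr_t>(highAddress)
            - reinterpret_cast<std::intptr_t>(commitEnd);
    }
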
-bool JSStack::growSlowCase(Register* newTopOfStack)
+bool JSStack::growSlowCase(Register* newEnd)
{
- Register* newTopOfStackWithReservedZone = newTopOfStack - m_reservedZoneSizeInRegisters;
-
// If we have already committed enough memory to satisfy this request,
// just update the end pointer and return.
- if (newTopOfStackWithReservedZone >= m_commitTop) {
- setStackLimit(newTopOfStack);
+ if (newEnd >= m_commitEnd) {
+ updateStackLimit(newEnd);
return true;
}
// Compute the chunk size of additional memory to commit, and check that
// committing it still keeps us within our budget. If not, we'll fail to
// grow and return false.
- ptrdiff_t delta = reinterpret_cast<char*>(m_commitTop) - reinterpret_cast<char*>(newTopOfStackWithReservedZone);
- delta = WTF::roundUpToMultipleOf(commitSize(), delta);
- Register* newCommitTop = m_commitTop - (delta / sizeof(Register));
- if (newCommitTop < reservationTop())
+ long delta = roundUpAllocationSize(reinterpret_cast<char*>(m_commitEnd) - reinterpret_cast<char*>(newEnd), commitSize());
+ if (reinterpret_cast<char*>(m_commitEnd) - delta <= reinterpret_cast<char*>(m_useableEnd))
return false;
- // Otherwise, the growth is still within our budget. Commit it and return true.
- m_reservation.commit(newCommitTop, delta);
+ // Otherwise, the growth is still within our budget. Go ahead and commit
+ // it and return true.
+ m_reservation.commit(reinterpret_cast<char*>(m_commitEnd) - delta, delta);
addToCommittedByteCount(delta);
- m_commitTop = newCommitTop;
- setStackLimit(newTopOfStack);
+ m_commitEnd = reinterpret_cast_ptr<Register*>(reinterpret_cast<char*>(m_commitEnd) - delta);
+ updateStackLimit(newEnd);
return true;
}
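
Because the stack grows downward, growSlowCase() commits whole granules below m_commitEnd and refuses to cross m_useableEnd. A self-contained sketch of that decision, with plain pointers standing in for the members above (names are illustrative):

    #include <cstddef>
    #include <cstdint>

    struct GrowDecision { bool ok; size_t bytesToCommit; };

    // Decide whether growing the downward-growing stack to newEnd fits the
    // budget: commit whole granules below commitEnd, never past useableEnd.
    static GrowDecision decideGrow(char* commitEnd, char* newEnd,
        char* useableEnd, size_t granule)
    {
        if (newEnd >= commitEnd)
            return { true, 0 }; // already committed; just move the limit
        size_t needed = static_cast<size_t>(commitEnd - newEnd);
        size_t delta = ((needed + granule - 1) / granule) * granule;
        std::uintptr_t newCommitEnd =
            reinterpret_cast<std::uintptr_t>(commitEnd) - delta;
        if (newCommitEnd <= reinterpret_cast<std::uintptr_t>(useableEnd))
            return { false, 0 }; // over budget: fail to grow
        return { true, delta };
    }
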
-void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks)
+void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots)
{
- conservativeRoots.add(topOfStack() + 1, highAddress(), jitStubRoutines, codeBlocks);
+ conservativeRoots.add(getBaseOfStack(), getTopOfStack());
}
-void JSStack::sanitizeStack()
+void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks)
{
-#if !ASAN_ENABLED
- ASSERT(topOfStack() <= baseOfStack());
-
- if (m_lastStackTop < topOfStack()) {
- char* begin = reinterpret_cast<char*>(m_lastStackTop + 1);
- char* end = reinterpret_cast<char*>(topOfStack() + 1);
- memset(begin, 0, end - begin);
- }
-
- m_lastStackTop = topOfStack();
-#endif
+ conservativeRoots.add(getBaseOfStack(), getTopOfStack(), jitStubRoutines, codeBlocks);
}
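
Both overloads hand the live span of the stack to the collector, which must treat every word in that range as a potential pointer. A generic sketch of that kind of conservative scan (this is not the ConservativeRoots API, just the idea behind it):

    #include <cstdint>
    #include <vector>

    // Collect every word in [begin, end) as a candidate heap pointer;
    // filtering candidates against the real heap happens elsewhere.
    static void scanConservatively(std::uintptr_t* begin, std::uintptr_t* end,
        std::vector<std::uintptr_t>& candidates)
    {
        for (std::uintptr_t* p = begin; p < end; ++p)
            candidates.push_back(*p);
    }
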
void JSStack::releaseExcessCapacity()
{
- Register* highAddressWithReservedZone = highAddress() - m_reservedZoneSizeInRegisters;
- ptrdiff_t delta = reinterpret_cast<char*>(highAddressWithReservedZone) - reinterpret_cast<char*>(m_commitTop);
- m_reservation.decommit(m_commitTop, delta);
+ ptrdiff_t delta = reinterpret_cast<uintptr_t>(highAddress()) - reinterpret_cast<uintptr_t>(m_commitEnd);
+ m_reservation.decommit(m_commitEnd, delta);
addToCommittedByteCount(-delta);
- m_commitTop = highAddressWithReservedZone;
+ m_commitEnd = highAddress();
}
-void JSStack::addToCommittedByteCount(long byteCount)
+void JSStack::initializeThreading()
{
- LockHolder locker(stackStatisticsMutex);
- ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1);
- committedBytesCount += byteCount;
+ stackStatisticsMutex();
}
-void JSStack::setReservedZoneSize(size_t reservedZoneSize)
+size_t JSStack::committedByteCount()
{
- m_reservedZoneSizeInRegisters = reservedZoneSize / sizeof(Register);
- if (m_commitTop >= (m_end + 1) - m_reservedZoneSizeInRegisters)
- growSlowCase(m_end + 1);
+ MutexLocker locker(stackStatisticsMutex());
+ return committedBytesCount;
}
-#endif // !ENABLE(JIT)
-#if ENABLE(JIT)
-Register* JSStack::lowAddress() const
+void JSStack::addToCommittedByteCount(long byteCount)
{
- ASSERT(wtfThreadData().stack().isGrowingDownward());
- return reinterpret_cast<Register*>(m_vm.stackLimit());
+ MutexLocker locker(stackStatisticsMutex());
+ ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1);
+ committedBytesCount += byteCount;
}
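
committedByteCount() and addToCommittedByteCount() serialize access to a process-wide counter through the lazily-created mutex from the top of the file. The same pattern in portable C++, with std::mutex standing in for WTF's Mutex and DEFINE_STATIC_LOCAL:

    #include <cstddef>
    #include <mutex>

    static size_t s_committedBytes = 0;

    static std::mutex& statsMutex()
    {
        static std::mutex mutex; // created on first use, like DEFINE_STATIC_LOCAL
        return mutex;
    }

    static void addToCommittedBytes(long byteCount)
    {
        std::lock_guard<std::mutex> locker(statsMutex());
        s_committedBytes += byteCount; // byteCount may be negative on decommit
    }

    static size_t committedBytes()
    {
        std::lock_guard<std::mutex> locker(statsMutex());
        return s_committedBytes;
    }
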
-Register* JSStack::highAddress() const
+void JSStack::enableErrorStackReserve()
{
- ASSERT(wtfThreadData().stack().isGrowingDownward());
- return reinterpret_cast<Register*>(wtfThreadData().stack().origin());
+ m_useableEnd = reservationEnd();
}
-#endif // ENABLE(JIT)
-size_t JSStack::committedByteCount()
+void JSStack::disableErrorStackReserve()
{
-#if !ENABLE(JIT)
- LockHolder locker(stackStatisticsMutex);
- return committedBytesCount;
-#else
- // When using the C stack, we don't know how many stack pages are actually
- // committed. So, we use the current stack usage as an estimate.
- ASSERT(wtfThreadData().stack().isGrowingDownward());
- int8_t* current = reinterpret_cast<int8_t*>(&current);
- int8_t* high = reinterpret_cast<int8_t*>(wtfThreadData().stack().origin());
- return high - current;
-#endif
+ char* useableEnd = reinterpret_cast<char*>(reservationEnd()) + commitSize();
+ m_useableEnd = reinterpret_cast_ptr<Register*>(useableEnd);
+
+ // By the time we get here, we are guaranteed to be destructing the last
+ // Interpreter::ErrorHandlingMode that enabled this reserve in the first
+ // place. That means the stack space beyond the pre-reserve m_useableEnd
+ // was not in use before the reserve was enabled. Hence, it is safe to
+ // shrink the stack back to that m_useableEnd.
+ if (m_end < m_useableEnd) {
+ ASSERT(m_topCallFrame->frameExtent() >= m_useableEnd);
+ shrink(m_useableEnd);
+ }
}
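
enableErrorStackReserve() and disableErrorStackReserve() move m_useableEnd between the very bottom of the reservation and one commit granule above it, so error handling can temporarily spend the reserved tail. A hedged sketch of that bookkeeping (field names are illustrative, not the JSStack members):

    #include <cstddef>

    // The stack grows down; reservationBase is the lowest reserved address.
    struct ErrorReserve {
        char* reservationBase;
        size_t commitGranule;
        char* useableEnd;

        // Error handling may dip into the final granule of the reservation.
        void enable() { useableEnd = reservationBase; }

        // Normal mode keeps one granule in reserve for error handling.
        void disable() { useableEnd = reservationBase + commitGranule; }
    };
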
} // namespace JSC