diff options
author | Lorry Tar Creator <lorry-tar-importer@lorry> | 2016-05-24 08:28:08 +0000 |
---|---|---|
committer | Lorry Tar Creator <lorry-tar-importer@lorry> | 2016-05-24 08:28:08 +0000 |
commit | a4e969f4965059196ca948db781e52f7cfebf19e (patch) | |
tree | 6ca352808c8fdc52006a0f33f6ae3c593b23867d /Source/JavaScriptCore/interpreter/JSStack.cpp | |
parent | 41386e9cb918eed93b3f13648cbef387e371e451 (diff) | |
download | WebKitGtk-tarball-a4e969f4965059196ca948db781e52f7cfebf19e.tar.gz |
webkitgtk-2.12.3webkitgtk-2.12.3
Diffstat (limited to 'Source/JavaScriptCore/interpreter/JSStack.cpp')
-rw-r--r-- | Source/JavaScriptCore/interpreter/JSStack.cpp | 147 |
1 file changed, 90 insertions, 57 deletions
diff --git a/Source/JavaScriptCore/interpreter/JSStack.cpp b/Source/JavaScriptCore/interpreter/JSStack.cpp index 722e1bc23..6e441dfd5 100644 --- a/Source/JavaScriptCore/interpreter/JSStack.cpp +++ b/Source/JavaScriptCore/interpreter/JSStack.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2013, 2014, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* @@ -31,121 +31,154 @@ #include "ConservativeRoots.h" #include "Interpreter.h" +#include "JSCInlines.h" +#include "Options.h" +#include <wtf/Lock.h> namespace JSC { +#if !ENABLE(JIT) static size_t committedBytesCount = 0; -static Mutex& stackStatisticsMutex() +static size_t commitSize() { - DEFINE_STATIC_LOCAL(Mutex, staticMutex, ()); - return staticMutex; -} + static size_t size = std::max<size_t>(16 * 1024, pageSize()); + return size; +} + +static StaticLock stackStatisticsMutex; +#endif // !ENABLE(JIT) -JSStack::JSStack(VM& vm, size_t capacity) +JSStack::JSStack(VM& vm) : m_vm(vm) - , m_end(0) , m_topCallFrame(vm.topCallFrame) +#if !ENABLE(JIT) + , m_end(0) + , m_reservedZoneSizeInRegisters(0) +#endif { +#if !ENABLE(JIT) + size_t capacity = Options::maxPerThreadStackUsage(); ASSERT(capacity && isPageAligned(capacity)); - m_reservation = PageReservation::reserve(roundUpAllocationSize(capacity * sizeof(Register), commitSize), OSAllocator::JSVMStackPages); - updateStackLimit(highAddress()); - m_commitEnd = highAddress(); - - disableErrorStackReserve(); + m_reservation = PageReservation::reserve(WTF::roundUpToMultipleOf(commitSize(), capacity), OSAllocator::JSVMStackPages); + setStackLimit(highAddress()); + m_commitTop = highAddress(); + + m_lastStackTop = baseOfStack(); +#endif // !ENABLE(JIT) m_topCallFrame = 0; } +#if !ENABLE(JIT) JSStack::~JSStack() { - void* highAddress = reinterpret_cast<void*>(static_cast<char*>(m_reservation.base()) + m_reservation.size()); - m_reservation.decommit(reinterpret_cast<void*>(m_commitEnd), reinterpret_cast<intptr_t>(highAddress) - reinterpret_cast<intptr_t>(m_commitEnd)); - addToCommittedByteCount(-(reinterpret_cast<intptr_t>(highAddress) - reinterpret_cast<intptr_t>(m_commitEnd))); + ptrdiff_t sizeToDecommit = reinterpret_cast<char*>(highAddress()) - reinterpret_cast<char*>(m_commitTop); + m_reservation.decommit(reinterpret_cast<void*>(m_commitTop), sizeToDecommit); + addToCommittedByteCount(-sizeToDecommit); 
m_reservation.deallocate(); } -bool JSStack::growSlowCase(Register* newEnd) +bool JSStack::growSlowCase(Register* newTopOfStack) { + Register* newTopOfStackWithReservedZone = newTopOfStack - m_reservedZoneSizeInRegisters; + // If we have already committed enough memory to satisfy this request, // just update the end pointer and return. - if (newEnd >= m_commitEnd) { - updateStackLimit(newEnd); + if (newTopOfStackWithReservedZone >= m_commitTop) { + setStackLimit(newTopOfStack); return true; } // Compute the chunk size of additional memory to commit, and see if we // have it is still within our budget. If not, we'll fail to grow and // return false. - long delta = roundUpAllocationSize(reinterpret_cast<char*>(m_commitEnd) - reinterpret_cast<char*>(newEnd), commitSize); - if (reinterpret_cast<char*>(m_commitEnd) - delta <= reinterpret_cast<char*>(m_useableEnd)) + ptrdiff_t delta = reinterpret_cast<char*>(m_commitTop) - reinterpret_cast<char*>(newTopOfStackWithReservedZone); + delta = WTF::roundUpToMultipleOf(commitSize(), delta); + Register* newCommitTop = m_commitTop - (delta / sizeof(Register)); + if (newCommitTop < reservationTop()) return false; - // Otherwise, the growth is still within our budget. Go ahead and commit - // it and return true. - m_reservation.commit(reinterpret_cast<char*>(m_commitEnd) - delta, delta); + // Otherwise, the growth is still within our budget. Commit it and return true. 
+ m_reservation.commit(newCommitTop, delta); addToCommittedByteCount(delta); - m_commitEnd = reinterpret_cast_ptr<Register*>(reinterpret_cast<char*>(m_commitEnd) - delta); - updateStackLimit(newEnd); + m_commitTop = newCommitTop; + setStackLimit(newTopOfStack); return true; } -void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots) +void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks) { - conservativeRoots.add(getBaseOfStack(), getTopOfStack()); + conservativeRoots.add(topOfStack() + 1, highAddress(), jitStubRoutines, codeBlocks); } -void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks) +void JSStack::sanitizeStack() { - conservativeRoots.add(getBaseOfStack(), getTopOfStack(), jitStubRoutines, codeBlocks); +#if !ASAN_ENABLED + ASSERT(topOfStack() <= baseOfStack()); + + if (m_lastStackTop < topOfStack()) { + char* begin = reinterpret_cast<char*>(m_lastStackTop + 1); + char* end = reinterpret_cast<char*>(topOfStack() + 1); + memset(begin, 0, end - begin); + } + + m_lastStackTop = topOfStack(); +#endif } void JSStack::releaseExcessCapacity() { - ptrdiff_t delta = reinterpret_cast<uintptr_t>(highAddress()) - reinterpret_cast<uintptr_t>(m_commitEnd); - m_reservation.decommit(m_commitEnd, delta); + Register* highAddressWithReservedZone = highAddress() - m_reservedZoneSizeInRegisters; + ptrdiff_t delta = reinterpret_cast<char*>(highAddressWithReservedZone) - reinterpret_cast<char*>(m_commitTop); + m_reservation.decommit(m_commitTop, delta); addToCommittedByteCount(-delta); - m_commitEnd = highAddress(); + m_commitTop = highAddressWithReservedZone; } -void JSStack::initializeThreading() +void JSStack::addToCommittedByteCount(long byteCount) { - stackStatisticsMutex(); + LockHolder locker(stackStatisticsMutex); + ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1); + 
committedBytesCount += byteCount; } -size_t JSStack::committedByteCount() +void JSStack::setReservedZoneSize(size_t reservedZoneSize) { - MutexLocker locker(stackStatisticsMutex()); - return committedBytesCount; + m_reservedZoneSizeInRegisters = reservedZoneSize / sizeof(Register); + if (m_commitTop >= (m_end + 1) - m_reservedZoneSizeInRegisters) + growSlowCase(m_end + 1); } +#endif // !ENABLE(JIT) -void JSStack::addToCommittedByteCount(long byteCount) +#if ENABLE(JIT) +Register* JSStack::lowAddress() const { - MutexLocker locker(stackStatisticsMutex()); - ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1); - committedBytesCount += byteCount; + ASSERT(wtfThreadData().stack().isGrowingDownward()); + return reinterpret_cast<Register*>(m_vm.stackLimit()); } -void JSStack::enableErrorStackReserve() +Register* JSStack::highAddress() const { - m_useableEnd = reservationEnd(); + ASSERT(wtfThreadData().stack().isGrowingDownward()); + return reinterpret_cast<Register*>(wtfThreadData().stack().origin()); } +#endif // ENABLE(JIT) -void JSStack::disableErrorStackReserve() +size_t JSStack::committedByteCount() { - char* useableEnd = reinterpret_cast<char*>(reservationEnd()) + commitSize; - m_useableEnd = reinterpret_cast_ptr<Register*>(useableEnd); - - // By the time we get here, we are guaranteed to be destructing the last - // Interpreter::ErrorHandlingMode that enabled this reserve in the first - // place. That means the stack space beyond m_useableEnd before we - // enabled the reserve was not previously in use. Hence, it is safe to - // shrink back to that m_useableEnd. - if (m_end < m_useableEnd) { - ASSERT(m_topCallFrame->frameExtent() >= m_useableEnd); - shrink(m_useableEnd); - } +#if !ENABLE(JIT) + LockHolder locker(stackStatisticsMutex); + return committedBytesCount; +#else + // When using the C stack, we don't know how many stack pages are actually + // committed. So, we use the current stack usage as an estimate. 
+ ASSERT(wtfThreadData().stack().isGrowingDownward()); + int8_t* current = reinterpret_cast<int8_t*>(&current); + int8_t* high = reinterpret_cast<int8_t*>(wtfThreadData().stack().origin()); + return high - current; +#endif } } // namespace JSC |