Diffstat (limited to 'Source/JavaScriptCore/heap')
-rw-r--r-- | Source/JavaScriptCore/heap/CopiedAllocator.h          | 114
-rw-r--r-- | Source/JavaScriptCore/heap/CopiedBlock.h              |  76
-rw-r--r-- | Source/JavaScriptCore/heap/CopiedSpace.cpp            |  32
-rw-r--r-- | Source/JavaScriptCore/heap/CopiedSpace.h              |   2
-rw-r--r-- | Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h |  30
-rw-r--r-- | Source/JavaScriptCore/heap/MachineStackMarker.cpp     |  14
-rw-r--r-- | Source/JavaScriptCore/heap/MachineStackMarker.h       |   4
-rw-r--r-- | Source/JavaScriptCore/heap/MarkStack.cpp              |  32
-rw-r--r-- | Source/JavaScriptCore/heap/SlotVisitor.h              |   3
9 files changed, 197 insertions, 110 deletions
diff --git a/Source/JavaScriptCore/heap/CopiedAllocator.h b/Source/JavaScriptCore/heap/CopiedAllocator.h
index 7455ec816..32b84f008 100644
--- a/Source/JavaScriptCore/heap/CopiedAllocator.h
+++ b/Source/JavaScriptCore/heap/CopiedAllocator.h
@@ -27,72 +27,128 @@
 #define CopiedAllocator_h
 
 #include "CopiedBlock.h"
+#include <wtf/CheckedBoolean.h>
+#include <wtf/DataLog.h>
 
 namespace JSC {
 
 class CopiedAllocator {
-    friend class JIT;
 public:
     CopiedAllocator();
-    void* allocate(size_t);
-    bool fitsInCurrentBlock(size_t);
-    bool wasLastAllocation(void*, size_t);
-    void startedCopying();
-    void resetCurrentBlock(CopiedBlock*);
+    
+    bool fastPathShouldSucceed(size_t bytes) const;
+    CheckedBoolean tryAllocate(size_t bytes, void** outPtr);
+    CheckedBoolean tryReallocate(void *oldPtr, size_t oldBytes, size_t newBytes);
+    void* forceAllocate(size_t bytes);
+    CopiedBlock* resetCurrentBlock();
+    void setCurrentBlock(CopiedBlock*);
     size_t currentCapacity();
+    
+    bool isValid() { return !!m_currentBlock; }
 
-private:
     CopiedBlock* currentBlock() { return m_currentBlock; }
 
-    char* m_currentOffset;
+    // Yes, these are public. No, that doesn't mean you can play with them.
+    // If I had made them private then I'd have to list off all of the JIT
+    // classes and functions that are entitled to modify these directly, and
+    // that would have been gross.
+    size_t m_currentRemaining;
+    char* m_currentPayloadEnd;
     CopiedBlock* m_currentBlock;
 };
 
 inline CopiedAllocator::CopiedAllocator()
-    : m_currentOffset(0)
+    : m_currentRemaining(0)
+    , m_currentPayloadEnd(0)
     , m_currentBlock(0)
 {
 }
 
-inline void* CopiedAllocator::allocate(size_t bytes)
+inline bool CopiedAllocator::fastPathShouldSucceed(size_t bytes) const
 {
-    ASSERT(m_currentOffset);
     ASSERT(is8ByteAligned(reinterpret_cast<void*>(bytes)));
-    ASSERT(fitsInCurrentBlock(bytes));
-    void* ptr = static_cast<void*>(m_currentOffset);
-    m_currentOffset += bytes;
-    ASSERT(is8ByteAligned(ptr));
-    return ptr;
+    
+    return bytes <= m_currentRemaining;
 }
 
-inline bool CopiedAllocator::fitsInCurrentBlock(size_t bytes)
+inline CheckedBoolean CopiedAllocator::tryAllocate(size_t bytes, void** outPtr)
 {
-    return m_currentOffset + bytes < reinterpret_cast<char*>(m_currentBlock) + HeapBlock::s_blockSize && m_currentOffset + bytes > m_currentOffset;
+    ASSERT(is8ByteAligned(reinterpret_cast<void*>(bytes)));
+    
+    // This code is written in a gratuitously low-level manner, in order to
+    // serve as a kind of template for what the JIT would do. Note that the
+    // way it's written it ought to only require one register, which doubles
+    // as the result, provided that the compiler does a minimal amount of
+    // control flow simplification and the bytes argument is a constant.
+    
+    size_t currentRemaining = m_currentRemaining;
+    if (bytes > currentRemaining)
+        return false;
+    currentRemaining -= bytes;
+    m_currentRemaining = currentRemaining;
+    *outPtr = m_currentPayloadEnd - currentRemaining - bytes;
+    
+    ASSERT(is8ByteAligned(*outPtr));
+    
+    return true;
+}
+
+inline CheckedBoolean CopiedAllocator::tryReallocate(
+    void* oldPtr, size_t oldBytes, size_t newBytes)
+{
+    ASSERT(is8ByteAligned(oldPtr));
+    ASSERT(is8ByteAligned(reinterpret_cast<void*>(oldBytes)));
+    ASSERT(is8ByteAligned(reinterpret_cast<void*>(newBytes)));
+    
+    ASSERT(newBytes > oldBytes);
+    
+    size_t additionalBytes = newBytes - oldBytes;
+    
+    size_t currentRemaining = m_currentRemaining;
+    if (m_currentPayloadEnd - currentRemaining - oldBytes != static_cast<char*>(oldPtr))
+        return false;
+    
+    if (additionalBytes > currentRemaining)
+        return false;
+    
+    m_currentRemaining = currentRemaining - additionalBytes;
+    
+    return true;
 }
 
-inline bool CopiedAllocator::wasLastAllocation(void* ptr, size_t size)
+inline void* CopiedAllocator::forceAllocate(size_t bytes)
 {
-    return static_cast<char*>(ptr) + size == m_currentOffset && ptr > m_currentBlock && ptr < reinterpret_cast<char*>(m_currentBlock) + HeapBlock::s_blockSize;
+    void* result = 0; // Needed because compilers don't realize this will always be assigned.
+    CheckedBoolean didSucceed = tryAllocate(bytes, &result);
+    ASSERT(didSucceed);
+    return result;
 }
 
-inline void CopiedAllocator::startedCopying()
+inline CopiedBlock* CopiedAllocator::resetCurrentBlock()
 {
-    if (m_currentBlock)
-        m_currentBlock->m_offset = static_cast<void*>(m_currentOffset);
-    m_currentOffset = 0;
-    m_currentBlock = 0;
+    CopiedBlock* result = m_currentBlock;
+    if (result) {
+        result->m_remaining = m_currentRemaining;
+        m_currentBlock = 0;
+        m_currentRemaining = 0;
+        m_currentPayloadEnd = 0;
+    }
+    return result;
 }
 
-inline void CopiedAllocator::resetCurrentBlock(CopiedBlock* newBlock)
+inline void CopiedAllocator::setCurrentBlock(CopiedBlock* newBlock)
 {
-    if (m_currentBlock)
-        m_currentBlock->m_offset = static_cast<void*>(m_currentOffset);
+    ASSERT(!m_currentBlock);
     m_currentBlock = newBlock;
-    m_currentOffset = static_cast<char*>(newBlock->m_offset);
+    ASSERT(newBlock);
+    m_currentRemaining = newBlock->m_remaining;
+    m_currentPayloadEnd = newBlock->payloadEnd();
 }
 
 inline size_t CopiedAllocator::currentCapacity()
 {
+    if (!m_currentBlock)
+        return 0;
     return m_currentBlock->capacity();
 }
 
diff --git a/Source/JavaScriptCore/heap/CopiedBlock.h b/Source/JavaScriptCore/heap/CopiedBlock.h
index 5ed58008e..6717a6835 100644
--- a/Source/JavaScriptCore/heap/CopiedBlock.h
+++ b/Source/JavaScriptCore/heap/CopiedBlock.h
@@ -42,15 +42,30 @@ public:
     static CopiedBlock* createNoZeroFill(const PageAllocationAligned&);
     static PageAllocationAligned destroy(CopiedBlock*);
 
+    // The payload is the region of the block that is usable for allocations.
     char* payload();
+    char* payloadEnd();
+    size_t payloadCapacity();
+
+    // The data is the region of the block that has been used for allocations.
+    char* data();
+    char* dataEnd();
+    size_t dataSize();
+
+    // The wilderness is the region of the block that is usable for allocations
+    // but has not been so used.
+    char* wilderness();
+    char* wildernessEnd();
+    size_t wildernessSize();
+
     size_t size();
     size_t capacity();
 
 private:
     CopiedBlock(const PageAllocationAligned&);
-    void zeroFillToEnd(); // Can be called at any time to zero-fill to the end of the block.
+    void zeroFillWilderness(); // Can be called at any time to zero-fill to the end of the block.
 
-    void* m_offset;
+    size_t m_remaining;
     uintptr_t m_isPinned;
 };
 
@@ -62,19 +77,18 @@ inline CopiedBlock* CopiedBlock::createNoZeroFill(const PageAllocationAligned& a
 inline CopiedBlock* CopiedBlock::create(const PageAllocationAligned& allocation)
 {
     CopiedBlock* block = createNoZeroFill(allocation);
-    block->zeroFillToEnd();
+    block->zeroFillWilderness();
     return block;
 }
 
-inline void CopiedBlock::zeroFillToEnd()
+inline void CopiedBlock::zeroFillWilderness()
 {
 #if USE(JSVALUE64)
-    char* offset = static_cast<char*>(m_offset);
-    memset(static_cast<void*>(offset), 0, static_cast<size_t>((reinterpret_cast<char*>(this) + m_allocation.size()) - offset));
+    memset(wilderness(), 0, wildernessSize());
 #else
     JSValue emptyValue;
-    JSValue* limit = reinterpret_cast_ptr<JSValue*>(reinterpret_cast<char*>(this) + m_allocation.size());
-    for (JSValue* currentValue = reinterpret_cast<JSValue*>(m_offset); currentValue < limit; currentValue++)
+    JSValue* limit = reinterpret_cast_ptr<JSValue*>(wildernessEnd());
+    for (JSValue* currentValue = reinterpret_cast<JSValue*>(wilderness()); currentValue < limit; currentValue++)
         *currentValue = emptyValue;
 #endif
 }
@@ -90,10 +104,10 @@ inline PageAllocationAligned CopiedBlock::destroy(CopiedBlock* block)
 
 inline CopiedBlock::CopiedBlock(const PageAllocationAligned& allocation)
     : HeapBlock(allocation)
-    , m_offset(payload())
+    , m_remaining(payloadCapacity())
     , m_isPinned(false)
 {
-    ASSERT(is8ByteAligned(static_cast<void*>(m_offset)));
+    ASSERT(is8ByteAligned(reinterpret_cast<void*>(m_remaining)));
 }
 
 inline char* CopiedBlock::payload()
@@ -101,9 +115,49 @@ inline char* CopiedBlock::payload()
     return reinterpret_cast<char*>(this) + ((sizeof(CopiedBlock) + 7) & ~7);
 }
 
+inline char* CopiedBlock::payloadEnd()
+{
+    return reinterpret_cast<char*>(this) + m_allocation.size();
+}
+
+inline size_t CopiedBlock::payloadCapacity()
+{
+    return payloadEnd() - payload();
+}
+
+inline char* CopiedBlock::data()
+{
+    return payload();
+}
+
+inline char* CopiedBlock::dataEnd()
+{
+    return payloadEnd() - m_remaining;
+}
+
+inline size_t CopiedBlock::dataSize()
+{
+    return dataEnd() - data();
+}
+
+inline char* CopiedBlock::wilderness()
+{
+    return dataEnd();
+}
+
+inline char* CopiedBlock::wildernessEnd()
+{
+    return payloadEnd();
+}
+
+inline size_t CopiedBlock::wildernessSize()
+{
+    return wildernessEnd() - wilderness();
+}
+
 inline size_t CopiedBlock::size()
 {
-    return static_cast<size_t>(static_cast<char*>(m_offset) - payload());
+    return dataSize();
 }
 
 inline size_t CopiedBlock::capacity()
diff --git a/Source/JavaScriptCore/heap/CopiedSpace.cpp b/Source/JavaScriptCore/heap/CopiedSpace.cpp
index 9eb70a556..147dfa4b3 100644
--- a/Source/JavaScriptCore/heap/CopiedSpace.cpp
+++ b/Source/JavaScriptCore/heap/CopiedSpace.cpp
@@ -71,8 +71,7 @@ CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
 
     allocateBlock();
 
-    *outPtr = m_allocator.allocate(bytes);
-    ASSERT(*outPtr);
+    *outPtr = m_allocator.forceAllocate(bytes);
     return true;
 }
 
@@ -93,7 +92,10 @@ CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
     m_blockFilter.add(reinterpret_cast<Bits>(block));
     m_blockSet.add(block);
 
-    *outPtr = allocateFromBlock(block, bytes);
+    CopiedAllocator allocator;
+    allocator.setCurrentBlock(block);
+    *outPtr = allocator.forceAllocate(bytes);
+    allocator.resetCurrentBlock();
 
     m_heap->didAllocate(blockSize);
 
@@ -107,17 +109,12 @@ CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t new
     void* oldPtr = *ptr;
     ASSERT(!m_heap->globalData()->isInitializingObject());
-    
+
     if (isOversize(oldSize) || isOversize(newSize))
         return tryReallocateOversize(ptr, oldSize, newSize);
-    
-    if (m_allocator.wasLastAllocation(oldPtr, oldSize)) {
-        size_t delta = newSize - oldSize;
-        if (m_allocator.fitsInCurrentBlock(delta)) {
-            (void)m_allocator.allocate(delta);
-            return true;
-        }
-    }
+
+    if (m_allocator.tryReallocate(oldPtr, oldSize, newSize))
+        return true;
 
     void* result = 0;
     if (!tryAllocate(newSize, &result)) {
@@ -157,16 +154,17 @@ CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, si
 
 void CopiedSpace::doneFillingBlock(CopiedBlock* block)
 {
-    ASSERT(block);
-    ASSERT(block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize);
     ASSERT(m_inCopyingPhase);
+    
+    if (!block)
+        return;
 
-    if (block->m_offset == block->payload()) {
+    if (!block->dataSize()) {
         recycleBlock(block);
         return;
     }
 
-    block->zeroFillToEnd();
+    block->zeroFillWilderness();
 
     {
         SpinLockHolder locker(&m_toSpaceLock);
@@ -226,7 +224,7 @@ void CopiedSpace::doneCopying()
     if (!m_toSpace->head())
         allocateBlock();
     else
-        m_allocator.resetCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
+        m_allocator.setCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
 }
 
 size_t CopiedSpace::size()
diff --git a/Source/JavaScriptCore/heap/CopiedSpace.h b/Source/JavaScriptCore/heap/CopiedSpace.h
index 530e989da..de682a4c1 100644
--- a/Source/JavaScriptCore/heap/CopiedSpace.h
+++ b/Source/JavaScriptCore/heap/CopiedSpace.h
@@ -77,9 +77,7 @@ public:
     static CopiedBlock* blockFor(void*);
 
 private:
-    static void* allocateFromBlock(CopiedBlock*, size_t);
     static bool isOversize(size_t);
-    static bool fitsInBlock(CopiedBlock*, size_t);
     static CopiedBlock* oversizeBlockFor(void* ptr);
 
     CheckedBoolean tryAllocateSlowCase(size_t, void**);
diff --git a/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h b/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
index 1366cd8a7..f702e1dd9 100644
--- a/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
+++ b/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
@@ -64,7 +64,7 @@ inline void CopiedSpace::startedCopying()
     m_toSpace = temp;
 
     m_blockFilter.reset();
-    m_allocator.startedCopying();
+    m_allocator.resetCurrentBlock();
 
     ASSERT(!m_inCopyingPhase);
     ASSERT(!m_numberOfLoanedBlocks);
@@ -94,7 +94,7 @@ inline CopiedBlock* CopiedSpace::allocateBlockForCopyingPhase()
         m_numberOfLoanedBlocks++;
     }
 
-    ASSERT(block->m_offset == block->payload());
+    ASSERT(!block->dataSize());
     return block;
 }
 
@@ -103,45 +103,27 @@ inline void CopiedSpace::allocateBlock()
     if (m_heap->shouldCollect())
         m_heap->collect(Heap::DoNotSweep);
 
+    m_allocator.resetCurrentBlock();
+    
     CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocate());
 
     m_toSpace->push(block);
     m_blockFilter.add(reinterpret_cast<Bits>(block));
     m_blockSet.add(block);
 
-    m_allocator.resetCurrentBlock(block);
-}
-
-inline bool CopiedSpace::fitsInBlock(CopiedBlock* block, size_t bytes)
-{
-    return static_cast<char*>(block->m_offset) + bytes < reinterpret_cast<char*>(block) + block->capacity() && static_cast<char*>(block->m_offset) + bytes > block->m_offset;
+    m_allocator.setCurrentBlock(block);
 }
 
 inline CheckedBoolean CopiedSpace::tryAllocate(size_t bytes, void** outPtr)
 {
     ASSERT(!m_heap->globalData()->isInitializingObject());
 
-    if (isOversize(bytes) || !m_allocator.fitsInCurrentBlock(bytes))
+    if (isOversize(bytes) || !m_allocator.tryAllocate(bytes, outPtr))
        return tryAllocateSlowCase(bytes, outPtr);
     
-    *outPtr = m_allocator.allocate(bytes);
     ASSERT(*outPtr);
     return true;
 }
 
-inline void* CopiedSpace::allocateFromBlock(CopiedBlock* block, size_t bytes)
-{
-    ASSERT(fitsInBlock(block, bytes));
-    ASSERT(is8ByteAligned(block->m_offset));
-    
-    void* ptr = block->m_offset;
-    ASSERT(block->m_offset >= block->payload() && block->m_offset < reinterpret_cast<char*>(block) + block->capacity());
-    block->m_offset = static_cast<void*>((static_cast<char*>(ptr) + bytes));
-    ASSERT(block->m_offset >= block->payload() && block->m_offset < reinterpret_cast<char*>(block) + block->capacity());
-
-    ASSERT(is8ByteAligned(ptr));
-    return ptr;
-}
-
 inline bool CopiedSpace::isOversize(size_t bytes)
 {
     return bytes > s_maxAllocationSize;
diff --git a/Source/JavaScriptCore/heap/MachineStackMarker.cpp b/Source/JavaScriptCore/heap/MachineStackMarker.cpp
index 8e0c57b6a..7eb57479b 100644
--- a/Source/JavaScriptCore/heap/MachineStackMarker.cpp
+++ b/Source/JavaScriptCore/heap/MachineStackMarker.cpp
@@ -141,8 +141,10 @@ MachineThreads::MachineThreads(Heap* heap)
 
 MachineThreads::~MachineThreads()
 {
-    if (m_threadSpecific)
-        ThreadSpecificKeyDelete(m_threadSpecific);
+    if (m_threadSpecific) {
+        int error = pthread_key_delete(m_threadSpecific);
+        ASSERT_UNUSED(error, !error);
+    }
 
     MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
     for (Thread* t = m_registeredThreads; t;) {
@@ -179,17 +181,19 @@ void MachineThreads::makeUsableFromMultipleThreads()
     if (m_threadSpecific)
         return;
 
-    ThreadSpecificKeyCreate(&m_threadSpecific, removeThread);
+    int error = pthread_key_create(&m_threadSpecific, removeThread);
+    if (error)
+        CRASH();
 }
 
 void MachineThreads::addCurrentThread()
 {
     ASSERT(!m_heap->globalData()->exclusiveThread || m_heap->globalData()->exclusiveThread == currentThread());
 
-    if (!m_threadSpecific || ThreadSpecificGet(m_threadSpecific))
+    if (!m_threadSpecific || pthread_getspecific(m_threadSpecific))
         return;
 
-    ThreadSpecificSet(m_threadSpecific, this);
+    pthread_setspecific(m_threadSpecific, this);
     Thread* thread = new Thread(getCurrentPlatformThread(), wtfThreadData().stack().origin());
 
     MutexLocker lock(m_registeredThreadsMutex);
diff --git a/Source/JavaScriptCore/heap/MachineStackMarker.h b/Source/JavaScriptCore/heap/MachineStackMarker.h
index 3d4aa22d4..5c7705fcf 100644
--- a/Source/JavaScriptCore/heap/MachineStackMarker.h
+++ b/Source/JavaScriptCore/heap/MachineStackMarker.h
@@ -22,8 +22,8 @@
 #ifndef MachineThreads_h
 #define MachineThreads_h
 
+#include <pthread.h>
 #include <wtf/Noncopyable.h>
-#include <wtf/ThreadSpecific.h>
 #include <wtf/ThreadingPrimitives.h>
 
 namespace JSC {
@@ -55,7 +55,7 @@ namespace JSC {
         Heap* m_heap;
        Mutex m_registeredThreadsMutex;
         Thread* m_registeredThreads;
-        WTF::ThreadSpecificKey m_threadSpecific;
+        pthread_key_t m_threadSpecific;
     };
 
 } // namespace JSC
diff --git a/Source/JavaScriptCore/heap/MarkStack.cpp b/Source/JavaScriptCore/heap/MarkStack.cpp
index 9d9130026..9c679b0ed 100644
--- a/Source/JavaScriptCore/heap/MarkStack.cpp
+++ b/Source/JavaScriptCore/heap/MarkStack.cpp
@@ -515,9 +515,8 @@ void MarkStack::mergeOpaqueRoots()
 
 void SlotVisitor::startCopying()
 {
-    ASSERT(!m_copyBlock);
-    m_copyBlock = m_shared.m_copiedSpace->allocateBlockForCopyingPhase();
-}
+    ASSERT(!m_copiedAllocator.isValid());
+}
 
 void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes)
 {
@@ -528,18 +527,17 @@ void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes)
 
     if (m_shared.m_copiedSpace->isPinned(ptr))
         return 0;
+    
+    void* result = 0; // Compilers don't realize that this will be assigned.
+    if (m_copiedAllocator.tryAllocate(bytes, &result))
+        return result;
+    
+    m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock());
+    m_copiedAllocator.setCurrentBlock(m_shared.m_copiedSpace->allocateBlockForCopyingPhase());
 
-    // The only time it's possible to have a null copy block is if we have just started copying.
-    if (!m_copyBlock)
-        startCopying();
-
-    if (!CopiedSpace::fitsInBlock(m_copyBlock, bytes)) {
-        // We don't need to lock across these two calls because the master thread won't
-        // call doneCopying() because this thread is considered active.
-        m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
-        m_copyBlock = m_shared.m_copiedSpace->allocateBlockForCopyingPhase();
-    }
-    return CopiedSpace::allocateFromBlock(m_copyBlock, bytes);
+    CheckedBoolean didSucceed = m_copiedAllocator.tryAllocate(bytes, &result);
+    ASSERT(didSucceed);
+    return result;
 }
 
 ALWAYS_INLINE bool JSString::tryHashConstLock()
@@ -639,12 +637,10 @@ void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsig
 
 void SlotVisitor::doneCopying()
 {
-    if (!m_copyBlock)
+    if (!m_copiedAllocator.isValid())
         return;
 
-    m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
-    
-    m_copyBlock = 0;
+    m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock());
 }
 
 void SlotVisitor::harvestWeakReferences()
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.h b/Source/JavaScriptCore/heap/SlotVisitor.h
index 70d68bb04..d16602f15 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.h
+++ b/Source/JavaScriptCore/heap/SlotVisitor.h
@@ -70,12 +70,11 @@ private:
 
     void donateKnownParallel();
 
-    CopiedBlock* m_copyBlock;
+    CopiedAllocator m_copiedAllocator;
 };
 
 inline SlotVisitor::SlotVisitor(MarkStackThreadSharedData& shared)
     : MarkStack(shared)
-    , m_copyBlock(0)
 {
 }
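
Editor's note (not part of the patch): the heart of this change is the shape of the new CopiedAllocator fast path. The allocator keeps only a count of bytes remaining and a pointer to the end of the block's payload, so an allocation is one comparison, one subtraction, and one address computation. The standalone C++ sketch below shows that scheme in isolation; the struct and names are invented for illustration and are not the JSC types.

// Minimal sketch of a remaining-bytes bump allocator, assuming one fixed block.
#include <cassert>
#include <cstddef>

struct BumpAllocator {
    size_t remaining;   // bytes still free in the current block
    char* payloadEnd;   // one past the last usable byte of the block

    bool tryAllocate(size_t bytes, void** outPtr)
    {
        size_t r = remaining;
        if (bytes > r)
            return false;       // caller falls back to a slow path / new block
        r -= bytes;
        remaining = r;
        // The new object starts where the shrunk free region now begins,
        // measured back from the fixed payload end.
        *outPtr = payloadEnd - r - bytes;
        return true;
    }
};

int main()
{
    char block[1024];
    BumpAllocator a = { sizeof(block), block + sizeof(block) };
    void* p = 0;
    assert(a.tryAllocate(64, &p) && p == block);        // first object at the payload start
    void* q = 0;
    assert(a.tryAllocate(32, &q) && q == block + 64);   // next object immediately follows
    return 0;
}

Because the result address is derived from the fixed payload end and the shrinking remainder, one value can serve as both the size check and the result, which is what the patch's comment about needing only a single register in JIT-emitted code is getting at.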
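
Editor's note (not part of the patch): the patch also names three regions of a CopiedBlock: the payload (everything allocatable), the data (the already-used prefix), and the wilderness (the untouched suffix). A rough sketch, again with invented names rather than the JSC types, of how a single remaining counter determines all three boundaries:

#include <cstddef>

// Illustration only: one free-byte counter at the tail is enough to recover
// every region boundary, which is why the patch can drop m_offset entirely.
struct BlockRegions {
    char* payloadBegin;   // first allocatable byte
    char* payloadEnd;     // one past the last allocatable byte
    size_t remaining;     // free bytes left at the end of the payload

    char* dataEnd() const { return payloadEnd - remaining; }   // used prefix ends here
    size_t dataSize() const { return dataEnd() - payloadBegin; }
    char* wilderness() const { return dataEnd(); }              // unused suffix starts here
    size_t wildernessSize() const { return payloadEnd - wilderness(); }
};

In these terms, the patch's zeroFillWilderness() simply clears everything from dataEnd() to payloadEnd().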