Diffstat (limited to 'Source/JavaScriptCore/runtime/JSLock.cpp')
-rw-r--r-- | Source/JavaScriptCore/runtime/JSLock.cpp | 259 |
1 file changed, 158 insertions, 101 deletions
diff --git a/Source/JavaScriptCore/runtime/JSLock.cpp b/Source/JavaScriptCore/runtime/JSLock.cpp
index 4baa2a603..a2eff3c29 100644
--- a/Source/JavaScriptCore/runtime/JSLock.cpp
+++ b/Source/JavaScriptCore/runtime/JSLock.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005, 2008, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2005, 2008, 2012, 2014 Apple Inc. All rights reserved.
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Library General Public
@@ -25,29 +25,22 @@
 #include "CallFrame.h"
 #include "JSGlobalObject.h"
 #include "JSObject.h"
-#include "Operations.h"
-
-#if USE(PTHREADS)
-#include <pthread.h>
-#endif
+#include "JSCInlines.h"
+#include "SamplingProfiler.h"
+#include <thread>
 
 namespace JSC {
 
-Mutex* GlobalJSLock::s_sharedInstanceLock = 0;
+StaticLock GlobalJSLock::s_sharedInstanceMutex;
 
 GlobalJSLock::GlobalJSLock()
 {
-    s_sharedInstanceLock->lock();
+    s_sharedInstanceMutex.lock();
 }
 
 GlobalJSLock::~GlobalJSLock()
 {
-    s_sharedInstanceLock->unlock();
-}
-
-void GlobalJSLock::initialize()
-{
-    s_sharedInstanceLock = new Mutex();
+    s_sharedInstanceMutex.unlock();
 }
 
 JSLockHolder::JSLockHolder(ExecState* exec)
@@ -76,17 +69,18 @@ void JSLockHolder::init()
 JSLockHolder::~JSLockHolder()
 {
     RefPtr<JSLock> apiLock(&m_vm->apiLock());
-    m_vm.clear();
+    m_vm = nullptr;
     apiLock->unlock();
 }
 
 JSLock::JSLock(VM* vm)
-    : m_ownerThread(0)
+    : m_ownerThreadID(std::thread::id())
     , m_lockCount(0)
     , m_lockDropDepth(0)
+    , m_hasExclusiveThread(false)
     , m_vm(vm)
+    , m_entryAtomicStringTable(nullptr)
 {
-    m_spinLock.Init();
 }
 
 JSLock::~JSLock()
@@ -96,39 +90,104 @@ JSLock::~JSLock()
 void JSLock::willDestroyVM(VM* vm)
 {
     ASSERT_UNUSED(vm, m_vm == vm);
-    m_vm = 0;
+    m_vm = nullptr;
+}
+
+void JSLock::setExclusiveThread(std::thread::id threadId)
+{
+    RELEASE_ASSERT(!m_lockCount && m_ownerThreadID == std::thread::id());
+    m_hasExclusiveThread = (threadId != std::thread::id());
+    m_ownerThreadID = threadId;
 }
 
 void JSLock::lock()
 {
-    ThreadIdentifier currentThread = WTF::currentThread();
-    {
-        SpinLockHolder holder(&m_spinLock);
-        if (m_ownerThread == currentThread && m_lockCount) {
-            m_lockCount++;
-            return;
-        }
-    }
+    lock(1);
+}
 
-    m_lock.lock();
+void JSLock::lock(intptr_t lockCount)
+{
+    ASSERT(lockCount > 0);
+    if (currentThreadIsHoldingLock()) {
+        m_lockCount += lockCount;
+        return;
+    }
 
-    {
-        SpinLockHolder holder(&m_spinLock);
-        m_ownerThread = currentThread;
-        ASSERT(!m_lockCount);
-        m_lockCount = 1;
+    if (!m_hasExclusiveThread) {
+        m_lock.lock();
+        m_ownerThreadID = std::this_thread::get_id();
     }
+    ASSERT(!m_lockCount);
+    m_lockCount = lockCount;
+
+    didAcquireLock();
+}
+
+void JSLock::didAcquireLock()
+{
+    // FIXME: What should happen to the per-thread identifier table if we don't have a VM?
+    if (!m_vm)
+        return;
+
+    RELEASE_ASSERT(!m_vm->stackPointerAtVMEntry());
+    void* p = &p; // A proxy for the current stack pointer.
+    m_vm->setStackPointerAtVMEntry(p);
+
+    WTFThreadData& threadData = wtfThreadData();
+    m_vm->setLastStackTop(threadData.savedLastStackTop());
+
+    ASSERT(!m_entryAtomicStringTable);
+    m_entryAtomicStringTable = threadData.setCurrentAtomicStringTable(m_vm->atomicStringTable());
+    ASSERT(m_entryAtomicStringTable);
+
+    m_vm->heap.machineThreads().addCurrentThread();
+
+#if ENABLE(SAMPLING_PROFILER)
+    // Note: this must come after addCurrentThread().
+    if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler())
+        samplingProfiler->noticeJSLockAcquisition();
+#endif
 }
 
 void JSLock::unlock()
 {
-    ASSERT(currentThreadIsHoldingLock());
+    unlock(1);
+}
+
+void JSLock::unlock(intptr_t unlockCount)
+{
+    RELEASE_ASSERT(currentThreadIsHoldingLock());
+    ASSERT(m_lockCount >= unlockCount);
+
+    // Maintain m_lockCount while calling willReleaseLock() so that its callees know that
+    // they still have the lock.
+    if (unlockCount == m_lockCount)
+        willReleaseLock();
+
+    m_lockCount -= unlockCount;
+
+    if (!m_lockCount) {
+
+        if (!m_hasExclusiveThread) {
+            m_ownerThreadID = std::thread::id();
+            m_lock.unlock();
+        }
+    }
+}
+
+void JSLock::willReleaseLock()
+{
+    if (m_vm) {
+        m_vm->drainMicrotasks();
 
-    SpinLockHolder holder(&m_spinLock);
-    m_lockCount--;
+        m_vm->heap.releaseDelayedReleasedObjects();
+        m_vm->setStackPointerAtVMEntry(nullptr);
+    }
 
-    if (!m_lockCount)
-        m_lock.unlock();
+    if (m_entryAtomicStringTable) {
+        wtfThreadData().setCurrentAtomicStringTable(m_entryAtomicStringTable);
+        m_entryAtomicStringTable = nullptr;
+    }
 }
 
 void JSLock::lock(ExecState* exec)
@@ -143,93 +202,91 @@ void JSLock::unlock(ExecState* exec)
 
 bool JSLock::currentThreadIsHoldingLock()
 {
-    return m_lockCount && m_ownerThread == WTF::currentThread();
-}
-
-// This is fairly nasty. We allow multiple threads to run on the same
-// context, and we do not require any locking semantics in doing so -
-// clients of the API may simply use the context from multiple threads
-// concurently, and assume this will work. In order to make this work,
-// We lock the context when a thread enters, and unlock it when it leaves.
-// However we do not only unlock when the thread returns from its
-// entry point (evaluate script or call function), we also unlock the
-// context if the thread leaves JSC by making a call out to an external
-// function through a callback.
-//
-// All threads using the context share the same JS stack (the JSStack).
-// Whenever a thread calls into JSC it starts using the JSStack from the
-// previous 'high water mark' - the maximum point the stack has ever grown to
-// (returned by JSStack::end()). So if a first thread calls out to a
-// callback, and a second thread enters JSC, then also exits by calling out
-// to a callback, we can be left with stackframes from both threads in the
-// JSStack. As such, a problem may occur should the first thread's
-// callback complete first, and attempt to return to JSC. Were we to allow
-// this to happen, and were its stack to grow further, then it may potentially
-// write over the second thread's call frames.
-//
-// To avoid JS stack corruption we enforce a policy of only ever allowing two
-// threads to use a JS context concurrently, and only allowing the second of
-// these threads to execute until it has completed and fully returned from its
-// outermost call into JSC. We enforce this policy using 'lockDropDepth'. The
-// first time a thread exits it will call DropAllLocks - which will do as expected
-// and drop locks allowing another thread to enter. Should another thread, or the
-// same thread again, enter JSC (through evaluate script or call function), and exit
-// again through a callback, then the locks will not be dropped when DropAllLocks
-// is called (since lockDropDepth is non-zero). Since this thread is still holding
-// the locks, only it will be able to re-enter JSC (either be returning from the
-// callback, or by re-entering through another call to evaulate script or call
-// function).
-//
-// This policy is slightly more restricive than it needs to be for correctness -
-// we could validly allow futher entries into JSC from other threads, we only
-// need ensure that callbacks return in the reverse chronological order of the
-// order in which they were made - though implementing the less restrictive policy
-// would likely increase complexity and overhead.
-//
+    ASSERT(!m_hasExclusiveThread || (exclusiveThread() == std::this_thread::get_id()));
+    if (m_hasExclusiveThread)
+        return !!m_lockCount;
+    return m_ownerThreadID == std::this_thread::get_id();
+}
 
 // This function returns the number of locks that were dropped.
-unsigned JSLock::dropAllLocks()
+unsigned JSLock::dropAllLocks(DropAllLocks* dropper)
 {
-    if (m_lockDropDepth++)
+    if (m_hasExclusiveThread) {
+        ASSERT(exclusiveThread() == std::this_thread::get_id());
+        return 0;
+    }
+
+    if (!currentThreadIsHoldingLock())
         return 0;
 
-    return dropAllLocksUnconditionally();
+    ++m_lockDropDepth;
+
+    dropper->setDropDepth(m_lockDropDepth);
+
+    WTFThreadData& threadData = wtfThreadData();
+    threadData.setSavedStackPointerAtVMEntry(m_vm->stackPointerAtVMEntry());
+    threadData.setSavedLastStackTop(m_vm->lastStackTop());
+
+    unsigned droppedLockCount = m_lockCount;
+    unlock(droppedLockCount);
+
+    return droppedLockCount;
 }
 
-unsigned JSLock::dropAllLocksUnconditionally()
+void JSLock::grabAllLocks(DropAllLocks* dropper, unsigned droppedLockCount)
 {
-    unsigned lockCount = m_lockCount;
-    for (unsigned i = 0; i < lockCount; i++)
-        unlock();
+    ASSERT(!m_hasExclusiveThread || !droppedLockCount);
 
-    return lockCount;
+    // If no locks were dropped, nothing to do!
+    if (!droppedLockCount)
+        return;
+
+    ASSERT(!currentThreadIsHoldingLock());
+    lock(droppedLockCount);
+
+    while (dropper->dropDepth() != m_lockDropDepth) {
+        unlock(droppedLockCount);
+        std::this_thread::yield();
+        lock(droppedLockCount);
+    }
+
+    --m_lockDropDepth;
+
+    WTFThreadData& threadData = wtfThreadData();
+    m_vm->setStackPointerAtVMEntry(threadData.savedStackPointerAtVMEntry());
+    m_vm->setLastStackTop(threadData.savedLastStackTop());
 }
 
-void JSLock::grabAllLocks(unsigned lockCount)
+JSLock::DropAllLocks::DropAllLocks(VM* vm)
+    : m_droppedLockCount(0)
+    // If the VM is in the middle of being destroyed then we don't want to resurrect it
+    // by allowing DropAllLocks to ref it. By this point the JSLock has already been
+    // released anyways, so it doesn't matter that DropAllLocks is a no-op.
+    , m_vm(vm->refCount() ? vm : nullptr)
 {
-    for (unsigned i = 0; i < lockCount; i++)
-        lock();
-
-    m_lockDropDepth--;
+    if (!m_vm)
+        return;
+    wtfThreadData().resetCurrentAtomicStringTable();
+    RELEASE_ASSERT(!m_vm->apiLock().currentThreadIsHoldingLock() || !m_vm->isCollectorBusy());
+    m_droppedLockCount = m_vm->apiLock().dropAllLocks(this);
}
 
 JSLock::DropAllLocks::DropAllLocks(ExecState* exec)
-    : m_lockCount(0)
-    , m_vm(&exec->vm())
+    : DropAllLocks(exec ? &exec->vm() : nullptr)
 {
-    m_lockCount = m_vm->apiLock().dropAllLocks();
 }
 
-JSLock::DropAllLocks::DropAllLocks(VM* vm)
-    : m_lockCount(0)
-    , m_vm(vm)
+JSLock::DropAllLocks::DropAllLocks(VM& vm)
+    : DropAllLocks(&vm)
 {
-    m_lockCount = m_vm->apiLock().dropAllLocks();
 }
 
 JSLock::DropAllLocks::~DropAllLocks()
 {
-    m_vm->apiLock().grabAllLocks(m_lockCount);
+    if (!m_vm)
+        return;
+    m_vm->apiLock().grabAllLocks(this, m_droppedLockCount);
+    wtfThreadData().setCurrentAtomicStringTable(m_vm->atomicStringTable());
 }
 
 } // namespace JSC
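For readers of this diff, a minimal usage sketch of the reworked locking API follows. It is not part of the patch: the function runNativeCallback and its setup are hypothetical, and only the JSLockHolder and JSLock::DropAllLocks types shown in the diff above are assumed to be available via the JSC-internal headers.

// Hypothetical embedder-side sketch of the JSLockHolder / DropAllLocks pairing.
// Assumes the JavaScriptCore internal headers are on the include path and a live VM&.
#include "JSLock.h"
#include "VM.h"

static void runNativeCallback(JSC::VM& vm)
{
    JSC::JSLockHolder locker(vm); // JSLock::lock(1): records stack bounds and the atomic string table

    // ... run JavaScript while the API lock is held ...

    {
        // Leaving JSC for a potentially blocking native callback: drop every
        // lock this thread holds so another thread may enter the VM.
        JSC::JSLock::DropAllLocks dropper(vm); // calls dropAllLocks(this)

        // ... native work; JSC objects must not be touched here ...
    } // ~DropAllLocks() calls grabAllLocks(this, droppedLockCount) and waits until drop depths match

    // The API lock is held again here until ~JSLockHolder unlocks it.
}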