Diffstat (limited to 'Source/JavaScriptCore/runtime/JSLock.cpp')
-rw-r--r--  Source/JavaScriptCore/runtime/JSLock.cpp  328
1 file changed, 181 insertions(+), 147 deletions(-)
diff --git a/Source/JavaScriptCore/runtime/JSLock.cpp b/Source/JavaScriptCore/runtime/JSLock.cpp
index a2eff3c29..7604075f8 100644
--- a/Source/JavaScriptCore/runtime/JSLock.cpp
+++ b/Source/JavaScriptCore/runtime/JSLock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005, 2008, 2012, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2005, 2008, 2012 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -25,26 +25,33 @@
#include "CallFrame.h"
#include "JSGlobalObject.h"
#include "JSObject.h"
-#include "JSCInlines.h"
-#include "SamplingProfiler.h"
-#include <thread>
+#include "Operations.h"
+
+#if USE(PTHREADS)
+#include <pthread.h>
+#endif
namespace JSC {
-StaticLock GlobalJSLock::s_sharedInstanceMutex;
+Mutex* GlobalJSLock::s_sharedInstanceLock = 0;
GlobalJSLock::GlobalJSLock()
{
- s_sharedInstanceMutex.lock();
+ s_sharedInstanceLock->lock();
}
GlobalJSLock::~GlobalJSLock()
{
- s_sharedInstanceMutex.unlock();
+ s_sharedInstanceLock->unlock();
+}
+
+void GlobalJSLock::initialize()
+{
+ s_sharedInstanceLock = new Mutex();
}
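
The shared-instance mutex is now heap-allocated rather than static, so GlobalJSLock::initialize() must run once before the first GlobalJSLock is constructed (one-time setup of this kind is typically done from JSC's initializeThreading()). A minimal standalone sketch of the same lazily-allocated-mutex pattern, using std::mutex and illustrative names rather than WTF types:

    #include <mutex>

    static std::mutex* s_sharedLock = nullptr; // allocated once, intentionally leaked

    void initializeSharedLock()
    {
        // Must happen-before any ScopedSharedLock is constructed, or lock()
        // below dereferences a null pointer.
        s_sharedLock = new std::mutex;
    }

    struct ScopedSharedLock {
        ScopedSharedLock() { s_sharedLock->lock(); }
        ~ScopedSharedLock() { s_sharedLock->unlock(); }
    };
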
JSLockHolder::JSLockHolder(ExecState* exec)
- : m_vm(&exec->vm())
+ : m_vm(exec ? &exec->vm() : 0)
{
init();
}
@@ -63,24 +70,27 @@ JSLockHolder::JSLockHolder(VM& vm)
void JSLockHolder::init()
{
- m_vm->apiLock().lock();
+ if (m_vm)
+ m_vm->apiLock().lock();
}
JSLockHolder::~JSLockHolder()
{
+ if (!m_vm)
+ return;
+
RefPtr<JSLock> apiLock(&m_vm->apiLock());
- m_vm = nullptr;
+ m_vm.clear();
apiLock->unlock();
}
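
With the null checks above, JSLockHolder degrades to a no-op when handed a null ExecState, which C API entry points can legitimately receive. A hypothetical caller, for illustration only:

    // Illustrative only: an entry point that tolerates a null context.
    void useVMIfPossible(ExecState* exec)
    {
        JSLockHolder locker(exec); // locks exec->vm()'s API lock, or does nothing
        if (!exec)
            return; // the destructor is equally a no-op on this path
        // ... safe to use exec->vm() here while the lock is held ...
    }
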
JSLock::JSLock(VM* vm)
- : m_ownerThreadID(std::thread::id())
+ : m_ownerThread(0)
, m_lockCount(0)
, m_lockDropDepth(0)
- , m_hasExclusiveThread(false)
, m_vm(vm)
- , m_entryAtomicStringTable(nullptr)
{
+ m_spinLock.Init();
}
JSLock::~JSLock()
@@ -90,104 +100,39 @@ JSLock::~JSLock()
void JSLock::willDestroyVM(VM* vm)
{
ASSERT_UNUSED(vm, m_vm == vm);
- m_vm = nullptr;
-}
-
-void JSLock::setExclusiveThread(std::thread::id threadId)
-{
- RELEASE_ASSERT(!m_lockCount && m_ownerThreadID == std::thread::id());
- m_hasExclusiveThread = (threadId != std::thread::id());
- m_ownerThreadID = threadId;
+ m_vm = 0;
}
void JSLock::lock()
{
- lock(1);
-}
-
-void JSLock::lock(intptr_t lockCount)
-{
- ASSERT(lockCount > 0);
- if (currentThreadIsHoldingLock()) {
- m_lockCount += lockCount;
- return;
- }
-
- if (!m_hasExclusiveThread) {
- m_lock.lock();
- m_ownerThreadID = std::this_thread::get_id();
+ ThreadIdentifier currentThread = WTF::currentThread();
+ {
+ SpinLockHolder holder(&m_spinLock);
+ if (m_ownerThread == currentThread && m_lockCount) {
+ m_lockCount++;
+ return;
+ }
}
- ASSERT(!m_lockCount);
- m_lockCount = lockCount;
-
- didAcquireLock();
-}
-
-void JSLock::didAcquireLock()
-{
- // FIXME: What should happen to the per-thread identifier table if we don't have a VM?
- if (!m_vm)
- return;
-
- RELEASE_ASSERT(!m_vm->stackPointerAtVMEntry());
- void* p = &p; // A proxy for the current stack pointer.
- m_vm->setStackPointerAtVMEntry(p);
-
- WTFThreadData& threadData = wtfThreadData();
- m_vm->setLastStackTop(threadData.savedLastStackTop());
-
- ASSERT(!m_entryAtomicStringTable);
- m_entryAtomicStringTable = threadData.setCurrentAtomicStringTable(m_vm->atomicStringTable());
- ASSERT(m_entryAtomicStringTable);
-
- m_vm->heap.machineThreads().addCurrentThread();
-
-#if ENABLE(SAMPLING_PROFILER)
- // Note: this must come after addCurrentThread().
- if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler())
- samplingProfiler->noticeJSLockAcquisition();
-#endif
-}
-
-void JSLock::unlock()
-{
- unlock(1);
-}
-
-void JSLock::unlock(intptr_t unlockCount)
-{
- RELEASE_ASSERT(currentThreadIsHoldingLock());
- ASSERT(m_lockCount >= unlockCount);
-
- // Maintain m_lockCount while calling willReleaseLock() so that its callees know that
- // they still have the lock.
- if (unlockCount == m_lockCount)
- willReleaseLock();
-
- m_lockCount -= unlockCount;
- if (!m_lockCount) {
+ m_lock.lock();
- if (!m_hasExclusiveThread) {
- m_ownerThreadID = std::thread::id();
- m_lock.unlock();
- }
+ {
+ SpinLockHolder holder(&m_spinLock);
+ m_ownerThread = currentThread;
+ ASSERT(!m_lockCount);
+ m_lockCount = 1;
}
}
-void JSLock::willReleaseLock()
+void JSLock::unlock()
{
- if (m_vm) {
- m_vm->drainMicrotasks();
+ SpinLockHolder holder(&m_spinLock);
+ ASSERT(currentThreadIsHoldingLock());
- m_vm->heap.releaseDelayedReleasedObjects();
- m_vm->setStackPointerAtVMEntry(nullptr);
- }
+ m_lockCount--;
- if (m_entryAtomicStringTable) {
- wtfThreadData().setCurrentAtomicStringTable(m_entryAtomicStringTable);
- m_entryAtomicStringTable = nullptr;
- }
+ if (!m_lockCount)
+ m_lock.unlock();
}
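
In the rewritten lock()/unlock(), the spin lock guards only the owner/count bookkeeping; the heavyweight m_lock is acquired once per owning thread, and nested lock() calls on that thread just bump m_lockCount. The same scheme in a standalone sketch, with std:: primitives standing in for WTF's spin lock and Mutex:

    #include <cassert>
    #include <mutex>
    #include <thread>

    class RecursiveApiLock {
    public:
        void lock()
        {
            std::thread::id self = std::this_thread::get_id();
            {
                std::lock_guard<std::mutex> guard(m_bookkeeping); // plays m_spinLock's role
                if (m_owner == self && m_count) {
                    ++m_count; // recursive fast path: this thread already owns m_heavy
                    return;
                }
            }
            m_heavy.lock(); // plays m_lock's role; blocks until the owner fully unlocks
            std::lock_guard<std::mutex> guard(m_bookkeeping);
            m_owner = self;
            assert(!m_count);
            m_count = 1;
        }

        void unlock()
        {
            std::lock_guard<std::mutex> guard(m_bookkeeping);
            assert(m_owner == std::this_thread::get_id() && m_count);
            if (!--m_count)
                m_heavy.unlock(); // stale m_owner is harmless: lock() also checks m_count
        }

    private:
        std::mutex m_bookkeeping;
        std::mutex m_heavy;
        std::thread::id m_owner;
        unsigned m_count { 0 };
    };
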
void JSLock::lock(ExecState* exec)
@@ -202,91 +147,180 @@ void JSLock::unlock(ExecState* exec)
bool JSLock::currentThreadIsHoldingLock()
{
- ASSERT(!m_hasExclusiveThread || (exclusiveThread() == std::this_thread::get_id()));
- if (m_hasExclusiveThread)
- return !!m_lockCount;
- return m_ownerThreadID == std::this_thread::get_id();
+ return m_lockCount && m_ownerThread == WTF::currentThread();
}
+// This is fairly nasty. We allow multiple threads to run on the same
+// context, and we do not require any locking semantics in doing so -
+// clients of the API may simply use the context from multiple threads
+// concurrently, and assume this will work. In order to make this work,
+// we lock the context when a thread enters, and unlock it when it leaves.
+// However, we do not only unlock when the thread returns from its
+// entry point (evaluate script or call function), we also unlock the
+// context if the thread leaves JSC by making a call out to an external
+// function through a callback.
+//
+// All threads using the context share the same JS stack (the JSStack).
+// Whenever a thread calls into JSC it starts using the JSStack from the
+// previous 'high water mark' - the maximum point the stack has ever grown to
+// (returned by JSStack::end()). So if a first thread calls out to a
+// callback, and a second thread enters JSC, then also exits by calling out
+// to a callback, we can be left with stackframes from both threads in the
+// JSStack. As such, a problem may occur should the first thread's
+// callback complete first, and attempt to return to JSC. Were we to allow
+// this to happen, and were its stack to grow further, then it may potentially
+// write over the second thread's call frames.
+//
+// To avoid JS stack corruption we enforce a policy of only ever allowing two
+// threads to use a JS context concurrently, and only allowing the second of
+// these threads to execute until it has completed and fully returned from its
+// outermost call into JSC. We enforce this policy using 'lockDropDepth'. The
+// first time a thread exits it will call DropAllLocks - which will do as expected
+// and drop locks allowing another thread to enter. Should another thread, or the
+// same thread again, enter JSC (through evaluate script or call function), and exit
+// again through a callback, then the locks will not be dropped when DropAllLocks
+// is called (since lockDropDepth is non-zero). Since this thread is still holding
+// the locks, only it will be able to re-enter JSC (either by returning from the
+// callback, or by re-entering through another call to evaluate script or call
+// function).
+//
+// This policy is slightly more restrictive than it needs to be for correctness -
+// we could validly allow further entries into JSC from other threads; we only
+// need to ensure that callbacks return in the reverse of the order in which they
+// were made - though implementing the less restrictive policy would likely
+// increase complexity and overhead.
+//
+
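
Concretely, the pattern the comment describes looks like this on the calling side (a sketch of a hypothetical call site, not actual WebKit code):

    // Dropping the API lock around a call out to client code, so another
    // thread may enter the VM while the callback runs.
    void callOutToClient(ExecState* exec, void (*clientFunction)())
    {
        JSLock::DropAllLocks dropper(exec); // no-op if m_lockDropDepth was non-zero
        clientFunction(); // external code; may re-enter JSC from this or another thread
    }   // ~DropAllLocks re-grabs exactly the locks that were dropped
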
// This function returns the number of locks that were dropped.
-unsigned JSLock::dropAllLocks(DropAllLocks* dropper)
+unsigned JSLock::dropAllLocks(SpinLock& spinLock)
{
- if (m_hasExclusiveThread) {
- ASSERT(exclusiveThread() == std::this_thread::get_id());
+#if PLATFORM(IOS)
+ ASSERT_UNUSED(spinLock, spinLock.IsHeld());
+ // Check if this thread is currently holding the lock.
+ // FIXME: Maybe we want to require this, guard with an ASSERT?
+ unsigned lockCount = m_lockCount;
+ if (!lockCount || m_ownerThread != WTF::currentThread())
return 0;
- }
- if (!currentThreadIsHoldingLock())
+ // Don't drop the locks if they've already been dropped once.
+ // (If the prior drop came from another thread, and it resumed first,
+ // it could trash our register file).
+ if (m_lockDropDepth)
return 0;
+ // m_lockDropDepth is only incremented if any locks were dropped.
++m_lockDropDepth;
+ m_lockCount = 0;
+ m_lock.unlock();
+ return lockCount;
+#else
+ if (m_lockDropDepth++)
+ return 0;
- dropper->setDropDepth(m_lockDropDepth);
-
- WTFThreadData& threadData = wtfThreadData();
- threadData.setSavedStackPointerAtVMEntry(m_vm->stackPointerAtVMEntry());
- threadData.setSavedLastStackTop(m_vm->lastStackTop());
+ return dropAllLocksUnconditionally(spinLock);
+#endif
+}
- unsigned droppedLockCount = m_lockCount;
- unlock(droppedLockCount);
+unsigned JSLock::dropAllLocksUnconditionally(SpinLock& spinLock)
+{
+#if PLATFORM(IOS)
+ ASSERT_UNUSED(spinLock, spinLock.IsHeld());
+ // Check if this thread is currently holding the lock.
+ // FIXME: Maybe we want to require this, guard with an ASSERT?
+ unsigned lockCount = m_lockCount;
+ if (!lockCount || m_ownerThread != WTF::currentThread())
+ return 0;
- return droppedLockCount;
+ // m_lockDropDepth is only incremented if any locks were dropped.
+ ++m_lockDropDepth;
+ m_lockCount = 0;
+ m_lock.unlock();
+ return lockCount;
+#else
+ UNUSED_PARAM(spinLock);
+ unsigned lockCount = m_lockCount;
+ for (unsigned i = 0; i < lockCount; ++i)
+ unlock();
+
+ return lockCount;
+#endif
}
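
The two drop functions differ only in whether a non-zero m_lockDropDepth suppresses the drop; the choice between them is made by the AlwaysDropLocksTag argument to DropAllLocks below. Assuming the tag enum exposes DontAlwaysDropLocks and AlwaysDropLocks values (suggested by the parameter's use as a boolean, but declared in JSLock.h, which this diff does not show), the caller-visible difference is roughly:

    JSLock::DropAllLocks guarded(exec, DontAlwaysDropLocks); // dropAllLocks(): a no-op
                                                             // if a drop is already live
    JSLock::DropAllLocks forced(exec, AlwaysDropLocks);      // dropAllLocksUnconditionally()
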
-void JSLock::grabAllLocks(DropAllLocks* dropper, unsigned droppedLockCount)
+void JSLock::grabAllLocks(unsigned lockCount, SpinLock& spinLock)
{
- ASSERT(!m_hasExclusiveThread || !droppedLockCount);
-
+#if PLATFORM(IOS)
+ ASSERT(spinLock.IsHeld());
// If no locks were dropped, nothing to do!
- if (!droppedLockCount)
+ if (!lockCount)
return;
- ASSERT(!currentThreadIsHoldingLock());
- lock(droppedLockCount);
-
- while (dropper->dropDepth() != m_lockDropDepth) {
- unlock(droppedLockCount);
- std::this_thread::yield();
- lock(droppedLockCount);
+ ThreadIdentifier currentThread = WTF::currentThread();
+ // Check if this thread is currently holding the lock.
+ // FIXME: Maybe we want to prohibit this, guard against it with an ASSERT?
+ if (m_ownerThread == currentThread && m_lockCount) {
+ m_lockCount += lockCount;
+ --m_lockDropDepth;
+ return;
}
+ spinLock.Unlock();
+ m_lock.lock();
+ spinLock.Lock();
+
+ m_ownerThread = currentThread;
+ ASSERT(!m_lockCount);
+ m_lockCount = lockCount;
--m_lockDropDepth;
+#else
+ UNUSED_PARAM(spinLock);
+ for (unsigned i = 0; i < lockCount; ++i)
+ lock();
- WTFThreadData& threadData = wtfThreadData();
- m_vm->setStackPointerAtVMEntry(threadData.savedStackPointerAtVMEntry());
- m_vm->setLastStackTop(threadData.savedLastStackTop());
+ --m_lockDropDepth;
+#endif
}
-JSLock::DropAllLocks::DropAllLocks(VM* vm)
- : m_droppedLockCount(0)
- // If the VM is in the middle of being destroyed then we don't want to resurrect it
- // by allowing DropAllLocks to ref it. By this point the JSLock has already been
- // released anyways, so it doesn't matter that DropAllLocks is a no-op.
- , m_vm(vm->refCount() ? vm : nullptr)
+JSLock::DropAllLocks::DropAllLocks(ExecState* exec, AlwaysDropLocksTag alwaysDropLocks)
+ : m_lockCount(0)
+ , m_vm(exec ? &exec->vm() : nullptr)
{
if (!m_vm)
return;
- wtfThreadData().resetCurrentAtomicStringTable();
- RELEASE_ASSERT(!m_vm->apiLock().currentThreadIsHoldingLock() || !m_vm->isCollectorBusy());
- m_droppedLockCount = m_vm->apiLock().dropAllLocks(this);
-}
-
-JSLock::DropAllLocks::DropAllLocks(ExecState* exec)
- : DropAllLocks(exec ? &exec->vm() : nullptr)
-{
+ SpinLock& spinLock = m_vm->apiLock().m_spinLock;
+#if PLATFORM(IOS)
+ SpinLockHolder holder(&spinLock);
+#endif
+ if (alwaysDropLocks)
+ m_lockCount = m_vm->apiLock().dropAllLocksUnconditionally(spinLock);
+ else
+ m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}
-JSLock::DropAllLocks::DropAllLocks(VM& vm)
- : DropAllLocks(&vm)
+JSLock::DropAllLocks::DropAllLocks(VM* vm, AlwaysDropLocksTag alwaysDropLocks)
+ : m_lockCount(0)
+ , m_vm(vm)
{
+ if (!m_vm)
+ return;
+ SpinLock& spinLock = m_vm->apiLock().m_spinLock;
+#if PLATFORM(IOS)
+ SpinLockHolder holder(&spinLock);
+#endif
+ if (alwaysDropLocks)
+ m_lockCount = m_vm->apiLock().dropAllLocksUnconditionally(spinLock);
+ else
+ m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}
JSLock::DropAllLocks::~DropAllLocks()
{
if (!m_vm)
return;
- m_vm->apiLock().grabAllLocks(this, m_droppedLockCount);
- wtfThreadData().setCurrentAtomicStringTable(m_vm->atomicStringTable());
+ SpinLock& spinLock = m_vm->apiLock().m_spinLock;
+#if PLATFORM(IOS)
+ SpinLockHolder holder(&spinLock);
+#endif
+ m_vm->apiLock().grabAllLocks(m_lockCount, spinLock);
}
} // namespace JSC