summaryrefslogtreecommitdiff
path: root/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
diff options
context:
space:
mode:
authorKonstantin Tokarev <annulen@yandex.ru>2016-08-25 19:20:41 +0300
committerKonstantin Tokarev <annulen@yandex.ru>2017-02-02 12:30:55 +0000
commit6882a04fb36642862b11efe514251d32070c3d65 (patch)
treeb7959826000b061fd5ccc7512035c7478742f7b0 /Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
parentab6df191029eeeb0b0f16f127d553265659f739e (diff)
downloadqtwebkit-6882a04fb36642862b11efe514251d32070c3d65.tar.gz
Imported QtWebKit TP3 (git b57bc6801f1876c3220d5a4bfea33d620d477443)
Change-Id: I3b1d8a2808782c9f34d50240000e20cb38d3680f Reviewed-by: Konstantin Tokarev <annulen@yandex.ru>
Diffstat (limited to 'Source/JavaScriptCore/bytecode/CallLinkStatus.cpp')
-rw-r--r--Source/JavaScriptCore/bytecode/CallLinkStatus.cpp329
1 files changed, 265 insertions, 64 deletions
diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
index 509b15aaf..8ffc23d13 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,101 +26,305 @@
#include "config.h"
#include "CallLinkStatus.h"
+#include "CallLinkInfo.h"
#include "CodeBlock.h"
+#include "DFGJITCode.h"
+#include "InlineCallFrame.h"
#include "LLIntCallLinkInfo.h"
-#include "Operations.h"
+#include "JSCInlines.h"
#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
namespace JSC {
+static const bool verbose = false;
+
CallLinkStatus::CallLinkStatus(JSValue value)
- : m_callTarget(value)
- , m_executable(0)
- , m_structure(0)
- , m_couldTakeSlowPath(false)
+ : m_couldTakeSlowPath(false)
, m_isProved(false)
{
- if (!value || !value.isCell())
- return;
-
- m_structure = value.asCell()->structure();
-
- if (!value.asCell()->inherits(&JSFunction::s_info))
+ if (!value || !value.isCell()) {
+ m_couldTakeSlowPath = true;
return;
+ }
- m_executable = jsCast<JSFunction*>(value.asCell())->executable();
+ m_variants.append(CallVariant(value.asCell()));
}
-JSFunction* CallLinkStatus::function() const
+CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
- if (!m_callTarget || !m_callTarget.isCell())
- return 0;
+ UNUSED_PARAM(profiledBlock);
+ UNUSED_PARAM(bytecodeIndex);
+#if ENABLE(DFG_JIT)
+ if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell))) {
+ // We could force this to be a closure call, but instead we'll just assume that it
+ // takes slow path.
+ return takesSlowPath();
+ }
+#else
+ UNUSED_PARAM(locker);
+#endif
+
+ VM& vm = *profiledBlock->vm();
+
+ Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+ OpcodeID op = vm.interpreter->getOpcodeID(instruction[0].u.opcode);
+ if (op != op_call && op != op_construct && op != op_tail_call)
+ return CallLinkStatus();
- if (!m_callTarget.asCell()->inherits(&JSFunction::s_info))
- return 0;
+ LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;
- return jsCast<JSFunction*>(m_callTarget.asCell());
+ return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
}
-InternalFunction* CallLinkStatus::internalFunction() const
+CallLinkStatus CallLinkStatus::computeFor(
+ CodeBlock* profiledBlock, unsigned bytecodeIndex, const CallLinkInfoMap& map)
{
- if (!m_callTarget || !m_callTarget.isCell())
- return 0;
+ ConcurrentJITLocker locker(profiledBlock->m_lock);
- if (!m_callTarget.asCell()->inherits(&InternalFunction::s_info))
- return 0;
+ UNUSED_PARAM(profiledBlock);
+ UNUSED_PARAM(bytecodeIndex);
+ UNUSED_PARAM(map);
+#if ENABLE(DFG_JIT)
+ ExitSiteData exitSiteData = computeExitSiteData(locker, profiledBlock, bytecodeIndex);
- return jsCast<InternalFunction*>(m_callTarget.asCell());
+ CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex));
+ if (!callLinkInfo) {
+ if (exitSiteData.takesSlowPath)
+ return takesSlowPath();
+ return computeFromLLInt(locker, profiledBlock, bytecodeIndex);
+ }
+
+ return computeFor(locker, profiledBlock, *callLinkInfo, exitSiteData);
+#else
+ return CallLinkStatus();
+#endif
}
-Intrinsic CallLinkStatus::intrinsicFor(CodeSpecializationKind kind) const
+CallLinkStatus::ExitSiteData CallLinkStatus::computeExitSiteData(
+ const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
- if (!m_executable)
- return NoIntrinsic;
+ ExitSiteData exitSiteData;
+
+#if ENABLE(DFG_JIT)
+ exitSiteData.takesSlowPath =
+ profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadType))
+ || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable));
+ exitSiteData.badFunction =
+ profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell));
+#else
+ UNUSED_PARAM(locker);
+ UNUSED_PARAM(profiledBlock);
+ UNUSED_PARAM(bytecodeIndex);
+#endif
- return m_executable->intrinsicFor(kind);
+ return exitSiteData;
}
-CallLinkStatus CallLinkStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex)
+#if ENABLE(JIT)
+CallLinkStatus CallLinkStatus::computeFor(
+ const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo)
{
+ // We don't really need this, but anytime we have to debug this code, it becomes indispensable.
UNUSED_PARAM(profiledBlock);
- UNUSED_PARAM(bytecodeIndex);
-#if ENABLE(LLINT)
- Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
- LLIntCallLinkInfo* callLinkInfo = instruction[4].u.callLinkInfo;
- return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
-#else
- return CallLinkStatus();
-#endif
+ CallLinkStatus result = computeFromCallLinkInfo(locker, callLinkInfo);
+ result.m_maxNumArguments = callLinkInfo.maxNumArguments();
+ return result;
}
-CallLinkStatus CallLinkStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex)
+CallLinkStatus CallLinkStatus::computeFromCallLinkInfo(
+ const ConcurrentJITLocker&, CallLinkInfo& callLinkInfo)
{
- UNUSED_PARAM(profiledBlock);
- UNUSED_PARAM(bytecodeIndex);
-#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
- if (!profiledBlock->numberOfCallLinkInfos())
- return computeFromLLInt(profiledBlock, bytecodeIndex);
+ if (callLinkInfo.clearedByGC())
+ return takesSlowPath();
- if (profiledBlock->couldTakeSlowCase(bytecodeIndex))
- return CallLinkStatus::takesSlowPath();
+ // Note that despite requiring that the locker is held, this code is racy with respect
+ // to the CallLinkInfo: it may get cleared while this code runs! This is because
+ // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns
+ // the CallLinkInfo and currently we save space by not having CallLinkInfos know who owns
+ // them. So, there is no way for either the caller of CallLinkInfo::unlink() or unlink()
+ // itself to figure out which lock to lock.
+ //
+ // Fortunately, that doesn't matter. The only things we ask of CallLinkInfo - the slow
+ // path count, the stub, and the target - can all be asked racily. Stubs and targets can
+ // only be deleted at next GC, so if we load a non-null one, then it must contain data
+ // that is still marginally valid (i.e. the pointers ain't stale). This kind of raciness
+ // is probably OK for now.
- CallLinkInfo& callLinkInfo = profiledBlock->getCallLinkInfo(bytecodeIndex);
- if (callLinkInfo.stub)
- return CallLinkStatus(callLinkInfo.stub->executable(), callLinkInfo.stub->structure());
+ // PolymorphicCallStubRoutine is a GCAwareJITStubRoutine, so if non-null, it will stay alive
+ // until next GC even if the CallLinkInfo is concurrently cleared. Also, the variants list is
+ // never mutated after the PolymorphicCallStubRoutine is instantiated. We have some conservative
+ // fencing in place to make sure that we see the variants list after construction.
+ if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub()) {
+ WTF::loadLoadFence();
+
+ CallEdgeList edges = stub->edges();
+
+ // Now that we've loaded the edges list, there are no further concurrency concerns. We will
+ // just manipulate and prune this list to our liking - mostly removing entries that are too
+ // infrequent and ensuring that it's sorted in descending order of frequency.
+
+ RELEASE_ASSERT(edges.size());
+
+ std::sort(
+ edges.begin(), edges.end(),
+ [] (CallEdge a, CallEdge b) {
+ return a.count() > b.count();
+ });
+ RELEASE_ASSERT(edges.first().count() >= edges.last().count());
+
+ double totalCallsToKnown = 0;
+ double totalCallsToUnknown = callLinkInfo.slowPathCount();
+ CallVariantList variants;
+ for (size_t i = 0; i < edges.size(); ++i) {
+ CallEdge edge = edges[i];
+ // If the call is at the tail of the distribution, then we don't optimize it and we
+ // treat it as if it was a call to something unknown. We define the tail as being either
+ // a call that doesn't belong to the N most frequent callees (N =
+ // maxPolymorphicCallVariantsForInlining) or that has a total call count that is too
+ // small.
+ if (i >= Options::maxPolymorphicCallVariantsForInlining()
+ || edge.count() < Options::frequentCallThreshold())
+ totalCallsToUnknown += edge.count();
+ else {
+ totalCallsToKnown += edge.count();
+ variants.append(edge.callee());
+ }
+ }
+
+ // Bail if we didn't find any calls that qualified.
+ RELEASE_ASSERT(!!totalCallsToKnown == !!variants.size());
+ if (variants.isEmpty())
+ return takesSlowPath();
+
+ // We require that the distribution of callees is skewed towards a handful of common ones.
+ if (totalCallsToKnown / totalCallsToUnknown < Options::minimumCallToKnownRate())
+ return takesSlowPath();
+
+ RELEASE_ASSERT(totalCallsToKnown);
+ RELEASE_ASSERT(variants.size());
+
+ CallLinkStatus result;
+ result.m_variants = variants;
+ result.m_couldTakeSlowPath = !!totalCallsToUnknown;
+ result.m_isBasedOnStub = true;
+ return result;
+ }
- JSFunction* target = callLinkInfo.lastSeenCallee.get();
- if (!target)
- return computeFromLLInt(profiledBlock, bytecodeIndex);
+ CallLinkStatus result;
- if (callLinkInfo.hasSeenClosure)
- return CallLinkStatus(target->executable(), target->structure());
+ if (JSFunction* target = callLinkInfo.lastSeenCallee()) {
+ CallVariant variant(target);
+ if (callLinkInfo.hasSeenClosure())
+ variant = variant.despecifiedClosure();
+ result.m_variants.append(variant);
+ }
+
+ result.m_couldTakeSlowPath = !!callLinkInfo.slowPathCount();
- return CallLinkStatus(target);
-#else
- return CallLinkStatus();
+ return result;
+}
+
+CallLinkStatus CallLinkStatus::computeFor(
+ const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo,
+ ExitSiteData exitSiteData)
+{
+ CallLinkStatus result = computeFor(locker, profiledBlock, callLinkInfo);
+ if (exitSiteData.badFunction) {
+ if (result.isBasedOnStub()) {
+ // If we have a polymorphic stub, then having an exit site is not quite so useful. In
+ // most cases, the information in the stub has higher fidelity.
+ result.makeClosureCall();
+ } else {
+ // We might not have a polymorphic stub for any number of reasons. When this happens, we
+ // are in less certain territory, so exit sites mean a lot.
+ result.m_couldTakeSlowPath = true;
+ }
+ }
+ if (exitSiteData.takesSlowPath)
+ result.m_couldTakeSlowPath = true;
+
+ return result;
+}
#endif
+
+void CallLinkStatus::computeDFGStatuses(
+ CodeBlock* dfgCodeBlock, CallLinkStatus::ContextMap& map)
+{
+#if ENABLE(DFG_JIT)
+ RELEASE_ASSERT(dfgCodeBlock->jitType() == JITCode::DFGJIT);
+ CodeBlock* baselineCodeBlock = dfgCodeBlock->alternative();
+ for (auto iter = dfgCodeBlock->callLinkInfosBegin(); !!iter; ++iter) {
+ CallLinkInfo& info = **iter;
+ CodeOrigin codeOrigin = info.codeOrigin();
+
+ // Check if we had already previously made a terrible mistake in the FTL for this
+ // code origin. Note that this is approximate because we could have a monovariant
+ // inline in the FTL that ended up failing. We should fix that at some point by
+ // having data structures to track the context of frequent exits. This is currently
+ // challenging because it would require creating a CodeOrigin-based database in
+ // baseline CodeBlocks, but those CodeBlocks don't really have a place to put the
+ // InlineCallFrames.
+ CodeBlock* currentBaseline =
+ baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
+ ExitSiteData exitSiteData;
+ {
+ ConcurrentJITLocker locker(currentBaseline->m_lock);
+ exitSiteData = computeExitSiteData(
+ locker, currentBaseline, codeOrigin.bytecodeIndex);
+ }
+
+ {
+ ConcurrentJITLocker locker(dfgCodeBlock->m_lock);
+ map.add(info.codeOrigin(), computeFor(locker, dfgCodeBlock, info, exitSiteData));
+ }
+ }
+#else
+ UNUSED_PARAM(dfgCodeBlock);
+#endif // ENABLE(DFG_JIT)
+
+ if (verbose) {
+ dataLog("Context map:\n");
+ ContextMap::iterator iter = map.begin();
+ ContextMap::iterator end = map.end();
+ for (; iter != end; ++iter) {
+ dataLog(" ", iter->key, ":\n");
+ dataLog(" ", iter->value, "\n");
+ }
+ }
+}
+
+CallLinkStatus CallLinkStatus::computeFor(
+ CodeBlock* profiledBlock, CodeOrigin codeOrigin,
+ const CallLinkInfoMap& baselineMap, const CallLinkStatus::ContextMap& dfgMap)
+{
+ auto iter = dfgMap.find(codeOrigin);
+ if (iter != dfgMap.end())
+ return iter->value;
+
+ return computeFor(profiledBlock, codeOrigin.bytecodeIndex, baselineMap);
+}
+
+void CallLinkStatus::setProvenConstantCallee(CallVariant variant)
+{
+ m_variants = CallVariantList{ variant };
+ m_couldTakeSlowPath = false;
+ m_isProved = true;
+}
+
+bool CallLinkStatus::isClosureCall() const
+{
+ for (unsigned i = m_variants.size(); i--;) {
+ if (m_variants[i].isClosureCall())
+ return true;
+ }
+ return false;
+}
+
+void CallLinkStatus::makeClosureCall()
+{
+ m_variants = despecifiedVariantList(m_variants);
}
void CallLinkStatus::dump(PrintStream& out) const
@@ -138,14 +342,11 @@ void CallLinkStatus::dump(PrintStream& out) const
if (m_couldTakeSlowPath)
out.print(comma, "Could Take Slow Path");
- if (m_callTarget)
- out.print(comma, "Known target: ", m_callTarget);
-
- if (m_executable)
- out.print(comma, "Executable/CallHash: ", RawPointer(m_executable), "/", m_executable->hashFor(CodeForCall));
+ if (!m_variants.isEmpty())
+ out.print(comma, listDump(m_variants));
- if (m_structure)
- out.print(comma, "Structure: ", RawPointer(m_structure));
+ if (m_maxNumArguments)
+ out.print(comma, "maxNumArguments = ", m_maxNumArguments);
}
} // namespace JSC