author    Lorry Tar Creator <lorry-tar-importer@lorry>  2016-04-10 09:28:39 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>  2016-04-10 09:28:39 +0000
commit    32761a6cee1d0dee366b885b7b9c777e67885688 (patch)
tree      d6bec92bebfb216f4126356e55518842c2f476a1 /Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
parent    a4e969f4965059196ca948db781e52f7cfebf19e (diff)
download  WebKitGtk-tarball-32761a6cee1d0dee366b885b7b9c777e67885688.tar.gz
tag       webkitgtk-2.4.11
Diffstat (limited to 'Source/JavaScriptCore/bytecode/GetByIdStatus.cpp')
-rw-r--r--  Source/JavaScriptCore/bytecode/GetByIdStatus.cpp | 473
1 file changed, 202 insertions(+), 271 deletions(-)
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
index 66a4dd81d..fbb3da1a5 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,348 +27,279 @@
#include "GetByIdStatus.h"
#include "CodeBlock.h"
-#include "ComplexGetStatus.h"
-#include "JSCInlines.h"
#include "JSScope.h"
#include "LLIntData.h"
#include "LowLevelInterpreter.h"
-#include "PolymorphicAccess.h"
-#include <wtf/ListDump.h>
+#include "Operations.h"
namespace JSC {
-bool GetByIdStatus::appendVariant(const GetByIdVariant& variant)
-{
- // Attempt to merge this variant with an already existing variant.
- for (unsigned i = 0; i < m_variants.size(); ++i) {
- if (m_variants[i].attemptToMerge(variant))
- return true;
- }
-
- // Make sure there is no overlap. We should have pruned out opportunities for
- // overlap but it's possible that an inline cache got into a weird state. We are
- // defensive and bail if we detect crazy.
- for (unsigned i = 0; i < m_variants.size(); ++i) {
- if (m_variants[i].structureSet().overlaps(variant.structureSet()))
- return false;
- }
-
- m_variants.append(variant);
- return true;
-}
-
-#if ENABLE(DFG_JIT)
-bool GetByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
-{
- return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
- || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
-}
-#endif
-
-GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, StringImpl* uid)
{
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
UNUSED_PARAM(uid);
-
- VM& vm = *profiledBlock->vm();
-
+#if ENABLE(LLINT)
Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
- if (instruction[0].u.opcode == LLInt::getOpcode(op_get_array_length))
+ if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_get_array_length))
return GetByIdStatus(NoInformation, false);
- StructureID structureID = instruction[4].u.structureID;
- if (!structureID)
+ Structure* structure = instruction[4].u.structure.get();
+ if (!structure)
return GetByIdStatus(NoInformation, false);
- Structure* structure = vm.heap.structureIDTable().get(structureID);
-
if (structure->takesSlowPathInDFGForImpureProperty())
return GetByIdStatus(NoInformation, false);
unsigned attributesIgnored;
- PropertyOffset offset = structure->getConcurrently(uid, attributesIgnored);
+ JSCell* specificValue;
+ PropertyOffset offset = structure->getConcurrently(
+ *profiledBlock->vm(), uid, attributesIgnored, specificValue);
+ if (structure->isDictionary())
+ specificValue = 0;
if (!isValidOffset(offset))
return GetByIdStatus(NoInformation, false);
- return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset));
+ return GetByIdStatus(Simple, false, StructureSet(structure), offset, specificValue);
+#else
+ return GetByIdStatus(NoInformation, false);
+#endif
}
-GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
+void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, StringImpl* uid)
{
- ConcurrentJITLocker locker(profiledBlock->m_lock);
+#if ENABLE(JIT)
+ // Validate the chain. If the chain is invalid, then currently the best thing
+ // we can do is to assume that TakesSlow is true. In the future, it might be
+ // worth exploring reifying the structure chain from the structure we've got
+ // instead of using the one from the cache, since that will do the right things
+ // if the structure chain has changed. But that may be harder, because we may
+ // then end up having a different type of access altogether. And it currently
+ // does not appear to be worth it to do so -- effectively, the heuristic we
+ // have now is that if the structure chain has changed between when it was
+ // cached on in the baseline JIT and when the DFG tried to inline the access,
+ // then we fall back on a polymorphic access.
+ if (!result.m_chain->isStillValid())
+ return;
- GetByIdStatus result;
+ if (result.m_chain->head()->takesSlowPathInDFGForImpureProperty())
+ return;
+ size_t chainSize = result.m_chain->size();
+ for (size_t i = 0; i < chainSize; i++) {
+ if (result.m_chain->at(i)->takesSlowPathInDFGForImpureProperty())
+ return;
+ }
-#if ENABLE(DFG_JIT)
- result = computeForStubInfoWithoutExitSiteFeedback(
- locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid,
- CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
+ JSObject* currentObject = result.m_chain->terminalPrototype();
+ Structure* currentStructure = result.m_chain->last();
- if (!result.takesSlowPath()
- && hasExitSite(locker, profiledBlock, bytecodeIndex))
- return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true);
+ ASSERT_UNUSED(currentObject, currentObject);
+
+ unsigned attributesIgnored;
+ JSCell* specificValue;
+
+ result.m_offset = currentStructure->getConcurrently(
+ *profiledBlock->vm(), uid, attributesIgnored, specificValue);
+ if (currentStructure->isDictionary())
+ specificValue = 0;
+ if (!isValidOffset(result.m_offset))
+ return;
+
+ result.m_structureSet.add(result.m_chain->head());
+ result.m_specificValue = JSValue(specificValue);
#else
- UNUSED_PARAM(map);
+ UNUSED_PARAM(result);
+ UNUSED_PARAM(profiledBlock);
+ UNUSED_PARAM(uid);
+ UNREACHABLE_FOR_PLATFORM();
#endif
-
- if (!result)
- return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
-
- return result;
}
-#if ENABLE(DFG_JIT)
-GetByIdStatus GetByIdStatus::computeForStubInfo(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
{
- GetByIdStatus result = GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
- locker, profiledBlock, stubInfo, uid,
- CallLinkStatus::computeExitSiteData(locker, profiledBlock, codeOrigin.bytecodeIndex));
-
- if (!result.takesSlowPath() && GetByIdStatus::hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
- return GetByIdStatus(result.makesCalls() ? GetByIdStatus::MakesCalls : GetByIdStatus::TakesSlowPath, true);
- return result;
-}
-#endif // ENABLE(DFG_JIT)
-
+ ConcurrentJITLocker locker(profiledBlock->m_lock);
+
+ UNUSED_PARAM(profiledBlock);
+ UNUSED_PARAM(bytecodeIndex);
+ UNUSED_PARAM(uid);
#if ENABLE(JIT)
-GetByIdStatus GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
- const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, UniquedStringImpl* uid,
- CallLinkStatus::ExitSiteData callExitSiteData)
-{
- if (!stubInfo || !stubInfo->everConsidered)
- return GetByIdStatus(NoInformation);
+ StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
+ if (!stubInfo || !stubInfo->seen)
+ return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
+
+ if (stubInfo->resetByGC)
+ return GetByIdStatus(TakesSlowPath, true);
- PolymorphicAccess* list = 0;
- State slowPathState = TakesSlowPath;
- if (stubInfo->cacheType == CacheType::Stub) {
- list = stubInfo->u.stub;
- for (unsigned i = 0; i < list->size(); ++i) {
- const AccessCase& access = list->at(i);
- if (access.doesCalls())
- slowPathState = MakesCalls;
- }
+ PolymorphicAccessStructureList* list;
+ int listSize;
+ switch (stubInfo->accessType) {
+ case access_get_by_id_self_list:
+ list = stubInfo->u.getByIdSelfList.structureList;
+ listSize = stubInfo->u.getByIdSelfList.listSize;
+ break;
+ case access_get_by_id_proto_list:
+ list = stubInfo->u.getByIdProtoList.structureList;
+ listSize = stubInfo->u.getByIdProtoList.listSize;
+ break;
+ default:
+ list = 0;
+ listSize = 0;
+ break;
+ }
+ for (int i = 0; i < listSize; ++i) {
+ if (!list->list[i].isDirect)
+ return GetByIdStatus(MakesCalls, true);
}
- if (stubInfo->tookSlowPath)
- return GetByIdStatus(slowPathState);
+ // Next check if it takes slow case, in which case we want to be kind of careful.
+ if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
+ return GetByIdStatus(TakesSlowPath, true);
// Finally figure out if we can derive an access strategy.
GetByIdStatus result;
- result.m_state = Simple;
result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
- switch (stubInfo->cacheType) {
- case CacheType::Unset:
- return GetByIdStatus(NoInformation);
+ switch (stubInfo->accessType) {
+ case access_unset:
+ return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
- case CacheType::GetByIdSelf: {
- Structure* structure = stubInfo->u.byIdSelf.baseObjectStructure.get();
+ case access_get_by_id_self: {
+ Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
if (structure->takesSlowPathInDFGForImpureProperty())
- return GetByIdStatus(slowPathState, true);
+ return GetByIdStatus(TakesSlowPath, true);
unsigned attributesIgnored;
- GetByIdVariant variant;
- variant.m_offset = structure->getConcurrently(uid, attributesIgnored);
- if (!isValidOffset(variant.m_offset))
- return GetByIdStatus(slowPathState, true);
+ JSCell* specificValue;
+ result.m_offset = structure->getConcurrently(
+ *profiledBlock->vm(), uid, attributesIgnored, specificValue);
+ if (structure->isDictionary())
+ specificValue = 0;
+
+ if (isValidOffset(result.m_offset)) {
+ result.m_structureSet.add(structure);
+ result.m_specificValue = JSValue(specificValue);
+ }
- variant.m_structureSet.add(structure);
- bool didAppend = result.appendVariant(variant);
- ASSERT_UNUSED(didAppend, didAppend);
- return result;
+ if (isValidOffset(result.m_offset))
+ ASSERT(result.m_structureSet.size());
+ break;
}
- case CacheType::Stub: {
- for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
- const AccessCase& access = list->at(listIndex);
- if (access.viaProxy())
- return GetByIdStatus(slowPathState, true);
+ case access_get_by_id_self_list: {
+ for (int i = 0; i < listSize; ++i) {
+ ASSERT(list->list[i].isDirect);
- Structure* structure = access.structure();
- if (!structure) {
- // The null structure cases arise due to array.length and string.length. We have no way
- // of creating a GetByIdVariant for those, and we don't really have to since the DFG
- // handles those cases in FixupPhase using value profiling. That's a bit awkward - we
- // shouldn't have to use value profiling to discover something that the AccessCase
- // could have told us. But, it works well enough. So, our only concern here is to not
- // crash on null structure.
- return GetByIdStatus(slowPathState, true);
- }
-
- ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
- structure, access.conditionSet(), uid);
-
- switch (complexGetStatus.kind()) {
- case ComplexGetStatus::ShouldSkip:
- continue;
-
- case ComplexGetStatus::TakesSlowPath:
- return GetByIdStatus(slowPathState, true);
-
- case ComplexGetStatus::Inlineable: {
- std::unique_ptr<CallLinkStatus> callLinkStatus;
- JSFunction* intrinsicFunction = nullptr;
-
- switch (access.type()) {
- case AccessCase::Load: {
- break;
- }
- case AccessCase::IntrinsicGetter: {
- intrinsicFunction = access.intrinsicFunction();
- break;
- }
- case AccessCase::Getter: {
- CallLinkInfo* callLinkInfo = access.callLinkInfo();
- ASSERT(callLinkInfo);
- callLinkStatus = std::make_unique<CallLinkStatus>(
- CallLinkStatus::computeFor(
- locker, profiledBlock, *callLinkInfo, callExitSiteData));
- break;
- }
- default: {
- // FIXME: It would be totally sweet to support more of these at some point in the
- // future. https://bugs.webkit.org/show_bug.cgi?id=133052
- return GetByIdStatus(slowPathState, true);
- } }
-
- GetByIdVariant variant(
- StructureSet(structure), complexGetStatus.offset(),
- complexGetStatus.conditionSet(), WTFMove(callLinkStatus),
- intrinsicFunction);
+ Structure* structure = list->list[i].base.get();
+ if (structure->takesSlowPathInDFGForImpureProperty())
+ return GetByIdStatus(TakesSlowPath, true);
- if (!result.appendVariant(variant))
- return GetByIdStatus(slowPathState, true);
+ if (result.m_structureSet.contains(structure))
+ continue;
+
+ unsigned attributesIgnored;
+ JSCell* specificValue;
+ PropertyOffset myOffset = structure->getConcurrently(
+ *profiledBlock->vm(), uid, attributesIgnored, specificValue);
+ if (structure->isDictionary())
+ specificValue = 0;
+
+ if (!isValidOffset(myOffset)) {
+ result.m_offset = invalidOffset;
+ break;
+ }
+
+ if (!i) {
+ result.m_offset = myOffset;
+ result.m_specificValue = JSValue(specificValue);
+ } else if (result.m_offset != myOffset) {
+ result.m_offset = invalidOffset;
break;
- } }
+ } else if (result.m_specificValue != JSValue(specificValue))
+ result.m_specificValue = JSValue();
+
+ result.m_structureSet.add(structure);
}
+
+ if (isValidOffset(result.m_offset))
+ ASSERT(result.m_structureSet.size());
+ break;
+ }
- return result;
+ case access_get_by_id_proto: {
+ if (!stubInfo->u.getByIdProto.isDirect)
+ return GetByIdStatus(MakesCalls, true);
+ result.m_chain = adoptRef(new IntendedStructureChain(
+ profiledBlock,
+ stubInfo->u.getByIdProto.baseObjectStructure.get(),
+ stubInfo->u.getByIdProto.prototypeStructure.get()));
+ computeForChain(result, profiledBlock, uid);
+ break;
+ }
+
+ case access_get_by_id_chain: {
+ if (!stubInfo->u.getByIdChain.isDirect)
+ return GetByIdStatus(MakesCalls, true);
+ result.m_chain = adoptRef(new IntendedStructureChain(
+ profiledBlock,
+ stubInfo->u.getByIdChain.baseObjectStructure.get(),
+ stubInfo->u.getByIdChain.chain.get(),
+ stubInfo->u.getByIdChain.count));
+ computeForChain(result, profiledBlock, uid);
+ break;
}
default:
- return GetByIdStatus(slowPathState, true);
+ ASSERT(!isValidOffset(result.m_offset));
+ break;
}
- RELEASE_ASSERT_NOT_REACHED();
- return GetByIdStatus();
-}
-#endif // ENABLE(JIT)
-
-GetByIdStatus GetByIdStatus::computeFor(
- CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap,
- StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
-{
-#if ENABLE(DFG_JIT)
- if (dfgBlock) {
- CallLinkStatus::ExitSiteData exitSiteData;
- {
- ConcurrentJITLocker locker(profiledBlock->m_lock);
- exitSiteData = CallLinkStatus::computeExitSiteData(
- locker, profiledBlock, codeOrigin.bytecodeIndex);
- }
-
- GetByIdStatus result;
- {
- ConcurrentJITLocker locker(dfgBlock->m_lock);
- result = computeForStubInfoWithoutExitSiteFeedback(
- locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
- }
-
- if (result.takesSlowPath())
- return result;
+ if (!isValidOffset(result.m_offset)) {
+ result.m_state = TakesSlowPath;
+ result.m_structureSet.clear();
+ result.m_chain.clear();
+ result.m_specificValue = JSValue();
+ } else
+ result.m_state = Simple;
- {
- ConcurrentJITLocker locker(profiledBlock->m_lock);
- if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
- return GetByIdStatus(TakesSlowPath, true);
- }
-
- if (result.isSet())
- return result;
- }
-#else
- UNUSED_PARAM(dfgBlock);
- UNUSED_PARAM(dfgMap);
-#endif
-
- return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
+ return result;
+#else // ENABLE(JIT)
+ UNUSED_PARAM(map);
+ return GetByIdStatus(NoInformation, false);
+#endif // ENABLE(JIT)
}
-GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, UniquedStringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFor(VM& vm, Structure* structure, StringImpl* uid)
{
// For now we only handle the super simple self access case. We could handle the
// prototype case in the future.
- if (set.isEmpty())
- return GetByIdStatus();
+ if (!structure)
+ return GetByIdStatus(TakesSlowPath);
- if (parseIndex(*uid))
+ if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
+ return GetByIdStatus(TakesSlowPath);
+
+ if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
return GetByIdStatus(TakesSlowPath);
+ if (!structure->propertyAccessesAreCacheable())
+ return GetByIdStatus(TakesSlowPath);
+
GetByIdStatus result;
+ result.m_wasSeenInJIT = false; // To my knowledge nobody that uses computeFor(VM&, Structure*, StringImpl*) reads this field, but I might as well be honest: no, it wasn't seen in the JIT, since I computed it statically.
+ unsigned attributes;
+ JSCell* specificValue;
+ result.m_offset = structure->getConcurrently(vm, uid, attributes, specificValue);
+ if (!isValidOffset(result.m_offset))
+ return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
+ if (attributes & Accessor)
+ return GetByIdStatus(MakesCalls);
+ if (structure->isDictionary())
+ specificValue = 0;
+ result.m_structureSet.add(structure);
+ result.m_specificValue = JSValue(specificValue);
result.m_state = Simple;
- result.m_wasSeenInJIT = false;
- for (unsigned i = 0; i < set.size(); ++i) {
- Structure* structure = set[i];
- if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
- return GetByIdStatus(TakesSlowPath);
-
- if (!structure->propertyAccessesAreCacheable())
- return GetByIdStatus(TakesSlowPath);
-
- unsigned attributes;
- PropertyOffset offset = structure->getConcurrently(uid, attributes);
- if (!isValidOffset(offset))
- return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
- if (attributes & Accessor)
- return GetByIdStatus(MakesCalls); // We could be smarter here, like strength-reducing this to a Call.
-
- if (!result.appendVariant(GetByIdVariant(structure, offset)))
- return GetByIdStatus(TakesSlowPath);
- }
-
return result;
}
-bool GetByIdStatus::makesCalls() const
-{
- switch (m_state) {
- case NoInformation:
- case TakesSlowPath:
- return false;
- case Simple:
- for (unsigned i = m_variants.size(); i--;) {
- if (m_variants[i].callLinkStatus())
- return true;
- }
- return false;
- case MakesCalls:
- return true;
- }
- RELEASE_ASSERT_NOT_REACHED();
-
- return false;
-}
-
-void GetByIdStatus::dump(PrintStream& out) const
-{
- out.print("(");
- switch (m_state) {
- case NoInformation:
- out.print("NoInformation");
- break;
- case Simple:
- out.print("Simple");
- break;
- case TakesSlowPath:
- out.print("TakesSlowPath");
- break;
- case MakesCalls:
- out.print("MakesCalls");
- break;
- }
- out.print(", ", listDump(m_variants), ", seenInJIT = ", m_wasSeenInJIT, ")");
-}
-
} // namespace JSC
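
Note on the removed appendVariant (left-hand side of this diff): it first tries to merge a new variant into the existing list, then bails out defensively if two variants would claim overlapping structure sets. A rough, self-contained C++ sketch of that pattern follows; the types here are simplified stand-ins, not the real JavaScriptCore API, and the merge test is reduced to "same offset" where the real GetByIdVariant::attemptToMerge also compares condition sets.

    #include <algorithm>
    #include <vector>

    // Hypothetical stand-ins for JSC types, for illustration only.
    using Structure = int;                       // models a distinct Structure*
    using StructureSet = std::vector<Structure>;

    struct Variant {
        StructureSet structures;
        int offset; // models PropertyOffset
    };

    bool overlaps(const StructureSet& a, const StructureSet& b)
    {
        for (Structure s : a) {
            if (std::find(b.begin(), b.end(), s) != b.end())
                return true;
        }
        return false;
    }

    // Shape of the removed GetByIdStatus::appendVariant: merge if possible,
    // otherwise refuse to append a variant whose structures overlap an
    // existing variant's, since an inline cache in a weird state could
    // otherwise produce ambiguous coverage.
    bool appendVariant(std::vector<Variant>& variants, const Variant& variant)
    {
        // Attempt to merge this variant with an already existing variant
        // (simplified to "same offset" here).
        for (Variant& existing : variants) {
            if (existing.offset == variant.offset) {
                existing.structures.insert(existing.structures.end(),
                    variant.structures.begin(), variant.structures.end());
                return true;
            }
        }
        // Defensive bail-out on overlap.
        for (const Variant& existing : variants) {
            if (overlaps(existing.structures, variant.structures))
                return false;
        }
        variants.push_back(variant);
        return true;
    }

    int main()
    {
        std::vector<Variant> variants;
        appendVariant(variants, { {1, 2}, /*offset*/ 8 });
        appendVariant(variants, { {3},    /*offset*/ 8 });  // merges into the first
        bool ok = appendVariant(variants, { {1}, /*offset*/ 16 }); // overlap: rejected
        return ok ? 1 : 0;
    }

The older code added on the right-hand side of this diff handles the same ambiguity differently: when two cached structures disagree on the offset, it collapses result.m_offset to invalidOffset, which the tail of computeFor then turns into a TakesSlowPath status.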