author     Konstantin Tokarev <annulen@yandex.ru>    2016-08-25 19:20:41 +0300
committer  Konstantin Tokarev <annulen@yandex.ru>    2017-02-02 12:30:55 +0000
commit     6882a04fb36642862b11efe514251d32070c3d65 (patch)
tree       b7959826000b061fd5ccc7512035c7478742f7b0 /Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
parent     ab6df191029eeeb0b0f16f127d553265659f739e (diff)
download   qtwebkit-6882a04fb36642862b11efe514251d32070c3d65.tar.gz
Imported QtWebKit TP3 (git b57bc6801f1876c3220d5a4bfea33d620d477443)
Change-Id: I3b1d8a2808782c9f34d50240000e20cb38d3680f
Reviewed-by: Konstantin Tokarev <annulen@yandex.ru>
Diffstat (limited to 'Source/JavaScriptCore/bytecode/GetByIdStatus.cpp')
-rw-r--r--  Source/JavaScriptCore/bytecode/GetByIdStatus.cpp  480
1 file changed, 282 insertions(+), 198 deletions(-)
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
index db4aa9b99..66a4dd81d 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -27,264 +27,348 @@
 #include "GetByIdStatus.h"
 
 #include "CodeBlock.h"
+#include "ComplexGetStatus.h"
+#include "JSCInlines.h"
 #include "JSScope.h"
 #include "LLIntData.h"
 #include "LowLevelInterpreter.h"
-#include "Operations.h"
+#include "PolymorphicAccess.h"
+#include <wtf/ListDump.h>
 
 namespace JSC {
 
-GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
+bool GetByIdStatus::appendVariant(const GetByIdVariant& variant)
+{
+    // Attempt to merge this variant with an already existing variant.
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].attemptToMerge(variant))
+            return true;
+    }
+
+    // Make sure there is no overlap. We should have pruned out opportunities for
+    // overlap but it's possible that an inline cache got into a weird state. We are
+    // defensive and bail if we detect crazy.
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].structureSet().overlaps(variant.structureSet()))
+            return false;
+    }
+
+    m_variants.append(variant);
+    return true;
+}
+
+#if ENABLE(DFG_JIT)
+bool GetByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+    return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
+        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
+}
+#endif
+
+GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
 {
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
-    UNUSED_PARAM(ident);
-#if ENABLE(LLINT)
+    UNUSED_PARAM(uid);
+
+    VM& vm = *profiledBlock->vm();
+
     Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
-    if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_get_array_length))
+
+    if (instruction[0].u.opcode == LLInt::getOpcode(op_get_array_length))
         return GetByIdStatus(NoInformation, false);
 
-    Structure* structure = instruction[4].u.structure.get();
-    if (!structure)
+    StructureID structureID = instruction[4].u.structureID;
+    if (!structureID)
         return GetByIdStatus(NoInformation, false);
-
+
+    Structure* structure = vm.heap.structureIDTable().get(structureID);
+
+    if (structure->takesSlowPathInDFGForImpureProperty())
+        return GetByIdStatus(NoInformation, false);
+
     unsigned attributesIgnored;
-    JSCell* specificValue;
-    PropertyOffset offset = structure->get(
-        *profiledBlock->vm(), ident, attributesIgnored, specificValue);
-    if (structure->isDictionary())
-        specificValue = 0;
+    PropertyOffset offset = structure->getConcurrently(uid, attributesIgnored);
     if (!isValidOffset(offset))
         return GetByIdStatus(NoInformation, false);
 
-    return GetByIdStatus(Simple, false, StructureSet(structure), offset, specificValue);
-#else
-    return GetByIdStatus(NoInformation, false);
-#endif
+    return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset));
 }
 
-void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, Identifier& ident, Structure* structure)
+GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
 {
-#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
-    // Validate the chain. If the chain is invalid, then currently the best thing
-    // we can do is to assume that TakesSlow is true. In the future, it might be
-    // worth exploring reifying the structure chain from the structure we've got
-    // instead of using the one from the cache, since that will do the right things
-    // if the structure chain has changed. But that may be harder, because we may
-    // then end up having a different type of access altogether. And it currently
-    // does not appear to be worth it to do so -- effectively, the heuristic we
-    // have now is that if the structure chain has changed between when it was
-    // cached on in the baseline JIT and when the DFG tried to inline the access,
-    // then we fall back on a polymorphic access.
-    Structure* currentStructure = structure;
-    JSObject* currentObject = 0;
-    for (unsigned i = 0; i < result.m_chain.size(); ++i) {
-        ASSERT(!currentStructure->isDictionary());
-        currentObject = asObject(currentStructure->prototypeForLookup(profiledBlock));
-        currentStructure = result.m_chain[i];
-        if (currentObject->structure() != currentStructure)
-            return;
-    }
+    ConcurrentJITLocker locker(profiledBlock->m_lock);
+
+    GetByIdStatus result;
+
+#if ENABLE(DFG_JIT)
+    result = computeForStubInfoWithoutExitSiteFeedback(
+        locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid,
+        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
 
-    ASSERT(currentObject);
-
-    unsigned attributesIgnored;
-    JSCell* specificValue;
-
-    result.m_offset = currentStructure->get(
-        *profiledBlock->vm(), ident, attributesIgnored, specificValue);
-    if (currentStructure->isDictionary())
-        specificValue = 0;
-    if (!isValidOffset(result.m_offset))
-        return;
-
-    result.m_structureSet.add(structure);
-    result.m_specificValue = JSValue(specificValue);
+    if (!result.takesSlowPath()
+        && hasExitSite(locker, profiledBlock, bytecodeIndex))
+        return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true);
 #else
-    UNUSED_PARAM(result);
-    UNUSED_PARAM(profiledBlock);
-    UNUSED_PARAM(ident);
-    UNUSED_PARAM(structure);
-    UNREACHABLE_FOR_PLATFORM();
+    UNUSED_PARAM(map);
 #endif
+
+    if (!result)
+        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
+
+    return result;
 }
 
-GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
+#if ENABLE(DFG_JIT)
+GetByIdStatus GetByIdStatus::computeForStubInfo(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
 {
-    UNUSED_PARAM(profiledBlock);
-    UNUSED_PARAM(bytecodeIndex);
-    UNUSED_PARAM(ident);
-#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
-    if (!profiledBlock->numberOfStructureStubInfos())
-        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
-
-    // First check if it makes either calls, in which case we want to be super careful, or
-    // if it's not set at all, in which case we punt.
-    StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex);
-    if (!stubInfo.seen)
-        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
-
-    if (stubInfo.resetByGC)
-        return GetByIdStatus(TakesSlowPath, true);
+    GetByIdStatus result = GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
+        locker, profiledBlock, stubInfo, uid,
+        CallLinkStatus::computeExitSiteData(locker, profiledBlock, codeOrigin.bytecodeIndex));
 
-    PolymorphicAccessStructureList* list;
-    int listSize;
-    switch (stubInfo.accessType) {
-    case access_get_by_id_self_list:
-        list = stubInfo.u.getByIdSelfList.structureList;
-        listSize = stubInfo.u.getByIdSelfList.listSize;
-        break;
-    case access_get_by_id_proto_list:
-        list = stubInfo.u.getByIdProtoList.structureList;
-        listSize = stubInfo.u.getByIdProtoList.listSize;
-        break;
-    default:
-        list = 0;
-        listSize = 0;
-        break;
-    }
-    for (int i = 0; i < listSize; ++i) {
-        if (!list->list[i].isDirect)
-            return GetByIdStatus(MakesCalls, true);
+    if (!result.takesSlowPath() && GetByIdStatus::hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
+        return GetByIdStatus(result.makesCalls() ? GetByIdStatus::MakesCalls : GetByIdStatus::TakesSlowPath, true);
+    return result;
+}
+#endif // ENABLE(DFG_JIT)
+
+#if ENABLE(JIT)
+GetByIdStatus GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
+    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, UniquedStringImpl* uid,
+    CallLinkStatus::ExitSiteData callExitSiteData)
+{
+    if (!stubInfo || !stubInfo->everConsidered)
+        return GetByIdStatus(NoInformation);
+
+    PolymorphicAccess* list = 0;
+    State slowPathState = TakesSlowPath;
+    if (stubInfo->cacheType == CacheType::Stub) {
+        list = stubInfo->u.stub;
+        for (unsigned i = 0; i < list->size(); ++i) {
+            const AccessCase& access = list->at(i);
+            if (access.doesCalls())
+                slowPathState = MakesCalls;
+        }
     }
 
-    // Next check if it takes slow case, in which case we want to be kind of careful.
-    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
-        return GetByIdStatus(TakesSlowPath, true);
+    if (stubInfo->tookSlowPath)
+        return GetByIdStatus(slowPathState);
 
     // Finally figure out if we can derive an access strategy.
     GetByIdStatus result;
+    result.m_state = Simple;
     result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
-    switch (stubInfo.accessType) {
-    case access_unset:
-        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
+    switch (stubInfo->cacheType) {
+    case CacheType::Unset:
+        return GetByIdStatus(NoInformation);
 
-    case access_get_by_id_self: {
-        Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
+    case CacheType::GetByIdSelf: {
+        Structure* structure = stubInfo->u.byIdSelf.baseObjectStructure.get();
+        if (structure->takesSlowPathInDFGForImpureProperty())
+            return GetByIdStatus(slowPathState, true);
         unsigned attributesIgnored;
-        JSCell* specificValue;
-        result.m_offset = structure->get(
-            *profiledBlock->vm(), ident, attributesIgnored, specificValue);
-        if (structure->isDictionary())
-            specificValue = 0;
+        GetByIdVariant variant;
+        variant.m_offset = structure->getConcurrently(uid, attributesIgnored);
+        if (!isValidOffset(variant.m_offset))
+            return GetByIdStatus(slowPathState, true);
 
-        if (isValidOffset(result.m_offset)) {
-            result.m_structureSet.add(structure);
-            result.m_specificValue = JSValue(specificValue);
-        }
-
-        if (isValidOffset(result.m_offset))
-            ASSERT(result.m_structureSet.size());
-        break;
+        variant.m_structureSet.add(structure);
+        bool didAppend = result.appendVariant(variant);
+        ASSERT_UNUSED(didAppend, didAppend);
+        return result;
     }
 
-    case access_get_by_id_self_list: {
-        for (int i = 0; i < listSize; ++i) {
-            ASSERT(list->list[i].isDirect);
-
-            Structure* structure = list->list[i].base.get();
-            if (result.m_structureSet.contains(structure))
-                continue;
-
-            unsigned attributesIgnored;
-            JSCell* specificValue;
-            PropertyOffset myOffset = structure->get(
-                *profiledBlock->vm(), ident, attributesIgnored, specificValue);
-            if (structure->isDictionary())
-                specificValue = 0;
+    case CacheType::Stub: {
+        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
+            const AccessCase& access = list->at(listIndex);
+            if (access.viaProxy())
+                return GetByIdStatus(slowPathState, true);
 
-            if (!isValidOffset(myOffset)) {
-                result.m_offset = invalidOffset;
-                break;
+            Structure* structure = access.structure();
+            if (!structure) {
+                // The null structure cases arise due to array.length and string.length. We have no way
+                // of creating a GetByIdVariant for those, and we don't really have to since the DFG
+                // handles those cases in FixupPhase using value profiling. That's a bit awkward - we
+                // shouldn't have to use value profiling to discover something that the AccessCase
+                // could have told us. But, it works well enough. So, our only concern here is to not
+                // crash on null structure.
+                return GetByIdStatus(slowPathState, true);
             }
-
-            if (!i) {
-                result.m_offset = myOffset;
-                result.m_specificValue = JSValue(specificValue);
-            } else if (result.m_offset != myOffset) {
-                result.m_offset = invalidOffset;
-                break;
-            } else if (result.m_specificValue != JSValue(specificValue))
-                result.m_specificValue = JSValue();
 
-            result.m_structureSet.add(structure);
+            ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
+                structure, access.conditionSet(), uid);
+
+            switch (complexGetStatus.kind()) {
+            case ComplexGetStatus::ShouldSkip:
+                continue;
+
+            case ComplexGetStatus::TakesSlowPath:
+                return GetByIdStatus(slowPathState, true);
+
+            case ComplexGetStatus::Inlineable: {
+                std::unique_ptr<CallLinkStatus> callLinkStatus;
+                JSFunction* intrinsicFunction = nullptr;
+
+                switch (access.type()) {
+                case AccessCase::Load: {
+                    break;
+                }
+                case AccessCase::IntrinsicGetter: {
+                    intrinsicFunction = access.intrinsicFunction();
+                    break;
+                }
+                case AccessCase::Getter: {
+                    CallLinkInfo* callLinkInfo = access.callLinkInfo();
+                    ASSERT(callLinkInfo);
+                    callLinkStatus = std::make_unique<CallLinkStatus>(
+                        CallLinkStatus::computeFor(
+                            locker, profiledBlock, *callLinkInfo, callExitSiteData));
+                    break;
+                }
+                default: {
+                    // FIXME: It would be totally sweet to support more of these at some point in the
+                    // future. https://bugs.webkit.org/show_bug.cgi?id=133052
+                    return GetByIdStatus(slowPathState, true);
+                } }
+
+                GetByIdVariant variant(
+                    StructureSet(structure), complexGetStatus.offset(),
+                    complexGetStatus.conditionSet(), WTFMove(callLinkStatus),
+                    intrinsicFunction);
+
+                if (!result.appendVariant(variant))
+                    return GetByIdStatus(slowPathState, true);
+                break;
+            } }
         }
-
-        if (isValidOffset(result.m_offset))
-            ASSERT(result.m_structureSet.size());
-        break;
-    }
 
-    case access_get_by_id_proto: {
-        if (!stubInfo.u.getByIdProto.isDirect)
-            return GetByIdStatus(MakesCalls, true);
-        result.m_chain.append(stubInfo.u.getByIdProto.prototypeStructure.get());
-        computeForChain(
-            result, profiledBlock, ident,
-            stubInfo.u.getByIdProto.baseObjectStructure.get());
-        break;
-    }
-
-    case access_get_by_id_chain: {
-        if (!stubInfo.u.getByIdChain.isDirect)
-            return GetByIdStatus(MakesCalls, true);
-        for (unsigned i = 0; i < stubInfo.u.getByIdChain.count; ++i)
-            result.m_chain.append(stubInfo.u.getByIdChain.chain->head()[i].get());
-        computeForChain(
-            result, profiledBlock, ident,
-            stubInfo.u.getByIdChain.baseObjectStructure.get());
-        break;
+
+        return result;
     }
 
     default:
-        ASSERT(!isValidOffset(result.m_offset));
-        break;
+        return GetByIdStatus(slowPathState, true);
     }
 
-    if (!isValidOffset(result.m_offset)) {
-        result.m_state = TakesSlowPath;
-        result.m_structureSet.clear();
-        result.m_chain.clear();
-        result.m_specificValue = JSValue();
-    } else
-        result.m_state = Simple;
-
-    return result;
-#else // ENABLE(JIT)
-    return GetByIdStatus(NoInformation, false);
+    RELEASE_ASSERT_NOT_REACHED();
+    return GetByIdStatus();
+}
 #endif // ENABLE(JIT)
+
+GetByIdStatus GetByIdStatus::computeFor(
+    CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap,
+    StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+#if ENABLE(DFG_JIT)
+    if (dfgBlock) {
+        CallLinkStatus::ExitSiteData exitSiteData;
+        {
+            ConcurrentJITLocker locker(profiledBlock->m_lock);
+            exitSiteData = CallLinkStatus::computeExitSiteData(
+                locker, profiledBlock, codeOrigin.bytecodeIndex);
+        }
+
+        GetByIdStatus result;
+        {
+            ConcurrentJITLocker locker(dfgBlock->m_lock);
+            result = computeForStubInfoWithoutExitSiteFeedback(
+                locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+        }
+
+        if (result.takesSlowPath())
+            return result;
+
+        {
+            ConcurrentJITLocker locker(profiledBlock->m_lock);
+            if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
+                return GetByIdStatus(TakesSlowPath, true);
+        }
+
+        if (result.isSet())
+            return result;
+    }
+#else
+    UNUSED_PARAM(dfgBlock);
+    UNUSED_PARAM(dfgMap);
+#endif
+
+    return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
 }
 
-GetByIdStatus GetByIdStatus::computeFor(VM& vm, Structure* structure, Identifier& ident)
+GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, UniquedStringImpl* uid)
 {
     // For now we only handle the super simple self access case. We could handle the
     // prototype case in the future.
 
-    if (PropertyName(ident).asIndex() != PropertyName::NotAnIndex)
-        return GetByIdStatus(TakesSlowPath);
-
-    if (structure->typeInfo().overridesGetOwnPropertySlot())
-        return GetByIdStatus(TakesSlowPath);
-
-    if (!structure->propertyAccessesAreCacheable())
+    if (set.isEmpty())
+        return GetByIdStatus();
+
+    if (parseIndex(*uid))
         return GetByIdStatus(TakesSlowPath);
 
     GetByIdStatus result;
-    result.m_wasSeenInJIT = false; // To my knowledge nobody that uses computeFor(VM&, Structure*, Identifier&) reads this field, but I might as well be honest: no, it wasn't seen in the JIT, since I computed it statically.
-    unsigned attributes;
-    JSCell* specificValue;
-    result.m_offset = structure->get(vm, ident, attributes, specificValue);
-    if (!isValidOffset(result.m_offset))
-        return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
-    if (attributes & Accessor)
-        return GetByIdStatus(MakesCalls);
-    if (structure->isDictionary())
-        specificValue = 0;
-    result.m_structureSet.add(structure);
-    result.m_specificValue = JSValue(specificValue);
+    result.m_state = Simple;
+    result.m_wasSeenInJIT = false;
+    for (unsigned i = 0; i < set.size(); ++i) {
+        Structure* structure = set[i];
+        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+            return GetByIdStatus(TakesSlowPath);
+
+        if (!structure->propertyAccessesAreCacheable())
+            return GetByIdStatus(TakesSlowPath);
+
+        unsigned attributes;
+        PropertyOffset offset = structure->getConcurrently(uid, attributes);
+        if (!isValidOffset(offset))
+            return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
+        if (attributes & Accessor)
+            return GetByIdStatus(MakesCalls); // We could be smarter here, like strength-reducing this to a Call.
+
+        if (!result.appendVariant(GetByIdVariant(structure, offset)))
+            return GetByIdStatus(TakesSlowPath);
+    }
+
     return result;
 }
 
+bool GetByIdStatus::makesCalls() const
+{
+    switch (m_state) {
+    case NoInformation:
+    case TakesSlowPath:
+        return false;
+    case Simple:
+        for (unsigned i = m_variants.size(); i--;) {
+            if (m_variants[i].callLinkStatus())
+                return true;
+        }
+        return false;
+    case MakesCalls:
+        return true;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+
+    return false;
+}
+
+void GetByIdStatus::dump(PrintStream& out) const
+{
+    out.print("(");
+    switch (m_state) {
+    case NoInformation:
+        out.print("NoInformation");
+        break;
+    case Simple:
+        out.print("Simple");
+        break;
+    case TakesSlowPath:
+        out.print("TakesSlowPath");
+        break;
+    case MakesCalls:
+        out.print("MakesCalls");
+        break;
+    }
+    out.print(", ", listDump(m_variants), ", seenInJIT = ", m_wasSeenInJIT, ")");
+}
+
 } // namespace JSC