Diffstat (limited to 'Source/JavaScriptCore/bytecode/GetByIdStatus.cpp')
-rw-r--r-- | Source/JavaScriptCore/bytecode/GetByIdStatus.cpp | 446
1 file changed, 196 insertions, 250 deletions
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
index 89e5035f3..fbb3da1a5 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,54 +26,23 @@
 #include "config.h"
 #include "GetByIdStatus.h"
 
-#include "AccessorCallJITStubRoutine.h"
 #include "CodeBlock.h"
-#include "ComplexGetStatus.h"
-#include "JSCInlines.h"
 #include "JSScope.h"
 #include "LLIntData.h"
 #include "LowLevelInterpreter.h"
-#include "PolymorphicGetByIdList.h"
-#include <wtf/ListDump.h>
+#include "Operations.h"
 
 namespace JSC {
 
-bool GetByIdStatus::appendVariant(const GetByIdVariant& variant)
-{
-    // Attempt to merge this variant with an already existing variant.
-    for (unsigned i = 0; i < m_variants.size(); ++i) {
-        if (m_variants[i].attemptToMerge(variant))
-            return true;
-    }
-
-    // Make sure there is no overlap. We should have pruned out opportunities for
-    // overlap but it's possible that an inline cache got into a weird state. We are
-    // defensive and bail if we detect crazy.
-    for (unsigned i = 0; i < m_variants.size(); ++i) {
-        if (m_variants[i].structureSet().overlaps(variant.structureSet()))
-            return false;
-    }
-
-    m_variants.append(variant);
-    return true;
-}
-
-#if ENABLE(DFG_JIT)
-bool GetByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
-{
-    return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
-        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
-}
-#endif
-
-GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, StringImpl* uid)
 {
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
     UNUSED_PARAM(uid);
+#if ENABLE(LLINT)
     Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
 
-    if (instruction[0].u.opcode == LLInt::getOpcode(op_get_array_length))
+    if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_get_array_length))
        return GetByIdStatus(NoInformation, false);
 
     Structure* structure = instruction[4].u.structure.get();
@@ -84,276 +53,253 @@ GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned
         return GetByIdStatus(NoInformation, false);
 
     unsigned attributesIgnored;
-    PropertyOffset offset = structure->getConcurrently(uid, attributesIgnored);
+    JSCell* specificValue;
+    PropertyOffset offset = structure->getConcurrently(
+        *profiledBlock->vm(), uid, attributesIgnored, specificValue);
+    if (structure->isDictionary())
+        specificValue = 0;
     if (!isValidOffset(offset))
         return GetByIdStatus(NoInformation, false);
 
-    return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset));
+    return GetByIdStatus(Simple, false, StructureSet(structure), offset, specificValue);
+#else
+    return GetByIdStatus(NoInformation, false);
+#endif
 }
 
-GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
+void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, StringImpl* uid)
 {
-    ConcurrentJITLocker locker(profiledBlock->m_lock);
+#if ENABLE(JIT)
+    // Validate the chain. If the chain is invalid, then currently the best thing
+    // we can do is to assume that TakesSlow is true. In the future, it might be
+    // worth exploring reifying the structure chain from the structure we've got
+    // instead of using the one from the cache, since that will do the right things
+    // if the structure chain has changed. But that may be harder, because we may
+    // then end up having a different type of access altogether. And it currently
+    // does not appear to be worth it to do so -- effectively, the heuristic we
+    // have now is that if the structure chain has changed between when it was
+    // cached on in the baseline JIT and when the DFG tried to inline the access,
+    // then we fall back on a polymorphic access.
+    if (!result.m_chain->isStillValid())
+        return;
 
-    GetByIdStatus result;
+    if (result.m_chain->head()->takesSlowPathInDFGForImpureProperty())
+        return;
+    size_t chainSize = result.m_chain->size();
+    for (size_t i = 0; i < chainSize; i++) {
+        if (result.m_chain->at(i)->takesSlowPathInDFGForImpureProperty())
+            return;
+    }
 
-#if ENABLE(DFG_JIT)
-    result = computeForStubInfoWithoutExitSiteFeedback(
-        locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid,
-        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
+    JSObject* currentObject = result.m_chain->terminalPrototype();
+    Structure* currentStructure = result.m_chain->last();
 
-    if (!result.takesSlowPath()
-        && hasExitSite(locker, profiledBlock, bytecodeIndex))
-        return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true);
+    ASSERT_UNUSED(currentObject, currentObject);
+
+    unsigned attributesIgnored;
+    JSCell* specificValue;
+
+    result.m_offset = currentStructure->getConcurrently(
+        *profiledBlock->vm(), uid, attributesIgnored, specificValue);
+    if (currentStructure->isDictionary())
+        specificValue = 0;
+    if (!isValidOffset(result.m_offset))
+        return;
+
+    result.m_structureSet.add(result.m_chain->head());
+    result.m_specificValue = JSValue(specificValue);
 #else
-    UNUSED_PARAM(map);
+    UNUSED_PARAM(result);
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(uid);
+    UNREACHABLE_FOR_PLATFORM();
 #endif
-
-    if (!result)
-        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
-
-    return result;
 }
 
-#if ENABLE(JIT)
-GetByIdStatus GetByIdStatus::computeForStubInfo(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
-{
-    GetByIdStatus result = GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
-        locker, profiledBlock, stubInfo, uid,
-        CallLinkStatus::computeExitSiteData(locker, profiledBlock, codeOrigin.bytecodeIndex));
-
-    if (!result.takesSlowPath() && GetByIdStatus::hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
-        return GetByIdStatus(result.makesCalls() ? GetByIdStatus::MakesCalls : GetByIdStatus::TakesSlowPath, true);
-    return result;
-}
-#endif // ENABLE(JIT)
-
-#if ENABLE(JIT)
-GetByIdStatus GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
-    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, UniquedStringImpl* uid,
-    CallLinkStatus::ExitSiteData callExitSiteData)
+GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
 {
-    if (!stubInfo)
-        return GetByIdStatus(NoInformation);
+    ConcurrentJITLocker locker(profiledBlock->m_lock);
 
-    if (!stubInfo->seen)
-        return GetByIdStatus(NoInformation);
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(bytecodeIndex);
+    UNUSED_PARAM(uid);
+#if ENABLE(JIT)
+    StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
+    if (!stubInfo || !stubInfo->seen)
+        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
 
-    PolymorphicGetByIdList* list = 0;
-    State slowPathState = TakesSlowPath;
-    if (stubInfo->accessType == access_get_by_id_list) {
-        list = stubInfo->u.getByIdList.list;
-        for (unsigned i = 0; i < list->size(); ++i) {
-            const GetByIdAccess& access = list->at(i);
-            if (access.doesCalls())
-                slowPathState = MakesCalls;
-        }
+    if (stubInfo->resetByGC)
+        return GetByIdStatus(TakesSlowPath, true);
+
+    PolymorphicAccessStructureList* list;
+    int listSize;
+    switch (stubInfo->accessType) {
+    case access_get_by_id_self_list:
+        list = stubInfo->u.getByIdSelfList.structureList;
+        listSize = stubInfo->u.getByIdSelfList.listSize;
+        break;
+    case access_get_by_id_proto_list:
+        list = stubInfo->u.getByIdProtoList.structureList;
+        listSize = stubInfo->u.getByIdProtoList.listSize;
+        break;
+    default:
+        list = 0;
+        listSize = 0;
+        break;
+    }
+    for (int i = 0; i < listSize; ++i) {
+        if (!list->list[i].isDirect)
+            return GetByIdStatus(MakesCalls, true);
     }
 
-    if (stubInfo->tookSlowPath)
-        return GetByIdStatus(slowPathState);
+    // Next check if it takes slow case, in which case we want to be kind of careful.
+    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
+        return GetByIdStatus(TakesSlowPath, true);
 
     // Finally figure out if we can derive an access strategy.
     GetByIdStatus result;
-    result.m_state = Simple;
     result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
     switch (stubInfo->accessType) {
     case access_unset:
-        return GetByIdStatus(NoInformation);
+        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
 
     case access_get_by_id_self: {
         Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
         if (structure->takesSlowPathInDFGForImpureProperty())
-            return GetByIdStatus(slowPathState, true);
+            return GetByIdStatus(TakesSlowPath, true);
         unsigned attributesIgnored;
-        GetByIdVariant variant;
-        variant.m_offset = structure->getConcurrently(uid, attributesIgnored);
-        if (!isValidOffset(variant.m_offset))
-            return GetByIdStatus(slowPathState, true);
+        JSCell* specificValue;
+        result.m_offset = structure->getConcurrently(
+            *profiledBlock->vm(), uid, attributesIgnored, specificValue);
+        if (structure->isDictionary())
+            specificValue = 0;
 
-        variant.m_structureSet.add(structure);
-        bool didAppend = result.appendVariant(variant);
-        ASSERT_UNUSED(didAppend, didAppend);
-        return result;
+        if (isValidOffset(result.m_offset)) {
+            result.m_structureSet.add(structure);
+            result.m_specificValue = JSValue(specificValue);
+        }
+
+        if (isValidOffset(result.m_offset))
+            ASSERT(result.m_structureSet.size());
+        break;
     }
 
-    case access_get_by_id_list: {
-        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
-            Structure* structure = list->at(listIndex).structure();
+    case access_get_by_id_self_list: {
+        for (int i = 0; i < listSize; ++i) {
+            ASSERT(list->list[i].isDirect);
 
-            ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
-                structure, list->at(listIndex).conditionSet(), uid);
-
-            switch (complexGetStatus.kind()) {
-            case ComplexGetStatus::ShouldSkip:
+            Structure* structure = list->list[i].base.get();
+            if (structure->takesSlowPathInDFGForImpureProperty())
+                return GetByIdStatus(TakesSlowPath, true);
+
+            if (result.m_structureSet.contains(structure))
                 continue;
-
-            case ComplexGetStatus::TakesSlowPath:
-                return GetByIdStatus(slowPathState, true);
-
-            case ComplexGetStatus::Inlineable: {
-                std::unique_ptr<CallLinkStatus> callLinkStatus;
-                switch (list->at(listIndex).type()) {
-                case GetByIdAccess::SimpleInline:
-                case GetByIdAccess::SimpleStub: {
-                    break;
-                }
-                case GetByIdAccess::Getter: {
-                    AccessorCallJITStubRoutine* stub = static_cast<AccessorCallJITStubRoutine*>(
-                        list->at(listIndex).stubRoutine());
-                    callLinkStatus = std::make_unique<CallLinkStatus>(
-                        CallLinkStatus::computeFor(
-                            locker, profiledBlock, *stub->m_callLinkInfo, callExitSiteData));
-                    break;
-                }
-                case GetByIdAccess::SimpleMiss:
-                case GetByIdAccess::CustomGetter:
-                case GetByIdAccess::WatchedStub:{
-                    // FIXME: It would be totally sweet to support this at some point in the future.
-                    // https://bugs.webkit.org/show_bug.cgi?id=133052
-                    return GetByIdStatus(slowPathState, true);
-                }
-                default:
-                    RELEASE_ASSERT_NOT_REACHED();
-                }
-
-                GetByIdVariant variant(
-                    StructureSet(structure), complexGetStatus.offset(),
-                    complexGetStatus.conditionSet(), WTF::move(callLinkStatus));
-
-                if (!result.appendVariant(variant))
-                    return GetByIdStatus(slowPathState, true);
+
+            unsigned attributesIgnored;
+            JSCell* specificValue;
+            PropertyOffset myOffset = structure->getConcurrently(
+                *profiledBlock->vm(), uid, attributesIgnored, specificValue);
+            if (structure->isDictionary())
+                specificValue = 0;
+
+            if (!isValidOffset(myOffset)) {
+                result.m_offset = invalidOffset;
+                break;
+            }
+
+            if (!i) {
+                result.m_offset = myOffset;
+                result.m_specificValue = JSValue(specificValue);
+            } else if (result.m_offset != myOffset) {
+                result.m_offset = invalidOffset;
                 break;
-            } }
+            } else if (result.m_specificValue != JSValue(specificValue))
+                result.m_specificValue = JSValue();
+
+            result.m_structureSet.add(structure);
         }
+
+        if (isValidOffset(result.m_offset))
+            ASSERT(result.m_structureSet.size());
+        break;
+    }
 
-        return result;
+    case access_get_by_id_proto: {
+        if (!stubInfo->u.getByIdProto.isDirect)
+            return GetByIdStatus(MakesCalls, true);
+        result.m_chain = adoptRef(new IntendedStructureChain(
+            profiledBlock,
+            stubInfo->u.getByIdProto.baseObjectStructure.get(),
+            stubInfo->u.getByIdProto.prototypeStructure.get()));
+        computeForChain(result, profiledBlock, uid);
+        break;
+    }
+
+    case access_get_by_id_chain: {
+        if (!stubInfo->u.getByIdChain.isDirect)
+            return GetByIdStatus(MakesCalls, true);
+        result.m_chain = adoptRef(new IntendedStructureChain(
+            profiledBlock,
+            stubInfo->u.getByIdChain.baseObjectStructure.get(),
+            stubInfo->u.getByIdChain.chain.get(),
+            stubInfo->u.getByIdChain.count));
+        computeForChain(result, profiledBlock, uid);
+        break;
     }
 
     default:
-        return GetByIdStatus(slowPathState, true);
+        ASSERT(!isValidOffset(result.m_offset));
+        break;
    }
 
-    RELEASE_ASSERT_NOT_REACHED();
-    return GetByIdStatus();
-}
-#endif // ENABLE(JIT)
-
-GetByIdStatus GetByIdStatus::computeFor(
-    CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap,
-    StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
-{
-#if ENABLE(DFG_JIT)
-    if (dfgBlock) {
-        CallLinkStatus::ExitSiteData exitSiteData;
-        {
-            ConcurrentJITLocker locker(profiledBlock->m_lock);
-            exitSiteData = CallLinkStatus::computeExitSiteData(
-                locker, profiledBlock, codeOrigin.bytecodeIndex);
-        }
-
-        GetByIdStatus result;
-        {
-            ConcurrentJITLocker locker(dfgBlock->m_lock);
-            result = computeForStubInfoWithoutExitSiteFeedback(
-                locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
-        }
-
-        if (result.takesSlowPath())
-            return result;
+    if (!isValidOffset(result.m_offset)) {
+        result.m_state = TakesSlowPath;
+        result.m_structureSet.clear();
+        result.m_chain.clear();
+        result.m_specificValue = JSValue();
+    } else
+        result.m_state = Simple;
 
-        {
-            ConcurrentJITLocker locker(profiledBlock->m_lock);
-            if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
-                return GetByIdStatus(TakesSlowPath, true);
-        }
-
-        if (result.isSet())
-            return result;
-    }
-#else
-    UNUSED_PARAM(dfgBlock);
-    UNUSED_PARAM(dfgMap);
-#endif
-
-    return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
+    return result;
+#else // ENABLE(JIT)
+    UNUSED_PARAM(map);
+    return GetByIdStatus(NoInformation, false);
+#endif // ENABLE(JIT)
 }
 
-GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, UniquedStringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFor(VM& vm, Structure* structure, StringImpl* uid)
 {
     // For now we only handle the super simple self access case. We could handle the
     // prototype case in the future.
 
-    if (set.isEmpty())
-        return GetByIdStatus();
+    if (!structure)
+        return GetByIdStatus(TakesSlowPath);
 
-    if (parseIndex(*uid))
+    if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
         return GetByIdStatus(TakesSlowPath);
 
+    if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+        return GetByIdStatus(TakesSlowPath);
+
+    if (!structure->propertyAccessesAreCacheable())
+        return GetByIdStatus(TakesSlowPath);
+
     GetByIdStatus result;
+    result.m_wasSeenInJIT = false; // To my knowledge nobody that uses computeFor(VM&, Structure*, StringImpl*) reads this field, but I might as well be honest: no, it wasn't seen in the JIT, since I computed it statically.
+    unsigned attributes;
+    JSCell* specificValue;
+    result.m_offset = structure->getConcurrently(vm, uid, attributes, specificValue);
+    if (!isValidOffset(result.m_offset))
+        return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
+    if (attributes & Accessor)
+        return GetByIdStatus(MakesCalls);
+    if (structure->isDictionary())
+        specificValue = 0;
+    result.m_structureSet.add(structure);
+    result.m_specificValue = JSValue(specificValue);
     result.m_state = Simple;
-    result.m_wasSeenInJIT = false;
-    for (unsigned i = 0; i < set.size(); ++i) {
-        Structure* structure = set[i];
-        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
-            return GetByIdStatus(TakesSlowPath);
-
-        if (!structure->propertyAccessesAreCacheable())
-            return GetByIdStatus(TakesSlowPath);
-
-        unsigned attributes;
-        PropertyOffset offset = structure->getConcurrently(uid, attributes);
-        if (!isValidOffset(offset))
-            return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
-        if (attributes & Accessor)
-            return GetByIdStatus(MakesCalls); // We could be smarter here, like strenght-reducing this to a Call.
-
-        if (!result.appendVariant(GetByIdVariant(structure, offset)))
-            return GetByIdStatus(TakesSlowPath);
-    }
-
     return result;
 }
 
-bool GetByIdStatus::makesCalls() const
-{
-    switch (m_state) {
-    case NoInformation:
-    case TakesSlowPath:
-        return false;
-    case Simple:
-        for (unsigned i = m_variants.size(); i--;) {
-            if (m_variants[i].callLinkStatus())
-                return true;
-        }
-        return false;
-    case MakesCalls:
-        return true;
-    }
-    RELEASE_ASSERT_NOT_REACHED();
-
-    return false;
-}
-
-void GetByIdStatus::dump(PrintStream& out) const
-{
-    out.print("(");
-    switch (m_state) {
-    case NoInformation:
-        out.print("NoInformation");
-        break;
-    case Simple:
-        out.print("Simple");
-        break;
-    case TakesSlowPath:
-        out.print("TakesSlowPath");
-        break;
-    case MakesCalls:
-        out.print("MakesCalls");
-        break;
-    }
-    out.print(", ", listDump(m_variants), ", seenInJIT = ", m_wasSeenInJIT, ")");
-}
-
 } // namespace JSC
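
Note for readers skimming the patch: the object that computeFor() returns is a small profiling summary that the optimizing compiler consults before deciding how to compile a get_by_id. The sketch below is a self-contained C++ model of that tri-state pattern only; the names mirror the diff above, but every type and field here is an illustrative stand-in, not JSC's actual API.

    #include <cstdio>

    // Toy model of the GetByIdStatus pattern in the patch above. All names are
    // illustrative stand-ins for JSC-internal types, not the engine's real API.
    enum class State {
        NoInformation, // no inline cache data; fall back to LLInt profiling
        Simple,        // cacheable self access: structure set + constant offset known
        TakesSlowPath, // cache went polymorphic/uncacheable; emit a generic get
        MakesCalls     // some cached access invokes a getter, so calls can happen
    };

    struct StatusModel {
        State state = State::NoInformation;
        int offset = -1;           // meaningful only when state == Simple
        bool wasSeenInJIT = false; // mirrors result.m_wasSeenInJIT above

        bool isSimple() const { return state == State::Simple; }
        // As in the real code, MakesCalls implies the slow path is taken too.
        bool takesSlowPath() const
        {
            return state == State::TakesSlowPath || state == State::MakesCalls;
        }
    };

    // A compiler-side client makes the same three-way decision the DFG makes.
    static void planAccess(const StatusModel& status)
    {
        if (status.isSimple())
            std::printf("inline the load at offset %d behind a structure check\n", status.offset);
        else if (status.takesSlowPath())
            std::printf("emit a generic (possibly calling) get_by_id\n");
        else
            std::printf("no information; leave the access unoptimized\n");
    }

    int main()
    {
        planAccess({ State::Simple, 2, true });
        planAccess({ State::MakesCalls, -1, true });
        planAccess({});
        return 0;
    }

The design point the model captures: the status deliberately collapses everything the inline caches learned into a few states plus just enough data (structure set, offset, specific value) for the DFG to act on, so the compiler never has to walk the caches itself.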