author    Konstantin Tokarev <annulen@yandex.ru>  2016-08-25 19:20:41 +0300
committer Konstantin Tokarev <annulen@yandex.ru>  2017-02-02 12:30:55 +0000
commit    6882a04fb36642862b11efe514251d32070c3d65 (patch)
tree      b7959826000b061fd5ccc7512035c7478742f7b0 /Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
parent    ab6df191029eeeb0b0f16f127d553265659f739e (diff)
download  qtwebkit-6882a04fb36642862b11efe514251d32070c3d65.tar.gz

Imported QtWebKit TP3 (git b57bc6801f1876c3220d5a4bfea33d620d477443)

Change-Id: I3b1d8a2808782c9f34d50240000e20cb38d3680f
Reviewed-by: Konstantin Tokarev <annulen@yandex.ru>
Diffstat (limited to 'Source/JavaScriptCore/bytecode/PutByIdStatus.cpp')
-rw-r--r--  Source/JavaScriptCore/bytecode/PutByIdStatus.cpp | 462
1 file changed, 330 insertions(+), 132 deletions(-)
diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
index 24a57eb50..3d066b9ae 100644
--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -27,193 +27,391 @@
 #include "PutByIdStatus.h"
 
 #include "CodeBlock.h"
+#include "ComplexGetStatus.h"
 #include "LLIntData.h"
 #include "LowLevelInterpreter.h"
-#include "Operations.h"
+#include "JSCInlines.h"
+#include "PolymorphicAccess.h"
 #include "Structure.h"
 #include "StructureChain.h"
+#include <wtf/ListDump.h>
 
 namespace JSC {
 
-PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
+bool PutByIdStatus::appendVariant(const PutByIdVariant& variant)
+{
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].attemptToMerge(variant))
+            return true;
+    }
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].oldStructure().overlaps(variant.oldStructure()))
+            return false;
+    }
+    m_variants.append(variant);
+    return true;
+}
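// For context: a minimal standalone sketch (not from the patch) of the policy
// appendVariant() implements above. First try to merge the new variant into an
// existing one; otherwise admit it only if its old-structure set is disjoint
// from every existing variant's, so each structure maps to exactly one variant.
// The types below are simplified stand-ins, not the real JSC
// PutByIdVariant/StructureSet API.
#include <algorithm>
#include <vector>

struct SketchStructureSet {
    std::vector<int> ids; // stand-in for a set of Structure*

    bool overlaps(const SketchStructureSet& other) const
    {
        for (int id : ids) {
            if (std::find(other.ids.begin(), other.ids.end(), id) != other.ids.end())
                return true;
        }
        return false;
    }
};

struct SketchVariant {
    SketchStructureSet oldStructure;
    int offset; // property storage offset this variant writes to

    // Hypothetical merge rule: same offset => union the structure sets. The
    // real PutByIdVariant::attemptToMerge is stricter (kind, transition
    // target, and conditions must also agree).
    bool attemptToMerge(const SketchVariant& other)
    {
        if (offset != other.offset)
            return false;
        for (int id : other.oldStructure.ids) {
            if (std::find(oldStructure.ids.begin(), oldStructure.ids.end(), id) == oldStructure.ids.end())
                oldStructure.ids.push_back(id);
        }
        return true;
    }
};

bool appendVariantSketch(std::vector<SketchVariant>& variants, const SketchVariant& variant)
{
    for (SketchVariant& existing : variants) {
        if (existing.attemptToMerge(variant))
            return true; // folded into an existing case
    }
    for (const SketchVariant& existing : variants) {
        if (existing.oldStructure.overlaps(variant.oldStructure))
            return false; // ambiguous: two variants would claim one structure
    }
    variants.push_back(variant);
    return true;
}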
+
+#if ENABLE(DFG_JIT)
+bool PutByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+    return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
+        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
+
+}
+#endif
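// For context: BadCache and BadConstantCache are OSR-exit reasons recorded when
// previously optimized code bailed out because a structure check at this
// bytecode failed. When such exits are frequent, the inline-cache data consulted
// by the callers below is considered untrustworthy, and they conservatively
// report TakesSlowPath instead of building variants from it.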
+
+PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
 {
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
-    UNUSED_PARAM(ident);
-#if ENABLE(LLINT)
+    UNUSED_PARAM(uid);
+
+    VM& vm = *profiledBlock->vm();
+
     Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
-    Structure* structure = instruction[4].u.structure.get();
-    if (!structure)
-        return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+    StructureID structureID = instruction[4].u.structureID;
+    if (!structureID)
+        return PutByIdStatus(NoInformation);
 
-    if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id)
-        || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_out_of_line)) {
-        PropertyOffset offset = structure->get(*profiledBlock->vm(), ident);
+    Structure* structure = vm.heap.structureIDTable().get(structureID);
+
+    StructureID newStructureID = instruction[6].u.structureID;
+    if (!newStructureID) {
+        PropertyOffset offset = structure->getConcurrently(uid);
         if (!isValidOffset(offset))
-            return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+            return PutByIdStatus(NoInformation);
 
-        return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
+        return PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
     }
+
+    Structure* newStructure = vm.heap.structureIDTable().get(newStructureID);
 
     ASSERT(structure->transitionWatchpointSetHasBeenInvalidated());
-    ASSERT(instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct)
-        || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal)
-        || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct_out_of_line)
-        || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal_out_of_line));
-
-    Structure* newStructure = instruction[6].u.structure.get();
-    StructureChain* chain = instruction[7].u.structureChain.get();
-    ASSERT(newStructure);
-    ASSERT(chain);
-
-    PropertyOffset offset = newStructure->get(*profiledBlock->vm(), ident);
+    PropertyOffset offset = newStructure->getConcurrently(uid);
     if (!isValidOffset(offset))
-        return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+        return PutByIdStatus(NoInformation);
 
-    return PutByIdStatus(SimpleTransition, structure, newStructure, chain, offset);
-#else
-    return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
-#endif
+    ObjectPropertyConditionSet conditionSet;
+    if (!(instruction[8].u.putByIdFlags & PutByIdIsDirect)) {
+        conditionSet =
+            generateConditionsForPropertySetterMissConcurrently(
+                *profiledBlock->vm(), profiledBlock->globalObject(), structure, uid);
+        if (!conditionSet.isValid())
+            return PutByIdStatus(NoInformation);
+    }
+
+    return PutByIdVariant::transition(
+        structure, newStructure, conditionSet, offset, newStructure->inferredTypeDescriptorFor(uid));
 }
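// For context: a hedged summary of how the LLInt path above decodes the
// put_by_id instruction's inline-cache slots. The slot indices match the code
// above; the enum and function are illustrative stand-ins, not JSC API.
//
//   instruction[4]: old StructureID -- zero means the cache was never populated
//   instruction[6]: new StructureID -- zero means a replace, non-zero a transition
//   instruction[8]: PutByIdFlags    -- non-direct transitions additionally need
//                                      the prototype-chain setter-miss conditions
enum class LLIntPutKind { NoInformation, Replace, Transition };

LLIntPutKind classifyLLIntPut(unsigned oldStructureID, unsigned newStructureID)
{
    if (!oldStructureID)
        return LLIntPutKind::NoInformation; // inline cache never hit
    if (!newStructureID)
        return LLIntPutKind::Replace;       // existing property, structure unchanged
    return LLIntPutKind::Transition;        // property added, structure changes
}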
-PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
 {
+    ConcurrentJITLocker locker(profiledBlock->m_lock);
+
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
-    UNUSED_PARAM(ident);
-#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
-    if (!profiledBlock->numberOfStructureStubInfos())
-        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
-
-    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+    UNUSED_PARAM(uid);
+#if ENABLE(DFG_JIT)
+    if (hasExitSite(locker, profiledBlock, bytecodeIndex))
+        return PutByIdStatus(TakesSlowPath);
 
-    StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex);
-    if (!stubInfo.seen)
-        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
+    StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
+    PutByIdStatus result = computeForStubInfo(
+        locker, profiledBlock, stubInfo, uid,
+        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
+    if (!result)
+        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
 
-    if (stubInfo.resetByGC)
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+    return result;
+#else // ENABLE(JIT)
+    UNUSED_PARAM(map);
+    return PutByIdStatus(NoInformation);
+#endif // ENABLE(JIT)
+}
 
-    switch (stubInfo.accessType) {
-    case access_unset:
-        // If the JIT saw it but didn't optimize it, then assume that this takes slow path.
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+#if ENABLE(JIT)
+PutByIdStatus PutByIdStatus::computeForStubInfo(const ConcurrentJITLocker& locker, CodeBlock* baselineBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+    return computeForStubInfo(
+        locker, baselineBlock, stubInfo, uid,
+        CallLinkStatus::computeExitSiteData(locker, baselineBlock, codeOrigin.bytecodeIndex));
+}
+
+PutByIdStatus PutByIdStatus::computeForStubInfo(
+    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
+    UniquedStringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData)
+{
+    if (!stubInfo || !stubInfo->everConsidered)
+        return PutByIdStatus();
+
+    if (stubInfo->tookSlowPath)
+        return PutByIdStatus(TakesSlowPath);
+
+    switch (stubInfo->cacheType) {
+    case CacheType::Unset:
+        // This means that we attempted to cache but failed for some reason.
+        return PutByIdStatus(TakesSlowPath);
 
-    case access_put_by_id_replace: {
-        PropertyOffset offset = stubInfo.u.putByIdReplace.baseObjectStructure->get(
-            *profiledBlock->vm(), ident);
+    case CacheType::PutByIdReplace: {
+        PropertyOffset offset =
+            stubInfo->u.byIdSelf.baseObjectStructure->getConcurrently(uid);
         if (isValidOffset(offset)) {
-            return PutByIdStatus(
-                SimpleReplace,
-                stubInfo.u.putByIdReplace.baseObjectStructure.get(),
-                0, 0,
-                offset);
+            return PutByIdVariant::replace(
+                stubInfo->u.byIdSelf.baseObjectStructure.get(), offset, InferredType::Top);
         }
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+        return PutByIdStatus(TakesSlowPath);
     }
 
-    case access_put_by_id_transition_normal:
-    case access_put_by_id_transition_direct: {
-        ASSERT(stubInfo.u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated());
-        PropertyOffset offset = stubInfo.u.putByIdTransition.structure->get(
-            *profiledBlock->vm(), ident);
-        if (isValidOffset(offset)) {
-            return PutByIdStatus(
-                SimpleTransition,
-                stubInfo.u.putByIdTransition.previousStructure.get(),
-                stubInfo.u.putByIdTransition.structure.get(),
-                stubInfo.u.putByIdTransition.chain.get(),
-                offset);
+    case CacheType::Stub: {
+        PolymorphicAccess* list = stubInfo->u.stub;
+
+        PutByIdStatus result;
+        result.m_state = Simple;
+
+        State slowPathState = TakesSlowPath;
+        for (unsigned i = 0; i < list->size(); ++i) {
+            const AccessCase& access = list->at(i);
+            if (access.doesCalls())
+                slowPathState = MakesCalls;
         }
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+
+        for (unsigned i = 0; i < list->size(); ++i) {
+            const AccessCase& access = list->at(i);
+            if (access.viaProxy())
+                return PutByIdStatus(slowPathState);
+
+            PutByIdVariant variant;
+
+            switch (access.type()) {
+            case AccessCase::Replace: {
+                Structure* structure = access.structure();
+                PropertyOffset offset = structure->getConcurrently(uid);
+                if (!isValidOffset(offset))
+                    return PutByIdStatus(slowPathState);
+                variant = PutByIdVariant::replace(
+                    structure, offset, structure->inferredTypeDescriptorFor(uid));
+                break;
+            }
+
+            case AccessCase::Transition: {
+                PropertyOffset offset =
+                    access.newStructure()->getConcurrently(uid);
+                if (!isValidOffset(offset))
+                    return PutByIdStatus(slowPathState);
+                ObjectPropertyConditionSet conditionSet = access.conditionSet();
+                if (!conditionSet.structuresEnsureValidity())
+                    return PutByIdStatus(slowPathState);
+                variant = PutByIdVariant::transition(
+                    access.structure(), access.newStructure(), conditionSet, offset,
+                    access.newStructure()->inferredTypeDescriptorFor(uid));
+                break;
+            }
+
+            case AccessCase::Setter: {
+                Structure* structure = access.structure();
+
+                ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
+                    structure, access.conditionSet(), uid);
+
+                switch (complexGetStatus.kind()) {
+                case ComplexGetStatus::ShouldSkip:
+                    continue;
+
+                case ComplexGetStatus::TakesSlowPath:
+                    return PutByIdStatus(slowPathState);
+
+                case ComplexGetStatus::Inlineable: {
+                    CallLinkInfo* callLinkInfo = access.callLinkInfo();
+                    ASSERT(callLinkInfo);
+                    std::unique_ptr<CallLinkStatus> callLinkStatus =
+                        std::make_unique<CallLinkStatus>(
+                            CallLinkStatus::computeFor(
+                                locker, profiledBlock, *callLinkInfo, callExitSiteData));
+
+                    variant = PutByIdVariant::setter(
+                        structure, complexGetStatus.offset(), complexGetStatus.conditionSet(),
+                        WTFMove(callLinkStatus));
+                } }
+                break;
+            }
+
+            case AccessCase::CustomValueSetter:
+            case AccessCase::CustomAccessorSetter:
+                return PutByIdStatus(MakesCalls);
+
+            default:
+                return PutByIdStatus(slowPathState);
+            }
+
+            if (!result.appendVariant(variant))
+                return PutByIdStatus(slowPathState);
+        }
+
+        return result;
     }
 
     default:
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+        return PutByIdStatus(TakesSlowPath);
     }
-#else // ENABLE(JIT)
-    return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
-#endif // ENABLE(JIT)
 }
+#endif
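// For context: the CacheType::Stub case above folds a polymorphic access list
// into a set of variants in two passes. A simplified stand-in sketch of that
// shape (these types are illustrative, not the real AccessCase/PolymorphicAccess
// API):
#include <vector>

enum class SketchState { Simple, TakesSlowPath, MakesCalls };

struct SketchAccess {
    bool doesCalls;   // e.g. a setter case that may invoke JS
    bool convertible; // can be summarized as a single variant
};

SketchState foldAccessList(const std::vector<SketchAccess>& list)
{
    // Pass 1: if any case can call out, a later failure must report MakesCalls
    // rather than TakesSlowPath, so clients of the status stay honest about
    // possible side effects.
    SketchState slowPathState = SketchState::TakesSlowPath;
    for (const SketchAccess& access : list) {
        if (access.doesCalls)
            slowPathState = SketchState::MakesCalls;
    }
    // Pass 2: translate each case into a variant, bailing out with the
    // escalated state on anything that cannot be summarized.
    for (const SketchAccess& access : list) {
        if (!access.convertible)
            return slowPathState;
    }
    return SketchState::Simple;
}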
 
-PutByIdStatus PutByIdStatus::computeFor(VM& vm, JSGlobalObject* globalObject, Structure* structure, Identifier& ident, bool isDirect)
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
 {
-    if (PropertyName(ident).asIndex() != PropertyName::NotAnIndex)
-        return PutByIdStatus(TakesSlowPath);
-
-    if (structure->typeInfo().overridesGetOwnPropertySlot())
-        return PutByIdStatus(TakesSlowPath);
+#if ENABLE(DFG_JIT)
+    if (dfgBlock) {
+        CallLinkStatus::ExitSiteData exitSiteData;
+        {
+            ConcurrentJITLocker locker(baselineBlock->m_lock);
+            if (hasExitSite(locker, baselineBlock, codeOrigin.bytecodeIndex))
+                return PutByIdStatus(TakesSlowPath);
+            exitSiteData = CallLinkStatus::computeExitSiteData(
+                locker, baselineBlock, codeOrigin.bytecodeIndex);
+        }
+
+        PutByIdStatus result;
+        {
+            ConcurrentJITLocker locker(dfgBlock->m_lock);
+            result = computeForStubInfo(
+                locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+        }
+
+        // We use TakesSlowPath in some cases where the stub was unset. That's weird and
+        // it would be better not to do that. But it means that we have to defend
+        // ourselves here.
+        if (result.isSimple())
+            return result;
+    }
+#else
+    UNUSED_PARAM(dfgBlock);
+    UNUSED_PARAM(dfgMap);
+#endif
 
-    if (!structure->propertyAccessesAreCacheable())
+    return computeFor(baselineBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
+}
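// For context: the two-block overload above implements a "trust the upper tier
// only when it is unambiguous" policy: consult the DFG block's stub first, and
// accept its answer only if it is a Simple set of variants; anything else falls
// back to the baseline profile. A stand-in sketch of that shape (the
// function-pointer plumbing is illustrative, not the real API):
struct SketchStatus {
    bool simple;
    bool isSimple() const { return simple; }
};

SketchStatus computeTiered(SketchStatus (*fromDFGStub)(), SketchStatus (*fromBaseline)())
{
    SketchStatus upper = fromDFGStub();
    // Accept the optimizing tier's answer only when it is a clean Simple set of
    // variants; as the comment above notes, an unset stub there can masquerade
    // as TakesSlowPath, so everything else re-derives from baseline profiling.
    if (upper.isSimple())
        return upper;
    return fromBaseline();
}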
+
+PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, UniquedStringImpl* uid, bool isDirect)
+{
+    if (parseIndex(*uid))
         return PutByIdStatus(TakesSlowPath);
+
+    if (set.isEmpty())
+        return PutByIdStatus();
 
-    unsigned attributes;
-    JSCell* specificValue;
-    PropertyOffset offset = structure->get(vm, ident, attributes, specificValue);
-    if (isValidOffset(offset)) {
-        if (attributes & (Accessor | ReadOnly))
+    PutByIdStatus result;
+    result.m_state = Simple;
+    for (unsigned i = 0; i < set.size(); ++i) {
+        Structure* structure = set[i];
+
+        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
             return PutByIdStatus(TakesSlowPath);
-        if (specificValue) {
-            // We need the PutById slow path to verify that we're storing the right value into
-            // the specialized slot.
+
+        if (!structure->propertyAccessesAreCacheable())
             return PutByIdStatus(TakesSlowPath);
+
+        unsigned attributes;
+        PropertyOffset offset = structure->getConcurrently(uid, attributes);
+        if (isValidOffset(offset)) {
+            if (attributes & CustomAccessor)
+                return PutByIdStatus(MakesCalls);
+
+            if (attributes & (Accessor | ReadOnly))
+                return PutByIdStatus(TakesSlowPath);
+
+            WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset);
+            if (!replaceSet || replaceSet->isStillValid()) {
+                // When this executes, it'll create, and fire, this replacement watchpoint set.
+                // That means that this has probably never executed or that something fishy is
+                // going on. Also, we cannot create or fire the watchpoint set from the concurrent
+                // JIT thread, so even if we wanted to do this, we'd need to have a lazy thingy.
+                // So, better leave this alone and take slow path.
+                return PutByIdStatus(TakesSlowPath);
+            }
+
+            PutByIdVariant variant =
+                PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
+            if (!result.appendVariant(variant))
+                return PutByIdStatus(TakesSlowPath);
+            continue;
         }
-        return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
-    }
 
-    // Our hypothesis is that we're doing a transition. Before we prove that this is really
-    // true, we want to do some sanity checks.
+        // Our hypothesis is that we're doing a transition. Before we prove that this is really
+        // true, we want to do some sanity checks.
 
-    // Don't cache put transitions on dictionaries.
-    if (structure->isDictionary())
-        return PutByIdStatus(TakesSlowPath);
+        // Don't cache put transitions on dictionaries.
+        if (structure->isDictionary())
+            return PutByIdStatus(TakesSlowPath);
 
-    // If the structure corresponds to something that isn't an object, then give up, since
-    // we don't want to be adding properties to strings.
-    if (structure->typeInfo().type() == StringType)
-        return PutByIdStatus(TakesSlowPath);
+        // If the structure corresponds to something that isn't an object, then give up, since
+        // we don't want to be adding properties to strings.
+        if (!structure->typeInfo().isObject())
+            return PutByIdStatus(TakesSlowPath);
+
+        ObjectPropertyConditionSet conditionSet;
+        if (!isDirect) {
+            conditionSet = generateConditionsForPropertySetterMissConcurrently(
+                globalObject->vm(), globalObject, structure, uid);
+            if (!conditionSet.isValid())
+                return PutByIdStatus(TakesSlowPath);
+        }
 
-    if (!isDirect) {
-        // If the prototype chain has setters or read-only properties, then give up.
-        if (structure->prototypeChainMayInterceptStoreTo(vm, ident))
+        // We only optimize if there is already a structure that the transition is cached to.
+        Structure* transition =
+            Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset);
+        if (!transition)
             return PutByIdStatus(TakesSlowPath);
-
-        // If the prototype chain hasn't been normalized (i.e. there are proxies or dictionaries)
-        // then give up. The dictionary case would only happen if this structure has not been
-        // used in an optimized put_by_id transition. And really the only reason why we would
-        // bail here is that I don't really feel like having the optimizing JIT go and flatten
-        // dictionaries if we have evidence to suggest that those objects were never used as
-        // prototypes in a cacheable prototype access - i.e. there's a good chance that some of
-        // the other checks below will fail.
-        if (!isPrototypeChainNormalized(globalObject, structure))
+        ASSERT(isValidOffset(offset));
+
+        bool didAppend = result.appendVariant(
+            PutByIdVariant::transition(
+                structure, transition, conditionSet, offset,
+                transition->inferredTypeDescriptorFor(uid)));
+        if (!didAppend)
             return PutByIdStatus(TakesSlowPath);
     }
 
-    // We only optimize if there is already a structure that the transition is cached to.
-    // Among other things, this allows us to guard against a transition with a specific
-    // value.
-    //
-    // - If we're storing a value that could be specific: this would only be a problem if
-    //   the existing transition did have a specific value already, since if it didn't,
-    //   then we would behave "as if" we were not storing a specific value. If it did
-    //   have a specific value, then we'll know - the fact that we pass 0 for
-    //   specificValue will tell us.
-    //
-    // - If we're not storing a value that could be specific: again, this would only be a
-    //   problem if the existing transition did have a specific value, which we check for
-    //   by passing 0 for the specificValue.
-    Structure* transition = Structure::addPropertyTransitionToExistingStructure(structure, ident, 0, 0, offset);
-    if (!transition)
-        return PutByIdStatus(TakesSlowPath); // This occurs in bizarre cases only. See above.
-    ASSERT(!transition->transitionDidInvolveSpecificValue());
-    ASSERT(isValidOffset(offset));
-
-    return PutByIdStatus(
-        SimpleTransition, structure, transition,
-        structure->prototypeChain(vm, globalObject), offset);
+    return result;
+}
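// For context: a minimal sketch of the replacement-watchpoint rule used in the
// replace path above (SketchWatchpointSet is a stand-in exposing only the one
// query the code uses). A still-valid set means no replacement has ever been
// observed for this property, and the set cannot be created or fired from the
// concurrent JIT thread, so the status conservatively takes the slow path.
struct SketchWatchpointSet {
    bool stillValid;
    bool isStillValid() const { return stillValid; }
};

bool canCacheReplaceSketch(const SketchWatchpointSet* replaceSet)
{
    if (!replaceSet || replaceSet->isStillValid())
        return false; // never replaced before (or set not allocated): slow path
    return true;      // set already fired: replacement is a proven-hot operation
}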
+
+bool PutByIdStatus::makesCalls() const
+{
+    if (m_state == MakesCalls)
+        return true;
+
+    if (m_state != Simple)
+        return false;
+
+    for (unsigned i = m_variants.size(); i--;) {
+        if (m_variants[i].makesCalls())
+            return true;
+    }
+
+    return false;
+}
+
+void PutByIdStatus::dump(PrintStream& out) const
+{
+    switch (m_state) {
+    case NoInformation:
+        out.print("(NoInformation)");
+        return;
+
+    case Simple:
+        out.print("(", listDump(m_variants), ")");
+        return;
+
+    case TakesSlowPath:
+        out.print("(TakesSlowPath)");
+        return;
+
+    case MakesCalls:
+        out.print("(MakesCalls)");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
 
 } // namespace JSC