Diffstat (limited to 'Source/JavaScriptCore/jit/Repatch.cpp')
-rw-r--r--  Source/JavaScriptCore/jit/Repatch.cpp | 1968
1 file changed, 1321 insertions(+), 647 deletions(-)
diff --git a/Source/JavaScriptCore/jit/Repatch.cpp b/Source/JavaScriptCore/jit/Repatch.cpp
index bd95f665a..9c31722e8 100644
--- a/Source/JavaScriptCore/jit/Repatch.cpp
+++ b/Source/JavaScriptCore/jit/Repatch.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,26 +28,21 @@
#if ENABLE(JIT)
-#include "BinarySwitch.h"
#include "CCallHelpers.h"
-#include "CallFrameShuffler.h"
+#include "CallFrameInlines.h"
#include "DFGOperations.h"
#include "DFGSpeculativeJIT.h"
#include "FTLThunks.h"
#include "GCAwareJITStubRoutine.h"
-#include "GetterSetter.h"
-#include "JIT.h"
-#include "JITInlines.h"
#include "LinkBuffer.h"
-#include "JSCInlines.h"
-#include "PolymorphicAccess.h"
+#include "Operations.h"
+#include "PolymorphicPutByIdList.h"
+#include "RepatchBuffer.h"
#include "ScratchRegisterAllocator.h"
#include "StackAlignment.h"
#include "StructureRareDataInlines.h"
#include "StructureStubClearingWatchpoint.h"
#include "ThunkGenerators.h"
-#include <wtf/CommaPrinter.h>
-#include <wtf/ListDump.h>
#include <wtf/StringPrintStream.h>
namespace JSC {
@@ -56,25 +51,32 @@ namespace JSC {
// that would ordinarily have well-known values:
// - tagTypeNumberRegister
// - tagMaskRegister
+// - callFrameRegister **
+//
+// We currently only use the callFrameRegister for closure call patching, and we're not going to
+// give the FTL closure call patching support until we switch to the C stack - but when we do that,
+// callFrameRegister will disappear.
-static FunctionPtr readCallTarget(CodeBlock* codeBlock, CodeLocationCall call)
+static FunctionPtr readCallTarget(RepatchBuffer& repatchBuffer, CodeLocationCall call)
{
FunctionPtr result = MacroAssembler::readCallTarget(call);
#if ENABLE(FTL_JIT)
+ CodeBlock* codeBlock = repatchBuffer.codeBlock();
if (codeBlock->jitType() == JITCode::FTLJIT) {
return FunctionPtr(codeBlock->vm()->ftlThunks->keyForSlowPathCallThunk(
MacroAssemblerCodePtr::createFromExecutableAddress(
result.executableAddress())).callTarget());
}
#else
- UNUSED_PARAM(codeBlock);
+ UNUSED_PARAM(repatchBuffer);
#endif // ENABLE(FTL_JIT)
return result;
}
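// A minimal model of the FTL indirection above (not JSC's API; the map and
// names are stand-ins): under FTL, a patched call points at a per-callsite
// thunk, so recovering the logical callee means mapping the raw call target
// back through a thunk registry, as keyForSlowPathCallThunk does above.
#include <cstdint>
#include <unordered_map>

struct ThunkRegistryModel {
    std::unordered_map<uintptr_t, uintptr_t> thunkToTarget;
    uintptr_t callTargetFor(uintptr_t rawCallTarget) const {
        auto it = thunkToTarget.find(rawCallTarget);
        // Unknown addresses are returned as-is, mirroring the non-FTL path.
        return it == thunkToTarget.end() ? rawCallTarget : it->second;
    }
};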
-static void repatchCall(CodeBlock* codeBlock, CodeLocationCall call, FunctionPtr newCalleeFunction)
+static void repatchCall(RepatchBuffer& repatchBuffer, CodeLocationCall call, FunctionPtr newCalleeFunction)
{
#if ENABLE(FTL_JIT)
+ CodeBlock* codeBlock = repatchBuffer.codeBlock();
if (codeBlock->jitType() == JITCode::FTLJIT) {
VM& vm = *codeBlock->vm();
FTL::Thunks& thunks = *vm.ftlThunks;
@@ -85,252 +87,654 @@ static void repatchCall(CodeBlock* codeBlock, CodeLocationCall call, FunctionPtr
newCalleeFunction = FunctionPtr(
thunks.getSlowPathCallThunk(vm, key).code().executableAddress());
}
-#else // ENABLE(FTL_JIT)
- UNUSED_PARAM(codeBlock);
#endif // ENABLE(FTL_JIT)
- MacroAssembler::repatchCall(call, newCalleeFunction);
+ repatchBuffer.relink(call, newCalleeFunction);
}
-static void repatchByIdSelfAccess(
- CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure,
- PropertyOffset offset, const FunctionPtr &slowPathFunction,
- bool compact)
+static void repatchCall(CodeBlock* codeblock, CodeLocationCall call, FunctionPtr newCalleeFunction)
{
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchCall(repatchBuffer, call, newCalleeFunction);
+}
+
+static void repatchByIdSelfAccess(VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure, const Identifier& propertyName, PropertyOffset offset,
+ const FunctionPtr &slowPathFunction, bool compact)
+{
+ if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
+ vm.registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
+
+ RepatchBuffer repatchBuffer(codeBlock);
+
// Only optimize once!
- repatchCall(codeBlock, stubInfo.callReturnLocation, slowPathFunction);
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, slowPathFunction);
// Patch the structure check & the offset of the load.
- MacroAssembler::repatchInt32(
- stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall),
- bitwise_cast<int32_t>(structure->id()));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall), structure);
+ repatchBuffer.setLoadInstructionIsActive(stubInfo.callReturnLocation.convertibleLoadAtOffset(stubInfo.patch.deltaCallToStorageLoad), isOutOfLineOffset(offset));
#if USE(JSVALUE64)
if (compact)
- MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToBase(offset));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToPatchedStorage(offset));
else
- MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToBase(offset));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToPatchedStorage(offset));
#elif USE(JSVALUE32_64)
if (compact) {
- MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToBase(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToBase(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
} else {
- MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToBase(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToBase(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
}
#endif
}
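// Sketch of the state repatchByIdSelfAccess rewrites (illustrative types,
// not JSC's): a self-access IC bakes the expected structure and the property
// offset straight into the instruction stream as immediates, plus a
// convertible load selecting inline vs. out-of-line storage.
#include <cstddef>

struct SelfAccessSiteModel {
    const void* expectedStructure; // immediate of the structure check
    std::ptrdiff_t storageOffset;  // immediate of the load/store
    bool loadsOutOfLine;           // state of the convertible load
};

void repatchSelfAccessModel(SelfAccessSiteModel& site, const void* structure,
                            std::ptrdiff_t offset, bool outOfLine)
{
    site.expectedStructure = structure; // repatch(dataLabelPtr..., structure)
    site.storageOffset = offset;        // repatch(dataLabel..., offset)
    site.loadsOutOfLine = outOfLine;    // setLoadInstructionIsActive(...)
}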
-static void resetGetByIDCheckAndLoad(StructureStubInfo& stubInfo)
+static void addStructureTransitionCheck(
+ JSCell* object, Structure* structure, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
+ MacroAssembler& jit, MacroAssembler::JumpList& failureCases, GPRReg scratchGPR)
{
- CodeLocationDataLabel32 structureLabel = stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
- if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) {
- MacroAssembler::revertJumpReplacementToPatchableBranch32WithPatch(
- MacroAssembler::startOfPatchableBranch32WithPatchOnAddress(structureLabel),
- MacroAssembler::Address(
- static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
- JSCell::structureIDOffset()),
- static_cast<int32_t>(unusedPointer));
- }
- MacroAssembler::repatchInt32(structureLabel, static_cast<int32_t>(unusedPointer));
-#if USE(JSVALUE64)
- MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
-#else
- MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
- MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
+ if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
+ structure->addTransitionWatchpoint(stubInfo.addWatchpoint(codeBlock));
+#if !ASSERT_DISABLED
+ // If we execute this code, the object must have the structure we expect. Assert
+ // this in debug modes.
+ jit.move(MacroAssembler::TrustedImmPtr(object), scratchGPR);
+ MacroAssembler::Jump ok = jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(scratchGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(structure));
+ jit.breakpoint();
+ ok.link(&jit);
#endif
+ return;
+ }
+
+ jit.move(MacroAssembler::TrustedImmPtr(object), scratchGPR);
+ failureCases.append(
+ jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(scratchGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(structure)));
}
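// The decision above, reduced to plain C++ (types are stand-ins): when the
// object already has the expected structure and that structure's transition
// watchpoint set is still valid, the stub buys a check-free fast path by
// registering a watchpoint; a later transition fires the watchpoint and
// jettisons the stub. Otherwise it pays for a branch on every execution.
struct StructureModel { bool transitionWatchpointSetIsStillValid; };
struct StubEmitterModel {
    void registerTransitionWatchpoint() {} // addTransitionWatchpoint(...)
    void emitStructureCheck() {}           // branchPtr(NotEqual, ...)
};

void addCheckOrWatchpoint(StubEmitterModel& jit, const StructureModel& s,
                          bool objectHasExpectedStructure)
{
    if (objectHasExpectedStructure && s.transitionWatchpointSetIsStillValid) {
        jit.registerTransitionWatchpoint(); // zero cost per execution
        return;
    }
    jit.emitStructureCheck(); // checked every time the stub runs
}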
-static void resetPutByIDCheckAndLoad(StructureStubInfo& stubInfo)
+static void addStructureTransitionCheck(
+ JSValue prototype, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
+ MacroAssembler& jit, MacroAssembler::JumpList& failureCases, GPRReg scratchGPR)
{
- CodeLocationDataLabel32 structureLabel = stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
- if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) {
- MacroAssembler::revertJumpReplacementToPatchableBranch32WithPatch(
- MacroAssembler::startOfPatchableBranch32WithPatchOnAddress(structureLabel),
- MacroAssembler::Address(
- static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
- JSCell::structureIDOffset()),
- static_cast<int32_t>(unusedPointer));
- }
- MacroAssembler::repatchInt32(structureLabel, static_cast<int32_t>(unusedPointer));
-#if USE(JSVALUE64)
- MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
-#else
- MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
- MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
-#endif
+ if (prototype.isNull())
+ return;
+
+ ASSERT(prototype.isCell());
+
+ addStructureTransitionCheck(
+ prototype.asCell(), prototype.asCell()->structure(), codeBlock, stubInfo, jit,
+ failureCases, scratchGPR);
}
-static void replaceWithJump(StructureStubInfo& stubInfo, const MacroAssemblerCodePtr target)
+static void replaceWithJump(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo, const MacroAssemblerCodePtr target)
{
- if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) {
- MacroAssembler::replaceWithJump(
- MacroAssembler::startOfPatchableBranch32WithPatchOnAddress(
- stubInfo.callReturnLocation.dataLabel32AtOffset(
+ if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
+ repatchBuffer.replaceWithJump(
+ RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(
+ stubInfo.callReturnLocation.dataLabelPtrAtOffset(
-(intptr_t)stubInfo.patch.deltaCheckImmToCall)),
CodeLocationLabel(target));
return;
}
-
- resetGetByIDCheckAndLoad(stubInfo);
- MacroAssembler::repatchJump(
+ repatchBuffer.relink(
stubInfo.callReturnLocation.jumpAtOffset(
stubInfo.patch.deltaCallToJump),
CodeLocationLabel(target));
}
-enum InlineCacheAction {
- GiveUpOnCache,
- RetryCacheLater,
- AttemptToCache
-};
-
-static InlineCacheAction actionForCell(VM& vm, JSCell* cell)
+static void emitRestoreScratch(MacroAssembler& stubJit, bool needToRestoreScratch, GPRReg scratchGPR, MacroAssembler::Jump& success, MacroAssembler::Jump& fail, MacroAssembler::JumpList failureCases)
{
- Structure* structure = cell->structure(vm);
-
- TypeInfo typeInfo = structure->typeInfo();
- if (typeInfo.prohibitsPropertyCaching())
- return GiveUpOnCache;
-
- if (structure->isUncacheableDictionary()) {
- if (structure->hasBeenFlattenedBefore())
- return GiveUpOnCache;
- // Flattening could have changed the offset, so return early for another try.
- asObject(cell)->flattenDictionaryObject(vm);
- return RetryCacheLater;
+ if (needToRestoreScratch) {
+ stubJit.popToRestore(scratchGPR);
+
+ success = stubJit.jump();
+
+ // link failure cases here, so we can pop scratchGPR, and then jump back.
+ failureCases.link(&stubJit);
+
+ stubJit.popToRestore(scratchGPR);
+
+ fail = stubJit.jump();
+ return;
}
- if (!structure->propertyAccessesAreCacheable())
- return GiveUpOnCache;
+ success = stubJit.jump();
+}
- return AttemptToCache;
+static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratch, MacroAssembler::Jump success, MacroAssembler::Jump fail, MacroAssembler::JumpList failureCases, CodeLocationLabel successLabel, CodeLocationLabel slowCaseBegin)
+{
+ patchBuffer.link(success, successLabel);
+
+ if (needToRestoreScratch) {
+ patchBuffer.link(fail, slowCaseBegin);
+ return;
+ }
+
+ // link failure cases directly back to normal path
+ patchBuffer.link(failureCases, slowCaseBegin);
}
-static bool forceICFailure(ExecState*)
+static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratch, StructureStubInfo& stubInfo, MacroAssembler::Jump success, MacroAssembler::Jump fail, MacroAssembler::JumpList failureCases)
{
- return Options::forceICFailure();
+ linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
}
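// emitRestoreScratch and linkRestoreScratch are two halves of one invariant:
// if the stub had to pushToSave() a scratch GPR on entry, then *every* exit
// (success and failure alike) must pop it before jumping back, which is why
// failure cases are routed through the stub instead of branching straight to
// the slow case. A tiny emulation of that invariant (simplified types):
#include <cassert>
#include <vector>

struct ScratchStackModel {
    std::vector<int> saved;
    void pushToSave(int reg) { saved.push_back(reg); }
    void popToRestore(int reg)
    {
        assert(!saved.empty() && saved.back() == reg);
        saved.pop_back();
    }
};

void exitStub(ScratchStackModel& stack, bool pushedScratch, int scratchReg)
{
    if (pushedScratch)
        stack.popToRestore(scratchReg); // done on success *and* failure
    assert(stack.saved.empty()); // caller must see a balanced stack
}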
-static InlineCacheAction tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+enum ProtoChainGenerationResult {
+ ProtoChainGenerationFailed,
+ ProtoChainGenerationSucceeded
+};
+
+static ProtoChainGenerationResult generateProtoChainAccessStub(ExecState*, const PropertySlot&, const Identifier&, StructureStubInfo&, StructureChain*, size_t, PropertyOffset, Structure*, CodeLocationLabel, CodeLocationLabel, RefPtr<JITStubRoutine>&) WARN_UNUSED_RETURN;
+static ProtoChainGenerationResult generateProtoChainAccessStub(ExecState* exec, const PropertySlot& slot, const Identifier& propertyName, StructureStubInfo& stubInfo, StructureChain* chain, size_t count, PropertyOffset offset, Structure* structure, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel, RefPtr<JITStubRoutine>& stubRoutine)
{
- if (forceICFailure(exec))
- return GiveUpOnCache;
+ VM* vm = &exec->vm();
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
+#endif
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
+ GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
+ bool needToRestoreScratch = scratchGPR == InvalidGPRReg;
+ if (needToRestoreScratch && !slot.isCacheableValue())
+ return ProtoChainGenerationFailed;
- // FIXME: Cache property access for immediates.
- if (!baseValue.isCell())
- return GiveUpOnCache;
+ CCallHelpers stubJit(&exec->vm(), exec->codeBlock());
+ if (needToRestoreScratch) {
+#if USE(JSVALUE64)
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
+#else
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
+#endif
+ stubJit.pushToSave(scratchGPR);
+ needToRestoreScratch = true;
+ }
+
+ MacroAssembler::JumpList failureCases;
+
+ failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure)));
CodeBlock* codeBlock = exec->codeBlock();
- VM& vm = exec->vm();
+ if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
+ vm->registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
+
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(exec));
+ Structure* protoStructure = protoObject->structure();
+ if (protoStructure->typeInfo().newImpurePropertyFiresWatchpoints())
+ vm->registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
+ addStructureTransitionCheck(
+ protoObject, protoStructure, codeBlock, stubInfo, stubJit,
+ failureCases, scratchGPR);
+ currStructure = it->get();
+ }
+
+ bool isAccessor = slot.isCacheableGetter() || slot.isCacheableCustom();
+ if (isAccessor)
+ stubJit.move(baseGPR, scratchGPR);
+
+ if (!slot.isCacheableCustom()) {
+ if (isInlineOffset(offset)) {
+#if USE(JSVALUE64)
+ stubJit.load64(protoObject->locationForOffset(offset), resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.move(MacroAssembler::TrustedImmPtr(protoObject->locationForOffset(offset)), resultGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+#endif
+ } else {
+ stubJit.loadPtr(protoObject->butterflyAddress(), resultGPR);
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>)), resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+#endif
+ }
+ }
+ MacroAssembler::Call operationCall;
+ MacroAssembler::Call handlerCall;
+ FunctionPtr operationFunction;
+ MacroAssembler::Jump success, fail;
+ if (isAccessor) {
+ GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
+ if (slot.isCacheableGetter()) {
+ stubJit.setupArguments(callFrameRegister, scratchGPR, resultGPR);
+ operationFunction = operationCallGetter;
+ } else {
+ stubJit.move(MacroAssembler::TrustedImmPtr(protoObject), scratchGPR);
+ stubJit.setupArguments(callFrameRegister, scratchGPR,
+ MacroAssembler::TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()),
+ MacroAssembler::TrustedImmPtr(propertyName.impl()));
+ operationFunction = operationCallCustomGetter;
+ }
- std::unique_ptr<AccessCase> newCase;
+ // Need to make sure that whenever this call is made in the future, we remember the
+ // place that we made it from. It just so happens to be the place that we are at
+ // right now!
+ stubJit.store32(MacroAssembler::TrustedImm32(exec->locationAsRawBits()),
+ CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
- if (isJSArray(baseValue) && propertyName == exec->propertyNames().length)
- newCase = AccessCase::getLength(vm, codeBlock, AccessCase::ArrayLength);
- else if (isJSString(baseValue) && propertyName == exec->propertyNames().length)
- newCase = AccessCase::getLength(vm, codeBlock, AccessCase::StringLength);
- else {
- if (!slot.isCacheable() && !slot.isUnset())
- return GiveUpOnCache;
-
- ObjectPropertyConditionSet conditionSet;
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure(vm);
-
- bool loadTargetFromProxy = false;
- if (baseCell->type() == PureForwardingProxyType) {
- baseValue = jsCast<JSProxy*>(baseCell)->target();
- baseCell = baseValue.asCell();
- structure = baseCell->structure(vm);
- loadTargetFromProxy = true;
+ operationCall = stubJit.call();
+#if USE(JSVALUE64)
+ stubJit.move(GPRInfo::returnValueGPR, resultGPR);
+#else
+ stubJit.setupResults(resultGPR, resultTagGPR);
+#endif
+ MacroAssembler::Jump noException = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+ stubJit.setupArgumentsExecState();
+ handlerCall = stubJit.call();
+ stubJit.jumpToExceptionHandler();
+
+ noException.link(&stubJit);
+ }
+ emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
+
+ LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
+
+ linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
+ if (isAccessor) {
+ patchBuffer.link(operationCall, operationFunction);
+ patchBuffer.link(handlerCall, lookupExceptionHandler);
+ }
+ stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
+ patchBuffer,
+ ("DFG prototype chain access stub for %s, return point %p",
+ toCString(*exec->codeBlock()).data(), successLabel.executableAddress()));
+ return ProtoChainGenerationSucceeded;
+}
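// What the generated stub computes, written as ordinary C++ (all types are
// stand-ins; the real stub bakes the prototype pointers and structures in as
// constants): one structure check per hop, then a single load from the final
// prototype's inline or butterfly (out-of-line) storage.
#include <cstddef>

struct ChainObjectModel {
    const void* structure;
    const ChainObjectModel* prototype;
    const long* inlineStorage; // first properties live in the cell
    const long* butterfly;     // the rest live out of line
};

bool protoChainGetModel(const ChainObjectModel* base,
                        const void* const* expectedStructures, std::size_t count,
                        bool inlineOffset, std::size_t offset, long& result)
{
    const ChainObjectModel* object = base;
    for (std::size_t i = 0; i <= count; ++i) {
        if (object->structure != expectedStructures[i])
            return false; // failureCases -> slow case label
        if (i < count)
            object = object->prototype;
    }
    result = inlineOffset ? object->inlineStorage[offset] : object->butterfly[offset];
    return true;
}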
+
+static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ // FIXME: Write a test that proves we need to check for recursion here just
+ // like the interpreter does, then add a check for recursion.
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM* vm = &exec->vm();
+
+ if (isJSArray(baseValue) && propertyName == exec->propertyNames().length) {
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
+#endif
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
+ GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
+ bool needToRestoreScratch = false;
+
+ MacroAssembler stubJit;
+
+ if (scratchGPR == InvalidGPRReg) {
+#if USE(JSVALUE64)
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
+#else
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
+#endif
+ stubJit.pushToSave(scratchGPR);
+ needToRestoreScratch = true;
}
+
+ MacroAssembler::JumpList failureCases;
+
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSCell::structureOffset()), scratchGPR);
+ stubJit.load8(MacroAssembler::Address(scratchGPR, Structure::indexingTypeOffset()), scratchGPR);
+ failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IsArray)));
+ failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IndexingShapeMask)));
+
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+ stubJit.load32(MacroAssembler::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
+ failureCases.append(stubJit.branch32(MacroAssembler::LessThan, scratchGPR, MacroAssembler::TrustedImm32(0)));
+
+ stubJit.move(scratchGPR, resultGPR);
+#if USE(JSVALUE64)
+ stubJit.or64(AssemblyHelpers::TrustedImm64(TagTypeNumber), resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.move(AssemblyHelpers::TrustedImm32(0xffffffff), resultTagGPR); // JSValue::Int32Tag
+#endif
+
+ MacroAssembler::Jump success, fail;
+
+ emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
+
+ LinkBuffer patchBuffer(*vm, &stubJit, codeBlock);
+
+ linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases);
+
+ stubInfo.stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
+ patchBuffer,
+ ("DFG GetById array length stub for %s, return point %p",
+ toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
+ stubInfo.patch.deltaCallToDone).executableAddress()));
+
+ RepatchBuffer repatchBuffer(codeBlock);
+ replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code());
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetById);
+
+ return true;
+ }
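// Why the stub above bails when the length looks "negative": array lengths
// are uint32, but an Int32 JSValue can only encode values below 2^31, so a
// length with the high bit set cannot be boxed on this fast path. A sketch
// of the 64-bit boxing (TagTypeNumber usage assumed, as in the stub):
#include <cstdint>

bool boxArrayLengthModel(uint32_t length, int64_t tagTypeNumber, int64_t& boxed)
{
    int32_t asInt32 = static_cast<int32_t>(length);
    if (asInt32 < 0)
        return false; // branch32(LessThan, scratchGPR, TrustedImm32(0))
    boxed = tagTypeNumber | asInt32; // or64(TagTypeNumber, resultGPR)
    return true;
}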
+
+ // FIXME: should support length access for String.
- InlineCacheAction action = actionForCell(vm, baseCell);
- if (action != AttemptToCache)
- return action;
-
- // Optimize self access.
- if (stubInfo.cacheType == CacheType::Unset
- && slot.isCacheableValue()
- && slot.slotBase() == baseValue
- && !slot.watchpointSet()
- && isInlineOffset(slot.cachedOffset())
- && MacroAssembler::isCompactPtrAlignedAddressOffset(maxOffsetRelativeToBase(slot.cachedOffset()))
- && action == AttemptToCache
- && !structure->needImpurePropertyWatchpoint()
- && !loadTargetFromProxy) {
- structure->startWatchingPropertyForReplacements(vm, slot.cachedOffset());
- repatchByIdSelfAccess(codeBlock, stubInfo, structure, slot.cachedOffset(), operationGetByIdOptimize, true);
- stubInfo.initGetByIdSelf(codeBlock, structure, slot.cachedOffset());
- return RetryCacheLater;
+ // FIXME: Cache property access for immediates.
+ if (!baseValue.isCell())
+ return false;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+ if (!slot.isCacheable())
+ return false;
+ if (!structure->propertyAccessesAreCacheable())
+ return false;
+
+ // Optimize self access.
+ if (slot.slotBase() == baseValue) {
+ if (!slot.isCacheableValue()
+ || !MacroAssembler::isCompactPtrAlignedAddressOffset(maxOffsetRelativeToPatchedStorage(slot.cachedOffset()))) {
+ repatchCall(codeBlock, stubInfo.callReturnLocation, operationGetByIdBuildList);
+ return true;
}
- PropertyOffset offset = slot.isUnset() ? invalidOffset : slot.cachedOffset();
+ repatchByIdSelfAccess(*vm, codeBlock, stubInfo, structure, propertyName, slot.cachedOffset(), operationGetByIdBuildList, true);
+ stubInfo.initGetByIdSelf(*vm, codeBlock->ownerExecutable(), structure);
+ return true;
+ }
+
+ if (structure->isDictionary())
+ return false;
+
+ if (!stubInfo.patch.registersFlushed) {
+ // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
+ // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
+ // if registers were not flushed, don't do non-Value caching.
+ if (!slot.isCacheableValue())
+ return false;
+ }
+
+ PropertyOffset offset = slot.cachedOffset();
+ size_t count = normalizePrototypeChainForChainAccess(exec, baseValue, slot.slotBase(), propertyName, offset);
+ if (count == InvalidPrototypeChain)
+ return false;
+
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+ if (generateProtoChainAccessStub(exec, slot, propertyName, stubInfo, prototypeChain, count, offset,
+ structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone),
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase), stubInfo.stubRoutine) == ProtoChainGenerationFailed)
+ return false;
+
+ RepatchBuffer repatchBuffer(codeBlock);
+ replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code());
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetByIdBuildList);
+
+ stubInfo.initGetByIdChain(*vm, codeBlock->ownerExecutable(), structure, prototypeChain, count, slot.isCacheableValue());
+ return true;
+}
+
+void repatchGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
+
+ bool cached = tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo);
+ if (!cached)
+ repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
+}
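// The driver shape in outline (illustrative): a single failed caching
// attempt permanently pins the call site to the generic slow path, so later
// executions never pay for another attempt.
template<typename TryCacheFn, typename RepatchFn>
void optimizeOnceModel(TryCacheFn tryCache, RepatchFn repatchToGeneric)
{
    if (!tryCache())
        repatchToGeneric(); // e.g. relink to operationGetById
}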
- if (slot.isUnset() || slot.slotBase() != baseValue) {
- if (structure->typeInfo().prohibitsPropertyCaching() || structure->isDictionary())
- return GiveUpOnCache;
-
- if (slot.isUnset() && structure->typeInfo().getOwnPropertySlotIsImpureForPropertyAbsence())
- return GiveUpOnCache;
+static bool getPolymorphicStructureList(
+ VM* vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
+ PolymorphicAccessStructureList*& polymorphicStructureList, int& listIndex,
+ CodeLocationLabel& slowCase)
+{
+ slowCase = stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase);
+
+ if (stubInfo.accessType == access_unset) {
+ RELEASE_ASSERT(!stubInfo.stubRoutine);
+ polymorphicStructureList = new PolymorphicAccessStructureList();
+ stubInfo.initGetByIdSelfList(polymorphicStructureList, 0, false);
+ listIndex = 0;
+ } else if (stubInfo.accessType == access_get_by_id_self) {
+ RELEASE_ASSERT(!stubInfo.stubRoutine);
+ polymorphicStructureList = new PolymorphicAccessStructureList(*vm, codeBlock->ownerExecutable(), JITStubRoutine::createSelfManagedRoutine(slowCase), stubInfo.u.getByIdSelf.baseObjectStructure.get(), true);
+ stubInfo.initGetByIdSelfList(polymorphicStructureList, 1, true);
+ listIndex = 1;
+ } else if (stubInfo.accessType == access_get_by_id_chain) {
+ RELEASE_ASSERT(!!stubInfo.stubRoutine);
+ slowCase = CodeLocationLabel(stubInfo.stubRoutine->code().code());
+ polymorphicStructureList = new PolymorphicAccessStructureList(*vm, codeBlock->ownerExecutable(), stubInfo.stubRoutine, stubInfo.u.getByIdChain.baseObjectStructure.get(), stubInfo.u.getByIdChain.chain.get(), true);
+ stubInfo.stubRoutine.clear();
+ stubInfo.initGetByIdSelfList(polymorphicStructureList, 1, false);
+ listIndex = 1;
+ } else {
+ RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_self_list);
+ polymorphicStructureList = stubInfo.u.getByIdSelfList.structureList;
+ listIndex = stubInfo.u.getByIdSelfList.listSize;
+ slowCase = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code());
+ }
+
+ if (listIndex == POLYMORPHIC_LIST_CACHE_SIZE)
+ return false;
+
+ RELEASE_ASSERT(listIndex < POLYMORPHIC_LIST_CACHE_SIZE);
+ return true;
+}
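// The state migration above, in outline (stand-in types): whatever
// monomorphic stub existed is folded in as the first list entry, the
// previous chain stub doubles as the new slow-case target, and a full list
// means we stop caching. A compact model:
#include <cstddef>
#include <vector>

enum class AccessStateModel { Unset, Self, Chain, SelfList };

struct StubStateModel {
    AccessStateModel state = AccessStateModel::Unset;
    std::vector<int> list; // one entry per cached case
};

bool makePolymorphicModel(StubStateModel& stub, std::size_t capacity)
{
    if (stub.state == AccessStateModel::Self || stub.state == AccessStateModel::Chain)
        stub.list.push_back(0); // seed with the pre-existing case
    stub.state = AccessStateModel::SelfList;
    return stub.list.size() < capacity; // POLYMORPHIC_LIST_CACHE_SIZE
}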
+
+static void patchJumpToGetByIdStub(CodeBlock* codeBlock, StructureStubInfo& stubInfo, JITStubRoutine* stubRoutine)
+{
+ RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_self_list);
+ RepatchBuffer repatchBuffer(codeBlock);
+ if (stubInfo.u.getByIdSelfList.didSelfPatching) {
+ repatchBuffer.relink(
+ stubInfo.callReturnLocation.jumpAtOffset(
+ stubInfo.patch.deltaCallToJump),
+ CodeLocationLabel(stubRoutine->code().code()));
+ return;
+ }
+
+ replaceWithJump(repatchBuffer, stubInfo, stubRoutine->code().code());
+}
+
+static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& ident, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ if (!baseValue.isCell()
+ || !slot.isCacheable()
+ || !baseValue.asCell()->structure()->propertyAccessesAreCacheable())
+ return false;
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM* vm = &exec->vm();
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+
+ if (slot.slotBase() == baseValue) {
+ if (!stubInfo.patch.registersFlushed) {
+ // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
+ // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
+ // if registers were not flushed, don't do non-Value caching.
+ if (!slot.isCacheableValue())
+ return false;
+ }
+
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex;
+ CodeLocationLabel slowCase;
- if (slot.isUnset()) {
- conditionSet = generateConditionsForPropertyMiss(
- vm, codeBlock, exec, structure, propertyName.impl());
+ if (!getPolymorphicStructureList(vm, codeBlock, stubInfo, polymorphicStructureList, listIndex, slowCase))
+ return false;
+
+ stubInfo.u.getByIdSelfList.listSize++;
+
+ GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
+#endif
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
+ GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
+
+ CCallHelpers stubJit(vm, codeBlock);
+
+ MacroAssembler::Jump wrongStruct = stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure));
+
+ // The strategy we use for stubs is as follows:
+ // 1) Call DFG helper that calls the getter.
+ // 2) Check if there was an exception, and if there was, call yet another
+ // helper.
+
+ bool isDirect = false;
+ MacroAssembler::Call operationCall;
+ MacroAssembler::Call handlerCall;
+ FunctionPtr operationFunction;
+ MacroAssembler::Jump success;
+
+ if (slot.isCacheableGetter() || slot.isCacheableCustom()) {
+ if (slot.isCacheableGetter()) {
+ ASSERT(scratchGPR != InvalidGPRReg);
+ ASSERT(baseGPR != scratchGPR);
+ if (isInlineOffset(slot.cachedOffset())) {
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+#else
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+#endif
+ } else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+#else
+ stubJit.load32(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+#endif
+ }
+ stubJit.setupArguments(callFrameRegister, baseGPR, scratchGPR);
+ operationFunction = operationCallGetter;
} else {
- conditionSet = generateConditionsForPrototypePropertyHit(
- vm, codeBlock, exec, structure, slot.slotBase(),
- propertyName.impl());
+ stubJit.setupArguments(
+ callFrameRegister, baseGPR,
+ MacroAssembler::TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()),
+ MacroAssembler::TrustedImmPtr(ident.impl()));
+ operationFunction = operationCallCustomGetter;
}
- if (!conditionSet.isValid())
- return GiveUpOnCache;
-
- offset = slot.isUnset() ? invalidOffset : conditionSet.slotBaseCondition().offset();
+ // Need to make sure that whenever this call is made in the future, we remember the
+ // place that we made it from. It just so happens to be the place that we are at
+ // right now!
+ stubJit.store32(
+ MacroAssembler::TrustedImm32(exec->locationAsRawBits()),
+ CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
+
+ operationCall = stubJit.call();
+#if USE(JSVALUE64)
+ stubJit.move(GPRInfo::returnValueGPR, resultGPR);
+#else
+ stubJit.setupResults(resultGPR, resultTagGPR);
+#endif
+ success = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+ stubJit.setupArgumentsExecState();
+ handlerCall = stubJit.call();
+ stubJit.jumpToExceptionHandler();
+ } else {
+ if (isInlineOffset(slot.cachedOffset())) {
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
+#else
+ if (baseGPR == resultTagGPR) {
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ } else {
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+ }
+#endif
+ } else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
+#else
+ stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+#endif
+ }
+ success = stubJit.jump();
+ isDirect = true;
}
- JSFunction* getter = nullptr;
- if (slot.isCacheableGetter())
- getter = jsDynamicCast<JSFunction*>(slot.getterSetter()->getter());
-
- if (!loadTargetFromProxy && getter && AccessCase::canEmitIntrinsicGetter(getter, structure))
- newCase = AccessCase::getIntrinsic(vm, codeBlock, getter, slot.cachedOffset(), structure, conditionSet);
- else {
- AccessCase::AccessType type;
- if (slot.isCacheableValue())
- type = AccessCase::Load;
- else if (slot.isUnset())
- type = AccessCase::Miss;
- else if (slot.isCacheableGetter())
- type = AccessCase::Getter;
- else if (slot.attributes() & CustomAccessor)
- type = AccessCase::CustomAccessorGetter;
- else
- type = AccessCase::CustomValueGetter;
-
- newCase = AccessCase::get(
- vm, codeBlock, type, offset, structure, conditionSet, loadTargetFromProxy,
- slot.watchpointSet(), slot.isCacheableCustom() ? slot.customGetter() : nullptr,
- slot.isCacheableCustom() ? slot.slotBase() : nullptr);
+ LinkBuffer patchBuffer(*vm, &stubJit, codeBlock);
+
+ patchBuffer.link(wrongStruct, slowCase);
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
+ if (!isDirect) {
+ patchBuffer.link(operationCall, operationFunction);
+ patchBuffer.link(handlerCall, lookupExceptionHandler);
}
+
+ RefPtr<JITStubRoutine> stubRoutine =
+ createJITStubRoutine(
+ FINALIZE_DFG_CODE(
+ patchBuffer,
+ ("DFG GetById polymorphic list access for %s, return point %p",
+ toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
+ stubInfo.patch.deltaCallToDone).executableAddress())),
+ *vm,
+ codeBlock->ownerExecutable(),
+ slot.isCacheableGetter() || slot.isCacheableCustom());
+
+ polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);
+
+ patchJumpToGetByIdStub(codeBlock, stubInfo, stubRoutine.get());
+ return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
}
+
+ if (baseValue.asCell()->structure()->typeInfo().prohibitsPropertyCaching()
+ || baseValue.asCell()->structure()->isDictionary())
+ return false;
+
+ if (!stubInfo.patch.registersFlushed) {
+ // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
+ // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
+ // if registers were not flushed, don't do non-Value caching.
+ if (!slot.isCacheableValue())
+ return false;
+ }
+
- MacroAssemblerCodePtr codePtr =
- stubInfo.addAccessCase(codeBlock, propertyName, WTFMove(newCase));
-
- if (!codePtr)
- return GiveUpOnCache;
+ PropertyOffset offset = slot.cachedOffset();
+ size_t count = normalizePrototypeChainForChainAccess(exec, baseValue, slot.slotBase(), ident, offset);
+ if (count == InvalidPrototypeChain)
+ return false;
- replaceWithJump(stubInfo, codePtr);
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex;
+ CodeLocationLabel slowCase;
+ if (!getPolymorphicStructureList(vm, codeBlock, stubInfo, polymorphicStructureList, listIndex, slowCase))
+ return false;
+
+ stubInfo.u.getByIdProtoList.listSize++;
- return RetryCacheLater;
+ RefPtr<JITStubRoutine> stubRoutine;
+
+ if (generateProtoChainAccessStub(exec, slot, ident, stubInfo, prototypeChain, count, offset, structure,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone),
+ slowCase, stubRoutine) == ProtoChainGenerationFailed)
+ return false;
+
+ polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, slot.isCacheableValue());
+
+ patchJumpToGetByIdStub(codeBlock, stubInfo, stubRoutine.get());
+
+ return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
}
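// The two-step accessor strategy the comments above describe, rendered as
// C++ control flow (stand-in signatures; the real helpers take an ExecState
// and the exception handler never returns):
struct VMStateModel { bool pendingException = false; };

long callGetterViaStubModel(VMStateModel& vm, long (*getterHelper)(long),
                            long base, void (*exceptionHandler)(VMStateModel&))
{
    long result = getterHelper(base); // 1) operationCallGetter / ...CustomGetter
    if (vm.pendingException)          // 2) inverted exception check
        exceptionHandler(vm);         //    lookupExceptionHandler, then unwind
    return result;
}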
-void repatchGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+void buildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
{
GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- if (tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo) == GiveUpOnCache)
+ bool dontChangeCall = tryBuildGetByIDList(exec, baseValue, propertyName, slot, stubInfo);
+ if (!dontChangeCall)
repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
}
@@ -346,592 +750,862 @@ static V_JITOperation_ESsiJJI appropriateGenericPutByIdFunction(const PutPropert
return operationPutByIdNonStrict;
}
-static V_JITOperation_ESsiJJI appropriateOptimizingPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
+static V_JITOperation_ESsiJJI appropriateListBuildingPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
{
if (slot.isStrictMode()) {
if (putKind == Direct)
- return operationPutByIdDirectStrictOptimize;
- return operationPutByIdStrictOptimize;
+ return operationPutByIdDirectStrictBuildList;
+ return operationPutByIdStrictBuildList;
}
if (putKind == Direct)
- return operationPutByIdDirectNonStrictOptimize;
- return operationPutByIdNonStrictOptimize;
+ return operationPutByIdDirectNonStrictBuildList;
+ return operationPutByIdNonStrictBuildList;
}
-static InlineCacheAction tryCachePutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+#if ENABLE(GGC)
+static MacroAssembler::Call storeToWriteBarrierBuffer(CCallHelpers& jit, GPRReg cell, GPRReg scratch1, GPRReg scratch2, GPRReg callFrameRegister, ScratchRegisterAllocator& allocator)
{
- if (forceICFailure(exec))
- return GiveUpOnCache;
-
- CodeBlock* codeBlock = exec->codeBlock();
- VM& vm = exec->vm();
+ ASSERT(scratch1 != scratch2);
+ WriteBarrierBuffer* writeBarrierBuffer = &jit.vm()->heap.writeBarrierBuffer();
+ jit.move(MacroAssembler::TrustedImmPtr(writeBarrierBuffer), scratch1);
+ jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2);
+ MacroAssembler::Jump needToFlush = jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset()));
+
+ jit.add32(MacroAssembler::TrustedImm32(1), scratch2);
+ jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()));
+
+ jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1);
+ // We use an offset of -sizeof(void*) because we already added 1 to scratch2.
+ jit.storePtr(cell, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));
+
+ MacroAssembler::Jump done = jit.jump();
+ needToFlush.link(&jit);
+
+ ScratchBuffer* scratchBuffer = jit.vm()->scratchBufferForSize(allocator.desiredScratchBufferSize());
+ allocator.preserveUsedRegistersToScratchBuffer(jit, scratchBuffer, scratch1);
+
+ unsigned bytesFromBase = allocator.numberOfReusedRegisters() * sizeof(void*);
+ unsigned bytesToSubtract = 0;
+#if CPU(X86)
+ bytesToSubtract += 2 * sizeof(void*);
+ bytesFromBase += bytesToSubtract;
+#endif
+ unsigned currentAlignment = bytesFromBase % stackAlignmentBytes();
+ bytesToSubtract += currentAlignment;
- if (!baseValue.isCell())
- return GiveUpOnCache;
+ if (bytesToSubtract)
+ jit.subPtr(MacroAssembler::TrustedImm32(bytesToSubtract), MacroAssembler::stackPointerRegister);
+
+ jit.setupArguments(callFrameRegister, cell);
+ MacroAssembler::Call call = jit.call();
+
+ if (bytesToSubtract)
+ jit.addPtr(MacroAssembler::TrustedImm32(bytesToSubtract), MacroAssembler::stackPointerRegister);
+ allocator.restoreUsedRegistersFromScratchBuffer(jit, scratchBuffer, scratch1);
+
+ done.link(&jit);
+
+ return call;
+}
+
+static MacroAssembler::Call writeBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, GPRReg callFrameRegister, ScratchRegisterAllocator& allocator)
+{
+ ASSERT(owner != scratch1);
+ ASSERT(owner != scratch2);
+
+ MacroAssembler::Jump definitelyNotMarked = DFG::SpeculativeJIT::genericWriteBarrier(jit, owner, scratch1, scratch2);
+ MacroAssembler::Call call = storeToWriteBarrierBuffer(jit, owner, scratch1, scratch2, callFrameRegister, allocator);
+ definitelyNotMarked.link(&jit);
+ return call;
+}
+#endif // ENABLE(GGC)
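// The fast/slow split of the write barrier above, as plain C++ (buffer
// layout is simplified; flush stands in for the out-of-line call to
// operationFlushWriteBarrierBuffer with registers spilled around it):
#include <cstddef>

struct BarrierBufferModel {
    void** slots;
    std::size_t currentIndex;
    std::size_t capacity;
};

template<typename FlushFn>
void recordCellModel(BarrierBufferModel& buffer, void* cell, FlushFn flush)
{
    if (buffer.currentIndex >= buffer.capacity) { // needToFlush
        flush(cell); // slow path: spill registers, align stack, call out
        return;
    }
    buffer.slots[buffer.currentIndex++] = cell; // common inlined path
}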
+
+static void emitPutReplaceStub(
+ ExecState* exec,
+ JSValue,
+ const Identifier&,
+ const PutPropertySlot& slot,
+ StructureStubInfo& stubInfo,
+ PutKind,
+ Structure* structure,
+ CodeLocationLabel failureLabel,
+ RefPtr<JITStubRoutine>& stubRoutine)
+{
+ VM* vm = &exec->vm();
+#if ENABLE(GGC)
+ GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
+#endif
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
+#endif
+ GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
+
+ ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+ allocator.lock(baseGPR);
+#if USE(JSVALUE32_64)
+ allocator.lock(valueTagGPR);
+#endif
+ allocator.lock(valueGPR);
- if (!slot.isCacheablePut() && !slot.isCacheableCustom() && !slot.isCacheableSetter())
- return GiveUpOnCache;
+ GPRReg scratchGPR1 = allocator.allocateScratchGPR();
+#if ENABLE(GGC)
+ GPRReg scratchGPR2 = allocator.allocateScratchGPR();
+#endif
- if (!structure->propertyAccessesAreCacheable())
- return GiveUpOnCache;
+ CCallHelpers stubJit(vm, exec->codeBlock());
+
+ allocator.preserveReusedRegistersByPushing(stubJit);
- std::unique_ptr<AccessCase> newCase;
+ MacroAssembler::Jump badStructure = stubJit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(baseGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(structure));
- if (slot.base() == baseValue && slot.isCacheablePut()) {
- if (slot.type() == PutPropertySlot::ExistingProperty) {
- structure->didCachePropertyReplacement(vm, slot.cachedOffset());
+#if USE(JSVALUE64)
+ if (isInlineOffset(slot.cachedOffset()))
+ stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
+ else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
+ stubJit.store64(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
+ }
+#elif USE(JSVALUE32_64)
+ if (isInlineOffset(slot.cachedOffset())) {
+ stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ } else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
+ stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ }
+#endif
+
+#if ENABLE(GGC)
+ MacroAssembler::Call writeBarrierOperation = writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, callFrameRegister, allocator);
+#endif
+
+ MacroAssembler::Jump success;
+ MacroAssembler::Jump failure;
+
+ if (allocator.didReuseRegisters()) {
+ allocator.restoreReusedRegistersByPopping(stubJit);
+ success = stubJit.jump();
- if (stubInfo.cacheType == CacheType::Unset
- && isInlineOffset(slot.cachedOffset())
- && MacroAssembler::isPtrAlignedAddressOffset(maxOffsetRelativeToBase(slot.cachedOffset()))
- && !structure->needImpurePropertyWatchpoint()
- && !structure->inferredTypeFor(ident.impl())) {
+ badStructure.link(&stubJit);
+ allocator.restoreReusedRegistersByPopping(stubJit);
+ failure = stubJit.jump();
+ } else {
+ success = stubJit.jump();
+ failure = badStructure;
+ }
+
+ LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
+#if ENABLE(GGC)
+ patchBuffer.link(writeBarrierOperation, operationFlushWriteBarrierBuffer);
+#endif
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
+ patchBuffer.link(failure, failureLabel);
+
+ stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
+ patchBuffer,
+ ("DFG PutById replace stub for %s, return point %p",
+ toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
+ stubInfo.patch.deltaCallToDone).executableAddress()));
+}
- repatchByIdSelfAccess(
- codeBlock, stubInfo, structure, slot.cachedOffset(),
- appropriateOptimizingPutByIdFunction(slot, putKind), false);
- stubInfo.initPutByIdReplace(codeBlock, structure, slot.cachedOffset());
- return RetryCacheLater;
- }
+static void emitPutTransitionStub(
+ ExecState* exec,
+ JSValue,
+ const Identifier&,
+ const PutPropertySlot& slot,
+ StructureStubInfo& stubInfo,
+ PutKind putKind,
+ Structure* structure,
+ Structure* oldStructure,
+ StructureChain* prototypeChain,
+ CodeLocationLabel failureLabel,
+ RefPtr<JITStubRoutine>& stubRoutine)
+{
+ VM* vm = &exec->vm();
- newCase = AccessCase::replace(vm, codeBlock, structure, slot.cachedOffset());
- } else {
- ASSERT(slot.type() == PutPropertySlot::NewProperty);
+ GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
+#endif
+ GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
+
+ ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+ allocator.lock(baseGPR);
+#if USE(JSVALUE32_64)
+ allocator.lock(valueTagGPR);
+#endif
+ allocator.lock(valueGPR);
+
+ CCallHelpers stubJit(vm);
+
+ bool needThirdScratch = false;
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity()) {
+ needThirdScratch = true;
+ }
- if (!structure->isObject() || structure->isDictionary())
- return GiveUpOnCache;
+ GPRReg scratchGPR1 = allocator.allocateScratchGPR();
+ ASSERT(scratchGPR1 != baseGPR);
+ ASSERT(scratchGPR1 != valueGPR);
+
+ GPRReg scratchGPR2 = allocator.allocateScratchGPR();
+ ASSERT(scratchGPR2 != baseGPR);
+ ASSERT(scratchGPR2 != valueGPR);
+ ASSERT(scratchGPR2 != scratchGPR1);
+
+ GPRReg scratchGPR3;
+ if (needThirdScratch) {
+ scratchGPR3 = allocator.allocateScratchGPR();
+ ASSERT(scratchGPR3 != baseGPR);
+ ASSERT(scratchGPR3 != valueGPR);
+ ASSERT(scratchGPR3 != scratchGPR1);
+ ASSERT(scratchGPR3 != scratchGPR2);
+ } else
+ scratchGPR3 = InvalidGPRReg;
+
+ allocator.preserveReusedRegistersByPushing(stubJit);
- PropertyOffset offset;
- Structure* newStructure =
- Structure::addPropertyTransitionToExistingStructureConcurrently(
- structure, ident.impl(), 0, offset);
- if (!newStructure || !newStructure->propertyAccessesAreCacheable())
- return GiveUpOnCache;
+ MacroAssembler::JumpList failureCases;
+
+ ASSERT(oldStructure->transitionWatchpointSetHasBeenInvalidated());
+
+ failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(oldStructure)));
+
+ addStructureTransitionCheck(
+ oldStructure->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
+ scratchGPR1);
+
+ if (putKind == NotDirect) {
+ for (WriteBarrier<Structure>* it = prototypeChain->head(); *it; ++it) {
+ addStructureTransitionCheck(
+ (*it)->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
+ scratchGPR1);
+ }
+ }
- ASSERT(newStructure->previousID() == structure);
- ASSERT(!newStructure->isDictionary());
- ASSERT(newStructure->isObject());
+ MacroAssembler::JumpList slowPath;
+
+ bool scratchGPR1HasStorage = false;
+
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
+ size_t newSize = structure->outOfLineCapacity() * sizeof(JSValue);
+ CopiedAllocator* copiedAllocator = &vm->heap.storageAllocator();
+
+ if (!oldStructure->outOfLineCapacity()) {
+ stubJit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR1);
+ slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize), scratchGPR1));
+ stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
+ stubJit.negPtr(scratchGPR1);
+ stubJit.addPtr(MacroAssembler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR1);
+ stubJit.addPtr(MacroAssembler::TrustedImm32(sizeof(JSValue)), scratchGPR1);
+ } else {
+ size_t oldSize = oldStructure->outOfLineCapacity() * sizeof(JSValue);
+ ASSERT(newSize > oldSize);
- ObjectPropertyConditionSet conditionSet;
- if (putKind == NotDirect) {
- conditionSet =
- generateConditionsForPropertySetterMiss(
- vm, codeBlock, exec, newStructure, ident.impl());
- if (!conditionSet.isValid())
- return GiveUpOnCache;
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
+ stubJit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR1);
+ slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize), scratchGPR1));
+ stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
+ stubJit.negPtr(scratchGPR1);
+ stubJit.addPtr(MacroAssembler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR1);
+ stubJit.addPtr(MacroAssembler::TrustedImm32(sizeof(JSValue)), scratchGPR1);
+ // We have scratchGPR1 = new storage, scratchGPR3 = old storage, scratchGPR2 = available
+ for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
+ stubJit.loadPtr(MacroAssembler::Address(scratchGPR3, -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))), scratchGPR2);
+ stubJit.storePtr(scratchGPR2, MacroAssembler::Address(scratchGPR1, -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
}
-
- newCase = AccessCase::transition(vm, codeBlock, structure, newStructure, offset, conditionSet);
}
- } else if (slot.isCacheableCustom() || slot.isCacheableSetter()) {
- if (slot.isCacheableCustom()) {
- ObjectPropertyConditionSet conditionSet;
-
- if (slot.base() != baseValue) {
- conditionSet =
- generateConditionsForPrototypePropertyHitCustom(
- vm, codeBlock, exec, structure, slot.base(), ident.impl());
- if (!conditionSet.isValid())
- return GiveUpOnCache;
- }
+
+ stubJit.storePtr(scratchGPR1, MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()));
+ scratchGPR1HasStorage = true;
+ }
- newCase = AccessCase::setter(
- vm, codeBlock, slot.isCustomAccessor() ? AccessCase::CustomAccessorSetter : AccessCase::CustomValueSetter, structure, invalidOffset, conditionSet,
- slot.customSetter(), slot.base());
- } else {
- ObjectPropertyConditionSet conditionSet;
- PropertyOffset offset;
-
- if (slot.base() != baseValue) {
- conditionSet =
- generateConditionsForPrototypePropertyHit(
- vm, codeBlock, exec, structure, slot.base(), ident.impl());
- if (!conditionSet.isValid())
- return GiveUpOnCache;
- offset = conditionSet.slotBaseCondition().offset();
- } else
- offset = slot.cachedOffset();
-
- newCase = AccessCase::setter(
- vm, codeBlock, AccessCase::Setter, structure, offset, conditionSet);
+ stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
+#if USE(JSVALUE64)
+ if (isInlineOffset(slot.cachedOffset()))
+ stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
+ else {
+ if (!scratchGPR1HasStorage)
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
+ stubJit.store64(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
+ }
+#elif USE(JSVALUE32_64)
+ if (isInlineOffset(slot.cachedOffset())) {
+ stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ } else {
+ if (!scratchGPR1HasStorage)
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
+ stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ }
+#endif
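+ // On 64-bit a single store64 writes the whole JSValue; on 32-bit the value is
+ // an EncodedValueDescriptor, so the payload and the tag are stored with two
+ // separate 32-bit stores at their respective offsets.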
+
+#if ENABLE(GGC)
+ MacroAssembler::Call writeBarrierOperation = writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, callFrameRegister, allocator);
+#endif
+
+ MacroAssembler::Jump success;
+ MacroAssembler::Jump failure;
+
+ if (allocator.didReuseRegisters()) {
+ allocator.restoreReusedRegistersByPopping(stubJit);
+ success = stubJit.jump();
+
+ failureCases.link(&stubJit);
+ allocator.restoreReusedRegistersByPopping(stubJit);
+ failure = stubJit.jump();
+ } else
+ success = stubJit.jump();
+
+ MacroAssembler::Call operationCall;
+ MacroAssembler::Jump successInSlowPath;
+
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
+ slowPath.link(&stubJit);
+
+ allocator.restoreReusedRegistersByPopping(stubJit);
+ ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(allocator.desiredScratchBufferSize());
+ allocator.preserveUsedRegistersToScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
+#if USE(JSVALUE64)
+ stubJit.setupArguments(callFrameRegister, baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR);
+#else
+ stubJit.setupArguments(callFrameRegister, baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR, valueTagGPR);
+#endif
+ operationCall = stubJit.call();
+ allocator.restoreUsedRegistersFromScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
+ successInSlowPath = stubJit.jump();
+ }
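+ // This out-of-line slow path exists only for reallocating transitions: it
+ // spills the live registers to a VM scratch buffer, calls out to reallocate
+ // the storage and finish the put, restores the registers, and rejoins the
+ // done path.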
+
+ LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
+#if ENABLE(GGC)
+ patchBuffer.link(writeBarrierOperation, operationFlushWriteBarrierBuffer);
+#endif
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
+ if (allocator.didReuseRegisters())
+ patchBuffer.link(failure, failureLabel);
+ else
+ patchBuffer.link(failureCases, failureLabel);
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
+ patchBuffer.link(operationCall, operationReallocateStorageAndFinishPut);
+ patchBuffer.link(successInSlowPath, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
+ }
+
+ stubRoutine =
+ createJITStubRoutine(
+ FINALIZE_DFG_CODE(
+ patchBuffer,
+ ("DFG PutById %stransition stub (%p -> %p) for %s, return point %p",
+ structure->outOfLineCapacity() != oldStructure->outOfLineCapacity() ? "reallocating " : "",
+ oldStructure, structure,
+ toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
+ stubInfo.patch.deltaCallToDone).executableAddress())),
+ *vm,
+ exec->codeBlock()->ownerExecutable(),
+ structure->outOfLineCapacity() != oldStructure->outOfLineCapacity(),
+ structure);
+}
+
+static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+{
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM* vm = &exec->vm();
+
+ if (!baseValue.isCell())
+ return false;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+ Structure* oldStructure = structure->previousID();
+
+ if (!slot.isCacheable())
+ return false;
+ if (!structure->propertyAccessesAreCacheable())
+ return false;
+
+ // Optimize self access.
+ if (slot.base() == baseValue) {
+ if (slot.type() == PutPropertySlot::NewProperty) {
+ if (structure->isDictionary())
+ return false;
+
+ // Skip optimizing the case where we need a realloc, if we don't have
+ // enough registers to make it happen.
+ if (GPRInfo::numberOfRegisters < 6
+ && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity())
+ return false;
+
+ // Skip optimizing the case where we need a realloc and the structure has
+ // indexing storage.
+ if (oldStructure->couldHaveIndexingHeader())
+ return false;
+
+ if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain)
+ return false;
+
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+
+ emitPutTransitionStub(
+ exec, baseValue, ident, slot, stubInfo, putKind,
+ structure, oldStructure, prototypeChain,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase),
+ stubInfo.stubRoutine);
+
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchBuffer.relink(
+ stubInfo.callReturnLocation.jumpAtOffset(
+ stubInfo.patch.deltaCallToJump),
+ CodeLocationLabel(stubInfo.stubRoutine->code().code()));
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateListBuildingPutByIdFunction(slot, putKind));
+
+ stubInfo.initPutByIdTransition(*vm, codeBlock->ownerExecutable(), oldStructure, structure, prototypeChain, putKind == Direct);
+
+ return true;
}
+
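+ // The self-access fast path patches the cached offset straight into the
+ // inline load/store, so only proceed when that offset is encodable as a
+ // pointer-aligned address offset; otherwise leave the generic path in place.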
+ if (!MacroAssembler::isPtrAlignedAddressOffset(offsetRelativeToPatchedStorage(slot.cachedOffset())))
+ return false;
+
+ repatchByIdSelfAccess(*vm, codeBlock, stubInfo, structure, ident, slot.cachedOffset(), appropriateListBuildingPutByIdFunction(slot, putKind), false);
+ stubInfo.initPutByIdReplace(*vm, codeBlock->ownerExecutable(), structure);
+ return true;
}
- MacroAssemblerCodePtr codePtr = stubInfo.addAccessCase(codeBlock, ident, WTFMove(newCase));
+ return false;
+}
+
+void repatchPutByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+{
+ GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- if (!codePtr)
- return GiveUpOnCache;
+ bool cached = tryCachePutByID(exec, baseValue, propertyName, slot, stubInfo, putKind);
+ if (!cached)
+ repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
+}
- resetPutByIDCheckAndLoad(stubInfo);
- MacroAssembler::repatchJump(
- stubInfo.callReturnLocation.jumpAtOffset(
- stubInfo.patch.deltaCallToJump),
- CodeLocationLabel(codePtr));
+static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+{
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM* vm = &exec->vm();
+
+ if (!baseValue.isCell())
+ return false;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+ Structure* oldStructure = structure->previousID();
+
+ if (!slot.isCacheable())
+ return false;
+ if (!structure->propertyAccessesAreCacheable())
+ return false;
+
+ // Optimize self access.
+ if (slot.base() == baseValue) {
+ PolymorphicPutByIdList* list;
+ RefPtr<JITStubRoutine> stubRoutine;
+
+ if (slot.type() == PutPropertySlot::NewProperty) {
+ if (structure->isDictionary())
+ return false;
+
+ // Skip optimizing the case where we need a realloc, if we don't have
+ // enough registers to make it happen.
+ if (GPRInfo::numberOfRegisters < 6
+ && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity())
+ return false;
+
+ // Skip optimizing the case where we need a realloc and the structure has
+ // indexing storage.
+ if (oldStructure->couldHaveIndexingHeader())
+ return false;
+
+ if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain)
+ return false;
+
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+
+ // We're now committed to creating the stub. Mogrify the meta-data accordingly.
+ list = PolymorphicPutByIdList::from(
+ putKind, stubInfo,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+
+ emitPutTransitionStub(
+ exec, baseValue, propertyName, slot, stubInfo, putKind,
+ structure, oldStructure, prototypeChain,
+ CodeLocationLabel(list->currentSlowPathTarget()),
+ stubRoutine);
+
+ list->addAccess(
+ PutByIdAccess::transition(
+ *vm, codeBlock->ownerExecutable(),
+ oldStructure, structure, prototypeChain,
+ stubRoutine));
+ } else {
+ // We're now committed to creating the stub. Mogrify the meta-data accordingly.
+ list = PolymorphicPutByIdList::from(
+ putKind, stubInfo,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+
+ emitPutReplaceStub(
+ exec, baseValue, propertyName, slot, stubInfo, putKind,
+ structure, CodeLocationLabel(list->currentSlowPathTarget()), stubRoutine);
+
+ list->addAccess(
+ PutByIdAccess::replace(
+ *vm, codeBlock->ownerExecutable(),
+ structure, stubRoutine));
+ }
+
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code()));
+
+ if (list->isFull())
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
+
+ return true;
+ }
- return RetryCacheLater;
+ return false;
}
-void repatchPutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+void buildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
{
GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- if (tryCachePutByID(exec, baseValue, structure, propertyName, slot, stubInfo, putKind) == GiveUpOnCache)
+ bool cached = tryBuildPutByIdList(exec, baseValue, propertyName, slot, stubInfo, putKind);
+ if (!cached)
repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
}
-static InlineCacheAction tryRepatchIn(
+static bool tryRepatchIn(
ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
const PropertySlot& slot, StructureStubInfo& stubInfo)
{
- if (forceICFailure(exec))
- return GiveUpOnCache;
-
- if (!base->structure()->propertyAccessesAreCacheable() || (!wasFound && !base->structure()->propertyAccessesAreCacheableForAbsence()))
- return GiveUpOnCache;
+ if (!base->structure()->propertyAccessesAreCacheable())
+ return false;
if (wasFound) {
if (!slot.isCacheable())
- return GiveUpOnCache;
+ return false;
}
CodeBlock* codeBlock = exec->codeBlock();
- VM& vm = exec->vm();
- Structure* structure = base->structure(vm);
+ VM* vm = &exec->vm();
+ Structure* structure = base->structure();
- ObjectPropertyConditionSet conditionSet;
- if (wasFound) {
- if (slot.slotBase() != base) {
- conditionSet = generateConditionsForPrototypePropertyHit(
- vm, codeBlock, exec, structure, slot.slotBase(), ident.impl());
- }
+ PropertyOffset offsetIgnored;
+ size_t count = normalizePrototypeChainForChainAccess(exec, base, wasFound ? slot.slotBase() : JSValue(), ident, offsetIgnored);
+ if (count == InvalidPrototypeChain)
+ return false;
+
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex;
+
+ CodeLocationLabel successLabel = stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
+ CodeLocationLabel slowCaseLabel;
+
+ if (stubInfo.accessType == access_unset) {
+ polymorphicStructureList = new PolymorphicAccessStructureList();
+ stubInfo.initInList(polymorphicStructureList, 0);
+ slowCaseLabel = stubInfo.callReturnLocation.labelAtOffset(
+ stubInfo.patch.deltaCallToSlowCase);
+ listIndex = 0;
} else {
- conditionSet = generateConditionsForPropertyMiss(
- vm, codeBlock, exec, structure, ident.impl());
+ RELEASE_ASSERT(stubInfo.accessType == access_in_list);
+ polymorphicStructureList = stubInfo.u.inList.structureList;
+ listIndex = stubInfo.u.inList.listSize;
+ slowCaseLabel = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code());
+
+ if (listIndex == POLYMORPHIC_LIST_CACHE_SIZE)
+ return false;
}
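+ // Note the chaining discipline: the first miss allocates a fresh
+ // PolymorphicAccessStructureList, while each subsequent stub uses the
+ // previous stub's entry point as its slow-case label, so cases are tried
+ // newest-first before falling back to the original slow path.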
- if (!conditionSet.isValid())
- return GiveUpOnCache;
-
- std::unique_ptr<AccessCase> newCase = AccessCase::in(
- vm, codeBlock, wasFound ? AccessCase::InHit : AccessCase::InMiss, structure, conditionSet);
-
- MacroAssemblerCodePtr codePtr = stubInfo.addAccessCase(codeBlock, ident, WTFMove(newCase));
- if (!codePtr)
- return GiveUpOnCache;
+
+ StructureChain* chain = structure->prototypeChain(exec);
+ RefPtr<JITStubRoutine> stubRoutine;
+
+ {
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
+ GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
+
+ CCallHelpers stubJit(vm);
+
+ bool needToRestoreScratch;
+ if (scratchGPR == InvalidGPRReg) {
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
+ stubJit.pushToSave(scratchGPR);
+ needToRestoreScratch = true;
+ } else
+ needToRestoreScratch = false;
+
+ MacroAssembler::JumpList failureCases;
+ failureCases.append(stubJit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(baseGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(structure)));
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
+ vm->registerWatchpointForImpureProperty(ident, stubInfo.addWatchpoint(codeBlock));
+
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ JSObject* prototype = asObject(currStructure->prototypeForLookup(exec));
+ Structure* protoStructure = prototype->structure();
+ addStructureTransitionCheck(
+ prototype, protoStructure, exec->codeBlock(), stubInfo, stubJit,
+ failureCases, scratchGPR);
+ if (protoStructure->typeInfo().newImpurePropertyFiresWatchpoints())
+ vm->registerWatchpointForImpureProperty(ident, stubInfo.addWatchpoint(codeBlock));
+ currStructure = it->get();
+ }
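+ // Every prototype on the chain gets a structure transition check (plus a
+ // registered watchpoint when impure properties fire watchpoints), so the
+ // stub stays valid only for as long as the whole chain keeps its shape.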
+
+#if USE(JSVALUE64)
+ stubJit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(wasFound))), resultGPR);
+#else
+ stubJit.move(MacroAssembler::TrustedImm32(wasFound), resultGPR);
+#endif
+
+ MacroAssembler::Jump success, fail;
+
+ emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
+
+ LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
- MacroAssembler::repatchJump(
- stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump),
- CodeLocationLabel(codePtr));
+ linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
+
+ stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
+ patchBuffer,
+ ("DFG In (found = %s) stub for %s, return point %p",
+ wasFound ? "yes" : "no", toCString(*exec->codeBlock()).data(),
+ successLabel.executableAddress()));
+ }
+
+ polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, true);
+ stubInfo.u.inList.listSize++;
- return RetryCacheLater;
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code()));
+
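+ // Keep caching until this entry has consumed the last slot; once the list is
+ // full, the caller repatches the slow call to the generic operationIn.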
+ return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
}
void repatchIn(
ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
const PropertySlot& slot, StructureStubInfo& stubInfo)
{
- if (tryRepatchIn(exec, base, ident, wasFound, slot, stubInfo) == GiveUpOnCache)
- repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationIn);
-}
-
-static void linkSlowFor(VM*, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef)
-{
- MacroAssembler::repatchNearCall(callLinkInfo.callReturnLocation(), CodeLocationLabel(codeRef.code()));
-}
-
-static void linkSlowFor(VM* vm, CallLinkInfo& callLinkInfo, ThunkGenerator generator)
-{
- linkSlowFor(vm, callLinkInfo, vm->getCTIStub(generator));
+ if (tryRepatchIn(exec, base, ident, wasFound, slot, stubInfo))
+ return;
+ repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationIn);
}
-static void linkSlowFor(VM* vm, CallLinkInfo& callLinkInfo)
+static void linkSlowFor(RepatchBuffer& repatchBuffer, VM* vm, CallLinkInfo& callLinkInfo, CodeSpecializationKind kind)
{
- MacroAssemblerCodeRef virtualThunk = virtualThunkFor(vm, callLinkInfo);
- linkSlowFor(vm, callLinkInfo, virtualThunk);
- callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, *vm, nullptr, true));
+ if (kind == CodeForCall) {
+ repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code());
+ return;
+ }
+ ASSERT(kind == CodeForConstruct);
+ repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code());
}
-void linkFor(
- ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock,
- JSFunction* callee, MacroAssemblerCodePtr codePtr)
+void linkFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, JSFunction* callee, MacroAssemblerCodePtr codePtr, CodeSpecializationKind kind)
{
- ASSERT(!callLinkInfo.stub());
+ ASSERT(!callLinkInfo.stub);
+
+ // If you're being call-linked from a DFG caller then you obviously didn't get inlined.
+ if (calleeCodeBlock)
+ calleeCodeBlock->m_shouldAlwaysBeInlined = false;
CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
-
VM* vm = callerCodeBlock->vm();
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
ASSERT(!callLinkInfo.isLinked());
- callLinkInfo.setCallee(exec->callerFrame()->vm(), callLinkInfo.hotPathBegin(), callerCodeBlock, callee);
- callLinkInfo.setLastSeenCallee(exec->callerFrame()->vm(), callerCodeBlock, callee);
- if (shouldDumpDisassemblyFor(callerCodeBlock))
- dataLog("Linking call in ", *callerCodeBlock, " at ", callLinkInfo.codeOrigin(), " to ", pointerDump(calleeCodeBlock), ", entrypoint at ", codePtr, "\n");
- MacroAssembler::repatchNearCall(callLinkInfo.hotPathOther(), CodeLocationLabel(codePtr));
+ callLinkInfo.callee.set(exec->callerFrame()->vm(), callLinkInfo.hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
+ callLinkInfo.lastSeenCallee.set(exec->callerFrame()->vm(), callerCodeBlock->ownerExecutable(), callee);
+ repatchBuffer.relink(callLinkInfo.hotPathOther, codePtr);
if (calleeCodeBlock)
calleeCodeBlock->linkIncomingCall(exec->callerFrame(), &callLinkInfo);
- if (callLinkInfo.specializationKind() == CodeForCall && callLinkInfo.allowStubs()) {
- linkSlowFor(vm, callLinkInfo, linkPolymorphicCallThunkGenerator);
+ if (kind == CodeForCall) {
+ repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code());
return;
}
- linkSlowFor(vm, callLinkInfo);
+ ASSERT(kind == CodeForConstruct);
+ linkSlowFor(repatchBuffer, vm, callLinkInfo, CodeForConstruct);
}
-void linkSlowFor(
- ExecState* exec, CallLinkInfo& callLinkInfo)
+void linkSlowFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeSpecializationKind kind)
{
CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
VM* vm = callerCodeBlock->vm();
- linkSlowFor(vm, callLinkInfo);
-}
-
-static void revertCall(VM* vm, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef)
-{
- MacroAssembler::revertJumpReplacementToBranchPtrWithPatch(
- MacroAssembler::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()),
- static_cast<MacroAssembler::RegisterID>(callLinkInfo.calleeGPR()), 0);
- linkSlowFor(vm, callLinkInfo, codeRef);
- callLinkInfo.clearSeen();
- callLinkInfo.clearCallee();
- callLinkInfo.clearStub();
- callLinkInfo.clearSlowStub();
- if (callLinkInfo.isOnList())
- callLinkInfo.remove();
-}
-
-void unlinkFor(VM& vm, CallLinkInfo& callLinkInfo)
-{
- if (Options::dumpDisassembly())
- dataLog("Unlinking call from ", callLinkInfo.callReturnLocation(), "\n");
-
- revertCall(&vm, callLinkInfo, vm.getCTIStub(linkCallThunkGenerator));
-}
-
-void linkVirtualFor(
- ExecState* exec, CallLinkInfo& callLinkInfo)
-{
- CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
- VM* vm = callerCodeBlock->vm();
-
- if (shouldDumpDisassemblyFor(callerCodeBlock))
- dataLog("Linking virtual call at ", *callerCodeBlock, " ", exec->callerFrame()->codeOrigin(), "\n");
+ RepatchBuffer repatchBuffer(callerCodeBlock);
- MacroAssemblerCodeRef virtualThunk = virtualThunkFor(vm, callLinkInfo);
- revertCall(vm, callLinkInfo, virtualThunk);
- callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, *vm, nullptr, true));
+ linkSlowFor(repatchBuffer, vm, callLinkInfo, kind);
}
-namespace {
-struct CallToCodePtr {
- CCallHelpers::Call call;
- MacroAssemblerCodePtr codePtr;
-};
-} // anonymous namespace
-
-void linkPolymorphicCall(
- ExecState* exec, CallLinkInfo& callLinkInfo, CallVariant newVariant)
+void linkClosureCall(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, Structure* structure, ExecutableBase* executable, MacroAssemblerCodePtr codePtr)
{
- RELEASE_ASSERT(callLinkInfo.allowStubs());
-
- // Currently we can't do anything for non-function callees.
- // https://bugs.webkit.org/show_bug.cgi?id=140685
- if (!newVariant || !newVariant.executable()) {
- linkVirtualFor(exec, callLinkInfo);
- return;
- }
+ ASSERT(!callLinkInfo.stub);
CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
VM* vm = callerCodeBlock->vm();
- CallVariantList list;
- if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub())
- list = stub->variants();
- else if (JSFunction* oldCallee = callLinkInfo.callee())
- list = CallVariantList{ CallVariant(oldCallee) };
-
- list = variantListWithVariant(list, newVariant);
-
- // If there are any closure calls then it makes sense to treat all of them as closure calls.
- // This makes switching on callee cheaper. It also produces profiling that's easier on the DFG;
- // the DFG doesn't really want to deal with a combination of closure and non-closure callees.
- bool isClosureCall = false;
- for (CallVariant variant : list) {
- if (variant.isClosureCall()) {
- list = despecifiedVariantList(list);
- isClosureCall = true;
- break;
- }
- }
-
- if (isClosureCall)
- callLinkInfo.setHasSeenClosure();
-
- Vector<PolymorphicCallCase> callCases;
-
- // Figure out what our cases are.
- for (CallVariant variant : list) {
- CodeBlock* codeBlock;
- if (variant.executable()->isHostFunction())
- codeBlock = nullptr;
- else {
- ExecutableBase* executable = variant.executable();
-#if ENABLE(WEBASSEMBLY)
- if (executable->isWebAssemblyExecutable())
- codeBlock = jsCast<WebAssemblyExecutable*>(executable)->codeBlockForCall();
- else
-#endif
- codeBlock = jsCast<FunctionExecutable*>(executable)->codeBlockForCall();
- // If we cannot handle a callee, either because we don't have a CodeBlock or because arity mismatch,
- // assume that it's better for this whole thing to be a virtual call.
- if (!codeBlock || exec->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo.isVarargs()) {
- linkVirtualFor(exec, callLinkInfo);
- return;
- }
- }
-
- callCases.append(PolymorphicCallCase(variant, codeBlock));
- }
-
- // If we are over the limit, just use a normal virtual call.
- unsigned maxPolymorphicCallVariantListSize;
- if (callerCodeBlock->jitType() == JITCode::topTierJIT())
- maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSizeForTopTier();
- else
- maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSize();
- if (list.size() > maxPolymorphicCallVariantListSize) {
- linkVirtualFor(exec, callLinkInfo);
- return;
- }
-
- GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR());
+ GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR);
CCallHelpers stubJit(vm, callerCodeBlock);
CCallHelpers::JumpList slowPath;
- std::unique_ptr<CallFrameShuffler> frameShuffler;
- if (callLinkInfo.frameShuffleData()) {
- ASSERT(callLinkInfo.isTailCall());
- frameShuffler = std::make_unique<CallFrameShuffler>(stubJit, *callLinkInfo.frameShuffleData());
-#if USE(JSVALUE32_64)
- // We would have already checked that the callee is a cell, and we can
- // use the additional register this buys us.
- frameShuffler->assumeCalleeIsCell();
-#endif
- frameShuffler->lockGPR(calleeGPR);
- }
- GPRReg comparisonValueGPR;
-
- if (isClosureCall) {
- GPRReg scratchGPR;
- if (frameShuffler)
- scratchGPR = frameShuffler->acquireGPR();
- else
- scratchGPR = AssemblyHelpers::selectScratchGPR(calleeGPR);
- // Verify that we have a function and stash the executable in scratchGPR.
-
#if USE(JSVALUE64)
- // We can't rely on tagMaskRegister being set, so we do this the hard
- // way.
- stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratchGPR);
- slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratchGPR));
+ // We can safely clobber everything except the calleeGPR. We can't rely on tagMaskRegister
+ // being set. So we do this the hard way.
+ GPRReg scratch = AssemblyHelpers::selectScratchGPR(calleeGPR);
+ stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratch);
+ slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratch));
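+ // (On 64-bit, a JSValue is a cell exactly when none of the TagMask bits are
+ // set, so this single test filters out non-cells without relying on
+ // tagMaskRegister being live.)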
#else
- // We would have already checked that the callee is a cell.
+ // We would have already checked that the callee is a cell.
#endif
- slowPath.append(
- stubJit.branch8(
- CCallHelpers::NotEqual,
- CCallHelpers::Address(calleeGPR, JSCell::typeInfoTypeOffset()),
- CCallHelpers::TrustedImm32(JSFunctionType)));
+ slowPath.append(
+ stubJit.branchPtr(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(calleeGPR, JSCell::structureOffset()),
+ CCallHelpers::TrustedImmPtr(structure)));
- stubJit.loadPtr(
+ slowPath.append(
+ stubJit.branchPtr(
+ CCallHelpers::NotEqual,
CCallHelpers::Address(calleeGPR, JSFunction::offsetOfExecutable()),
- scratchGPR);
-
- comparisonValueGPR = scratchGPR;
- } else
- comparisonValueGPR = calleeGPR;
-
- Vector<int64_t> caseValues(callCases.size());
- Vector<CallToCodePtr> calls(callCases.size());
- std::unique_ptr<uint32_t[]> fastCounts;
-
- if (callerCodeBlock->jitType() != JITCode::topTierJIT())
- fastCounts = std::make_unique<uint32_t[]>(callCases.size());
-
- for (size_t i = 0; i < callCases.size(); ++i) {
- if (fastCounts)
- fastCounts[i] = 0;
-
- CallVariant variant = callCases[i].variant();
- int64_t newCaseValue;
- if (isClosureCall)
- newCaseValue = bitwise_cast<intptr_t>(variant.executable());
- else
- newCaseValue = bitwise_cast<intptr_t>(variant.function());
-
- if (!ASSERT_DISABLED) {
- for (size_t j = 0; j < i; ++j) {
- if (caseValues[j] != newCaseValue)
- continue;
-
- dataLog("ERROR: Attempt to add duplicate case value.\n");
- dataLog("Existing case values: ");
- CommaPrinter comma;
- for (size_t k = 0; k < i; ++k)
- dataLog(comma, caseValues[k]);
- dataLog("\n");
- dataLog("Attempting to add: ", newCaseValue, "\n");
- dataLog("Variant list: ", listDump(callCases), "\n");
- RELEASE_ASSERT_NOT_REACHED();
- }
- }
-
- caseValues[i] = newCaseValue;
- }
-
- GPRReg fastCountsBaseGPR;
- if (frameShuffler)
- fastCountsBaseGPR = frameShuffler->acquireGPR();
- else {
- fastCountsBaseGPR =
- AssemblyHelpers::selectScratchGPR(calleeGPR, comparisonValueGPR, GPRInfo::regT3);
- }
- stubJit.move(CCallHelpers::TrustedImmPtr(fastCounts.get()), fastCountsBaseGPR);
- if (!frameShuffler && callLinkInfo.isTailCall())
- stubJit.emitRestoreCalleeSaves();
- BinarySwitch binarySwitch(comparisonValueGPR, caseValues, BinarySwitch::IntPtr);
- CCallHelpers::JumpList done;
- while (binarySwitch.advance(stubJit)) {
- size_t caseIndex = binarySwitch.caseIndex();
-
- CallVariant variant = callCases[caseIndex].variant();
-
- ASSERT(variant.executable()->hasJITCodeForCall());
- MacroAssemblerCodePtr codePtr =
- variant.executable()->generatedJITCodeForCall()->addressForCall(ArityCheckNotRequired);
-
- if (fastCounts) {
- stubJit.add32(
- CCallHelpers::TrustedImm32(1),
- CCallHelpers::Address(fastCountsBaseGPR, caseIndex * sizeof(uint32_t)));
- }
- if (frameShuffler) {
- CallFrameShuffler(stubJit, frameShuffler->snapshot()).prepareForTailCall();
- calls[caseIndex].call = stubJit.nearTailCall();
- } else if (callLinkInfo.isTailCall()) {
- stubJit.prepareForTailCallSlow();
- calls[caseIndex].call = stubJit.nearTailCall();
- } else
- calls[caseIndex].call = stubJit.nearCall();
- calls[caseIndex].codePtr = codePtr;
- done.append(stubJit.jump());
- }
+ CCallHelpers::TrustedImmPtr(executable)));
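+ // Taken together, the structure check and the executable check admit any
+ // closure over the same FunctionExecutable rather than one exact callee;
+ // that is what makes this a closure-call cache instead of an ordinary
+ // monomorphic call link.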
- slowPath.link(&stubJit);
- binarySwitch.fallThrough().link(&stubJit);
+ stubJit.loadPtr(
+ CCallHelpers::Address(calleeGPR, JSFunction::offsetOfScopeChain()),
+ GPRInfo::returnValueGPR);
- if (frameShuffler) {
- frameShuffler->releaseGPR(calleeGPR);
- frameShuffler->releaseGPR(comparisonValueGPR);
- frameShuffler->releaseGPR(fastCountsBaseGPR);
-#if USE(JSVALUE32_64)
- frameShuffler->setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT1, GPRInfo::regT0));
+#if USE(JSVALUE64)
+ stubJit.store64(
+ GPRInfo::returnValueGPR,
+ CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain)));
#else
- frameShuffler->setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
+ stubJit.storePtr(
+ GPRInfo::returnValueGPR,
+ CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(
+ CCallHelpers::TrustedImm32(JSValue::CellTag),
+ CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
#endif
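+ // Different closures over the same executable differ only in their scope, so
+ // the stub writes the callee's scope chain directly into the ScopeChain slot
+ // of the frame being set up before making the call.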
- frameShuffler->prepareForSlowPath();
- } else {
- stubJit.move(calleeGPR, GPRInfo::regT0);
+
+ AssemblyHelpers::Call call = stubJit.nearCall();
+ AssemblyHelpers::Jump done = stubJit.jump();
+
+ slowPath.link(&stubJit);
+ stubJit.move(calleeGPR, GPRInfo::regT0);
#if USE(JSVALUE32_64)
- stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
+ stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
#endif
- }
- stubJit.move(CCallHelpers::TrustedImmPtr(&callLinkInfo), GPRInfo::regT2);
- stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation().executableAddress()), GPRInfo::regT4);
-
- stubJit.restoreReturnAddressBeforeReturn(GPRInfo::regT4);
+ stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation.executableAddress()), GPRInfo::nonArgGPR2);
+ stubJit.restoreReturnAddressBeforeReturn(GPRInfo::nonArgGPR2);
AssemblyHelpers::Jump slow = stubJit.jump();
-
- LinkBuffer patchBuffer(*vm, stubJit, callerCodeBlock, JITCompilationCanFail);
- if (patchBuffer.didFailToAllocate()) {
- linkVirtualFor(exec, callLinkInfo);
- return;
- }
- RELEASE_ASSERT(callCases.size() == calls.size());
- for (CallToCodePtr callToCodePtr : calls) {
- // Tail call special-casing ensures proper linking on ARM Thumb2, where a tail call jumps to an address
- // with a non-decorated bottom bit but a normal call calls an address with a decorated bottom bit.
- bool isTailCall = callToCodePtr.call.isFlagSet(CCallHelpers::Call::Tail);
- patchBuffer.link(
- callToCodePtr.call, FunctionPtr(isTailCall ? callToCodePtr.codePtr.dataLocation() : callToCodePtr.codePtr.executableAddress()));
- }
- if (JITCode::isOptimizingJIT(callerCodeBlock->jitType()))
- patchBuffer.link(done, callLinkInfo.callReturnLocation().labelAtOffset(0));
- else
- patchBuffer.link(done, callLinkInfo.hotPathOther().labelAtOffset(0));
- patchBuffer.link(slow, CodeLocationLabel(vm->getCTIStub(linkPolymorphicCallThunkGenerator).code()));
-
- RefPtr<PolymorphicCallStubRoutine> stubRoutine = adoptRef(new PolymorphicCallStubRoutine(
- FINALIZE_CODE_FOR(
- callerCodeBlock, patchBuffer,
- ("Polymorphic call stub for %s, return point %p, targets %s",
- toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation().labelAtOffset(0).executableAddress(),
- toCString(listDump(callCases)).data())),
- *vm, callerCodeBlock, exec->callerFrame(), callLinkInfo, callCases,
- WTFMove(fastCounts)));
-
- MacroAssembler::replaceWithJump(
- MacroAssembler::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()),
+ LinkBuffer patchBuffer(*vm, &stubJit, callerCodeBlock);
+
+ patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
+ patchBuffer.link(done, callLinkInfo.callReturnLocation.labelAtOffset(0));
+ patchBuffer.link(slow, CodeLocationLabel(vm->getCTIStub(virtualCallThunkGenerator).code()));
+
+ RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
+ FINALIZE_DFG_CODE(
+ patchBuffer,
+ ("DFG closure call stub for %s, return point %p, target %p (%s)",
+ toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation.labelAtOffset(0).executableAddress(),
+ codePtr.executableAddress(), toCString(pointerDump(calleeCodeBlock)).data())),
+ *vm, callerCodeBlock->ownerExecutable(), structure, executable, callLinkInfo.codeOrigin));
+
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
+ repatchBuffer.replaceWithJump(
+ RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin),
CodeLocationLabel(stubRoutine->code().code()));
- // The original slow path is unreachable on 64-bit, but still
- // reachable on 32-bit since a non-cell callee will always
- // trigger the slow path.
- linkSlowFor(vm, callLinkInfo);
+ linkSlowFor(repatchBuffer, vm, callLinkInfo, CodeForCall);
- // If there had been a previous stub routine, that one will die as soon as the GC runs and sees
- // that it's no longer on stack.
- callLinkInfo.setStub(stubRoutine.release());
+ callLinkInfo.stub = stubRoutine.release();
- // The call link info no longer has a call cache apart from the jump to the polymorphic call
- // stub.
- if (callLinkInfo.isOnList())
- callLinkInfo.remove();
+ ASSERT(!calleeCodeBlock || calleeCodeBlock->isIncomingCallAlreadyLinked(&callLinkInfo));
}
-void resetGetByID(CodeBlock* codeBlock, StructureStubInfo& stubInfo)
+void resetGetByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
- repatchCall(codeBlock, stubInfo.callReturnLocation, operationGetByIdOptimize);
- resetGetByIDCheckAndLoad(stubInfo);
- MacroAssembler::repatchJump(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetByIdOptimize);
+ CodeLocationDataLabelPtr structureLabel = stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
+ if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
+ repatchBuffer.revertJumpReplacementToPatchableBranchPtrWithPatch(
+ RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(structureLabel),
+ MacroAssembler::Address(
+ static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
+ JSCell::structureOffset()),
+ reinterpret_cast<void*>(unusedPointer));
+ }
+ repatchBuffer.repatch(structureLabel, reinterpret_cast<void*>(unusedPointer));
+#if USE(JSVALUE64)
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
+#else
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
+#endif
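+ // With the structure check and the load/store displacements scrubbed back to
+ // their unpatched state, point the patchable jump at the slow case so the
+ // next execution rebuilds the cache from scratch.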
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
}
-void resetPutByID(CodeBlock* codeBlock, StructureStubInfo& stubInfo)
+void resetPutByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
- V_JITOperation_ESsiJJI unoptimizedFunction = bitwise_cast<V_JITOperation_ESsiJJI>(readCallTarget(codeBlock, stubInfo.callReturnLocation).executableAddress());
+ V_JITOperation_ESsiJJI unoptimizedFunction = bitwise_cast<V_JITOperation_ESsiJJI>(readCallTarget(repatchBuffer, stubInfo.callReturnLocation).executableAddress());
V_JITOperation_ESsiJJI optimizedFunction;
- if (unoptimizedFunction == operationPutByIdStrict || unoptimizedFunction == operationPutByIdStrictOptimize)
+ if (unoptimizedFunction == operationPutByIdStrict || unoptimizedFunction == operationPutByIdStrictBuildList)
optimizedFunction = operationPutByIdStrictOptimize;
- else if (unoptimizedFunction == operationPutByIdNonStrict || unoptimizedFunction == operationPutByIdNonStrictOptimize)
+ else if (unoptimizedFunction == operationPutByIdNonStrict || unoptimizedFunction == operationPutByIdNonStrictBuildList)
optimizedFunction = operationPutByIdNonStrictOptimize;
- else if (unoptimizedFunction == operationPutByIdDirectStrict || unoptimizedFunction == operationPutByIdDirectStrictOptimize)
+ else if (unoptimizedFunction == operationPutByIdDirectStrict || unoptimizedFunction == operationPutByIdDirectStrictBuildList)
optimizedFunction = operationPutByIdDirectStrictOptimize;
else {
- ASSERT(unoptimizedFunction == operationPutByIdDirectNonStrict || unoptimizedFunction == operationPutByIdDirectNonStrictOptimize);
+ ASSERT(unoptimizedFunction == operationPutByIdDirectNonStrict || unoptimizedFunction == operationPutByIdDirectNonStrictBuildList);
optimizedFunction = operationPutByIdDirectNonStrictOptimize;
}
- repatchCall(codeBlock, stubInfo.callReturnLocation, optimizedFunction);
- resetPutByIDCheckAndLoad(stubInfo);
- MacroAssembler::repatchJump(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, optimizedFunction);
+ CodeLocationDataLabelPtr structureLabel = stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
+ if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
+ repatchBuffer.revertJumpReplacementToPatchableBranchPtrWithPatch(
+ RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(structureLabel),
+ MacroAssembler::Address(
+ static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
+ JSCell::structureOffset()),
+ reinterpret_cast<void*>(unusedPointer));
+ }
+ repatchBuffer.repatch(structureLabel, reinterpret_cast<void*>(unusedPointer));
+#if USE(JSVALUE64)
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
+#else
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
+#endif
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
}
-void resetIn(CodeBlock*, StructureStubInfo& stubInfo)
+void resetIn(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
- MacroAssembler::repatchJump(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
}
} // namespace JSC