Diffstat (limited to 'Source/JavaScriptCore/jit/Repatch.cpp')
-rw-r--r--    Source/JavaScriptCore/jit/Repatch.cpp    1923
1 file changed, 769 insertions, 1154 deletions
diff --git a/Source/JavaScriptCore/jit/Repatch.cpp b/Source/JavaScriptCore/jit/Repatch.cpp
index 762f39145..9c31722e8 100644
--- a/Source/JavaScriptCore/jit/Repatch.cpp
+++ b/Source/JavaScriptCore/jit/Repatch.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,29 +28,21 @@
#if ENABLE(JIT)
-#include "AccessorCallJITStubRoutine.h"
-#include "BinarySwitch.h"
#include "CCallHelpers.h"
+#include "CallFrameInlines.h"
#include "DFGOperations.h"
#include "DFGSpeculativeJIT.h"
#include "FTLThunks.h"
#include "GCAwareJITStubRoutine.h"
-#include "GetterSetter.h"
-#include "JIT.h"
-#include "JITInlines.h"
#include "LinkBuffer.h"
-#include "JSCInlines.h"
-#include "PolymorphicGetByIdList.h"
+#include "Operations.h"
#include "PolymorphicPutByIdList.h"
-#include "RegExpMatchesArray.h"
#include "RepatchBuffer.h"
#include "ScratchRegisterAllocator.h"
#include "StackAlignment.h"
#include "StructureRareDataInlines.h"
#include "StructureStubClearingWatchpoint.h"
#include "ThunkGenerators.h"
-#include <wtf/CommaPrinter.h>
-#include <wtf/ListDump.h>
#include <wtf/StringPrintStream.h>
namespace JSC {
@@ -59,6 +51,11 @@ namespace JSC {
// that would ordinarily have well-known values:
// - tagTypeNumberRegister
// - tagMaskRegister
+// - callFrameRegister **
+//
+// We currently only use the callFrameRegister for closure call patching, and we're not going to
+// give the FTL closure call patching support until we switch to the C stack - but when we do that,
+// callFrameRegister will disappear.
static FunctionPtr readCallTarget(RepatchBuffer& repatchBuffer, CodeLocationCall call)
{
@@ -100,21 +97,19 @@ static void repatchCall(CodeBlock* codeblock, CodeLocationCall call, FunctionPtr
repatchCall(repatchBuffer, call, newCalleeFunction);
}
-static void repatchByIdSelfAccess(
- VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure,
- const Identifier& propertyName, PropertyOffset offset, const FunctionPtr &slowPathFunction,
- bool compact)
+static void repatchByIdSelfAccess(VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure, const Identifier& propertyName, PropertyOffset offset,
+ const FunctionPtr &slowPathFunction, bool compact)
{
- if (structure->needImpurePropertyWatchpoint())
+ if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
vm.registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
-
+
RepatchBuffer repatchBuffer(codeBlock);
// Only optimize once!
repatchCall(repatchBuffer, stubInfo.callReturnLocation, slowPathFunction);
// Patch the structure check & the offset of the load.
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall), bitwise_cast<int32_t>(structure->id()));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall), structure);
repatchBuffer.setLoadInstructionIsActive(stubInfo.callReturnLocation.convertibleLoadAtOffset(stubInfo.patch.deltaCallToStorageLoad), isOutOfLineOffset(offset));
#if USE(JSVALUE64)
if (compact)
@@ -132,41 +127,54 @@ static void repatchByIdSelfAccess(
#endif
}
-static void checkObjectPropertyCondition(
- const ObjectPropertyCondition& condition, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
+static void addStructureTransitionCheck(
+ JSCell* object, Structure* structure, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
MacroAssembler& jit, MacroAssembler::JumpList& failureCases, GPRReg scratchGPR)
{
- if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
- condition.object()->structure()->addTransitionWatchpoint(
- stubInfo.addWatchpoint(codeBlock, condition));
+ if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
+ structure->addTransitionWatchpoint(stubInfo.addWatchpoint(codeBlock));
+#if !ASSERT_DISABLED
+ // If we execute this code, the object must have the structure we expect. Assert
+ // this in debug modes.
+ jit.move(MacroAssembler::TrustedImmPtr(object), scratchGPR);
+ MacroAssembler::Jump ok = jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(scratchGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(structure));
+ jit.breakpoint();
+ ok.link(&jit);
+#endif
return;
}
-
- Structure* structure = condition.object()->structure();
- RELEASE_ASSERT(condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure));
- jit.move(MacroAssembler::TrustedImmPtr(condition.object()), scratchGPR);
+
+ jit.move(MacroAssembler::TrustedImmPtr(object), scratchGPR);
failureCases.append(
- branchStructure(
- jit, MacroAssembler::NotEqual,
- MacroAssembler::Address(scratchGPR, JSCell::structureIDOffset()), structure));
+ jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(scratchGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(structure)));
}
-static void checkObjectPropertyConditions(
- const ObjectPropertyConditionSet& set, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
+static void addStructureTransitionCheck(
+ JSValue prototype, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
MacroAssembler& jit, MacroAssembler::JumpList& failureCases, GPRReg scratchGPR)
{
- for (const ObjectPropertyCondition& condition : set) {
- checkObjectPropertyCondition(
- condition, codeBlock, stubInfo, jit, failureCases, scratchGPR);
- }
+ if (prototype.isNull())
+ return;
+
+ ASSERT(prototype.isCell());
+
+ addStructureTransitionCheck(
+ prototype.asCell(), prototype.asCell()->structure(), codeBlock, stubInfo, jit,
+ failureCases, scratchGPR);
}
static void replaceWithJump(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo, const MacroAssemblerCodePtr target)
{
- if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) {
+ if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
repatchBuffer.replaceWithJump(
- RepatchBuffer::startOfPatchableBranch32WithPatchOnAddress(
- stubInfo.callReturnLocation.dataLabel32AtOffset(
+ RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(
+ stubInfo.callReturnLocation.dataLabelPtrAtOffset(
-(intptr_t)stubInfo.patch.deltaCheckImmToCall)),
CodeLocationLabel(target));
return;
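
Note on the hunks above: repatchByIdSelfAccess does not emit a new stub. It overwrites immediates in code the JIT already generated -- the structure used by the guard (a full Structure pointer after this change, rather than a 32-bit structure ID), the displacement of the patchable property load, and the target of the slow-path call. A standalone toy model of the resulting fast path, using hypothetical types (not JSC source), is:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct ToyStructure { uint32_t id; };
    struct ToyObject {
        ToyStructure* structure;
        std::vector<uint64_t> storage;              // stands in for inline/out-of-line property storage
    };

    struct SelfAccessICSketch {
        ToyStructure* expectedStructure = nullptr;  // the "patched" structure guard
        size_t patchedOffset = 0;                   // the "patched" load displacement

        uint64_t getById(ToyObject* base, uint64_t (*slowPath)(ToyObject*)) const
        {
            if (expectedStructure && base->structure == expectedStructure)
                return base->storage[patchedOffset]; // fast path: one compare + one load
            return slowPath(base);                   // miss: take the (retargeted) slow-path call
        }
    };
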
@@ -215,554 +223,309 @@ static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratc
linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
}
-enum ByIdStubKind {
- GetValue,
- GetUndefined,
- CallGetter,
- CallCustomGetter,
- CallSetter,
- CallCustomSetter
+enum ProtoChainGenerationResult {
+ ProtoChainGenerationFailed,
+ ProtoChainGenerationSucceeded
};
-static const char* toString(ByIdStubKind kind)
-{
- switch (kind) {
- case GetValue:
- return "GetValue";
- case GetUndefined:
- return "GetUndefined";
- case CallGetter:
- return "CallGetter";
- case CallCustomGetter:
- return "CallCustomGetter";
- case CallSetter:
- return "CallSetter";
- case CallCustomSetter:
- return "CallCustomSetter";
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return nullptr;
- }
-}
-
-static ByIdStubKind kindFor(const PropertySlot& slot)
-{
- if (slot.isCacheableValue())
- return GetValue;
- if (slot.isUnset())
- return GetUndefined;
- if (slot.isCacheableCustom())
- return CallCustomGetter;
- RELEASE_ASSERT(slot.isCacheableGetter());
- return CallGetter;
-}
-
-static FunctionPtr customFor(const PropertySlot& slot)
-{
- if (!slot.isCacheableCustom())
- return FunctionPtr();
- return FunctionPtr(slot.customGetter());
-}
-
-static ByIdStubKind kindFor(const PutPropertySlot& slot)
-{
- RELEASE_ASSERT(!slot.isCacheablePut());
- if (slot.isCacheableSetter())
- return CallSetter;
- RELEASE_ASSERT(slot.isCacheableCustom());
- return CallCustomSetter;
-}
-
-static FunctionPtr customFor(const PutPropertySlot& slot)
-{
- if (!slot.isCacheableCustom())
- return FunctionPtr();
- return FunctionPtr(slot.customSetter());
-}
-
-static bool generateByIdStub(
- ExecState* exec, ByIdStubKind kind, const Identifier& propertyName,
- FunctionPtr custom, StructureStubInfo& stubInfo, const ObjectPropertyConditionSet& conditionSet,
- JSObject* alternateBase, PropertyOffset offset, Structure* structure, bool loadTargetFromProxy,
- WatchpointSet* watchpointSet, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel,
- RefPtr<JITStubRoutine>& stubRoutine)
+static ProtoChainGenerationResult generateProtoChainAccessStub(ExecState*, const PropertySlot&, const Identifier&, StructureStubInfo&, StructureChain*, size_t, PropertyOffset, Structure*, CodeLocationLabel, CodeLocationLabel, RefPtr<JITStubRoutine>&) WARN_UNUSED_RETURN;
+static ProtoChainGenerationResult generateProtoChainAccessStub(ExecState* exec, const PropertySlot& slot, const Identifier& propertyName, StructureStubInfo& stubInfo, StructureChain* chain, size_t count, PropertyOffset offset, Structure* structure, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel, RefPtr<JITStubRoutine>& stubRoutine)
{
- ASSERT(conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
-
VM* vm = &exec->vm();
GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
- JSValueRegs valueRegs = JSValueRegs(
#if USE(JSVALUE32_64)
- static_cast<GPRReg>(stubInfo.patch.valueTagGPR),
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
#endif
- static_cast<GPRReg>(stubInfo.patch.valueGPR));
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
bool needToRestoreScratch = scratchGPR == InvalidGPRReg;
- RELEASE_ASSERT(!needToRestoreScratch || (kind == GetValue || kind == GetUndefined));
+ if (needToRestoreScratch && !slot.isCacheableValue())
+ return ProtoChainGenerationFailed;
CCallHelpers stubJit(&exec->vm(), exec->codeBlock());
if (needToRestoreScratch) {
- scratchGPR = AssemblyHelpers::selectScratchGPR(
- baseGPR, valueRegs.tagGPR(), valueRegs.payloadGPR());
+#if USE(JSVALUE64)
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
+#else
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
+#endif
stubJit.pushToSave(scratchGPR);
needToRestoreScratch = true;
}
MacroAssembler::JumpList failureCases;
-
- GPRReg baseForGetGPR;
- if (loadTargetFromProxy) {
- baseForGetGPR = valueRegs.payloadGPR();
- failureCases.append(stubJit.branch8(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(baseGPR, JSCell::typeInfoTypeOffset()),
- MacroAssembler::TrustedImm32(PureForwardingProxyType)));
-
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
-
- failureCases.append(branchStructure(stubJit,
- MacroAssembler::NotEqual,
- MacroAssembler::Address(scratchGPR, JSCell::structureIDOffset()),
- structure));
- } else {
- baseForGetGPR = baseGPR;
-
- failureCases.append(branchStructure(stubJit,
- MacroAssembler::NotEqual,
- MacroAssembler::Address(baseForGetGPR, JSCell::structureIDOffset()),
- structure));
- }
+
+ failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure)));
CodeBlock* codeBlock = exec->codeBlock();
- if (structure->needImpurePropertyWatchpoint() || conditionSet.needImpurePropertyWatchpoint())
+ if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
vm->registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
- if (watchpointSet)
- watchpointSet->add(stubInfo.addWatchpoint(codeBlock));
-
- checkObjectPropertyConditions(
- conditionSet, codeBlock, stubInfo, stubJit, failureCases, scratchGPR);
-
- if (isValidOffset(offset)) {
- Structure* currStructure;
- if (conditionSet.isEmpty())
- currStructure = structure;
- else
- currStructure = conditionSet.slotBaseCondition().object()->structure();
- currStructure->startWatchingPropertyForReplacements(*vm, offset);
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(exec));
+ Structure* protoStructure = protoObject->structure();
+ if (protoStructure->typeInfo().newImpurePropertyFiresWatchpoints())
+ vm->registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
+ addStructureTransitionCheck(
+ protoObject, protoStructure, codeBlock, stubInfo, stubJit,
+ failureCases, scratchGPR);
+ currStructure = it->get();
}
- GPRReg baseForAccessGPR = InvalidGPRReg;
- if (kind != GetUndefined) {
- if (!conditionSet.isEmpty()) {
- // We could have clobbered scratchGPR earlier, so we have to reload from baseGPR to get the target.
- if (loadTargetFromProxy)
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSProxy::targetOffset()), baseForGetGPR);
- stubJit.move(MacroAssembler::TrustedImmPtr(alternateBase), scratchGPR);
- baseForAccessGPR = scratchGPR;
- } else {
- // For proxy objects, we need to do all the Structure checks before moving the baseGPR into
- // baseForGetGPR because if we fail any of the checks then we would have the wrong value in baseGPR
- // on the slow path.
- if (loadTargetFromProxy)
- stubJit.move(scratchGPR, baseForGetGPR);
- baseForAccessGPR = baseForGetGPR;
- }
- }
+ bool isAccessor = slot.isCacheableGetter() || slot.isCacheableCustom();
+ if (isAccessor)
+ stubJit.move(baseGPR, scratchGPR);
- GPRReg loadedValueGPR = InvalidGPRReg;
- if (kind == GetUndefined)
- stubJit.moveTrustedValue(jsUndefined(), valueRegs);
- else if (kind != CallCustomGetter && kind != CallCustomSetter) {
- if (kind == GetValue)
- loadedValueGPR = valueRegs.payloadGPR();
- else
- loadedValueGPR = scratchGPR;
-
- GPRReg storageGPR;
- if (isInlineOffset(offset))
- storageGPR = baseForAccessGPR;
- else {
- stubJit.loadPtr(MacroAssembler::Address(baseForAccessGPR, JSObject::butterflyOffset()), loadedValueGPR);
- storageGPR = loadedValueGPR;
- }
-
+ if (!slot.isCacheableCustom()) {
+ if (isInlineOffset(offset)) {
#if USE(JSVALUE64)
- stubJit.load64(MacroAssembler::Address(storageGPR, offsetRelativeToBase(offset)), loadedValueGPR);
-#else
- if (kind == GetValue)
- stubJit.load32(MacroAssembler::Address(storageGPR, offsetRelativeToBase(offset) + TagOffset), valueRegs.tagGPR());
- stubJit.load32(MacroAssembler::Address(storageGPR, offsetRelativeToBase(offset) + PayloadOffset), loadedValueGPR);
+ stubJit.load64(protoObject->locationForOffset(offset), resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.move(MacroAssembler::TrustedImmPtr(protoObject->locationForOffset(offset)), resultGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
#endif
+ } else {
+ stubJit.loadPtr(protoObject->butterflyAddress(), resultGPR);
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>)), resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+#endif
+ }
}
-
- // Stuff for custom getters.
MacroAssembler::Call operationCall;
MacroAssembler::Call handlerCall;
-
- // Stuff for JS getters.
- MacroAssembler::DataLabelPtr addressOfLinkFunctionCheck;
- MacroAssembler::Call fastPathCall;
- MacroAssembler::Call slowPathCall;
- std::unique_ptr<CallLinkInfo> callLinkInfo;
-
+ FunctionPtr operationFunction;
MacroAssembler::Jump success, fail;
- if (kind != GetValue && kind != GetUndefined) {
+ if (isAccessor) {
+ GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
+ if (slot.isCacheableGetter()) {
+ stubJit.setupArguments(callFrameRegister, scratchGPR, resultGPR);
+ operationFunction = operationCallGetter;
+ } else {
+ stubJit.move(MacroAssembler::TrustedImmPtr(protoObject), scratchGPR);
+ stubJit.setupArguments(callFrameRegister, scratchGPR,
+ MacroAssembler::TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()),
+ MacroAssembler::TrustedImmPtr(propertyName.impl()));
+ operationFunction = operationCallCustomGetter;
+ }
+
// Need to make sure that whenever this call is made in the future, we remember the
// place that we made it from. It just so happens to be the place that we are at
// right now!
stubJit.store32(MacroAssembler::TrustedImm32(exec->locationAsRawBits()),
CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
- if (kind == CallGetter || kind == CallSetter) {
- // Create a JS call using a JS call inline cache. Assume that:
- //
- // - SP is aligned and represents the extent of the calling compiler's stack usage.
- //
- // - FP is set correctly (i.e. it points to the caller's call frame header).
- //
- // - SP - FP is an aligned difference.
- //
- // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
- // code.
- //
- // Therefore, we temporarily grow the stack for the purpose of the call and then
- // shrink it after.
-
- callLinkInfo = std::make_unique<CallLinkInfo>();
- callLinkInfo->setUpCall(CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
-
- MacroAssembler::JumpList done;
-
- // There is a 'this' argument but nothing else.
- unsigned numberOfParameters = 1;
- // ... unless we're calling a setter.
- if (kind == CallSetter)
- numberOfParameters++;
-
- // Get the accessor; if there ain't one then the result is jsUndefined().
- if (kind == CallSetter) {
- stubJit.loadPtr(
- MacroAssembler::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
- loadedValueGPR);
- } else {
- stubJit.loadPtr(
- MacroAssembler::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
- loadedValueGPR);
- }
- MacroAssembler::Jump returnUndefined = stubJit.branchTestPtr(
- MacroAssembler::Zero, loadedValueGPR);
-
- unsigned numberOfRegsForCall =
- JSStack::CallFrameHeaderSize + numberOfParameters;
-
- unsigned numberOfBytesForCall =
- numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);
-
- unsigned alignedNumberOfBytesForCall =
- WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
-
- stubJit.subPtr(
- MacroAssembler::TrustedImm32(alignedNumberOfBytesForCall),
- MacroAssembler::stackPointerRegister);
-
- MacroAssembler::Address calleeFrame = MacroAssembler::Address(
- MacroAssembler::stackPointerRegister,
- -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
-
- stubJit.store32(
- MacroAssembler::TrustedImm32(numberOfParameters),
- calleeFrame.withOffset(
- JSStack::ArgumentCount * sizeof(Register) + PayloadOffset));
-
- stubJit.storeCell(
- loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register)));
-
- stubJit.storeCell(
- baseForGetGPR,
- calleeFrame.withOffset(
- virtualRegisterForArgument(0).offset() * sizeof(Register)));
-
- if (kind == CallSetter) {
- stubJit.storeValue(
- valueRegs,
- calleeFrame.withOffset(
- virtualRegisterForArgument(1).offset() * sizeof(Register)));
- }
-
- MacroAssembler::Jump slowCase = stubJit.branchPtrWithPatch(
- MacroAssembler::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
- MacroAssembler::TrustedImmPtr(0));
-
- fastPathCall = stubJit.nearCall();
-
- stubJit.addPtr(
- MacroAssembler::TrustedImm32(alignedNumberOfBytesForCall),
- MacroAssembler::stackPointerRegister);
- if (kind == CallGetter)
- stubJit.setupResults(valueRegs);
-
- done.append(stubJit.jump());
- slowCase.link(&stubJit);
-
- stubJit.move(loadedValueGPR, GPRInfo::regT0);
-#if USE(JSVALUE32_64)
- stubJit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
-#endif
- stubJit.move(MacroAssembler::TrustedImmPtr(callLinkInfo.get()), GPRInfo::regT2);
- slowPathCall = stubJit.nearCall();
-
- stubJit.addPtr(
- MacroAssembler::TrustedImm32(alignedNumberOfBytesForCall),
- MacroAssembler::stackPointerRegister);
- if (kind == CallGetter)
- stubJit.setupResults(valueRegs);
-
- done.append(stubJit.jump());
- returnUndefined.link(&stubJit);
-
- if (kind == CallGetter)
- stubJit.moveTrustedValue(jsUndefined(), valueRegs);
-
- done.link(&stubJit);
- } else {
- // getter: EncodedJSValue (*GetValueFunc)(ExecState*, JSObject* slotBase, EncodedJSValue thisValue, PropertyName);
- // setter: void (*PutValueFunc)(ExecState*, JSObject* base, EncodedJSValue thisObject, EncodedJSValue value);
+ operationCall = stubJit.call();
#if USE(JSVALUE64)
- if (kind == CallCustomGetter)
- stubJit.setupArgumentsWithExecState(baseForAccessGPR, baseForGetGPR, MacroAssembler::TrustedImmPtr(propertyName.impl()));
- else
- stubJit.setupArgumentsWithExecState(baseForAccessGPR, baseForGetGPR, valueRegs.gpr());
+ stubJit.move(GPRInfo::returnValueGPR, resultGPR);
#else
- if (kind == CallCustomGetter)
- stubJit.setupArgumentsWithExecState(baseForAccessGPR, baseForGetGPR, MacroAssembler::TrustedImm32(JSValue::CellTag), MacroAssembler::TrustedImmPtr(propertyName.impl()));
- else
- stubJit.setupArgumentsWithExecState(baseForAccessGPR, baseForGetGPR, MacroAssembler::TrustedImm32(JSValue::CellTag), valueRegs.payloadGPR(), valueRegs.tagGPR());
+ stubJit.setupResults(resultGPR, resultTagGPR);
#endif
- stubJit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
+ MacroAssembler::Jump noException = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
- operationCall = stubJit.call();
- if (kind == CallCustomGetter)
- stubJit.setupResults(valueRegs);
- MacroAssembler::Jump noException = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
-
- stubJit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
- handlerCall = stubJit.call();
- stubJit.jumpToExceptionHandler();
-
- noException.link(&stubJit);
- }
+ stubJit.setupArgumentsExecState();
+ handlerCall = stubJit.call();
+ stubJit.jumpToExceptionHandler();
+
+ noException.link(&stubJit);
}
emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
- LinkBuffer patchBuffer(*vm, stubJit, exec->codeBlock(), JITCompilationCanFail);
- if (patchBuffer.didFailToAllocate())
- return false;
+ LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
- if (kind == CallCustomGetter || kind == CallCustomSetter) {
- patchBuffer.link(operationCall, custom);
+ if (isAccessor) {
+ patchBuffer.link(operationCall, operationFunction);
patchBuffer.link(handlerCall, lookupExceptionHandler);
- } else if (kind == CallGetter || kind == CallSetter) {
- callLinkInfo->setCallLocations(patchBuffer.locationOfNearCall(slowPathCall),
- patchBuffer.locationOf(addressOfLinkFunctionCheck),
- patchBuffer.locationOfNearCall(fastPathCall));
-
- patchBuffer.link(
- slowPathCall, CodeLocationLabel(vm->getCTIStub(linkCallThunkGenerator).code()));
- }
-
- MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
- exec->codeBlock(), patchBuffer,
- ("%s access stub for %s, return point %p",
- toString(kind), toCString(*exec->codeBlock()).data(),
- successLabel.executableAddress()));
-
- if (kind == CallGetter || kind == CallSetter)
- stubRoutine = adoptRef(new AccessorCallJITStubRoutine(code, *vm, WTF::move(callLinkInfo)));
- else
- stubRoutine = createJITStubRoutine(code, *vm, codeBlock->ownerExecutable(), true);
-
- return true;
-}
-
-enum InlineCacheAction {
- GiveUpOnCache,
- RetryCacheLater,
- AttemptToCache
-};
-
-static InlineCacheAction actionForCell(VM& vm, JSCell* cell)
-{
- Structure* structure = cell->structure(vm);
-
- TypeInfo typeInfo = structure->typeInfo();
- if (typeInfo.prohibitsPropertyCaching())
- return GiveUpOnCache;
-
- if (structure->isUncacheableDictionary()) {
- if (structure->hasBeenFlattenedBefore())
- return GiveUpOnCache;
- // Flattening could have changed the offset, so return early for another try.
- asObject(cell)->flattenDictionaryObject(vm);
- return RetryCacheLater;
}
-
- if (!structure->propertyAccessesAreCacheable())
- return GiveUpOnCache;
-
- return AttemptToCache;
+ stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
+ patchBuffer,
+ ("DFG prototype chain access stub for %s, return point %p",
+ toCString(*exec->codeBlock()).data(), successLabel.executableAddress()));
+ return ProtoChainGenerationSucceeded;
}
-static InlineCacheAction tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
{
- if (Options::forceICFailure())
- return GiveUpOnCache;
-
// FIXME: Write a test that proves we need to check for recursion here just
// like the interpreter does, then add a check for recursion.
CodeBlock* codeBlock = exec->codeBlock();
VM* vm = &exec->vm();
-
- if ((isJSArray(baseValue) || isJSString(baseValue)) && propertyName == exec->propertyNames().length) {
+
+ if (isJSArray(baseValue) && propertyName == exec->propertyNames().length) {
GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
#if USE(JSVALUE32_64)
GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
#endif
GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
-
+ GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
+ bool needToRestoreScratch = false;
+
MacroAssembler stubJit;
-
- if (isJSArray(baseValue)) {
- GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
- bool needToRestoreScratch = false;
-
- if (scratchGPR == InvalidGPRReg) {
+
+ if (scratchGPR == InvalidGPRReg) {
#if USE(JSVALUE64)
- scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
#else
- scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
-#endif
- stubJit.pushToSave(scratchGPR);
- needToRestoreScratch = true;
- }
-
- MacroAssembler::JumpList failureCases;
-
- stubJit.load8(MacroAssembler::Address(baseGPR, JSCell::indexingTypeOffset()), scratchGPR);
- failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IsArray)));
- failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IndexingShapeMask)));
-
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
- stubJit.load32(MacroAssembler::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
- failureCases.append(stubJit.branch32(MacroAssembler::LessThan, scratchGPR, MacroAssembler::TrustedImm32(0)));
-
- stubJit.move(scratchGPR, resultGPR);
-#if USE(JSVALUE64)
- stubJit.or64(AssemblyHelpers::TrustedImm64(TagTypeNumber), resultGPR);
-#elif USE(JSVALUE32_64)
- stubJit.move(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), resultTagGPR);
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
#endif
-
- MacroAssembler::Jump success, fail;
-
- emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
-
- LinkBuffer patchBuffer(*vm, stubJit, codeBlock, JITCompilationCanFail);
- if (patchBuffer.didFailToAllocate())
- return GiveUpOnCache;
-
- linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases);
-
- stubInfo.stubRoutine = FINALIZE_CODE_FOR_STUB(
- exec->codeBlock(), patchBuffer,
- ("GetById array length stub for %s, return point %p",
- toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
- stubInfo.patch.deltaCallToDone).executableAddress()));
-
- RepatchBuffer repatchBuffer(codeBlock);
- replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code());
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetById);
-
- return RetryCacheLater;
+ stubJit.pushToSave(scratchGPR);
+ needToRestoreScratch = true;
}
+
+ MacroAssembler::JumpList failureCases;
+
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSCell::structureOffset()), scratchGPR);
+ stubJit.load8(MacroAssembler::Address(scratchGPR, Structure::indexingTypeOffset()), scratchGPR);
+ failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IsArray)));
+ failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IndexingShapeMask)));
+
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+ stubJit.load32(MacroAssembler::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
+ failureCases.append(stubJit.branch32(MacroAssembler::LessThan, scratchGPR, MacroAssembler::TrustedImm32(0)));
- // String.length case
- MacroAssembler::Jump failure = stubJit.branch8(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::typeInfoTypeOffset()), MacroAssembler::TrustedImm32(StringType));
-
- stubJit.load32(MacroAssembler::Address(baseGPR, JSString::offsetOfLength()), resultGPR);
-
+ stubJit.move(scratchGPR, resultGPR);
#if USE(JSVALUE64)
stubJit.or64(AssemblyHelpers::TrustedImm64(TagTypeNumber), resultGPR);
#elif USE(JSVALUE32_64)
- stubJit.move(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), resultTagGPR);
+ stubJit.move(AssemblyHelpers::TrustedImm32(0xffffffff), resultTagGPR); // JSValue::Int32Tag
#endif
- MacroAssembler::Jump success = stubJit.jump();
-
- LinkBuffer patchBuffer(*vm, stubJit, codeBlock, JITCompilationCanFail);
- if (patchBuffer.didFailToAllocate())
- return GiveUpOnCache;
+ MacroAssembler::Jump success, fail;
- patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
- patchBuffer.link(failure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
-
- stubInfo.stubRoutine = FINALIZE_CODE_FOR_STUB(
- exec->codeBlock(), patchBuffer,
- ("GetById string length stub for %s, return point %p",
+ emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
+
+ LinkBuffer patchBuffer(*vm, &stubJit, codeBlock);
+
+ linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases);
+
+ stubInfo.stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
+ patchBuffer,
+ ("DFG GetById array length stub for %s, return point %p",
toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
stubInfo.patch.deltaCallToDone).executableAddress()));
-
+
RepatchBuffer repatchBuffer(codeBlock);
replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code());
repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetById);
-
- return RetryCacheLater;
+
+ return true;
}
+
+ // FIXME: should support length access for String.
// FIXME: Cache property access for immediates.
if (!baseValue.isCell())
- return GiveUpOnCache;
-
- if (!slot.isCacheable() && !slot.isUnset())
- return GiveUpOnCache;
-
+ return false;
JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure(*vm);
-
- InlineCacheAction action = actionForCell(*vm, baseCell);
- if (action != AttemptToCache)
- return action;
+ Structure* structure = baseCell->structure();
+ if (!slot.isCacheable())
+ return false;
+ if (!structure->propertyAccessesAreCacheable())
+ return false;
// Optimize self access.
- if (slot.isCacheableValue()
- && slot.slotBase() == baseValue
- && !slot.watchpointSet()
- && MacroAssembler::isCompactPtrAlignedAddressOffset(maxOffsetRelativeToPatchedStorage(slot.cachedOffset()))) {
- structure->startWatchingPropertyForReplacements(*vm, slot.cachedOffset());
+ if (slot.slotBase() == baseValue) {
+ if (!slot.isCacheableValue()
+ || !MacroAssembler::isCompactPtrAlignedAddressOffset(maxOffsetRelativeToPatchedStorage(slot.cachedOffset()))) {
+ repatchCall(codeBlock, stubInfo.callReturnLocation, operationGetByIdBuildList);
+ return true;
+ }
+
repatchByIdSelfAccess(*vm, codeBlock, stubInfo, structure, propertyName, slot.cachedOffset(), operationGetByIdBuildList, true);
stubInfo.initGetByIdSelf(*vm, codeBlock->ownerExecutable(), structure);
- return RetryCacheLater;
+ return true;
+ }
+
+ if (structure->isDictionary())
+ return false;
+
+ if (!stubInfo.patch.registersFlushed) {
+ // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
+ // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
+ // if registers were not flushed, don't do non-Value caching.
+ if (!slot.isCacheableValue())
+ return false;
}
+
+ PropertyOffset offset = slot.cachedOffset();
+ size_t count = normalizePrototypeChainForChainAccess(exec, baseValue, slot.slotBase(), propertyName, offset);
+ if (count == InvalidPrototypeChain)
+ return false;
- repatchCall(codeBlock, stubInfo.callReturnLocation, operationGetByIdBuildList);
- return RetryCacheLater;
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+ if (generateProtoChainAccessStub(exec, slot, propertyName, stubInfo, prototypeChain, count, offset,
+ structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone),
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase), stubInfo.stubRoutine) == ProtoChainGenerationFailed)
+ return false;
+
+ RepatchBuffer repatchBuffer(codeBlock);
+ replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code());
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetByIdBuildList);
+
+ stubInfo.initGetByIdChain(*vm, codeBlock->ownerExecutable(), structure, prototypeChain, count, slot.isCacheableValue());
+ return true;
}
void repatchGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
{
GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- if (tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo) == GiveUpOnCache)
+ bool cached = tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo);
+ if (!cached)
repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
}
+static bool getPolymorphicStructureList(
+ VM* vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
+ PolymorphicAccessStructureList*& polymorphicStructureList, int& listIndex,
+ CodeLocationLabel& slowCase)
+{
+ slowCase = stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase);
+
+ if (stubInfo.accessType == access_unset) {
+ RELEASE_ASSERT(!stubInfo.stubRoutine);
+ polymorphicStructureList = new PolymorphicAccessStructureList();
+ stubInfo.initGetByIdSelfList(polymorphicStructureList, 0, false);
+ listIndex = 0;
+ } else if (stubInfo.accessType == access_get_by_id_self) {
+ RELEASE_ASSERT(!stubInfo.stubRoutine);
+ polymorphicStructureList = new PolymorphicAccessStructureList(*vm, codeBlock->ownerExecutable(), JITStubRoutine::createSelfManagedRoutine(slowCase), stubInfo.u.getByIdSelf.baseObjectStructure.get(), true);
+ stubInfo.initGetByIdSelfList(polymorphicStructureList, 1, true);
+ listIndex = 1;
+ } else if (stubInfo.accessType == access_get_by_id_chain) {
+ RELEASE_ASSERT(!!stubInfo.stubRoutine);
+ slowCase = CodeLocationLabel(stubInfo.stubRoutine->code().code());
+ polymorphicStructureList = new PolymorphicAccessStructureList(*vm, codeBlock->ownerExecutable(), stubInfo.stubRoutine, stubInfo.u.getByIdChain.baseObjectStructure.get(), stubInfo.u.getByIdChain.chain.get(), true);
+ stubInfo.stubRoutine.clear();
+ stubInfo.initGetByIdSelfList(polymorphicStructureList, 1, false);
+ listIndex = 1;
+ } else {
+ RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_self_list);
+ polymorphicStructureList = stubInfo.u.getByIdSelfList.structureList;
+ listIndex = stubInfo.u.getByIdSelfList.listSize;
+ slowCase = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code());
+ }
+
+ if (listIndex == POLYMORPHIC_LIST_CACHE_SIZE)
+ return false;
+
+ RELEASE_ASSERT(listIndex < POLYMORPHIC_LIST_CACHE_SIZE);
+ return true;
+}
+
static void patchJumpToGetByIdStub(CodeBlock* codeBlock, StructureStubInfo& stubInfo, JITStubRoutine* stubRoutine)
{
- RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_list);
+ RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_self_list);
RepatchBuffer repatchBuffer(codeBlock);
- if (stubInfo.u.getByIdList.list->didSelfPatching()) {
+ if (stubInfo.u.getByIdSelfList.didSelfPatching) {
repatchBuffer.relink(
stubInfo.callReturnLocation.jumpAtOffset(
stubInfo.patch.deltaCallToJump),
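
The generateProtoChainAccessStub added in the hunk above emits, for each cached prototype chain: a structure check on the receiver, a transition check per prototype (either a watchpoint with no runtime code, or an explicit structure compare), and finally a load from the slot base, inline or through the butterfly. As plain C++, the runtime behavior of such a stub is roughly the following sketch (toy types only; the real thing is the emitted machine code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct ChainStructure { uint32_t id; };
    struct ChainObject {
        ChainStructure* structure;
        std::vector<uint64_t> storage;               // the slot base's property storage
    };
    struct ChainLink {
        ChainObject* prototype;
        ChainStructure* expectedStructure;           // structure the stub was compiled against
    };

    // Returns true and fills 'result' on a cache hit; false means "jump to the slow case".
    bool protoChainStubSketch(ChainObject* base, ChainStructure* expectedBaseStructure,
                              const std::vector<ChainLink>& chain, size_t cachedOffset,
                              uint64_t& result)
    {
        if (base->structure != expectedBaseStructure)
            return false;                            // receiver structure check
        for (const ChainLink& link : chain) {
            if (link.prototype->structure != link.expectedStructure)
                return false;                        // per-prototype transition check
        }
        ChainObject* slotBase = chain.empty() ? base : chain.back().prototype;
        result = slotBase->storage[cachedOffset];    // load from the object that actually holds the property
        return true;
    }
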
@@ -773,95 +536,205 @@ static void patchJumpToGetByIdStub(CodeBlock* codeBlock, StructureStubInfo& stub
replaceWithJump(repatchBuffer, stubInfo, stubRoutine->code().code());
}
-static InlineCacheAction tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& ident, const PropertySlot& slot, StructureStubInfo& stubInfo)
+static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& ident, const PropertySlot& slot, StructureStubInfo& stubInfo)
{
if (!baseValue.isCell()
- || (!slot.isCacheable() && !slot.isUnset()))
- return GiveUpOnCache;
-
- JSCell* baseCell = baseValue.asCell();
- bool loadTargetFromProxy = false;
- if (baseCell->type() == PureForwardingProxyType) {
- baseValue = jsCast<JSProxy*>(baseCell)->target();
- baseCell = baseValue.asCell();
- loadTargetFromProxy = true;
- }
+ || !slot.isCacheable()
+ || !baseValue.asCell()->structure()->propertyAccessesAreCacheable())
+ return false;
- VM* vm = &exec->vm();
CodeBlock* codeBlock = exec->codeBlock();
+ VM* vm = &exec->vm();
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+
+ if (slot.slotBase() == baseValue) {
+ if (!stubInfo.patch.registersFlushed) {
+ // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
+ // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
+ // if registers were not flushed, don't do non-Value caching.
+ if (!slot.isCacheableValue())
+ return false;
+ }
+
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex;
+ CodeLocationLabel slowCase;
- InlineCacheAction action = actionForCell(*vm, baseCell);
- if (action != AttemptToCache)
- return action;
-
- Structure* structure = baseCell->structure(*vm);
- TypeInfo typeInfo = structure->typeInfo();
+ if (!getPolymorphicStructureList(vm, codeBlock, stubInfo, polymorphicStructureList, listIndex, slowCase))
+ return false;
+
+ stubInfo.u.getByIdSelfList.listSize++;
+
+ GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
+#endif
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
+ GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
+
+ CCallHelpers stubJit(vm, codeBlock);
+
+ MacroAssembler::Jump wrongStruct = stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure));
+
+ // The strategy we use for stubs is as follows:
+ // 1) Call DFG helper that calls the getter.
+ // 2) Check if there was an exception, and if there was, call yet another
+ // helper.
+
+ bool isDirect = false;
+ MacroAssembler::Call operationCall;
+ MacroAssembler::Call handlerCall;
+ FunctionPtr operationFunction;
+ MacroAssembler::Jump success;
+
+ if (slot.isCacheableGetter() || slot.isCacheableCustom()) {
+ if (slot.isCacheableGetter()) {
+ ASSERT(scratchGPR != InvalidGPRReg);
+ ASSERT(baseGPR != scratchGPR);
+ if (isInlineOffset(slot.cachedOffset())) {
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+#else
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+#endif
+ } else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+#else
+ stubJit.load32(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+#endif
+ }
+ stubJit.setupArguments(callFrameRegister, baseGPR, scratchGPR);
+ operationFunction = operationCallGetter;
+ } else {
+ stubJit.setupArguments(
+ callFrameRegister, baseGPR,
+ MacroAssembler::TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()),
+ MacroAssembler::TrustedImmPtr(ident.impl()));
+ operationFunction = operationCallCustomGetter;
+ }
+
+ // Need to make sure that whenever this call is made in the future, we remember the
+ // place that we made it from. It just so happens to be the place that we are at
+ // right now!
+ stubJit.store32(
+ MacroAssembler::TrustedImm32(exec->locationAsRawBits()),
+ CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
+
+ operationCall = stubJit.call();
+#if USE(JSVALUE64)
+ stubJit.move(GPRInfo::returnValueGPR, resultGPR);
+#else
+ stubJit.setupResults(resultGPR, resultTagGPR);
+#endif
+ success = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+ stubJit.setupArgumentsExecState();
+ handlerCall = stubJit.call();
+ stubJit.jumpToExceptionHandler();
+ } else {
+ if (isInlineOffset(slot.cachedOffset())) {
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
+#else
+ if (baseGPR == resultTagGPR) {
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ } else {
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+ }
+#endif
+ } else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
+#else
+ stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+#endif
+ }
+ success = stubJit.jump();
+ isDirect = true;
+ }
- if (stubInfo.patch.spillMode == NeedToSpill) {
+ LinkBuffer patchBuffer(*vm, &stubJit, codeBlock);
+
+ patchBuffer.link(wrongStruct, slowCase);
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
+ if (!isDirect) {
+ patchBuffer.link(operationCall, operationFunction);
+ patchBuffer.link(handlerCall, lookupExceptionHandler);
+ }
+
+ RefPtr<JITStubRoutine> stubRoutine =
+ createJITStubRoutine(
+ FINALIZE_DFG_CODE(
+ patchBuffer,
+ ("DFG GetById polymorphic list access for %s, return point %p",
+ toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
+ stubInfo.patch.deltaCallToDone).executableAddress())),
+ *vm,
+ codeBlock->ownerExecutable(),
+ slot.isCacheableGetter() || slot.isCacheableCustom());
+
+ polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);
+
+ patchJumpToGetByIdStub(codeBlock, stubInfo, stubRoutine.get());
+ return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
+ }
+
+ if (baseValue.asCell()->structure()->typeInfo().prohibitsPropertyCaching()
+ || baseValue.asCell()->structure()->isDictionary())
+ return false;
+
+ if (!stubInfo.patch.registersFlushed) {
// We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
// non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
// if registers were not flushed, don't do non-Value caching.
- if (!slot.isCacheableValue() && !slot.isUnset())
- return GiveUpOnCache;
+ if (!slot.isCacheableValue())
+ return false;
}
-
- PropertyOffset offset = slot.isUnset() ? invalidOffset : slot.cachedOffset();
- ObjectPropertyConditionSet conditionSet;
- if (slot.isUnset() || slot.slotBase() != baseValue) {
- if (typeInfo.prohibitsPropertyCaching() || structure->isDictionary())
- return GiveUpOnCache;
-
- if (slot.isUnset())
- conditionSet = generateConditionsForPropertyMiss(*vm, codeBlock->ownerExecutable(), exec, structure, ident.impl());
- else
- conditionSet = generateConditionsForPrototypePropertyHit(*vm, codeBlock->ownerExecutable(), exec, structure, slot.slotBase(), ident.impl());
- if (!conditionSet.isValid())
- return GiveUpOnCache;
+ PropertyOffset offset = slot.cachedOffset();
+ size_t count = normalizePrototypeChainForChainAccess(exec, baseValue, slot.slotBase(), ident, offset);
+ if (count == InvalidPrototypeChain)
+ return false;
- offset = slot.isUnset() ? invalidOffset : conditionSet.slotBaseCondition().offset();
- }
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
- PolymorphicGetByIdList* list = PolymorphicGetByIdList::from(stubInfo);
- if (list->isFull()) {
- // We need this extra check because of recursion.
- return GiveUpOnCache;
- }
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex;
+ CodeLocationLabel slowCase;
+ if (!getPolymorphicStructureList(vm, codeBlock, stubInfo, polymorphicStructureList, listIndex, slowCase))
+ return false;
+
+ stubInfo.u.getByIdProtoList.listSize++;
RefPtr<JITStubRoutine> stubRoutine;
- bool result = generateByIdStub(
- exec, kindFor(slot), ident, customFor(slot), stubInfo, conditionSet, slot.slotBase(), offset,
- structure, loadTargetFromProxy, slot.watchpointSet(),
+
+ if (generateProtoChainAccessStub(exec, slot, ident, stubInfo, prototypeChain, count, offset, structure,
stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone),
- CodeLocationLabel(list->currentSlowPathTarget(stubInfo)), stubRoutine);
- if (!result)
- return GiveUpOnCache;
-
- GetByIdAccess::AccessType accessType;
- if (slot.isCacheableValue())
- accessType = slot.watchpointSet() ? GetByIdAccess::WatchedStub : GetByIdAccess::SimpleStub;
- else if (slot.isUnset())
- accessType = GetByIdAccess::SimpleMiss;
- else if (slot.isCacheableGetter())
- accessType = GetByIdAccess::Getter;
- else
- accessType = GetByIdAccess::CustomGetter;
+ slowCase, stubRoutine) == ProtoChainGenerationFailed)
+ return false;
- list->addAccess(GetByIdAccess(
- *vm, codeBlock->ownerExecutable(), accessType, stubRoutine, structure,
- conditionSet));
+ polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, slot.isCacheableValue());
patchJumpToGetByIdStub(codeBlock, stubInfo, stubRoutine.get());
- return list->isFull() ? GiveUpOnCache : RetryCacheLater;
+ return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
}
void buildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
{
GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- if (tryBuildGetByIDList(exec, baseValue, propertyName, slot, stubInfo) == GiveUpOnCache)
+ bool dontChangeCall = tryBuildGetByIDList(exec, baseValue, propertyName, slot, stubInfo);
+ if (!dontChangeCall)
repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
}
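
For the polymorphic list handling above: getPolymorphicStructureList grows a PolymorphicAccessStructureList one stub at a time, and each newly generated stub's wrong-structure branch is linked to the previously installed stub (or to the generic slow case), so at most POLYMORPHIC_LIST_CACHE_SIZE structure compares run before giving up. That chained dispatch is equivalent to this simple loop, written with hypothetical toy types rather than the real stub machinery:

    #include <cstdint>
    #include <vector>

    struct ListStructure { uint32_t id; };
    struct ListCell { ListStructure* structure; };

    struct PolymorphicEntrySketch {
        ListStructure* structure;                    // structure this stub was compiled for
        uint64_t (*stub)(ListCell*);                 // the generated access stub for that structure
    };

    uint64_t polymorphicGetByIdSketch(ListCell* base,
                                      const std::vector<PolymorphicEntrySketch>& list,
                                      uint64_t (*genericSlowPath)(ListCell*))
    {
        for (const PolymorphicEntrySketch& entry : list)   // bounded by POLYMORPHIC_LIST_CACHE_SIZE
            if (base->structure == entry.structure)
                return entry.stub(base);
        return genericSlowPath(base);                      // no cached structure matched
    }
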
@@ -889,16 +762,79 @@ static V_JITOperation_ESsiJJI appropriateListBuildingPutByIdFunction(const PutPr
return operationPutByIdNonStrictBuildList;
}
-static bool emitPutReplaceStub(
+#if ENABLE(GGC)
+static MacroAssembler::Call storeToWriteBarrierBuffer(CCallHelpers& jit, GPRReg cell, GPRReg scratch1, GPRReg scratch2, GPRReg callFrameRegister, ScratchRegisterAllocator& allocator)
+{
+ ASSERT(scratch1 != scratch2);
+ WriteBarrierBuffer* writeBarrierBuffer = &jit.vm()->heap.writeBarrierBuffer();
+ jit.move(MacroAssembler::TrustedImmPtr(writeBarrierBuffer), scratch1);
+ jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2);
+ MacroAssembler::Jump needToFlush = jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset()));
+
+ jit.add32(MacroAssembler::TrustedImm32(1), scratch2);
+ jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()));
+
+ jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1);
+ // We use an offset of -sizeof(void*) because we already added 1 to scratch2.
+ jit.storePtr(cell, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));
+
+ MacroAssembler::Jump done = jit.jump();
+ needToFlush.link(&jit);
+
+ ScratchBuffer* scratchBuffer = jit.vm()->scratchBufferForSize(allocator.desiredScratchBufferSize());
+ allocator.preserveUsedRegistersToScratchBuffer(jit, scratchBuffer, scratch1);
+
+ unsigned bytesFromBase = allocator.numberOfReusedRegisters() * sizeof(void*);
+ unsigned bytesToSubtract = 0;
+#if CPU(X86)
+ bytesToSubtract += 2 * sizeof(void*);
+ bytesFromBase += bytesToSubtract;
+#endif
+ unsigned currentAlignment = bytesFromBase % stackAlignmentBytes();
+ bytesToSubtract += currentAlignment;
+
+ if (bytesToSubtract)
+ jit.subPtr(MacroAssembler::TrustedImm32(bytesToSubtract), MacroAssembler::stackPointerRegister);
+
+ jit.setupArguments(callFrameRegister, cell);
+ MacroAssembler::Call call = jit.call();
+
+ if (bytesToSubtract)
+ jit.addPtr(MacroAssembler::TrustedImm32(bytesToSubtract), MacroAssembler::stackPointerRegister);
+ allocator.restoreUsedRegistersFromScratchBuffer(jit, scratchBuffer, scratch1);
+
+ done.link(&jit);
+
+ return call;
+}
+
+static MacroAssembler::Call writeBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, GPRReg callFrameRegister, ScratchRegisterAllocator& allocator)
+{
+ ASSERT(owner != scratch1);
+ ASSERT(owner != scratch2);
+
+ MacroAssembler::Jump definitelyNotMarked = DFG::SpeculativeJIT::genericWriteBarrier(jit, owner, scratch1, scratch2);
+ MacroAssembler::Call call = storeToWriteBarrierBuffer(jit, owner, scratch1, scratch2, callFrameRegister, allocator);
+ definitelyNotMarked.link(&jit);
+ return call;
+}
+#endif // ENABLE(GGC)
+
+static void emitPutReplaceStub(
ExecState* exec,
+ JSValue,
const Identifier&,
const PutPropertySlot& slot,
StructureStubInfo& stubInfo,
+ PutKind,
Structure* structure,
CodeLocationLabel failureLabel,
RefPtr<JITStubRoutine>& stubRoutine)
{
VM* vm = &exec->vm();
+#if ENABLE(GGC)
+ GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
+#endif
GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
#if USE(JSVALUE32_64)
GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
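
The GGC helpers added above (storeToWriteBarrierBuffer / writeBarrier) inline the common case of the generational write barrier: bump an index into a fixed-capacity buffer of remembered cells and only call out to flush when the buffer is full. The emitted code increments first and then stores at index minus one (hence the -sizeof(void*) offset); the equivalent logic in plain C++, with hypothetical names, is:

    struct WriteBarrierBufferSketch {
        void** buffer;               // what the code loads via WriteBarrierBuffer::bufferOffset()
        unsigned currentIndex;       // WriteBarrierBuffer::currentIndexOffset()
        unsigned capacity;           // WriteBarrierBuffer::capacityOffset()
    };

    void rememberOwnerSketch(WriteBarrierBufferSketch& wb, void* ownerCell, void (*flush)(void* cell))
    {
        if (wb.currentIndex >= wb.capacity) {        // the branch32(AboveOrEqual, ...) check above
            flush(ownerCell);                        // slow path: spill registers and call the flush operation
            return;
        }
        wb.buffer[wb.currentIndex++] = ownerCell;    // fast path: append the owner cell to the buffer
    }
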
@@ -913,15 +849,18 @@ static bool emitPutReplaceStub(
allocator.lock(valueGPR);
GPRReg scratchGPR1 = allocator.allocateScratchGPR();
+#if ENABLE(GGC)
+ GPRReg scratchGPR2 = allocator.allocateScratchGPR();
+#endif
CCallHelpers stubJit(vm, exec->codeBlock());
allocator.preserveReusedRegistersByPushing(stubJit);
- MacroAssembler::Jump badStructure = branchStructure(stubJit,
+ MacroAssembler::Jump badStructure = stubJit.branchPtr(
MacroAssembler::NotEqual,
- MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()),
- structure);
+ MacroAssembler::Address(baseGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(structure));
#if USE(JSVALUE64)
if (isInlineOffset(slot.cachedOffset()))
@@ -941,6 +880,10 @@ static bool emitPutReplaceStub(
}
#endif
+#if ENABLE(GGC)
+ MacroAssembler::Call writeBarrierOperation = writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, callFrameRegister, allocator);
+#endif
+
MacroAssembler::Jump success;
MacroAssembler::Jump failure;
@@ -956,63 +899,36 @@ static bool emitPutReplaceStub(
failure = badStructure;
}
- LinkBuffer patchBuffer(*vm, stubJit, exec->codeBlock(), JITCompilationCanFail);
- if (patchBuffer.didFailToAllocate())
- return false;
-
+ LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
+#if ENABLE(GGC)
+ patchBuffer.link(writeBarrierOperation, operationFlushWriteBarrierBuffer);
+#endif
patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
patchBuffer.link(failure, failureLabel);
- stubRoutine = FINALIZE_CODE_FOR_STUB(
- exec->codeBlock(), patchBuffer,
- ("PutById replace stub for %s, return point %p",
+ stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
+ patchBuffer,
+ ("DFG PutById replace stub for %s, return point %p",
toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
stubInfo.patch.deltaCallToDone).executableAddress()));
-
- return true;
}
-static bool emitPutTransitionStub(
- ExecState* exec, VM* vm, Structure*& structure, const Identifier& ident,
- const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind,
- Structure*& oldStructure, ObjectPropertyConditionSet& conditionSet)
+static void emitPutTransitionStub(
+ ExecState* exec,
+ JSValue,
+ const Identifier&,
+ const PutPropertySlot& slot,
+ StructureStubInfo& stubInfo,
+ PutKind putKind,
+ Structure* structure,
+ Structure* oldStructure,
+ StructureChain* prototypeChain,
+ CodeLocationLabel failureLabel,
+ RefPtr<JITStubRoutine>& stubRoutine)
{
- PropertyName pname(ident);
- oldStructure = structure;
- if (!oldStructure->isObject() || oldStructure->isDictionary() || parseIndex(pname))
- return false;
-
- PropertyOffset propertyOffset;
- structure = Structure::addPropertyTransitionToExistingStructureConcurrently(oldStructure, ident.impl(), 0, propertyOffset);
-
- if (!structure || !structure->isObject() || structure->isDictionary() || !structure->propertyAccessesAreCacheable())
- return false;
-
- // Skip optimizing the case where we need a realloc, if we don't have
- // enough registers to make it happen.
- if (GPRInfo::numberOfRegisters < 6
- && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
- && oldStructure->outOfLineCapacity()) {
- return false;
- }
-
- // Skip optimizing the case where we need realloc, and the structure has
- // indexing storage.
- // FIXME: We shouldn't skip this! Implement it!
- // https://bugs.webkit.org/show_bug.cgi?id=130914
- if (oldStructure->couldHaveIndexingHeader())
- return false;
-
- if (putKind == NotDirect) {
- conditionSet = generateConditionsForPropertySetterMiss(
- *vm, exec->codeBlock()->ownerExecutable(), exec, structure, ident.impl());
- if (!conditionSet.isValid())
- return false;
- }
-
- CodeLocationLabel failureLabel = stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase);
- RefPtr<JITStubRoutine>& stubRoutine = stubInfo.stubRoutine;
+ VM* vm = &exec->vm();
+ GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
#if USE(JSVALUE32_64)
GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
@@ -1059,13 +975,19 @@ static bool emitPutTransitionStub(
ASSERT(oldStructure->transitionWatchpointSetHasBeenInvalidated());
- failureCases.append(branchStructure(stubJit,
- MacroAssembler::NotEqual,
- MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()),
- oldStructure));
+ failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(oldStructure)));
- checkObjectPropertyConditions(
- conditionSet, exec->codeBlock(), stubInfo, stubJit, failureCases, scratchGPR1);
+ addStructureTransitionCheck(
+ oldStructure->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
+ scratchGPR1);
+
+ if (putKind == NotDirect) {
+ for (WriteBarrier<Structure>* it = prototypeChain->head(); *it; ++it) {
+ addStructureTransitionCheck(
+ (*it)->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
+ scratchGPR1);
+ }
+ }
MacroAssembler::JumpList slowPath;
@@ -1104,15 +1026,7 @@ static bool emitPutTransitionStub(
scratchGPR1HasStorage = true;
}
- ASSERT(oldStructure->typeInfo().type() == structure->typeInfo().type());
- ASSERT(oldStructure->typeInfo().inlineTypeFlags() == structure->typeInfo().inlineTypeFlags());
- ASSERT(oldStructure->indexingType() == structure->indexingType());
-#if USE(JSVALUE64)
- uint32_t val = structure->id();
-#else
- uint32_t val = reinterpret_cast<uint32_t>(structure->id());
-#endif
- stubJit.store32(MacroAssembler::TrustedImm32(val), MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()));
+ stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
#if USE(JSVALUE64)
if (isInlineOffset(slot.cachedOffset()))
stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
@@ -1133,38 +1047,10 @@ static bool emitPutTransitionStub(
}
#endif
- ScratchBuffer* scratchBuffer = nullptr;
-
#if ENABLE(GGC)
- MacroAssembler::Call callFlushWriteBarrierBuffer;
- MacroAssembler::Jump ownerIsRememberedOrInEden = stubJit.jumpIfIsRememberedOrInEden(baseGPR);
- {
- WriteBarrierBuffer& writeBarrierBuffer = stubJit.vm()->heap.writeBarrierBuffer();
- stubJit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2);
- MacroAssembler::Jump needToFlush =
- stubJit.branch32(MacroAssembler::AboveOrEqual, scratchGPR2, MacroAssembler::TrustedImm32(writeBarrierBuffer.capacity()));
-
- stubJit.add32(MacroAssembler::TrustedImm32(1), scratchGPR2);
- stubJit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress());
-
- stubJit.move(MacroAssembler::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR1);
- // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2.
- stubJit.storePtr(baseGPR, MacroAssembler::BaseIndex(scratchGPR1, scratchGPR2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));
-
- MacroAssembler::Jump doneWithBarrier = stubJit.jump();
- needToFlush.link(&stubJit);
-
- scratchBuffer = vm->scratchBufferForSize(allocator.desiredScratchBufferSizeForCall());
- allocator.preserveUsedRegistersToScratchBufferForCall(stubJit, scratchBuffer, scratchGPR2);
- stubJit.setupArgumentsWithExecState(baseGPR);
- callFlushWriteBarrierBuffer = stubJit.call();
- allocator.restoreUsedRegistersFromScratchBufferForCall(stubJit, scratchBuffer, scratchGPR2);
-
- doneWithBarrier.link(&stubJit);
- }
- ownerIsRememberedOrInEden.link(&stubJit);
+ MacroAssembler::Call writeBarrierOperation = writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, callFrameRegister, allocator);
#endif
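
The inline sequence deleted just above is the generational write-barrier fast path: skip owners that are already remembered or live in eden, append the owner to a fixed-size buffer, and only call out to a flush operation when the buffer is full. A rough standalone sketch of that behaviour, using toy names rather than the real Heap/WriteBarrierBuffer API:

// Toy model of the buffered write barrier (illustrative names only).
#include <cstddef>
#include <vector>

struct ToyCell {
    bool rememberedOrInEden = false;
};

struct ToyWriteBarrierBuffer {
    static constexpr std::size_t capacity = 4;
    ToyCell* buffer[capacity];
    std::size_t currentIndex = 0;

    void flush(std::vector<ToyCell*>& rememberedSet) {
        for (std::size_t i = 0; i < currentIndex; ++i) {
            rememberedSet.push_back(buffer[i]);
            buffer[i]->rememberedOrInEden = true;
        }
        currentIndex = 0;
    }

    // Mirrors the JIT fast path: early-out branch, bounds check, store, bump index.
    void add(ToyCell* owner, std::vector<ToyCell*>& rememberedSet) {
        if (owner->rememberedOrInEden)
            return;                   // the ownerIsRememberedOrInEden jump
        if (currentIndex >= capacity) // the needToFlush branch -> operation call
            flush(rememberedSet);
        buffer[currentIndex++] = owner;
    }
};

int main() {
    ToyWriteBarrierBuffer wbb;
    std::vector<ToyCell*> rememberedSet;
    ToyCell cells[6];
    for (ToyCell& cell : cells)
        wbb.add(&cell, rememberedSet);
    wbb.flush(rememberedSet);
    return rememberedSet.size() == 6 ? 0 : 1;
}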
-
+
MacroAssembler::Jump success;
MacroAssembler::Jump failure;
@@ -1185,31 +1071,27 @@ static bool emitPutTransitionStub(
slowPath.link(&stubJit);
allocator.restoreReusedRegistersByPopping(stubJit);
- if (!scratchBuffer)
- scratchBuffer = vm->scratchBufferForSize(allocator.desiredScratchBufferSizeForCall());
- allocator.preserveUsedRegistersToScratchBufferForCall(stubJit, scratchBuffer, scratchGPR1);
+ ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(allocator.desiredScratchBufferSize());
+ allocator.preserveUsedRegistersToScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
#if USE(JSVALUE64)
- stubJit.setupArgumentsWithExecState(baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR);
+ stubJit.setupArguments(callFrameRegister, baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR);
#else
- stubJit.setupArgumentsWithExecState(baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR, valueTagGPR);
+ stubJit.setupArguments(callFrameRegister, baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR, valueTagGPR);
#endif
operationCall = stubJit.call();
- allocator.restoreUsedRegistersFromScratchBufferForCall(stubJit, scratchBuffer, scratchGPR1);
+ allocator.restoreUsedRegistersFromScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
successInSlowPath = stubJit.jump();
}
- LinkBuffer patchBuffer(*vm, stubJit, exec->codeBlock(), JITCompilationCanFail);
- if (patchBuffer.didFailToAllocate())
- return false;
-
+ LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
+#if ENABLE(GGC)
+ patchBuffer.link(writeBarrierOperation, operationFlushWriteBarrierBuffer);
+#endif
patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
if (allocator.didReuseRegisters())
patchBuffer.link(failure, failureLabel);
else
patchBuffer.link(failureCases, failureLabel);
-#if ENABLE(GGC)
- patchBuffer.link(callFlushWriteBarrierBuffer, operationFlushWriteBarrierBuffer);
-#endif
if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
patchBuffer.link(operationCall, operationReallocateStorageAndFinishPut);
patchBuffer.link(successInSlowPath, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
@@ -1217,9 +1099,9 @@ static bool emitPutTransitionStub(
stubRoutine =
createJITStubRoutine(
- FINALIZE_CODE_FOR(
- exec->codeBlock(), patchBuffer,
- ("PutById %stransition stub (%p -> %p) for %s, return point %p",
+ FINALIZE_DFG_CODE(
+ patchBuffer,
+ ("DFG PutById %stransition stub (%p -> %p) for %s, return point %p",
structure->outOfLineCapacity() != oldStructure->outOfLineCapacity() ? "reallocating " : "",
oldStructure, structure,
toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
@@ -1228,35 +1110,52 @@ static bool emitPutTransitionStub(
exec->codeBlock()->ownerExecutable(),
structure->outOfLineCapacity() != oldStructure->outOfLineCapacity(),
structure);
-
- return true;
}
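
At a high level, both versions of emitPutTransitionStub generate the same shape of code: verify the structure the stub was compiled against (plus prototype-chain checks), grow the out-of-line storage if the new structure needs more room, store the value, and only then publish the new structure. A simplified sketch of that sequence with toy types; the prototype-chain checks and the write barrier shown above are deliberately elided.

// Toy model of a cached "add new property" transition (illustrative names only).
#include <cstddef>
#include <vector>

struct ToyShape {
    std::size_t slotCount; // number of property slots this shape describes
};

struct ToyObject {
    const ToyShape* shape = nullptr;
    std::vector<double> outOfLineStorage;
};

// Same order as the stub: failure checks first, then the mutation, with the
// shape pointer written last.
bool tryCachedTransitionPut(ToyObject& object, double value,
                            const ToyShape* oldShape, const ToyShape* newShape,
                            std::size_t cachedOffset)
{
    if (object.shape != oldShape)
        return false; // structure check failed: take the slow path
    if (newShape->slotCount > object.outOfLineStorage.size())
        object.outOfLineStorage.resize(newShape->slotCount); // the reallocation case
    object.outOfLineStorage[cachedOffset] = value;           // the put itself
    object.shape = newShape;                                  // publish the transition
    return true;
}

int main() {
    ToyShape oldShape{1};
    ToyShape newShape{2};
    ToyObject object;
    object.shape = &oldShape;
    object.outOfLineStorage.resize(oldShape.slotCount);
    bool ok = tryCachedTransitionPut(object, 42.0, &oldShape, &newShape, /* cachedOffset */ 1);
    return (ok && object.shape == &newShape && object.outOfLineStorage[1] == 42.0) ? 0 : 1;
}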
-static InlineCacheAction tryCachePutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
{
- if (Options::forceICFailure())
- return GiveUpOnCache;
-
CodeBlock* codeBlock = exec->codeBlock();
VM* vm = &exec->vm();
if (!baseValue.isCell())
- return GiveUpOnCache;
+ return false;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+ Structure* oldStructure = structure->previousID();
- if (!slot.isCacheablePut() && !slot.isCacheableCustom() && !slot.isCacheableSetter())
- return GiveUpOnCache;
-
+ if (!slot.isCacheable())
+ return false;
if (!structure->propertyAccessesAreCacheable())
- return GiveUpOnCache;
+ return false;
// Optimize self access.
- if (slot.base() == baseValue && slot.isCacheablePut()) {
+ if (slot.base() == baseValue) {
if (slot.type() == PutPropertySlot::NewProperty) {
-
- Structure* oldStructure;
- ObjectPropertyConditionSet conditionSet;
- if (!emitPutTransitionStub(exec, vm, structure, ident, slot, stubInfo, putKind, oldStructure, conditionSet))
- return GiveUpOnCache;
+ if (structure->isDictionary())
+ return false;
+
+ // Skip optimizing the case where we need a realloc, if we don't have
+ // enough registers to make it happen.
+ if (GPRInfo::numberOfRegisters < 6
+ && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity())
+ return false;
+
+ // Skip optimizing the case where we need realloc, and the structure has
+ // indexing storage.
+ if (oldStructure->couldHaveIndexingHeader())
+ return false;
+
+ if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain)
+ return false;
+
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+
+ emitPutTransitionStub(
+ exec, baseValue, ident, slot, stubInfo, putKind,
+ structure, oldStructure, prototypeChain,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase),
+ stubInfo.stubRoutine);
RepatchBuffer repatchBuffer(codeBlock);
repatchBuffer.relink(
@@ -1265,232 +1164,146 @@ static InlineCacheAction tryCachePutByID(ExecState* exec, JSValue baseValue, Str
CodeLocationLabel(stubInfo.stubRoutine->code().code()));
repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateListBuildingPutByIdFunction(slot, putKind));
- stubInfo.initPutByIdTransition(*vm, codeBlock->ownerExecutable(), oldStructure, structure, conditionSet, putKind == Direct);
+ stubInfo.initPutByIdTransition(*vm, codeBlock->ownerExecutable(), oldStructure, structure, prototypeChain, putKind == Direct);
- return RetryCacheLater;
+ return true;
}
if (!MacroAssembler::isPtrAlignedAddressOffset(offsetRelativeToPatchedStorage(slot.cachedOffset())))
- return GiveUpOnCache;
+ return false;
- structure->didCachePropertyReplacement(*vm, slot.cachedOffset());
repatchByIdSelfAccess(*vm, codeBlock, stubInfo, structure, ident, slot.cachedOffset(), appropriateListBuildingPutByIdFunction(slot, putKind), false);
stubInfo.initPutByIdReplace(*vm, codeBlock->ownerExecutable(), structure);
- return RetryCacheLater;
- }
-
- if ((slot.isCacheableCustom() || slot.isCacheableSetter())
- && stubInfo.patch.spillMode == DontSpill) {
- RefPtr<JITStubRoutine> stubRoutine;
-
- ObjectPropertyConditionSet conditionSet;
- PropertyOffset offset;
- if (slot.base() != baseValue) {
- if (slot.isCacheableCustom()) {
- conditionSet =
- generateConditionsForPrototypePropertyHitCustom(
- *vm, codeBlock->ownerExecutable(), exec, structure, slot.base(),
- ident.impl());
- } else {
- conditionSet =
- generateConditionsForPrototypePropertyHit(
- *vm, codeBlock->ownerExecutable(), exec, structure, slot.base(),
- ident.impl());
- }
- if (!conditionSet.isValid())
- return GiveUpOnCache;
- offset = slot.isCacheableCustom() ? invalidOffset : conditionSet.slotBaseCondition().offset();
- } else
- offset = slot.cachedOffset();
-
- PolymorphicPutByIdList* list;
- list = PolymorphicPutByIdList::from(putKind, stubInfo);
-
- bool result = generateByIdStub(
- exec, kindFor(slot), ident, customFor(slot), stubInfo, conditionSet, slot.base(),
- offset, structure, false, nullptr,
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone),
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase),
- stubRoutine);
- if (!result)
- return GiveUpOnCache;
-
- list->addAccess(PutByIdAccess::setter(
- *vm, codeBlock->ownerExecutable(),
- slot.isCacheableSetter() ? PutByIdAccess::Setter : PutByIdAccess::CustomSetter,
- structure, conditionSet, slot.customSetter(), stubRoutine));
-
- RepatchBuffer repatchBuffer(codeBlock);
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code()));
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateListBuildingPutByIdFunction(slot, putKind));
- RELEASE_ASSERT(!list->isFull());
- return RetryCacheLater;
+ return true;
}
- return GiveUpOnCache;
+ return false;
}
-void repatchPutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+void repatchPutByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
{
GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- if (tryCachePutByID(exec, baseValue, structure, propertyName, slot, stubInfo, putKind) == GiveUpOnCache)
+ bool cached = tryCachePutByID(exec, baseValue, propertyName, slot, stubInfo, putKind);
+ if (!cached)
repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
}
-static InlineCacheAction tryBuildPutByIdList(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
{
CodeBlock* codeBlock = exec->codeBlock();
VM* vm = &exec->vm();
if (!baseValue.isCell())
- return GiveUpOnCache;
-
- if (!slot.isCacheablePut() && !slot.isCacheableCustom() && !slot.isCacheableSetter())
- return GiveUpOnCache;
-
+ return false;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+ Structure* oldStructure = structure->previousID();
+
+ if (!slot.isCacheable())
+ return false;
if (!structure->propertyAccessesAreCacheable())
- return GiveUpOnCache;
+ return false;
// Optimize self access.
- if (slot.base() == baseValue && slot.isCacheablePut()) {
+ if (slot.base() == baseValue) {
PolymorphicPutByIdList* list;
RefPtr<JITStubRoutine> stubRoutine;
if (slot.type() == PutPropertySlot::NewProperty) {
- list = PolymorphicPutByIdList::from(putKind, stubInfo);
- if (list->isFull())
- return GiveUpOnCache; // Will get here due to recursion.
-
- Structure* oldStructure;
- ObjectPropertyConditionSet conditionSet;
- if (!emitPutTransitionStub(exec, vm, structure, propertyName, slot, stubInfo, putKind, oldStructure, conditionSet))
- return GiveUpOnCache;
-
- stubRoutine = stubInfo.stubRoutine;
+ if (structure->isDictionary())
+ return false;
+
+ // Skip optimizing the case where we need a realloc, if we don't have
+ // enough registers to make it happen.
+ if (GPRInfo::numberOfRegisters < 6
+ && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity())
+ return false;
+
+ // Skip optimizing the case where we need realloc, and the structure has
+ // indexing storage.
+ if (oldStructure->couldHaveIndexingHeader())
+ return false;
+
+ if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain)
+ return false;
+
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+
+ // We're now committed to creating the stub. Mogrify the meta-data accordingly.
+ list = PolymorphicPutByIdList::from(
+ putKind, stubInfo,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+
+ emitPutTransitionStub(
+ exec, baseValue, propertyName, slot, stubInfo, putKind,
+ structure, oldStructure, prototypeChain,
+ CodeLocationLabel(list->currentSlowPathTarget()),
+ stubRoutine);
+
list->addAccess(
PutByIdAccess::transition(
*vm, codeBlock->ownerExecutable(),
- oldStructure, structure, conditionSet,
+ oldStructure, structure, prototypeChain,
stubRoutine));
-
} else {
- list = PolymorphicPutByIdList::from(putKind, stubInfo);
- if (list->isFull())
- return GiveUpOnCache; // Will get here due to recursion.
-
- structure->didCachePropertyReplacement(*vm, slot.cachedOffset());
-
// We're now committed to creating the stub. Mogrify the meta-data accordingly.
- bool result = emitPutReplaceStub(
- exec, propertyName, slot, stubInfo,
+ list = PolymorphicPutByIdList::from(
+ putKind, stubInfo,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+
+ emitPutReplaceStub(
+ exec, baseValue, propertyName, slot, stubInfo, putKind,
structure, CodeLocationLabel(list->currentSlowPathTarget()), stubRoutine);
- if (!result)
- return GiveUpOnCache;
list->addAccess(
PutByIdAccess::replace(
*vm, codeBlock->ownerExecutable(),
structure, stubRoutine));
}
- RepatchBuffer repatchBuffer(codeBlock);
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code()));
- if (list->isFull())
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
-
- return RetryCacheLater;
- }
-
- if ((slot.isCacheableCustom() || slot.isCacheableSetter())
- && stubInfo.patch.spillMode == DontSpill) {
- RefPtr<JITStubRoutine> stubRoutine;
-
- ObjectPropertyConditionSet conditionSet;
- PropertyOffset offset;
- if (slot.base() != baseValue) {
- if (slot.isCacheableCustom()) {
- conditionSet =
- generateConditionsForPrototypePropertyHitCustom(
- *vm, codeBlock->ownerExecutable(), exec, structure, slot.base(),
- propertyName.impl());
- } else {
- conditionSet =
- generateConditionsForPrototypePropertyHit(
- *vm, codeBlock->ownerExecutable(), exec, structure, slot.base(),
- propertyName.impl());
- }
- if (!conditionSet.isValid())
- return GiveUpOnCache;
- offset = slot.isCacheableCustom() ? invalidOffset : conditionSet.slotBaseCondition().offset();
- } else
- offset = slot.cachedOffset();
-
- PolymorphicPutByIdList* list;
- list = PolymorphicPutByIdList::from(putKind, stubInfo);
-
- bool result = generateByIdStub(
- exec, kindFor(slot), propertyName, customFor(slot), stubInfo, conditionSet, slot.base(),
- offset, structure, false, nullptr,
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone),
- CodeLocationLabel(list->currentSlowPathTarget()),
- stubRoutine);
- if (!result)
- return GiveUpOnCache;
- list->addAccess(PutByIdAccess::setter(
- *vm, codeBlock->ownerExecutable(),
- slot.isCacheableSetter() ? PutByIdAccess::Setter : PutByIdAccess::CustomSetter,
- structure, conditionSet, slot.customSetter(), stubRoutine));
-
RepatchBuffer repatchBuffer(codeBlock);
repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code()));
+
if (list->isFull())
repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
-
- return RetryCacheLater;
+
+ return true;
}
- return GiveUpOnCache;
+
+ return false;
}
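
tryBuildPutByIdList grows a PolymorphicPutByIdList: each new stub's failure path chains to the previous list entry, and once the list is full the call site is repatched to the fully generic function. A toy sketch of that bounded-list caching pattern, with illustrative names and a linear scan standing in for the chained stubs:

// Toy model of a polymorphic put-by-id list (illustrative names only).
#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

struct ToyStructure {
    std::unordered_map<std::string, std::size_t> offsets;
};

struct ToyObject {
    const ToyStructure* structure;
    double slots[8];
};

class PolymorphicPutList {
public:
    static constexpr std::size_t maxSize = 4; // plays the role of the list-size cap

    bool tryCachedPut(ToyObject& object, double value) const {
        for (const Case& entry : m_cases) {            // newest stub falls through to
            if (object.structure == entry.structure) { // the previous one on failure
                object.slots[entry.offset] = value;
                return true;
            }
        }
        return false;                                  // fell off the list -> slow path
    }

    // Slow path: do the real lookup and, if there is still room, append a case.
    void slowPutAndMaybeCache(ToyObject& object, const std::string& name, double value) {
        auto it = object.structure->offsets.find(name);
        if (it == object.structure->offsets.end())
            return;
        object.slots[it->second] = value;
        if (m_cases.size() < maxSize)
            m_cases.push_back({object.structure, it->second});
        // else: the list is full; the real code repatches to the generic function.
    }

private:
    struct Case {
        const ToyStructure* structure;
        std::size_t offset;
    };
    std::vector<Case> m_cases;
};

int main() {
    ToyStructure a{{{"x", 0}}}, b{{{"x", 3}}};
    ToyObject objA{&a, {}}, objB{&b, {}};
    PolymorphicPutList list;
    list.slowPutAndMaybeCache(objA, "x", 1.0);
    list.slowPutAndMaybeCache(objB, "x", 2.0);
    return (list.tryCachedPut(objA, 5.0) && list.tryCachedPut(objB, 6.0)) ? 0 : 1;
}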
-void buildPutByIdList(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+void buildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
{
GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- if (tryBuildPutByIdList(exec, baseValue, structure, propertyName, slot, stubInfo, putKind) == GiveUpOnCache)
+ bool cached = tryBuildPutByIdList(exec, baseValue, propertyName, slot, stubInfo, putKind);
+ if (!cached)
repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
}
-static InlineCacheAction tryRepatchIn(
+static bool tryRepatchIn(
ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
const PropertySlot& slot, StructureStubInfo& stubInfo)
{
- if (Options::forceICFailure())
- return GiveUpOnCache;
-
if (!base->structure()->propertyAccessesAreCacheable())
- return GiveUpOnCache;
+ return false;
if (wasFound) {
if (!slot.isCacheable())
- return GiveUpOnCache;
+ return false;
}
CodeBlock* codeBlock = exec->codeBlock();
VM* vm = &exec->vm();
- Structure* structure = base->structure(*vm);
+ Structure* structure = base->structure();
- ObjectPropertyConditionSet conditionSet;
- if (wasFound) {
- if (slot.slotBase() != base) {
- conditionSet = generateConditionsForPrototypePropertyHit(
- *vm, codeBlock->ownerExecutable(), exec, structure, slot.slotBase(), ident.impl());
- }
- } else {
- conditionSet = generateConditionsForPropertyMiss(
- *vm, codeBlock->ownerExecutable(), exec, structure, ident.impl());
- }
- if (!conditionSet.isValid())
- return GiveUpOnCache;
+ PropertyOffset offsetIgnored;
+ size_t count = normalizePrototypeChainForChainAccess(exec, base, wasFound ? slot.slotBase() : JSValue(), ident, offsetIgnored);
+ if (count == InvalidPrototypeChain)
+ return false;
PolymorphicAccessStructureList* polymorphicStructureList;
int listIndex;
@@ -1511,9 +1324,10 @@ static InlineCacheAction tryRepatchIn(
slowCaseLabel = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code());
if (listIndex == POLYMORPHIC_LIST_CACHE_SIZE)
- return GiveUpOnCache;
+ return false;
}
+ StructureChain* chain = structure->prototypeChain(exec);
RefPtr<JITStubRoutine> stubRoutine;
{
@@ -1532,20 +1346,27 @@ static InlineCacheAction tryRepatchIn(
needToRestoreScratch = false;
MacroAssembler::JumpList failureCases;
- failureCases.append(branchStructure(stubJit,
+ failureCases.append(stubJit.branchPtr(
MacroAssembler::NotEqual,
- MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()),
- structure));
+ MacroAssembler::Address(baseGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(structure)));
CodeBlock* codeBlock = exec->codeBlock();
if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
vm->registerWatchpointForImpureProperty(ident, stubInfo.addWatchpoint(codeBlock));
- if (slot.watchpointSet())
- slot.watchpointSet()->add(stubInfo.addWatchpoint(codeBlock));
-
- checkObjectPropertyConditions(
- conditionSet, exec->codeBlock(), stubInfo, stubJit, failureCases, scratchGPR);
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ JSObject* prototype = asObject(currStructure->prototypeForLookup(exec));
+ Structure* protoStructure = prototype->structure();
+ addStructureTransitionCheck(
+ prototype, protoStructure, exec->codeBlock(), stubInfo, stubJit,
+ failureCases, scratchGPR);
+ if (protoStructure->typeInfo().newImpurePropertyFiresWatchpoints())
+ vm->registerWatchpointForImpureProperty(ident, stubInfo.addWatchpoint(codeBlock));
+ currStructure = it->get();
+ }
#if USE(JSVALUE64)
stubJit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(wasFound))), resultGPR);
@@ -1557,15 +1378,13 @@ static InlineCacheAction tryRepatchIn(
emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
- LinkBuffer patchBuffer(*vm, stubJit, exec->codeBlock(), JITCompilationCanFail);
- if (patchBuffer.didFailToAllocate())
- return GiveUpOnCache;
-
+ LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
+
linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
- stubRoutine = FINALIZE_CODE_FOR_STUB(
- exec->codeBlock(), patchBuffer,
- ("In (found = %s) stub for %s, return point %p",
+ stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
+ patchBuffer,
+ ("DFG In (found = %s) stub for %s, return point %p",
wasFound ? "yes" : "no", toCString(*exec->codeBlock()).data(),
successLabel.executableAddress()));
}
@@ -1576,375 +1395,171 @@ static InlineCacheAction tryRepatchIn(
RepatchBuffer repatchBuffer(codeBlock);
repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code()));
- return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1) ? RetryCacheLater : GiveUpOnCache;
+ return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
}
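
The "in" cache built above bakes the boolean answer into the stub and guards it with structure checks over the receiver and its prototype chain. A standalone sketch of that idea with toy types; structure identity is just a pointer compare here, and watchpoints and impure-property handling are omitted.

// Toy model of a cached "in" result guarded by a prototype-chain snapshot.
#include <vector>

struct ToyStructure {}; // identity only; the cache compares structure pointers

struct ToyObject {
    const ToyStructure* structure;
    const ToyObject* prototype; // nullptr terminates the chain
};

struct CachedInResult {
    std::vector<const ToyStructure*> chainSnapshot; // receiver first, then prototypes
    bool wasFound;
};

CachedInResult buildCache(const ToyObject* base, bool wasFound) {
    CachedInResult cache;
    for (const ToyObject* o = base; o; o = o->prototype)
        cache.chainSnapshot.push_back(o->structure);  // the stub's structure checks
    cache.wasFound = wasFound;
    return cache;
}

// The stub body: compare each structure, then materialize the cached boolean.
bool tryCachedIn(const ToyObject* base, const CachedInResult& cache, bool& result) {
    const ToyObject* o = base;
    for (const ToyStructure* expected : cache.chainSnapshot) {
        if (!o || o->structure != expected)
            return false;                             // failure case -> slow path
        o = o->prototype;
    }
    result = cache.wasFound;
    return true;
}

int main() {
    ToyStructure protoStructure, baseStructure;
    ToyObject proto{&protoStructure, nullptr};
    ToyObject base{&baseStructure, &proto};
    CachedInResult cache = buildCache(&base, /* wasFound */ true);
    bool result = false;
    bool hit = tryCachedIn(&base, cache, result);
    return (hit && result) ? 0 : 1;
}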
void repatchIn(
ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
const PropertySlot& slot, StructureStubInfo& stubInfo)
{
- if (tryRepatchIn(exec, base, ident, wasFound, slot, stubInfo) == GiveUpOnCache)
- repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationIn);
-}
-
-static void linkSlowFor(
- RepatchBuffer& repatchBuffer, VM*, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef)
-{
- repatchBuffer.relink(
- callLinkInfo.callReturnLocation(), codeRef.code());
-}
-
-static void linkSlowFor(
- RepatchBuffer& repatchBuffer, VM* vm, CallLinkInfo& callLinkInfo, ThunkGenerator generator)
-{
- linkSlowFor(repatchBuffer, vm, callLinkInfo, vm->getCTIStub(generator));
+ if (tryRepatchIn(exec, base, ident, wasFound, slot, stubInfo))
+ return;
+ repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationIn);
}
-static void linkSlowFor(
- RepatchBuffer& repatchBuffer, VM* vm, CallLinkInfo& callLinkInfo)
+static void linkSlowFor(RepatchBuffer& repatchBuffer, VM* vm, CallLinkInfo& callLinkInfo, CodeSpecializationKind kind)
{
- MacroAssemblerCodeRef virtualThunk = virtualThunkFor(vm, callLinkInfo);
- linkSlowFor(repatchBuffer, vm, callLinkInfo, virtualThunk);
- callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, *vm, nullptr, true));
+ if (kind == CodeForCall) {
+ repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code());
+ return;
+ }
+ ASSERT(kind == CodeForConstruct);
+ repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code());
}
-void linkFor(
- ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock,
- JSFunction* callee, MacroAssemblerCodePtr codePtr)
+void linkFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, JSFunction* callee, MacroAssemblerCodePtr codePtr, CodeSpecializationKind kind)
{
- ASSERT(!callLinkInfo.stub());
+ ASSERT(!callLinkInfo.stub);
+
+ // If you're being call-linked from a DFG caller then you obviously didn't get inlined.
+ if (calleeCodeBlock)
+ calleeCodeBlock->m_shouldAlwaysBeInlined = false;
CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
-
VM* vm = callerCodeBlock->vm();
RepatchBuffer repatchBuffer(callerCodeBlock);
ASSERT(!callLinkInfo.isLinked());
- callLinkInfo.setCallee(exec->callerFrame()->vm(), callLinkInfo.hotPathBegin(), callerCodeBlock->ownerExecutable(), callee);
- callLinkInfo.setLastSeenCallee(exec->callerFrame()->vm(), callerCodeBlock->ownerExecutable(), callee);
- if (shouldShowDisassemblyFor(callerCodeBlock))
- dataLog("Linking call in ", *callerCodeBlock, " at ", callLinkInfo.codeOrigin(), " to ", pointerDump(calleeCodeBlock), ", entrypoint at ", codePtr, "\n");
- repatchBuffer.relink(callLinkInfo.hotPathOther(), codePtr);
+ callLinkInfo.callee.set(exec->callerFrame()->vm(), callLinkInfo.hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
+ callLinkInfo.lastSeenCallee.set(exec->callerFrame()->vm(), callerCodeBlock->ownerExecutable(), callee);
+ repatchBuffer.relink(callLinkInfo.hotPathOther, codePtr);
if (calleeCodeBlock)
calleeCodeBlock->linkIncomingCall(exec->callerFrame(), &callLinkInfo);
- if (callLinkInfo.specializationKind() == CodeForCall) {
- linkSlowFor(
- repatchBuffer, vm, callLinkInfo, linkPolymorphicCallThunkGenerator);
+ if (kind == CodeForCall) {
+ repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code());
return;
}
- ASSERT(callLinkInfo.specializationKind() == CodeForConstruct);
- linkSlowFor(repatchBuffer, vm, callLinkInfo);
+ ASSERT(kind == CodeForConstruct);
+ linkSlowFor(repatchBuffer, vm, callLinkInfo, CodeForConstruct);
}
-void linkSlowFor(
- ExecState* exec, CallLinkInfo& callLinkInfo)
+void linkSlowFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeSpecializationKind kind)
{
CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
VM* vm = callerCodeBlock->vm();
RepatchBuffer repatchBuffer(callerCodeBlock);
- linkSlowFor(repatchBuffer, vm, callLinkInfo);
+ linkSlowFor(repatchBuffer, vm, callLinkInfo, kind);
}
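
linkFor and linkSlowFor implement monomorphic call linking: the first call takes the slow path, which resolves the callee and repatches the call site so later calls jump straight to the cached target, while linkSlowFor chooses which thunk the slow path lands on. A toy model of that patch-once pattern, with illustrative names rather than the CallLinkInfo API:

// Toy model of monomorphic call-site linking.
#include <functional>

using Callee = int (*)(int);

struct ToyCallLinkInfo {
    Callee linkedTarget = nullptr;      // the patched fast-path target, once linked
    std::function<Callee(int)> resolve; // stands in for the virtual-call thunk
};

int callThrough(ToyCallLinkInfo& site, int argument) {
    if (site.linkedTarget)                  // fast path after linking
        return site.linkedTarget(argument);
    Callee callee = site.resolve(argument); // slow path: figure out the callee
    site.linkedTarget = callee;             // "repatch" the call site
    return callee(argument);
}

static int addOne(int x) { return x + 1; }

int main() {
    ToyCallLinkInfo site;
    site.resolve = [](int) { return &addOne; };
    int first = callThrough(site, 1);  // slow path, links the site
    int second = callThrough(site, 2); // fast path, uses the patched target
    return (first == 2 && second == 3) ? 0 : 1;
}

Unlinking (revertCall/unlinkFor in the removed code) corresponds to clearing linkedTarget so the next call resolves the callee again.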
-static void revertCall(
- RepatchBuffer& repatchBuffer, VM* vm, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef)
+void linkClosureCall(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, Structure* structure, ExecutableBase* executable, MacroAssemblerCodePtr codePtr)
{
- repatchBuffer.revertJumpReplacementToBranchPtrWithPatch(
- RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()),
- static_cast<MacroAssembler::RegisterID>(callLinkInfo.calleeGPR()), 0);
- linkSlowFor(repatchBuffer, vm, callLinkInfo, codeRef);
- callLinkInfo.clearSeen();
- callLinkInfo.clearCallee();
- callLinkInfo.clearStub();
- callLinkInfo.clearSlowStub();
- if (callLinkInfo.isOnList())
- callLinkInfo.remove();
-}
-
-void unlinkFor(
- RepatchBuffer& repatchBuffer, CallLinkInfo& callLinkInfo)
-{
- if (Options::showDisassembly())
- dataLog("Unlinking call from ", callLinkInfo.callReturnLocation(), " in request from ", pointerDump(repatchBuffer.codeBlock()), "\n");
+ ASSERT(!callLinkInfo.stub);
- VM* vm = repatchBuffer.codeBlock()->vm();
- revertCall(repatchBuffer, vm, callLinkInfo, vm->getCTIStub(linkCallThunkGenerator));
-}
-
-void linkVirtualFor(
- ExecState* exec, CallLinkInfo& callLinkInfo)
-{
CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
VM* vm = callerCodeBlock->vm();
- if (shouldShowDisassemblyFor(callerCodeBlock))
- dataLog("Linking virtual call at ", *callerCodeBlock, " ", exec->callerFrame()->codeOrigin(), "\n");
-
- RepatchBuffer repatchBuffer(callerCodeBlock);
- MacroAssemblerCodeRef virtualThunk = virtualThunkFor(vm, callLinkInfo);
- revertCall(repatchBuffer, vm, callLinkInfo, virtualThunk);
- callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, *vm, nullptr, true));
-}
-
-namespace {
-struct CallToCodePtr {
- CCallHelpers::Call call;
- MacroAssemblerCodePtr codePtr;
-};
-} // anonymous namespace
-
-void linkPolymorphicCall(
- ExecState* exec, CallLinkInfo& callLinkInfo, CallVariant newVariant)
-{
- // Currently we can't do anything for non-function callees.
- // https://bugs.webkit.org/show_bug.cgi?id=140685
- if (!newVariant || !newVariant.executable()) {
- linkVirtualFor(exec, callLinkInfo);
- return;
- }
-
- CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
- VM* vm = callerCodeBlock->vm();
-
- CallVariantList list;
- if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub())
- list = stub->variants();
- else if (JSFunction* oldCallee = callLinkInfo.callee())
- list = CallVariantList{ CallVariant(oldCallee) };
-
- list = variantListWithVariant(list, newVariant);
-
- // If there are any closure calls then it makes sense to treat all of them as closure calls.
- // This makes switching on callee cheaper. It also produces profiling that's easier on the DFG;
- // the DFG doesn't really want to deal with a combination of closure and non-closure callees.
- bool isClosureCall = false;
- for (CallVariant variant : list) {
- if (variant.isClosureCall()) {
- list = despecifiedVariantList(list);
- isClosureCall = true;
- break;
- }
- }
-
- if (isClosureCall)
- callLinkInfo.setHasSeenClosure();
-
- Vector<PolymorphicCallCase> callCases;
-
- // Figure out what our cases are.
- for (CallVariant variant : list) {
- CodeBlock* codeBlock;
- if (variant.executable()->isHostFunction())
- codeBlock = nullptr;
- else {
- codeBlock = jsCast<FunctionExecutable*>(variant.executable())->codeBlockForCall();
-
- // If we cannot handle a callee, assume that it's better for this whole thing to be a
- // virtual call.
- if (exec->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo.callType() == CallLinkInfo::CallVarargs || callLinkInfo.callType() == CallLinkInfo::ConstructVarargs) {
- linkVirtualFor(exec, callLinkInfo);
- return;
- }
- }
-
- callCases.append(PolymorphicCallCase(variant, codeBlock));
- }
-
- // If we are over the limit, just use a normal virtual call.
- unsigned maxPolymorphicCallVariantListSize;
- if (callerCodeBlock->jitType() == JITCode::topTierJIT())
- maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSizeForTopTier();
- else
- maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSize();
- if (list.size() > maxPolymorphicCallVariantListSize) {
- linkVirtualFor(exec, callLinkInfo);
- return;
- }
-
- GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR());
+ GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR);
CCallHelpers stubJit(vm, callerCodeBlock);
CCallHelpers::JumpList slowPath;
- ptrdiff_t offsetToFrame = -sizeof(CallerFrameAndPC);
-
- if (!ASSERT_DISABLED) {
- CCallHelpers::Jump okArgumentCount = stubJit.branch32(
- CCallHelpers::Below, CCallHelpers::Address(CCallHelpers::stackPointerRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ArgumentCount) + offsetToFrame + PayloadOffset), CCallHelpers::TrustedImm32(10000000));
- stubJit.abortWithReason(RepatchInsaneArgumentCount);
- okArgumentCount.link(&stubJit);
- }
-
- GPRReg scratch = AssemblyHelpers::selectScratchGPR(calleeGPR);
- GPRReg comparisonValueGPR;
-
- if (isClosureCall) {
- // Verify that we have a function and stash the executable in scratch.
-
#if USE(JSVALUE64)
- // We can safely clobber everything except the calleeGPR. We can't rely on tagMaskRegister
- // being set. So we do this the hard way.
- stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratch);
- slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratch));
+ // We can safely clobber everything except the calleeGPR. We can't rely on tagMaskRegister
+ // being set. So we do this the hard way.
+ GPRReg scratch = AssemblyHelpers::selectScratchGPR(calleeGPR);
+ stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratch);
+ slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratch));
#else
- // We would have already checked that the callee is a cell.
+ // We would have already checked that the callee is a cell.
#endif
- slowPath.append(
- stubJit.branch8(
- CCallHelpers::NotEqual,
- CCallHelpers::Address(calleeGPR, JSCell::typeInfoTypeOffset()),
- CCallHelpers::TrustedImm32(JSFunctionType)));
+ slowPath.append(
+ stubJit.branchPtr(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(calleeGPR, JSCell::structureOffset()),
+ CCallHelpers::TrustedImmPtr(structure)));
- stubJit.loadPtr(
+ slowPath.append(
+ stubJit.branchPtr(
+ CCallHelpers::NotEqual,
CCallHelpers::Address(calleeGPR, JSFunction::offsetOfExecutable()),
- scratch);
-
- comparisonValueGPR = scratch;
- } else
- comparisonValueGPR = calleeGPR;
-
- Vector<int64_t> caseValues(callCases.size());
- Vector<CallToCodePtr> calls(callCases.size());
- std::unique_ptr<uint32_t[]> fastCounts;
+ CCallHelpers::TrustedImmPtr(executable)));
- if (callerCodeBlock->jitType() != JITCode::topTierJIT())
- fastCounts = std::make_unique<uint32_t[]>(callCases.size());
-
- for (size_t i = 0; i < callCases.size(); ++i) {
- if (fastCounts)
- fastCounts[i] = 0;
-
- CallVariant variant = callCases[i].variant();
- int64_t newCaseValue;
- if (isClosureCall)
- newCaseValue = bitwise_cast<intptr_t>(variant.executable());
- else
- newCaseValue = bitwise_cast<intptr_t>(variant.function());
-
- if (!ASSERT_DISABLED) {
- for (size_t j = 0; j < i; ++j) {
- if (caseValues[j] != newCaseValue)
- continue;
-
- dataLog("ERROR: Attempt to add duplicate case value.\n");
- dataLog("Existing case values: ");
- CommaPrinter comma;
- for (size_t k = 0; k < i; ++k)
- dataLog(comma, caseValues[k]);
- dataLog("\n");
- dataLog("Attempting to add: ", newCaseValue, "\n");
- dataLog("Variant list: ", listDump(callCases), "\n");
- RELEASE_ASSERT_NOT_REACHED();
- }
- }
-
- caseValues[i] = newCaseValue;
- }
+ stubJit.loadPtr(
+ CCallHelpers::Address(calleeGPR, JSFunction::offsetOfScopeChain()),
+ GPRInfo::returnValueGPR);
+
+#if USE(JSVALUE64)
+ stubJit.store64(
+ GPRInfo::returnValueGPR,
+ CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain)));
+#else
+ stubJit.storePtr(
+ GPRInfo::returnValueGPR,
+ CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(
+ CCallHelpers::TrustedImm32(JSValue::CellTag),
+ CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+#endif
- GPRReg fastCountsBaseGPR =
- AssemblyHelpers::selectScratchGPR(calleeGPR, comparisonValueGPR, GPRInfo::regT3);
- stubJit.move(CCallHelpers::TrustedImmPtr(fastCounts.get()), fastCountsBaseGPR);
-
- BinarySwitch binarySwitch(comparisonValueGPR, caseValues, BinarySwitch::IntPtr);
- CCallHelpers::JumpList done;
- while (binarySwitch.advance(stubJit)) {
- size_t caseIndex = binarySwitch.caseIndex();
-
- CallVariant variant = callCases[caseIndex].variant();
-
- ASSERT(variant.executable()->hasJITCodeForCall());
- MacroAssemblerCodePtr codePtr =
- variant.executable()->generatedJITCodeForCall()->addressForCall(
- *vm, variant.executable(), ArityCheckNotRequired, callLinkInfo.registerPreservationMode());
-
- if (fastCounts) {
- stubJit.add32(
- CCallHelpers::TrustedImm32(1),
- CCallHelpers::Address(fastCountsBaseGPR, caseIndex * sizeof(uint32_t)));
- }
- calls[caseIndex].call = stubJit.nearCall();
- calls[caseIndex].codePtr = codePtr;
- done.append(stubJit.jump());
- }
+ AssemblyHelpers::Call call = stubJit.nearCall();
+ AssemblyHelpers::Jump done = stubJit.jump();
slowPath.link(&stubJit);
- binarySwitch.fallThrough().link(&stubJit);
stubJit.move(calleeGPR, GPRInfo::regT0);
#if USE(JSVALUE32_64)
stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
#endif
- stubJit.move(CCallHelpers::TrustedImmPtr(&callLinkInfo), GPRInfo::regT2);
- stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation().executableAddress()), GPRInfo::regT4);
-
- stubJit.restoreReturnAddressBeforeReturn(GPRInfo::regT4);
+ stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation.executableAddress()), GPRInfo::nonArgGPR2);
+ stubJit.restoreReturnAddressBeforeReturn(GPRInfo::nonArgGPR2);
AssemblyHelpers::Jump slow = stubJit.jump();
-
- LinkBuffer patchBuffer(*vm, stubJit, callerCodeBlock, JITCompilationCanFail);
- if (patchBuffer.didFailToAllocate()) {
- linkVirtualFor(exec, callLinkInfo);
- return;
- }
- RELEASE_ASSERT(callCases.size() == calls.size());
- for (CallToCodePtr callToCodePtr : calls) {
- patchBuffer.link(
- callToCodePtr.call, FunctionPtr(callToCodePtr.codePtr.executableAddress()));
- }
- if (JITCode::isOptimizingJIT(callerCodeBlock->jitType()))
- patchBuffer.link(done, callLinkInfo.callReturnLocation().labelAtOffset(0));
- else
- patchBuffer.link(done, callLinkInfo.hotPathOther().labelAtOffset(0));
- patchBuffer.link(slow, CodeLocationLabel(vm->getCTIStub(linkPolymorphicCallThunkGenerator).code()));
-
- RefPtr<PolymorphicCallStubRoutine> stubRoutine = adoptRef(new PolymorphicCallStubRoutine(
- FINALIZE_CODE_FOR(
- callerCodeBlock, patchBuffer,
- ("Polymorphic call stub for %s, return point %p, targets %s",
- toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation().labelAtOffset(0).executableAddress(),
- toCString(listDump(callCases)).data())),
- *vm, callerCodeBlock->ownerExecutable(), exec->callerFrame(), callLinkInfo, callCases,
- WTF::move(fastCounts)));
+ LinkBuffer patchBuffer(*vm, &stubJit, callerCodeBlock);
+
+ patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
+ patchBuffer.link(done, callLinkInfo.callReturnLocation.labelAtOffset(0));
+ patchBuffer.link(slow, CodeLocationLabel(vm->getCTIStub(virtualCallThunkGenerator).code()));
+
+ RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
+ FINALIZE_DFG_CODE(
+ patchBuffer,
+ ("DFG closure call stub for %s, return point %p, target %p (%s)",
+ toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation.labelAtOffset(0).executableAddress(),
+ codePtr.executableAddress(), toCString(pointerDump(calleeCodeBlock)).data())),
+ *vm, callerCodeBlock->ownerExecutable(), structure, executable, callLinkInfo.codeOrigin));
RepatchBuffer repatchBuffer(callerCodeBlock);
repatchBuffer.replaceWithJump(
- RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()),
+ RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin),
CodeLocationLabel(stubRoutine->code().code()));
- // The original slow path is unreachable on 64-bits, but still
- // reachable on 32-bits since a non-cell callee will always
- // trigger the slow path
- linkSlowFor(repatchBuffer, vm, callLinkInfo);
-
- // If there had been a previous stub routine, that one will die as soon as the GC runs and sees
- // that it's no longer on stack.
- callLinkInfo.setStub(stubRoutine.release());
-
- // The call link info no longer has a call cache apart from the jump to the polymorphic call
- // stub.
- if (callLinkInfo.isOnList())
- callLinkInfo.remove();
+ linkSlowFor(repatchBuffer, vm, callLinkInfo, CodeForCall);
+
+ callLinkInfo.stub = stubRoutine.release();
+
+ ASSERT(!calleeCodeBlock || calleeCodeBlock->isIncomingCallAlreadyLinked(&callLinkInfo));
}
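
The removed linkPolymorphicCall builds a stub that switches on the callee (a BinarySwitch over JSFunction or Executable pointers, with per-case fast counts), while the restored linkClosureCall is the single-case variant that checks structure and executable. A toy sketch of the dispatch both are approximating, using a linear scan in place of the binary switch; the types and names are illustrative only.

// Toy model of a polymorphic call stub's dispatch.
#include <cstdint>
#include <vector>

using EntryPoint = int (*)(int);

struct CallCase {
    std::intptr_t caseValue;  // the callee's JSFunction / Executable in the real stub
    EntryPoint entryPoint;
    std::uint32_t fastCount;  // per-case profiling counter kept by the lower tiers
};

static std::intptr_t identityOf(EntryPoint p) { return reinterpret_cast<std::intptr_t>(p); }

// The stub body: match the incoming callee against the known cases, bump the
// case's counter, and jump to its entry point, or fall back to the virtual path.
int dispatch(std::vector<CallCase>& cases, std::intptr_t callee, int argument,
             EntryPoint virtualFallback)
{
    for (CallCase& entry : cases) {
        if (entry.caseValue == callee) {
            ++entry.fastCount;
            return entry.entryPoint(argument);
        }
    }
    return virtualFallback(argument); // slow path -> virtual call thunk
}

static int doubleIt(int x) { return 2 * x; }
static int negateIt(int x) { return -x; }
static int unknownCallee(int x) { return x + 100; }
static int virtualCall(int x) { return x; } // stands in for the generic path

int main() {
    std::vector<CallCase> cases = {
        { identityOf(&doubleIt), &doubleIt, 0 },
        { identityOf(&negateIt), &negateIt, 0 },
    };
    int a = dispatch(cases, identityOf(&doubleIt), 3, &virtualCall);
    int b = dispatch(cases, identityOf(&unknownCallee), 3, &virtualCall);
    return (a == 6 && b == 3) ? 0 : 1;
}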
void resetGetByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetByIdOptimize);
- CodeLocationDataLabel32 structureLabel = stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
- if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) {
- repatchBuffer.revertJumpReplacementToPatchableBranch32WithPatch(
- RepatchBuffer::startOfPatchableBranch32WithPatchOnAddress(structureLabel),
+ CodeLocationDataLabelPtr structureLabel = stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
+ if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
+ repatchBuffer.revertJumpReplacementToPatchableBranchPtrWithPatch(
+ RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(structureLabel),
MacroAssembler::Address(
static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
- JSCell::structureIDOffset()),
- static_cast<int32_t>(unusedPointer));
+ JSCell::structureOffset()),
+ reinterpret_cast<void*>(unusedPointer));
}
- repatchBuffer.repatch(structureLabel, static_cast<int32_t>(unusedPointer));
+ repatchBuffer.repatch(structureLabel, reinterpret_cast<void*>(unusedPointer));
#if USE(JSVALUE64)
repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
#else
@@ -1969,16 +1584,16 @@ void resetPutByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
optimizedFunction = operationPutByIdDirectNonStrictOptimize;
}
repatchCall(repatchBuffer, stubInfo.callReturnLocation, optimizedFunction);
- CodeLocationDataLabel32 structureLabel = stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
- if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) {
- repatchBuffer.revertJumpReplacementToPatchableBranch32WithPatch(
- RepatchBuffer::startOfPatchableBranch32WithPatchOnAddress(structureLabel),
+ CodeLocationDataLabelPtr structureLabel = stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
+ if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
+ repatchBuffer.revertJumpReplacementToPatchableBranchPtrWithPatch(
+ RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(structureLabel),
MacroAssembler::Address(
static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
- JSCell::structureIDOffset()),
- static_cast<int32_t>(unusedPointer));
+ JSCell::structureOffset()),
+ reinterpret_cast<void*>(unusedPointer));
}
- repatchBuffer.repatch(structureLabel, static_cast<int32_t>(unusedPointer));
+ repatchBuffer.repatch(structureLabel, reinterpret_cast<void*>(unusedPointer));
#if USE(JSVALUE64)
repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
#else