author     Simon Hausmann <simon.hausmann@nokia.com>   2012-02-03 09:55:33 +0100
committer  Simon Hausmann <simon.hausmann@nokia.com>   2012-02-03 09:55:33 +0100
commit     cd44dc59cdfc39534aef4d417e9f3c412e3be139 (patch)
tree       8d89889ba95ed6ec9322e733846cc9cce9d7dff1 /Source/JavaScriptCore/dfg
parent     d11f84f5b5cdc0d92a08af01b13472fdd5f9acb9 (diff)
download   qtwebkit-cd44dc59cdfc39534aef4d417e9f3c412e3be139.tar.gz
Imported WebKit commit fce473cb4d55aa9fe9d0b0322a2fffecb731b961 (http://svn.webkit.org/repository/webkit/trunk@106560)
Diffstat (limited to 'Source/JavaScriptCore/dfg')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAbstractState.cpp        |  44
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAbstractValue.h          |   2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h        |  10
-rw-r--r--  Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp       | 329
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCCallHelpers.h           | 548
-rw-r--r--  Source/JavaScriptCore/dfg/DFGDriver.cpp               |  17
-rw-r--r--  Source/JavaScriptCore/dfg/DFGDriver.h                 |  10
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGraph.cpp                |   6
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGraph.h                  |  64
-rw-r--r--  Source/JavaScriptCore/dfg/DFGJITCompiler.cpp          |   9
-rw-r--r--  Source/JavaScriptCore/dfg/DFGJITCompiler.h            | 100
-rw-r--r--  Source/JavaScriptCore/dfg/DFGNode.h                   |   9
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSREntry.cpp             |   8
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp      |   2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp |   4
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp    |   4
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOperations.cpp           | 233
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOperations.h             |   4
-rw-r--r--  Source/JavaScriptCore/dfg/DFGPropagator.cpp           |  63
-rw-r--r--  Source/JavaScriptCore/dfg/DFGRepatch.cpp              | 120
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp       | 222
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h         | 633
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp  | 242
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp     | 237
-rw-r--r--  Source/JavaScriptCore/dfg/DFGStructureSet.h           | 166
-rw-r--r--  Source/JavaScriptCore/dfg/DFGThunks.cpp               |   2
26 files changed, 1782 insertions(+), 1306 deletions(-)
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
index eb00bcb3c..bd35e1d43 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
@@ -121,6 +121,8 @@ void AbstractState::initialize(Graph& graph)
root->valuesAtHead.argument(i).set(PredictInt32Array);
else if (isUint8ArrayPrediction(prediction))
root->valuesAtHead.argument(i).set(PredictUint8Array);
+ else if (isUint8ClampedArrayPrediction(prediction))
+ root->valuesAtHead.argument(i).set(PredictUint8ClampedArray);
else if (isUint16ArrayPrediction(prediction))
root->valuesAtHead.argument(i).set(PredictUint16Array);
else if (isUint32ArrayPrediction(prediction))
@@ -151,14 +153,14 @@ bool AbstractState::endBasicBlock(MergeMode mergeMode)
if (mergeMode != DontMerge || !ASSERT_DISABLED) {
for (size_t argument = 0; argument < block->variablesAtTail.numberOfArguments(); ++argument) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Merging state for argument %lu.\n", argument);
+ printf(" Merging state for argument %zu.\n", argument);
#endif
changed |= mergeStateAtTail(block->valuesAtTail.argument(argument), m_variables.argument(argument), block->variablesAtTail.argument(argument));
}
for (size_t local = 0; local < block->variablesAtTail.numberOfLocals(); ++local) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Merging state for local %lu.\n", local);
+ printf(" Merging state for local %zu.\n", local);
#endif
changed |= mergeStateAtTail(block->valuesAtTail.local(local), m_variables.local(local), block->variablesAtTail.local(local));
}
@@ -278,7 +280,7 @@ bool AbstractState::execute(NodeIndex nodeIndex)
case ValueAdd:
case ArithAdd: {
- if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]) && node.canSpeculateInteger()) {
+ if (m_graph.addShouldSpeculateInteger(node, m_codeBlock)) {
forNode(node.child1()).filter(PredictInt32);
forNode(node.child2()).filter(PredictInt32);
forNode(nodeIndex).set(PredictInt32);
@@ -296,7 +298,19 @@ bool AbstractState::execute(NodeIndex nodeIndex)
break;
}
- case ArithSub:
+ case ArithSub: {
+ if (m_graph.addShouldSpeculateInteger(node, m_codeBlock)) {
+ forNode(node.child1()).filter(PredictInt32);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
+ forNode(node.child1()).filter(PredictNumber);
+ forNode(node.child2()).filter(PredictNumber);
+ forNode(nodeIndex).set(PredictDouble);
+ break;
+ }
+
case ArithMul:
case ArithDiv:
case ArithMin:
@@ -448,6 +462,12 @@ bool AbstractState::execute(NodeIndex nodeIndex)
forNode(nodeIndex).set(PredictInt32);
break;
}
+ if (m_graph[node.child1()].shouldSpeculateUint8ClampedArray()) {
+ forNode(node.child1()).filter(PredictUint8ClampedArray);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
if (m_graph[node.child1()].shouldSpeculateUint16Array()) {
forNode(node.child1()).filter(PredictUint16Array);
forNode(node.child2()).filter(PredictInt32);
@@ -522,6 +542,12 @@ bool AbstractState::execute(NodeIndex nodeIndex)
forNode(node.child3()).filter(PredictNumber);
break;
}
+ if (m_graph[node.child1()].shouldSpeculateUint8ClampedArray()) {
+ forNode(node.child1()).filter(PredictUint8ClampedArray);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(node.child3()).filter(PredictNumber);
+ break;
+ }
if (m_graph[node.child1()].shouldSpeculateUint16Array()) {
forNode(node.child1()).filter(PredictUint16Array);
forNode(node.child2()).filter(PredictInt32);
@@ -688,6 +714,7 @@ bool AbstractState::execute(NodeIndex nodeIndex)
break;
case GetById:
+ case GetByIdFlush:
if (!node.prediction()) {
m_isValid = false;
break;
@@ -728,6 +755,10 @@ bool AbstractState::execute(NodeIndex nodeIndex)
forNode(node.child1()).filter(PredictUint8Array);
forNode(nodeIndex).set(PredictInt32);
break;
+ case GetUint8ClampedArrayLength:
+ forNode(node.child1()).filter(PredictUint8ClampedArray);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
case GetUint16ArrayLength:
forNode(node.child1()).filter(PredictUint16Array);
forNode(nodeIndex).set(PredictInt32);
@@ -797,6 +828,11 @@ bool AbstractState::execute(NodeIndex nodeIndex)
forNode(nodeIndex).clear();
break;
}
+ if (m_graph[node.child1()].shouldSpeculateUint8ClampedArray()) {
+ forNode(node.child1()).filter(PredictUint8ClampedArray);
+ forNode(nodeIndex).clear();
+ break;
+ }
if (m_graph[node.child1()].shouldSpeculateUint16Array()) {
forNode(node.child1()).filter(PredictUint16Array);
forNode(nodeIndex).set(PredictOther);
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractValue.h b/Source/JavaScriptCore/dfg/DFGAbstractValue.h
index ee43b6c4a..15bc0d496 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractValue.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractValue.h
@@ -30,9 +30,9 @@
#if ENABLE(DFG_JIT)
-#include "DFGStructureSet.h"
#include "JSCell.h"
#include "PredictedType.h"
+#include "StructureSet.h"
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
index a9dec5062..e0d817c9f 100644
--- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
@@ -246,6 +246,16 @@ public:
m_assembler.vmov(fpr, payloadGPR, tagGPR);
}
#endif
+
+ enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
+ Jump emitExceptionCheck(ExceptionCheckKind kind = NormalExceptionCheck)
+ {
+#if USE(JSVALUE64)
+ return branchTestPtr(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(&globalData()->exception));
+#elif USE(JSVALUE32_64)
+ return branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(reinterpret_cast<char*>(&globalData()->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+#endif
+ }
#if ENABLE(SAMPLING_COUNTERS)
static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
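
For context on the new emitExceptionCheck() helper: the VM keeps a single exception slot in JSGlobalData, and "no exception pending" is encoded as the empty value (all-zero bits on 64-bit, the empty tag on 32-bit), so the JIT only needs one memory test plus a conditional branch. Below is a minimal, self-contained C++ model of that convention; the struct and names are stand-ins for illustration, not JSC's real types.

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the per-VM exception slot: zero bits mean "no exception pending".
    struct GlobalDataModel {
        uint64_t exceptionBits = 0;
    };

    enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };

    // Models the branch emitExceptionCheck() emits: the normal form "branches"
    // (returns true) when an exception is pending, the inverted form when none is.
    bool exceptionCheck(const GlobalDataModel& g, ExceptionCheckKind kind = NormalExceptionCheck)
    {
        bool pending = g.exceptionBits != 0;
        return kind == NormalExceptionCheck ? pending : !pending;
    }

    int main()
    {
        GlobalDataModel g;
        std::printf("%d\n", exceptionCheck(g));   // 0: no exception, fall through
        g.exceptionBits = 0x1;
        std::printf("%d\n", exceptionCheck(g));   // 1: take the exception-handling branch
        return 0;
    }
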
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 964618c43..87c3a23b9 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -28,9 +28,13 @@
#if ENABLE(DFG_JIT)
+#include "CallLinkStatus.h"
+#include "CodeBlock.h"
#include "DFGByteCodeCache.h"
#include "DFGCapabilities.h"
-#include "CodeBlock.h"
+#include "GetByIdStatus.h"
+#include "MethodCallLinkStatus.h"
+#include "PutByIdStatus.h"
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>
@@ -48,6 +52,7 @@ public:
, m_graph(graph)
, m_currentBlock(0)
, m_currentIndex(0)
+ , m_currentProfilingIndex(0)
, m_constantUndefined(UINT_MAX)
, m_constantNull(UINT_MAX)
, m_constantNaN(UINT_MAX)
@@ -329,7 +334,8 @@ private:
JSValue v = valueOfJSConstant(index);
if (v.isInt32())
return getJSConstant(node.constantNumber());
- // FIXME: We could convert the double ToInteger at this point.
+ if (v.isNumber())
+ return getJSConstantForValue(JSValue(JSC::toInt32(v.asNumber())));
}
return addToGraph(ValueToInt32, index);
@@ -351,6 +357,17 @@ private:
return addToGraph(ValueToNumber, OpInfo(NodeUseBottom), index);
}
+
+ NodeIndex getJSConstantForValue(JSValue constantValue)
+ {
+ unsigned constantIndex = m_codeBlock->addOrFindConstant(constantValue);
+ if (constantIndex >= m_constants.size())
+ m_constants.append(ConstantRecord());
+
+ ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
+
+ return getJSConstant(constantIndex);
+ }
NodeIndex getJSConstant(unsigned constant)
{
@@ -382,16 +399,6 @@ private:
{
return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
}
- bool isSmallInt32Constant(NodeIndex nodeIndex)
- {
- if (!isJSConstant(nodeIndex))
- return false;
- JSValue value = valueOfJSConstant(nodeIndex);
- if (!value.isInt32())
- return false;
- int32_t intValue = value.asInt32();
- return intValue >= -5 && intValue <= 5;
- }
// Convenience methods for getting constant values.
JSValue valueOfJSConstant(NodeIndex index)
{
@@ -403,7 +410,7 @@ private:
ASSERT(isInt32Constant(nodeIndex));
return valueOfJSConstant(nodeIndex).asInt32();
}
-
+
// This method returns a JSConstant with the value 'undefined'.
NodeIndex constantUndefined()
{
@@ -519,7 +526,7 @@ private:
CodeOrigin currentCodeOrigin()
{
- return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame);
+ return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame, m_currentProfilingIndex - m_currentIndex);
}
// These methods create a node and add it to the graph. If nodes of this type are
@@ -574,8 +581,10 @@ private:
Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
PredictedType prediction = PredictNone;
- if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
- prediction = getPrediction(m_graph.size(), m_currentIndex + OPCODE_LENGTH(op_call));
+ if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
+ m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call);
+ prediction = getPrediction();
+ }
addVarArgChild(get(currentInstruction[1].u.operand));
int argCount = currentInstruction[2].u.operand;
@@ -622,12 +631,12 @@ private:
PredictedType getPredictionWithoutOSRExit()
{
- return getPredictionWithoutOSRExit(m_graph.size(), m_currentIndex);
+ return getPredictionWithoutOSRExit(m_graph.size(), m_currentProfilingIndex);
}
PredictedType getPrediction()
{
- return getPrediction(m_graph.size(), m_currentIndex);
+ return getPrediction(m_graph.size(), m_currentProfilingIndex);
}
NodeIndex makeSafe(NodeIndex nodeIndex)
@@ -700,6 +709,29 @@ private:
return nodeIndex;
}
+ bool willNeedFlush(StructureStubInfo& stubInfo)
+ {
+ PolymorphicAccessStructureList* list;
+ int listSize;
+ switch (stubInfo.accessType) {
+ case access_get_by_id_self_list:
+ list = stubInfo.u.getByIdSelfList.structureList;
+ listSize = stubInfo.u.getByIdSelfList.listSize;
+ break;
+ case access_get_by_id_proto_list:
+ list = stubInfo.u.getByIdProtoList.structureList;
+ listSize = stubInfo.u.getByIdProtoList.listSize;
+ break;
+ default:
+ return false;
+ }
+ for (int i = 0; i < listSize; ++i) {
+ if (!list->list[i].isDirect)
+ return true;
+ }
+ return false;
+ }
+
bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
{
if (direct)
@@ -727,6 +759,8 @@ private:
BasicBlock* m_currentBlock;
// The bytecode index of the current instruction being generated.
unsigned m_currentIndex;
+ // The bytecode index of the value profile of the current instruction being generated.
+ unsigned m_currentProfilingIndex;
// We use these values during code generation, and to avoid the need for
// special handling we make sure they are available as constants in the
@@ -890,6 +924,7 @@ private:
m_currentIndex += OPCODE_LENGTH(name); \
return shouldContinueParsing
+
void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
{
ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
@@ -898,13 +933,15 @@ void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentIn
enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Slow case count for call at @%lu bc#%u: %u/%u; exit profile: %d.\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_profiledBlock->executionEntryCount(), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+ printf("Slow case count for call at @%zu bc#%u: %u/%u; exit profile: %d.\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_profiledBlock->executionEntryCount(), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
#endif
-
+
+ CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
+ m_inlineStackTop->m_profiledBlock, m_currentIndex);
+
if (m_graph.isFunctionConstant(m_codeBlock, callTarget))
callType = ConstantFunction;
- else if (!!m_inlineStackTop->m_profiledBlock->getCallLinkInfo(m_currentIndex).lastSeenCallee
- && !m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
+ else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
&& !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
callType = LinkedFunction;
else
@@ -922,7 +959,8 @@ void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentIn
if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
resultOperand = putInstruction[1].u.operand;
usesResult = true;
- prediction = getPrediction(m_graph.size(), nextOffset);
+ m_currentProfilingIndex = nextOffset;
+ prediction = getPrediction();
nextOffset += OPCODE_LENGTH(op_call_put_result);
}
JSFunction* expectedFunction;
@@ -934,7 +972,7 @@ void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentIn
certainAboutExpectedFunction = true;
} else {
ASSERT(callType == LinkedFunction);
- expectedFunction = m_inlineStackTop->m_profiledBlock->getCallLinkInfo(m_currentIndex).lastSeenCallee.get();
+ expectedFunction = callLinkStatus.callTarget();
intrinsic = expectedFunction->executable()->intrinsicFor(kind);
certainAboutExpectedFunction = false;
}
@@ -1029,7 +1067,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;
// Make sure that the area used by the call frame is reserved.
- for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart + 1;)
+ for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
m_preservedVars.set(m_inlineStackTop->remapOperand(arg));
// Make sure that we have enough locals.
@@ -1044,13 +1082,16 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
// This is where the actual inlining really happens.
unsigned oldIndex = m_currentIndex;
+ unsigned oldProfilingIndex = m_currentProfilingIndex;
m_currentIndex = 0;
+ m_currentProfilingIndex = 0;
addToGraph(InlineStart);
parseCodeBlock();
m_currentIndex = oldIndex;
+ m_currentProfilingIndex = oldProfilingIndex;
// If the inlined code created some new basic blocks, then we have linking to do.
if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {
@@ -1129,7 +1170,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
// Need to create a new basic block for the continuation at the caller.
OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_graph.size(), m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Creating inline epilogue basic block %p, #%lu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+ printf("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
m_currentBlock = block.get();
ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
@@ -1308,6 +1349,8 @@ bool ByteCodeParser::parseBlock(unsigned limit)
}
while (true) {
+ m_currentProfilingIndex = m_currentIndex;
+
// Don't extend over jump destinations.
if (m_currentIndex == limit) {
// Ordinarily we want to plant a jump. But refuse to do this if the block is
@@ -1680,6 +1723,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
}
case op_method_check: {
+ m_currentProfilingIndex += OPCODE_LENGTH(op_method_check);
Instruction* getInstruction = currentInstruction + OPCODE_LENGTH(op_method_check);
PredictedType prediction = getPrediction();
@@ -1691,23 +1735,26 @@ bool ByteCodeParser::parseBlock(unsigned limit)
// Check if the method_check was monomorphic. If so, emit a CheckXYZMethod
// node, which is a lot more efficient.
- StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
- MethodCallLinkInfo& methodCall = m_inlineStackTop->m_profiledBlock->getMethodCallLinkInfo(m_currentIndex);
+ GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
+ m_inlineStackTop->m_profiledBlock,
+ m_currentIndex,
+ m_codeBlock->identifier(identifier));
+ MethodCallLinkStatus methodCallStatus = MethodCallLinkStatus::computeFor(
+ m_inlineStackTop->m_profiledBlock, m_currentIndex);
- if (methodCall.seen
- && !!methodCall.cachedStructure
- && !stubInfo.seen
+ if (methodCallStatus.isSet()
+ && !getByIdStatus.isSet()
&& !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
// It's monomorphic as far as we can tell, since the method_check was linked
// but the slow path (i.e. the normal get_by_id) never fired.
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCall.cachedStructure.get())), base);
- if (methodCall.cachedPrototype.get() != m_inlineStackTop->m_profiledBlock->globalObject()->methodCallDummy())
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCall.cachedPrototypeStructure.get())), cellConstant(methodCall.cachedPrototype.get()));
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.structure())), base);
+ if (methodCallStatus.needsPrototypeCheck())
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.prototypeStructure())), cellConstant(methodCallStatus.prototype()));
- set(getInstruction[1].u.operand, cellConstant(methodCall.cachedFunction.get()));
+ set(getInstruction[1].u.operand, cellConstant(methodCallStatus.function()));
} else
- set(getInstruction[1].u.operand, addToGraph(GetById, OpInfo(identifier), OpInfo(prediction), base));
+ set(getInstruction[1].u.operand, addToGraph(getByIdStatus.makesCalls() ? GetByIdFlush : GetById, OpInfo(identifier), OpInfo(prediction), base));
m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
continue;
@@ -1737,178 +1784,109 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
Identifier identifier = m_codeBlock->identifier(identifierNumber);
- StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
+ GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
+ m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Slow case count for GetById @%lu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+ printf("Slow case count for GetById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
#endif
- size_t offset = notFound;
- StructureSet structureSet;
- if (stubInfo.seen
- && !m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
+ if (getByIdStatus.isSimpleDirect()
&& !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
- switch (stubInfo.accessType) {
- case access_get_by_id_self: {
- Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
- offset = structure->get(*m_globalData, identifier);
-
- if (offset != notFound)
- structureSet.add(structure);
-
- if (offset != notFound)
- ASSERT(structureSet.size());
- break;
- }
-
- case access_get_by_id_self_list: {
- PolymorphicAccessStructureList* list = stubInfo.u.getByIdProtoList.structureList;
- unsigned size = stubInfo.u.getByIdProtoList.listSize;
- for (unsigned i = 0; i < size; ++i) {
- if (!list->list[i].isDirect) {
- offset = notFound;
- break;
- }
-
- Structure* structure = list->list[i].base.get();
- if (structureSet.contains(structure))
- continue;
-
- size_t myOffset = structure->get(*m_globalData, identifier);
-
- if (myOffset == notFound) {
- offset = notFound;
- break;
- }
-
- if (!i)
- offset = myOffset;
- else if (offset != myOffset) {
- offset = notFound;
- break;
- }
-
- structureSet.add(structure);
- }
-
- if (offset != notFound)
- ASSERT(structureSet.size());
- break;
- }
-
- default:
- ASSERT(offset == notFound);
- break;
- }
- }
-
- if (offset != notFound) {
- ASSERT(structureSet.size());
+ ASSERT(getByIdStatus.structureSet().size());
// The implementation of GetByOffset does not know to terminate speculative
// execution if it doesn't have a prediction, so we do it manually.
if (prediction == PredictNone)
addToGraph(ForceOSRExit);
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), base);
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
set(currentInstruction[1].u.operand, addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), addToGraph(GetPropertyStorage, base)));
StorageAccessData storageAccessData;
- storageAccessData.offset = offset;
+ storageAccessData.offset = getByIdStatus.offset();
storageAccessData.identifierNumber = identifierNumber;
m_graph.m_storageAccessData.append(storageAccessData);
} else
- set(currentInstruction[1].u.operand, addToGraph(GetById, OpInfo(identifierNumber), OpInfo(prediction), base));
+ set(currentInstruction[1].u.operand, addToGraph(getByIdStatus.makesCalls() ? GetByIdFlush : GetById, OpInfo(identifierNumber), OpInfo(prediction), base));
NEXT_OPCODE(op_get_by_id);
}
-
case op_put_by_id: {
NodeIndex value = get(currentInstruction[3].u.operand);
NodeIndex base = get(currentInstruction[1].u.operand);
unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
bool direct = currentInstruction[8].u.operand;
- StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
- if (!stubInfo.seen)
+ PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
+ m_inlineStackTop->m_profiledBlock,
+ m_currentIndex,
+ m_codeBlock->identifier(identifierNumber));
+ if (!putByIdStatus.isSet())
addToGraph(ForceOSRExit);
- bool alreadyGenerated = false;
+ bool hasExitSite = m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache);
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Slow case count for PutById @%lu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+ printf("Slow case count for PutById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
#endif
- if (stubInfo.seen
- && !m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
- switch (stubInfo.accessType) {
- case access_put_by_id_replace: {
- Structure* structure = stubInfo.u.putByIdReplace.baseObjectStructure.get();
- Identifier identifier = m_codeBlock->identifier(identifierNumber);
- size_t offset = structure->get(*m_globalData, identifier);
-
- if (offset != notFound) {
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), base);
- addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
-
- StorageAccessData storageAccessData;
- storageAccessData.offset = offset;
- storageAccessData.identifierNumber = identifierNumber;
- m_graph.m_storageAccessData.append(storageAccessData);
-
- alreadyGenerated = true;
- }
- break;
- }
-
- case access_put_by_id_transition_normal:
- case access_put_by_id_transition_direct: {
- Structure* previousStructure = stubInfo.u.putByIdTransition.previousStructure.get();
- Structure* newStructure = stubInfo.u.putByIdTransition.structure.get();
-
- if (previousStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity())
- break;
-
- StructureChain* structureChain = stubInfo.u.putByIdTransition.chain.get();
-
- Identifier identifier = m_codeBlock->identifier(identifierNumber);
- size_t offset = newStructure->get(*m_globalData, identifier);
+ if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
+ addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = putByIdStatus.offset();
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
+ } else if (!hasExitSite
+ && putByIdStatus.isSimpleTransition()
+ && putByIdStatus.oldStructure()->propertyStorageCapacity() == putByIdStatus.newStructure()->propertyStorageCapacity()
+ && structureChainIsStillValid(
+ direct,
+ putByIdStatus.oldStructure(),
+ putByIdStatus.structureChain())) {
+
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
+ if (!direct) {
+ if (!putByIdStatus.oldStructure()->storedPrototype().isNull())
+ addToGraph(
+ CheckStructure,
+ OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure()->storedPrototype().asCell()->structure())),
+ cellConstant(putByIdStatus.oldStructure()->storedPrototype().asCell()));
- if (offset != notFound && structureChainIsStillValid(direct, previousStructure, structureChain)) {
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(previousStructure)), base);
- if (!direct) {
- if (!previousStructure->storedPrototype().isNull())
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(previousStructure->storedPrototype().asCell()->structure())), cellConstant(previousStructure->storedPrototype().asCell()));
-
- for (WriteBarrier<Structure>* it = structureChain->head(); *it; ++it) {
- JSValue prototype = (*it)->storedPrototype();
- if (prototype.isNull())
- continue;
- ASSERT(prototype.isCell());
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(prototype.asCell()->structure())), cellConstant(prototype.asCell()));
- }
- }
- addToGraph(PutStructure, OpInfo(m_graph.addStructureTransitionData(StructureTransitionData(previousStructure, newStructure))), base);
-
- addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
-
- StorageAccessData storageAccessData;
- storageAccessData.offset = offset;
- storageAccessData.identifierNumber = identifierNumber;
- m_graph.m_storageAccessData.append(storageAccessData);
-
- alreadyGenerated = true;
+ for (WriteBarrier<Structure>* it = putByIdStatus.structureChain()->head(); *it; ++it) {
+ JSValue prototype = (*it)->storedPrototype();
+ if (prototype.isNull())
+ continue;
+ ASSERT(prototype.isCell());
+ addToGraph(
+ CheckStructure,
+ OpInfo(m_graph.addStructureSet(prototype.asCell()->structure())),
+ cellConstant(prototype.asCell()));
}
- break;
- }
-
- default:
- break;
}
- }
-
- if (!alreadyGenerated) {
+ addToGraph(
+ PutStructure,
+ OpInfo(
+ m_graph.addStructureTransitionData(
+ StructureTransitionData(
+ putByIdStatus.oldStructure(),
+ putByIdStatus.newStructure()))),
+ base);
+
+ addToGraph(
+ PutByOffset,
+ OpInfo(m_graph.m_storageAccessData.size()),
+ base,
+ addToGraph(GetPropertyStorage, base),
+ value);
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = putByIdStatus.offset();
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
+ } else {
if (direct)
addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
else
@@ -2570,7 +2548,7 @@ void ByteCodeParser::parseCodeBlock()
} else {
OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_graph.size(), m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Creating basic block %p, #%lu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+ printf("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
m_currentBlock = block.get();
ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
@@ -2620,6 +2598,9 @@ bool ByteCodeParser::parse()
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
printf("Processing local variable phis.\n");
#endif
+
+ m_currentProfilingIndex = m_currentIndex;
+
processPhiStack<LocalPhiStack>();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
printf("Processing argument phis.\n");
diff --git a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
new file mode 100644
index 000000000..3481f99e8
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
@@ -0,0 +1,548 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGCCallHelpers_h
+#define DFGCCallHelpers_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGAssemblyHelpers.h"
+#include "DFGGPRInfo.h"
+
+namespace JSC { namespace DFG {
+
+class CCallHelpers : public AssemblyHelpers {
+public:
+ CCallHelpers(JSGlobalData* globalData, CodeBlock* codeBlock)
+ : AssemblyHelpers(globalData, codeBlock)
+ {
+ }
+
+ // These methods are used to sort arguments into the correct registers.
+ // On X86 we use cdecl calling conventions, which pass all arguments on the
+ // stack. On other architectures we may need to sort values into the
+ // correct registers.
+#if !NUMBER_OF_ARGUMENT_REGISTERS
+ unsigned m_callArgumentOffset;
+ void resetCallArguments() { m_callArgumentOffset = 0; }
+
+ // These methods are used internally to implement the callOperation methods.
+ void addCallArgument(GPRReg value)
+ {
+ poke(value, m_callArgumentOffset++);
+ }
+ void addCallArgument(TrustedImm32 imm)
+ {
+ poke(imm, m_callArgumentOffset++);
+ }
+ void addCallArgument(TrustedImmPtr pointer)
+ {
+ poke(pointer, m_callArgumentOffset++);
+ }
+ void addCallArgument(FPRReg value)
+ {
+ storeDouble(value, Address(stackPointerRegister, m_callArgumentOffset * sizeof(void*)));
+ m_callArgumentOffset += sizeof(double) / sizeof(void*);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsExecState()
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImmPtr arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+#endif // !NUMBER_OF_ARGUMENT_REGISTERS
+ // These methods are suitable for any calling convention that provides for
+ // at least 4 argument registers, e.g. X86_64, ARMv7.
+#if NUMBER_OF_ARGUMENT_REGISTERS >= 4
+ template<GPRReg destA, GPRReg destB>
+ void setupTwoStubArgs(GPRReg srcA, GPRReg srcB)
+ {
+ // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
+ // (1) both are already in arg regs, the right way around.
+ // (2) both are already in arg regs, the wrong way around.
+ // (3) neither are currently in arg registers.
+ // (4) srcA is in its correct reg.
+ // (5) srcA is in the incorrect reg.
+ // (6) srcB is in its correct reg.
+ // (7) srcB is in the incorrect reg.
+ //
+ // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
+ // place (the MacroAssembler will omit redundant moves). This approach will be safe in
+ // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
+ // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
+
+ if (srcB != destA) {
+ // Handle the easy cases - two simple moves.
+ move(srcA, destA);
+ move(srcB, destB);
+ } else if (srcA != destB) {
+ // Handle the non-swap case - just put srcB in place first.
+ move(srcB, destB);
+ move(srcA, destA);
+ } else
+ swap(destA, destB);
+ }
+#if CPU(X86_64)
+ template<FPRReg destA, FPRReg destB>
+ void setupTwoStubArgs(FPRReg srcA, FPRReg srcB)
+ {
+ // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
+ // (1) both are already in arg regs, the right way around.
+ // (2) both are already in arg regs, the wrong way around.
+ // (3) neither are currently in arg registers.
+ // (4) srcA is in its correct reg.
+ // (5) srcA is in the incorrect reg.
+ // (6) srcB is in its correct reg.
+ // (7) srcB is in the incorrect reg.
+ //
+ // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
+ // place (the MacroAssembler will omit redundant moves). This approach will be safe in
+ // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
+ // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
+
+ if (srcB != destA) {
+ // Handle the easy cases - two simple moves.
+ moveDouble(srcA, destA);
+ moveDouble(srcB, destB);
+ return;
+ }
+
+ if (srcA != destB) {
+ // Handle the non-swap case - just put srcB in place first.
+ moveDouble(srcB, destB);
+ moveDouble(srcA, destA);
+ return;
+ }
+
+ ASSERT(srcB == destA && srcA == destB);
+ // Need to swap; pick a temporary register.
+ FPRReg temp;
+ if (destA != FPRInfo::argumentFPR3 && destA != FPRInfo::argumentFPR3)
+ temp = FPRInfo::argumentFPR3;
+ else if (destA != FPRInfo::argumentFPR2 && destA != FPRInfo::argumentFPR2)
+ temp = FPRInfo::argumentFPR2;
+ else {
+ ASSERT(destA != FPRInfo::argumentFPR1 && destA != FPRInfo::argumentFPR1);
+ temp = FPRInfo::argumentFPR1;
+ }
+ moveDouble(destA, temp);
+ moveDouble(destB, destA);
+ moveDouble(temp, destB);
+ }
+#endif
+ void setupStubArguments(GPRReg arg1, GPRReg arg2)
+ {
+ setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
+ }
+ void setupStubArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ // If neither of arg2/arg3 are in our way, then we can move arg1 into place.
+ // Then we can use setupTwoStubArgs to fix arg2/arg3.
+ if (arg2 != GPRInfo::argumentGPR1 && arg3 != GPRInfo::argumentGPR1) {
+ move(arg1, GPRInfo::argumentGPR1);
+ setupTwoStubArgs<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3);
+ return;
+ }
+
+ // If neither of arg1/arg3 are in our way, then we can move arg2 into place.
+ // Then we can use setupTwoStubArgs to fix arg1/arg3.
+ if (arg1 != GPRInfo::argumentGPR2 && arg3 != GPRInfo::argumentGPR2) {
+ move(arg2, GPRInfo::argumentGPR2);
+ setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3>(arg1, arg3);
+ return;
+ }
+
+ // If neither of arg1/arg2 are in our way, then we can move arg3 into place.
+ // Then we can use setupTwoStubArgs to fix arg1/arg2.
+ if (arg1 != GPRInfo::argumentGPR3 && arg2 != GPRInfo::argumentGPR3) {
+ move(arg3, GPRInfo::argumentGPR3);
+ setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
+ return;
+ }
+
+ // If we get here, we haven't been able to move any of arg1/arg2/arg3.
+ // Since all three are blocked, all three must already be in argument registers.
+ // But are they in the right ones?
+
+ // First, ensure arg1 is in place.
+ if (arg1 != GPRInfo::argumentGPR1) {
+ swap(arg1, GPRInfo::argumentGPR1);
+
+ // If arg1 wasn't in argumentGPR1, one of arg2/arg3 must be.
+ ASSERT(arg2 == GPRInfo::argumentGPR1 || arg3 == GPRInfo::argumentGPR1);
+ // If arg2 was in argumentGPR1 it no longer is (due to the swap).
+ // Otherwise arg3 must have been. Mark him as moved.
+ if (arg2 == GPRInfo::argumentGPR1)
+ arg2 = arg1;
+ else
+ arg3 = arg1;
+ }
+
+ // Either arg2 & arg3 need swapping, or we're all done.
+ ASSERT((arg2 == GPRInfo::argumentGPR2 || arg3 == GPRInfo::argumentGPR3)
+ || (arg2 == GPRInfo::argumentGPR3 || arg3 == GPRInfo::argumentGPR2));
+
+ if (arg2 != GPRInfo::argumentGPR2)
+ swap(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3);
+ }
+
+#if CPU(X86_64)
+ ALWAYS_INLINE void setupArguments(FPRReg arg1)
+ {
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
+ {
+ setupTwoStubArgs<FPRInfo::argumentFPR0, FPRInfo::argumentFPR1>(arg1, arg2);
+ }
+#else
+ ALWAYS_INLINE void setupArguments(FPRReg arg1)
+ {
+ assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
+ {
+ assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1);
+ assembler().vmov(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, arg2);
+ }
+#endif
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2)
+ {
+ setupTwoStubArgs<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1>(arg1, arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsExecState()
+ {
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2)
+ {
+ setupStubArguments(arg1, arg2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample!
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ setupStubArguments(arg1, arg2, arg3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3)
+ {
+ setupStubArguments(arg1, arg2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImmPtr arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3)
+ {
+ setupStubArguments(arg1, arg2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+#endif // NUMBER_OF_ARGUMENT_REGISTERS >= 4
+ // These methods are suitable for any calling convention that provides for
+ // exactly 4 argument registers, e.g. ARMv7.
+#if NUMBER_OF_ARGUMENT_REGISTERS == 4
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
+ {
+ poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
+ {
+ poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
+ {
+ poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, 1);
+ poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+#endif // NUMBER_OF_ARGUMENT_REGISTERS == 4
+
+ void setupResults(GPRReg destA, GPRReg destB)
+ {
+ GPRReg srcA = GPRInfo::returnValueGPR;
+ GPRReg srcB = GPRInfo::returnValueGPR2;
+
+ if (srcB != destA) {
+ // Handle the easy cases - two simple moves.
+ move(srcA, destA);
+ move(srcB, destB);
+ } else if (srcA != destB) {
+ // Handle the non-swap case - just put srcB in place first.
+ move(srcB, destB);
+ move(srcA, destA);
+ } else
+ swap(destA, destB);
+ }
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGCCallHelpers_h
+
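
The argument-marshalling helpers above all reduce to the same two-register shuffle solved by setupTwoStubArgs() and setupResults(): move two sources into two destinations without trampling a source that happens to live in a destination register. Here is a self-contained sketch of that logic, using C++ references to ints as stand-ins for registers (illustration only, not JSC code).

    #include <cassert>
    #include <utility>

    // Move srcA -> destA and srcB -> destB; reference identity plays the role of
    // "same register".
    void moveTwo(int& destA, int& destB, int& srcA, int& srcB)
    {
        if (&srcB != &destA) {
            // Easy case: writing destA cannot clobber srcB, so two plain moves work.
            destA = srcA;
            destB = srcB;
        } else if (&srcA != &destB) {
            // srcB currently lives in destA: store it away first, then place srcA.
            destB = srcB;
            destA = srcA;
        } else {
            // srcA == destB and srcB == destA: the values are crossed, so swap.
            std::swap(destA, destB);
        }
    }

    int main()
    {
        int a = 1, b = 2;
        moveTwo(a, b, b, a);      // fully crossed case exercises the swap path
        assert(a == 2 && b == 1);
        return 0;
    }
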
diff --git a/Source/JavaScriptCore/dfg/DFGDriver.cpp b/Source/JavaScriptCore/dfg/DFGDriver.cpp
index 2143fbc19..27a0dab75 100644
--- a/Source/JavaScriptCore/dfg/DFGDriver.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDriver.cpp
@@ -35,7 +35,7 @@
namespace JSC { namespace DFG {
enum CompileMode { CompileFunction, CompileOther };
-inline bool compile(CompileMode compileMode, ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr* jitCodeWithArityCheck)
+inline bool compile(CompileMode compileMode, JSGlobalData& globalData, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr* jitCodeWithArityCheck)
{
SamplingRegion samplingRegion("DFG Compilation (Driver)");
@@ -47,17 +47,16 @@ inline bool compile(CompileMode compileMode, ExecState* exec, CodeBlock* codeBlo
fprintf(stderr, "DFG compiling code block %p(%p), number of instructions = %u.\n", codeBlock, codeBlock->alternative(), codeBlock->instructionCount());
#endif
- JSGlobalData* globalData = &exec->globalData();
Graph dfg;
- if (!parse(dfg, globalData, codeBlock))
+ if (!parse(dfg, &globalData, codeBlock))
return false;
if (compileMode == CompileFunction)
dfg.predictArgumentTypes(codeBlock);
- propagate(dfg, globalData, codeBlock);
+ propagate(dfg, &globalData, codeBlock);
- JITCompiler dataFlowJIT(globalData, dfg, codeBlock);
+ JITCompiler dataFlowJIT(&globalData, dfg, codeBlock);
if (compileMode == CompileFunction) {
ASSERT(jitCodeWithArityCheck);
@@ -72,14 +71,14 @@ inline bool compile(CompileMode compileMode, ExecState* exec, CodeBlock* codeBlo
return true;
}
-bool tryCompile(ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode)
+bool tryCompile(JSGlobalData& globalData, CodeBlock* codeBlock, JITCode& jitCode)
{
- return compile(CompileOther, exec, codeBlock, jitCode, 0);
+ return compile(CompileOther, globalData, codeBlock, jitCode, 0);
}
-bool tryCompileFunction(ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck)
+bool tryCompileFunction(JSGlobalData& globalData, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck)
{
- return compile(CompileFunction, exec, codeBlock, jitCode, &jitCodeWithArityCheck);
+ return compile(CompileFunction, globalData, codeBlock, jitCode, &jitCodeWithArityCheck);
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGDriver.h b/Source/JavaScriptCore/dfg/DFGDriver.h
index dad45f32e..639b13f7a 100644
--- a/Source/JavaScriptCore/dfg/DFGDriver.h
+++ b/Source/JavaScriptCore/dfg/DFGDriver.h
@@ -30,19 +30,19 @@
namespace JSC {
-class ExecState;
class CodeBlock;
class JITCode;
+class JSGlobalData;
class MacroAssemblerCodePtr;
namespace DFG {
#if ENABLE(DFG_JIT)
-bool tryCompile(ExecState*, CodeBlock*, JITCode&);
-bool tryCompileFunction(ExecState*, CodeBlock*, JITCode&, MacroAssemblerCodePtr& jitCodeWithArityCheck);
+bool tryCompile(JSGlobalData&, CodeBlock*, JITCode&);
+bool tryCompileFunction(JSGlobalData&, CodeBlock*, JITCode&, MacroAssemblerCodePtr& jitCodeWithArityCheck);
#else
-inline bool tryCompile(ExecState*, CodeBlock*, JITCode&) { return false; }
-inline bool tryCompileFunction(ExecState*, CodeBlock*, JITCode&, MacroAssemblerCodePtr&) { return false; }
+inline bool tryCompile(JSGlobalData&, CodeBlock*, JITCode&) { return false; }
+inline bool tryCompileFunction(JSGlobalData&, CodeBlock*, JITCode&, MacroAssemblerCodePtr&) { return false; }
#endif
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp
index 2a3e23040..e01bea195 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.cpp
+++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp
@@ -261,10 +261,10 @@ void Graph::dump(NodeIndex nodeIndex, CodeBlock* codeBlock)
if (!skipped) {
if (node.hasVariableAccessData())
printf(" predicting %s, double ratio %lf%s", predictionToString(node.variableAccessData()->prediction()), node.variableAccessData()->doubleVoteRatio(), node.variableAccessData()->shouldUseDoubleFormat() ? ", forcing double" : "");
- else if (node.hasVarNumber())
- printf(" predicting %s", predictionToString(getGlobalVarPrediction(node.varNumber())));
else if (node.hasHeapPrediction())
printf(" predicting %s", predictionToString(node.getHeapPrediction()));
+ else if (node.hasVarNumber())
+ printf(" predicting %s", predictionToString(getGlobalVarPrediction(node.varNumber())));
}
printf("\n");
@@ -353,7 +353,7 @@ void Graph::predictArgumentTypes(CodeBlock* codeBlock)
at(m_arguments[arg]).variableAccessData()->predict(profile->computeUpdatedPrediction());
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Argument [%lu] prediction: %s\n", arg, predictionToString(at(m_arguments[arg]).variableAccessData()->prediction()));
+ printf("Argument [%zu] prediction: %s\n", arg, predictionToString(at(m_arguments[arg]).variableAccessData()->prediction()));
#endif
}
}
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
index fb729063d..d3f16a0f4 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -137,6 +137,26 @@ public:
return predictionFromValue(node.valueOfJSConstant(codeBlock));
}
+ bool addShouldSpeculateInteger(Node& add, CodeBlock* codeBlock)
+ {
+ ASSERT(add.op == ValueAdd || add.op == ArithAdd || add.op == ArithSub);
+
+ Node& left = at(add.child1());
+ Node& right = at(add.child2());
+
+ if (left.hasConstant())
+ return addImmediateShouldSpeculateInteger(codeBlock, add, right, left);
+ if (right.hasConstant())
+ return addImmediateShouldSpeculateInteger(codeBlock, add, left, right);
+
+ return Node::shouldSpeculateInteger(left, right) && add.canSpeculateInteger();
+ }
+
+ bool addShouldSpeculateInteger(NodeIndex nodeIndex, CodeBlock* codeBlock)
+ {
+ return addShouldSpeculateInteger(at(nodeIndex), codeBlock);
+ }
+
// Helper methods to check nodes for constants.
bool isConstant(NodeIndex nodeIndex)
{
@@ -223,8 +243,7 @@ public:
Node& node = at(nodeIndex);
- switch (node.op) {
- case GetLocal: {
+ if (node.op == GetLocal) {
if (!operandIsArgument(node.local()))
return 0;
int argument = operandToArgument(node.local());
@@ -233,21 +252,10 @@ public:
return profiledBlock->valueProfileForArgument(argument);
}
- // Nodes derives from calls need special handling because the value profile is
- // associated with the op_call_put_result instruction.
- case Call:
- case Construct:
- case ArrayPop:
- case ArrayPush: {
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
- return profiledBlock->valueProfileForBytecodeOffset(node.codeOrigin.bytecodeIndex + OPCODE_LENGTH(op_call));
- }
-
- default:
- if (node.hasHeapPrediction())
- return profiledBlock->valueProfileForBytecodeOffset(node.codeOrigin.bytecodeIndex);
- return 0;
- }
+ if (node.hasHeapPrediction())
+ return profiledBlock->valueProfileForBytecodeOffset(node.codeOrigin.bytecodeIndexForValueProfile());
+
+ return 0;
}
Vector< OwnPtr<BasicBlock> , 8> m_blocks;
@@ -263,6 +271,28 @@ public:
unsigned m_parameterSlots;
private:
+ bool addImmediateShouldSpeculateInteger(CodeBlock* codeBlock, Node& add, Node& variable, Node& immediate)
+ {
+ ASSERT(immediate.hasConstant());
+
+ JSValue immediateValue = immediate.valueOfJSConstant(codeBlock);
+ if (!immediateValue.isNumber())
+ return false;
+
+ if (!variable.shouldSpeculateInteger())
+ return false;
+
+ if (immediateValue.isInt32())
+ return add.canSpeculateInteger();
+
+ double doubleImmediate = immediateValue.asDouble();
+ const double twoToThe48 = 281474976710656.0;
+ if (doubleImmediate < -twoToThe48 || doubleImmediate > twoToThe48)
+ return false;
+
+ return nodeCanTruncateInteger(add.arithNodeFlags());
+ }
+
// When a node's refCount goes from 0 to 1, it must (logically) recursively ref all of its children, and vice versa.
void refChildren(NodeIndex);
void derefChildren(NodeIndex);
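
For readers skimming the DFGGraph.h hunks above: the new add-speculation helpers treat a constant operand specially. An int32 immediate keeps the add on the integer path whenever the node may speculate integer at all, while a non-int32 double immediate is only tolerated when it lies within +/- 2^48 and the node is allowed to truncate. A standalone sketch of that decision follows (plain C++, hypothetical names; only the 2^48 bound and the branch structure are taken from the patch):

// Hypothetical restatement of the checks in addImmediateShouldSpeculateInteger.
bool immediateAllowsIntegerAdd(bool variableSpeculatesInteger,
                               bool immediateIsInt32,
                               double immediateAsDouble,
                               bool addCanSpeculateInteger,
                               bool addCanTruncateInteger)
{
    if (!variableSpeculatesInteger)
        return false;
    if (immediateIsInt32)
        return addCanSpeculateInteger;
    // Bound copied from the patch: immediates outside +/- 2^48 force the
    // double path even when truncation would otherwise be allowed.
    const double twoToThe48 = 281474976710656.0;
    if (immediateAsDouble < -twoToThe48 || immediateAsDouble > twoToThe48)
        return false;
    return addCanTruncateInteger;
}
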
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index 593e4d930..ac5f314a1 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -116,7 +116,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
{
// Link the code, populate data in CodeBlock data structures.
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "JIT code for %p start at [%p, %p). Size = %lu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
+ fprintf(stderr, "JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
#endif
// Link all calls out from the JIT code to their respective functions.
@@ -151,6 +151,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
codeOrigins[j].codeOrigin = record.m_codeOrigin;
codeOrigins[j].callReturnOffset = returnAddressOffset;
+ record.m_token.assertCodeOriginIndex(j);
j++;
}
}
@@ -160,6 +161,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_functionCall);
+ info.codeOrigin = m_propertyAccesses[i].m_codeOrigin;
info.callReturnLocation = callReturnLocation;
info.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCheckImmToCall), callReturnLocation);
info.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToStructCheck));
@@ -179,6 +181,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
info.valueGPR = m_propertyAccesses[i].m_valueGPR;
#endif
info.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
+ info.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
}
m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
@@ -209,7 +212,7 @@ void JITCompiler::compile(JITCode& entry)
SpeculativeJIT speculative(*this);
compileBody(speculative);
- LinkBuffer linkBuffer(*m_globalData, this);
+ LinkBuffer linkBuffer(*m_globalData, this, m_codeBlock);
link(linkBuffer);
speculative.linkOSREntries(linkBuffer);
@@ -269,7 +272,7 @@ void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWi
// === Link ===
- LinkBuffer linkBuffer(*m_globalData, this);
+ LinkBuffer linkBuffer(*m_globalData, this, m_codeBlock);
link(linkBuffer);
speculative.linkOSREntries(linkBuffer);
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
index de0475a56..451bee6ca 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.h
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
@@ -31,7 +31,7 @@
#include <assembler/LinkBuffer.h>
#include <assembler/MacroAssembler.h>
#include <bytecode/CodeBlock.h>
-#include <dfg/DFGAssemblyHelpers.h>
+#include <dfg/DFGCCallHelpers.h>
#include <dfg/DFGFPRInfo.h>
#include <dfg/DFGGPRInfo.h>
#include <dfg/DFGGraph.h>
@@ -70,37 +70,76 @@ struct CallLinkRecord {
FunctionPtr m_function;
};
+class CallBeginToken {
+public:
+ CallBeginToken()
+#if !ASSERT_DISABLED
+ : m_codeOriginIndex(UINT_MAX)
+#endif
+ {
+ }
+
+ explicit CallBeginToken(unsigned codeOriginIndex)
+#if !ASSERT_DISABLED
+ : m_codeOriginIndex(codeOriginIndex)
+#endif
+ {
+ UNUSED_PARAM(codeOriginIndex);
+ }
+
+ void assertCodeOriginIndex(unsigned codeOriginIndex) const
+ {
+ ASSERT_UNUSED(codeOriginIndex, codeOriginIndex < UINT_MAX);
+ ASSERT_UNUSED(codeOriginIndex, codeOriginIndex == m_codeOriginIndex);
+ }
+
+ void assertNoCodeOriginIndex() const
+ {
+ ASSERT(m_codeOriginIndex == UINT_MAX);
+ }
+private:
+#if !ASSERT_DISABLED
+ unsigned m_codeOriginIndex;
+#endif
+};
+
// === CallExceptionRecord ===
//
// A record of a call out from JIT code that might throw an exception.
// Calls that might throw an exception also record the Jump taken on exception
// (unset if not present) and code origin used to recover handler/source info.
struct CallExceptionRecord {
- CallExceptionRecord(MacroAssembler::Call call, CodeOrigin codeOrigin)
+ CallExceptionRecord(MacroAssembler::Call call, CodeOrigin codeOrigin, CallBeginToken token)
: m_call(call)
, m_codeOrigin(codeOrigin)
+ , m_token(token)
{
}
- CallExceptionRecord(MacroAssembler::Call call, MacroAssembler::Jump exceptionCheck, CodeOrigin codeOrigin)
+ CallExceptionRecord(MacroAssembler::Call call, MacroAssembler::Jump exceptionCheck, CodeOrigin codeOrigin, CallBeginToken token)
: m_call(call)
, m_exceptionCheck(exceptionCheck)
, m_codeOrigin(codeOrigin)
+ , m_token(token)
{
}
MacroAssembler::Call m_call;
MacroAssembler::Jump m_exceptionCheck;
CodeOrigin m_codeOrigin;
+ CallBeginToken m_token;
};
struct PropertyAccessRecord {
+ enum RegisterMode { RegistersFlushed, RegistersInUse };
+
#if USE(JSVALUE64)
- PropertyAccessRecord(MacroAssembler::DataLabelPtr deltaCheckImmToCall, MacroAssembler::Call functionCall, MacroAssembler::Jump deltaCallToStructCheck, MacroAssembler::DataLabelCompact deltaCallToLoadOrStore, MacroAssembler::Label deltaCallToSlowCase, MacroAssembler::Label deltaCallToDone, int8_t baseGPR, int8_t valueGPR, int8_t scratchGPR)
+ PropertyAccessRecord(CodeOrigin codeOrigin, MacroAssembler::DataLabelPtr deltaCheckImmToCall, MacroAssembler::Call functionCall, MacroAssembler::Jump deltaCallToStructCheck, MacroAssembler::DataLabelCompact deltaCallToLoadOrStore, MacroAssembler::Label deltaCallToSlowCase, MacroAssembler::Label deltaCallToDone, int8_t baseGPR, int8_t valueGPR, int8_t scratchGPR, RegisterMode registerMode = RegistersInUse)
#elif USE(JSVALUE32_64)
- PropertyAccessRecord(MacroAssembler::DataLabelPtr deltaCheckImmToCall, MacroAssembler::Call functionCall, MacroAssembler::Jump deltaCallToStructCheck, MacroAssembler::DataLabelCompact deltaCallToTagLoadOrStore, MacroAssembler::DataLabelCompact deltaCallToPayloadLoadOrStore, MacroAssembler::Label deltaCallToSlowCase, MacroAssembler::Label deltaCallToDone, int8_t baseGPR, int8_t valueTagGPR, int8_t valueGPR, int8_t scratchGPR)
+ PropertyAccessRecord(CodeOrigin codeOrigin, MacroAssembler::DataLabelPtr deltaCheckImmToCall, MacroAssembler::Call functionCall, MacroAssembler::Jump deltaCallToStructCheck, MacroAssembler::DataLabelCompact deltaCallToTagLoadOrStore, MacroAssembler::DataLabelCompact deltaCallToPayloadLoadOrStore, MacroAssembler::Label deltaCallToSlowCase, MacroAssembler::Label deltaCallToDone, int8_t baseGPR, int8_t valueTagGPR, int8_t valueGPR, int8_t scratchGPR, RegisterMode registerMode = RegistersInUse)
#endif
- : m_deltaCheckImmToCall(deltaCheckImmToCall)
+ : m_codeOrigin(codeOrigin)
+ , m_deltaCheckImmToCall(deltaCheckImmToCall)
, m_functionCall(functionCall)
, m_deltaCallToStructCheck(deltaCallToStructCheck)
#if USE(JSVALUE64)
@@ -117,9 +156,11 @@ struct PropertyAccessRecord {
#endif
, m_valueGPR(valueGPR)
, m_scratchGPR(scratchGPR)
+ , m_registerMode(registerMode)
{
}
+ CodeOrigin m_codeOrigin;
MacroAssembler::DataLabelPtr m_deltaCheckImmToCall;
MacroAssembler::Call m_functionCall;
MacroAssembler::Jump m_deltaCallToStructCheck;
@@ -137,6 +178,7 @@ struct PropertyAccessRecord {
#endif
int8_t m_valueGPR;
int8_t m_scratchGPR;
+ RegisterMode m_registerMode;
};
// === JITCompiler ===
@@ -147,11 +189,12 @@ struct PropertyAccessRecord {
// relationship). The JITCompiler holds references to information required during
// compilation, and also records information used in linking (e.g. a list of all
// calls to be linked).
-class JITCompiler : public AssemblyHelpers {
+class JITCompiler : public CCallHelpers {
public:
JITCompiler(JSGlobalData* globalData, Graph& dfg, CodeBlock* codeBlock)
- : AssemblyHelpers(globalData, codeBlock)
+ : CCallHelpers(globalData, codeBlock)
, m_graph(dfg)
+ , m_currentCodeOriginIndex(0)
{
}
@@ -160,11 +203,32 @@ public:
// Accessors for properties.
Graph& graph() { return m_graph; }
+
+ // Just get a token for beginning a call.
+ CallBeginToken nextCallBeginToken(CodeOrigin codeOrigin)
+ {
+ if (!codeOrigin.inlineCallFrame)
+ return CallBeginToken();
+ return CallBeginToken(m_currentCodeOriginIndex++);
+ }
+
+ // Get a token for beginning a call, and set the current code origin index in
+ // the call frame.
+ CallBeginToken beginCall(CodeOrigin codeOrigin)
+ {
+ unsigned codeOriginIndex;
+ if (!codeOrigin.inlineCallFrame)
+ codeOriginIndex = UINT_MAX;
+ else
+ codeOriginIndex = m_currentCodeOriginIndex++;
+ store32(TrustedImm32(codeOriginIndex), tagFor(static_cast<VirtualRegister>(RegisterFile::ArgumentCount)));
+ return CallBeginToken(codeOriginIndex);
+ }
// Notify the JIT of a call that does not require linking.
- void notifyCall(Call functionCall, CodeOrigin codeOrigin)
+ void notifyCall(Call functionCall, CodeOrigin codeOrigin, CallBeginToken token)
{
- m_exceptionChecks.append(CallExceptionRecord(functionCall, codeOrigin));
+ m_exceptionChecks.append(CallExceptionRecord(functionCall, codeOrigin, token));
}
// Add a call out from JIT code, without an exception check.
@@ -176,25 +240,18 @@ public:
}
// Add a call out from JIT code, with an exception check.
- Call addExceptionCheck(Call functionCall, CodeOrigin codeOrigin)
+ void addExceptionCheck(Call functionCall, CodeOrigin codeOrigin, CallBeginToken token)
{
move(TrustedImm32(m_exceptionChecks.size()), GPRInfo::nonPreservedNonReturnGPR);
-#if USE(JSVALUE64)
- Jump exceptionCheck = branchTestPtr(NonZero, AbsoluteAddress(&globalData()->exception));
-#elif USE(JSVALUE32_64)
- Jump exceptionCheck = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData()->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
-#endif
- m_exceptionChecks.append(CallExceptionRecord(functionCall, exceptionCheck, codeOrigin));
- return functionCall;
+ m_exceptionChecks.append(CallExceptionRecord(functionCall, emitExceptionCheck(), codeOrigin, token));
}
// Add a call out from JIT code, with a fast exception check that tests if the return value is zero.
- Call addFastExceptionCheck(Call functionCall, CodeOrigin codeOrigin)
+ void addFastExceptionCheck(Call functionCall, CodeOrigin codeOrigin, CallBeginToken token)
{
move(TrustedImm32(m_exceptionChecks.size()), GPRInfo::nonPreservedNonReturnGPR);
Jump exceptionCheck = branchTestPtr(Zero, GPRInfo::returnValueGPR);
- m_exceptionChecks.append(CallExceptionRecord(functionCall, exceptionCheck, codeOrigin));
- return functionCall;
+ m_exceptionChecks.append(CallExceptionRecord(functionCall, exceptionCheck, codeOrigin, token));
}
// Helper methods to check nodes for constants.
@@ -325,6 +382,7 @@ private:
Vector<PropertyAccessRecord, 4> m_propertyAccesses;
Vector<JSCallRecord, 4> m_jsCalls;
+ unsigned m_currentCodeOriginIndex;
};
} } // namespace JSC::DFG
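
The CallBeginToken machinery introduced above ties each planted call to the code-origin index that beginCall() stored into the frame's ArgumentCount tag slot, so that the link step can assert the exception record it is writing matches the same index. A condensed sketch of that flow, with the JIT types replaced by plain stand-ins (illustrative only, not the real API):

#include <cassert>
#include <climits>

// Illustrative stand-ins; the real types are CallBeginToken and
// CallExceptionRecord above.
struct TokenSketch {
    unsigned codeOriginIndex; // UINT_MAX means "no inlined code origin"
};

// Mirrors JITCompiler::beginCall: inlined call sites consume the next index,
// non-inlined sites record UINT_MAX. The real code also store32()s the index
// into the frame's ArgumentCount tag slot.
TokenSketch beginCallSketch(unsigned& currentCodeOriginIndex, bool hasInlineCallFrame)
{
    unsigned index = hasInlineCallFrame ? currentCodeOriginIndex++ : UINT_MAX;
    return TokenSketch { index };
}

// Mirrors CallBeginToken::assertCodeOriginIndex as invoked from JITCompiler::link.
void checkAtLinkTime(const TokenSketch& token, unsigned indexBeingLinked)
{
    assert(token.codeOriginIndex == indexBeingLinked);
}
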
diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h
index cb5be691c..7366c1c40 100644
--- a/Source/JavaScriptCore/dfg/DFGNode.h
+++ b/Source/JavaScriptCore/dfg/DFGNode.h
@@ -229,6 +229,7 @@ static inline const char* arithNodeFlagsAsString(ArithNodeFlags flags)
macro(PutByVal, NodeMustGenerate | NodeClobbersWorld) \
macro(PutByValAlias, NodeMustGenerate | NodeClobbersWorld) \
macro(GetById, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(GetByIdFlush, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
macro(PutById, NodeMustGenerate | NodeClobbersWorld) \
macro(PutByIdDirect, NodeMustGenerate | NodeClobbersWorld) \
macro(CheckStructure, NodeMustGenerate) \
@@ -244,6 +245,7 @@ static inline const char* arithNodeFlagsAsString(ArithNodeFlags flags)
macro(GetInt16ArrayLength, NodeResultInt32) \
macro(GetInt32ArrayLength, NodeResultInt32) \
macro(GetUint8ArrayLength, NodeResultInt32) \
+ macro(GetUint8ClampedArrayLength, NodeResultInt32) \
macro(GetUint16ArrayLength, NodeResultInt32) \
macro(GetUint32ArrayLength, NodeResultInt32) \
macro(GetFloat32ArrayLength, NodeResultInt32) \
@@ -502,6 +504,7 @@ struct Node {
{
switch (op) {
case GetById:
+ case GetByIdFlush:
case PutById:
case PutByIdDirect:
case Resolve:
@@ -718,6 +721,7 @@ struct Node {
{
switch (op) {
case GetById:
+ case GetByIdFlush:
case GetByVal:
case Call:
case Construct:
@@ -960,6 +964,11 @@ struct Node {
{
return isUint8ArrayPrediction(prediction());
}
+
+ bool shouldSpeculateUint8ClampedArray()
+ {
+ return isUint8ClampedArrayPrediction(prediction());
+ }
bool shouldSpeculateUint16Array()
{
diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
index 4510ec7b9..cbcd1319a 100644
--- a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
@@ -80,7 +80,7 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
for (size_t argument = 0; argument < entry->m_expectedValues.numberOfArguments(); ++argument) {
if (argument >= exec->argumentCountIncludingThis()) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf(" OSR failed because argument %lu was not passed, expected ", argument);
+ printf(" OSR failed because argument %zu was not passed, expected ", argument);
entry->m_expectedValues.argument(argument).dump(stdout);
printf(".\n");
#endif
@@ -95,7 +95,7 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
if (!entry->m_expectedValues.argument(argument).validate(value)) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf(" OSR failed because argument %lu is %s, expected ", argument, value.description());
+ printf(" OSR failed because argument %zu is %s, expected ", argument, value.description());
entry->m_expectedValues.argument(argument).dump(stdout);
printf(".\n");
#endif
@@ -107,7 +107,7 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
if (entry->m_localsForcedDouble.get(local)) {
if (!exec->registers()[local].jsValue().isNumber()) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf(" OSR failed because variable %lu is %s, expected number.\n", local, exec->registers()[local].jsValue().description());
+ printf(" OSR failed because variable %zu is %s, expected number.\n", local, exec->registers()[local].jsValue().description());
#endif
return 0;
}
@@ -115,7 +115,7 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
}
if (!entry->m_expectedValues.local(local).validate(exec->registers()[local].jsValue())) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf(" OSR failed because variable %lu is %s, expected ", local, exec->registers()[local].jsValue().description());
+ printf(" OSR failed because variable %zu is %s, expected ", local, exec->registers()[local].jsValue().description());
entry->m_expectedValues.local(local).dump(stdout);
printf(".\n");
#endif
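
The %lu to %zu substitutions in DFGOSREntry.cpp (and the similar ones earlier in this patch) are the portable way to print size_t indices: %zu matches size_t on every target, whereas %lu is only correct where size_t happens to be unsigned long. A minimal example of the corrected form:

#include <cstdio>
#include <cstddef>

int main()
{
    std::size_t argument = 3;
    // %zu is the standard length modifier for size_t; %lu is only correct
    // on targets where size_t happens to be unsigned long.
    std::printf("    OSR failed because argument %zu was not passed.\n", argument);
    return 0;
}
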
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
index 3da8189d1..1b88c4ffc 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
@@ -62,7 +62,7 @@ void compileOSRExit(ExecState* exec)
exitCompiler.compileExit(exit, recovery);
- LinkBuffer patchBuffer(*globalData, &jit);
+ LinkBuffer patchBuffer(*globalData, &jit, codeBlock);
exit.m_code = patchBuffer.finalizeCode();
#if DFG_ENABLE(DEBUG_VERBOSE)
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
index 3d27e00e4..4e33d7b02 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
@@ -579,13 +579,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
AssemblyHelpers::Jump lowFailRate = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
// Reoptimize as soon as possible.
- m_jit.store32(AssemblyHelpers::Imm32(Options::executionCounterValueForOptimizeNextInvocation), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
+ m_jit.store32(AssemblyHelpers::Imm32(Options::executionCounterValueForOptimizeNextInvocation), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
fewFails.link(&m_jit);
lowFailRate.link(&m_jit);
- m_jit.store32(AssemblyHelpers::Imm32(m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp()), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
+ m_jit.store32(AssemblyHelpers::Imm32(m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp()), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
doneAdjusting.link(&m_jit);
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
index c6f4a9ed4..98c891ac7 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
@@ -554,13 +554,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
AssemblyHelpers::Jump lowFailRate = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
// Reoptimize as soon as possible.
- m_jit.store32(AssemblyHelpers::Imm32(Options::executionCounterValueForOptimizeNextInvocation), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
+ m_jit.store32(AssemblyHelpers::Imm32(Options::executionCounterValueForOptimizeNextInvocation), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
fewFails.link(&m_jit);
lowFailRate.link(&m_jit);
- m_jit.store32(AssemblyHelpers::Imm32(m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp()), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
+ m_jit.store32(AssemblyHelpers::Imm32(m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp()), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
doneAdjusting.link(&m_jit);
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp
index 2e6fd9276..569b4fe86 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp
@@ -31,6 +31,7 @@
#include "CodeBlock.h"
#include "DFGOSRExit.h"
#include "DFGRepatch.h"
+#include "GetterSetter.h"
#include "InlineASM.h"
#include "Interpreter.h"
#include "JSByteArray.h"
@@ -42,6 +43,7 @@
#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, register) \
asm( \
".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
SYMBOL_STRING(function) ":" "\n" \
"mov (%rsp), %" STRINGIZE(register) "\n" \
"jmp " SYMBOL_STRING_RELOCATION(function##WithReturnAddress) "\n" \
@@ -56,6 +58,7 @@
#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, offset) \
asm( \
".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
SYMBOL_STRING(function) ":" "\n" \
"mov (%esp), %eax\n" \
"mov %eax, " STRINGIZE(offset) "(%esp)\n" \
@@ -175,6 +178,9 @@ static inline void putByVal(ExecState* exec, JSValue baseValue, uint32_t index,
template<bool strict>
ALWAYS_INLINE static void DFG_OPERATION operationPutByValInternal(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue baseValue = JSValue::decode(encodedBase);
JSValue property = JSValue::decode(encodedProperty);
JSValue value = JSValue::decode(encodedValue);
@@ -193,10 +199,9 @@ ALWAYS_INLINE static void DFG_OPERATION operationPutByValInternal(ExecState* exe
}
}
- JSGlobalData* globalData = &exec->globalData();
// Don't put to an object if toString throws an exception.
- Identifier ident(exec, property.toString(exec));
+ Identifier ident(exec, property.toString(exec)->value(exec));
if (!globalData->exception) {
PutPropertySlot slot(strict);
baseValue.put(exec, ident, value, slot);
@@ -207,6 +212,9 @@ extern "C" {
EncodedJSValue DFG_OPERATION operationConvertThis(ExecState* exec, EncodedJSValue encodedOp)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return JSValue::encode(JSValue::decode(encodedOp).toThisObject(exec));
}
@@ -230,21 +238,33 @@ inline JSCell* createThis(ExecState* exec, JSCell* prototype, JSFunction* constr
JSCell* DFG_OPERATION operationCreateThis(ExecState* exec, JSCell* prototype)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return createThis(exec, prototype, asFunction(exec->callee()));
}
JSCell* DFG_OPERATION operationCreateThisInlined(ExecState* exec, JSCell* prototype, JSCell* constructor)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return createThis(exec, prototype, static_cast<JSFunction*>(constructor));
}
JSCell* DFG_OPERATION operationNewObject(ExecState* exec)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return constructEmptyObject(exec);
}
EncodedJSValue DFG_OPERATION operationValueAdd(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue op1 = JSValue::decode(encodedOp1);
JSValue op2 = JSValue::decode(encodedOp2);
@@ -253,16 +273,16 @@ EncodedJSValue DFG_OPERATION operationValueAdd(ExecState* exec, EncodedJSValue e
EncodedJSValue DFG_OPERATION operationValueAddNotNumber(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue op1 = JSValue::decode(encodedOp1);
JSValue op2 = JSValue::decode(encodedOp2);
ASSERT(!op1.isNumber() || !op2.isNumber());
- if (op1.isString()) {
- if (op2.isString())
- return JSValue::encode(jsString(exec, asString(op1), asString(op2)));
- return JSValue::encode(jsString(exec, asString(op1), op2.toPrimitiveString(exec)));
- }
+ if (op1.isString() && !op2.isObject())
+ return JSValue::encode(jsString(exec, asString(op1), op2.toString(exec)));
return JSValue::encode(jsAddSlowCase(exec, op1, op2));
}
@@ -286,6 +306,9 @@ static inline EncodedJSValue getByVal(ExecState* exec, JSCell* base, uint32_t in
EncodedJSValue DFG_OPERATION operationGetByVal(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue baseValue = JSValue::decode(encodedBase);
JSValue property = JSValue::decode(encodedProperty);
@@ -305,12 +328,15 @@ EncodedJSValue DFG_OPERATION operationGetByVal(ExecState* exec, EncodedJSValue e
}
}
- Identifier ident(exec, property.toString(exec));
+ Identifier ident(exec, property.toString(exec)->value(exec));
return JSValue::encode(baseValue.get(exec, ident));
}
EncodedJSValue DFG_OPERATION operationGetByValCell(ExecState* exec, JSCell* base, EncodedJSValue encodedProperty)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue property = JSValue::decode(encodedProperty);
if (property.isUInt32())
@@ -325,12 +351,15 @@ EncodedJSValue DFG_OPERATION operationGetByValCell(ExecState* exec, JSCell* base
return JSValue::encode(result);
}
- Identifier ident(exec, property.toString(exec));
+ Identifier ident(exec, property.toString(exec)->value(exec));
return JSValue::encode(JSValue(base).get(exec, ident));
}
EncodedJSValue DFG_OPERATION operationGetById(ExecState* exec, EncodedJSValue base, Identifier* propertyName)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue baseValue = JSValue::decode(base);
PropertySlot slot(baseValue);
return JSValue::encode(baseValue.get(exec, *propertyName, slot));
@@ -339,6 +368,9 @@ EncodedJSValue DFG_OPERATION operationGetById(ExecState* exec, EncodedJSValue ba
J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(operationGetByIdBuildList);
EncodedJSValue DFG_OPERATION operationGetByIdBuildListWithReturnAddress(ExecState* exec, EncodedJSValue base, Identifier* propertyName, ReturnAddressPtr returnAddress)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue baseValue = JSValue::decode(base);
PropertySlot slot(baseValue);
JSValue result = baseValue.get(exec, *propertyName, slot);
@@ -352,6 +384,9 @@ EncodedJSValue DFG_OPERATION operationGetByIdBuildListWithReturnAddress(ExecStat
J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(operationGetByIdProtoBuildList);
EncodedJSValue DFG_OPERATION operationGetByIdProtoBuildListWithReturnAddress(ExecState* exec, EncodedJSValue base, Identifier* propertyName, ReturnAddressPtr returnAddress)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue baseValue = JSValue::decode(base);
PropertySlot slot(baseValue);
JSValue result = baseValue.get(exec, *propertyName, slot);
@@ -365,6 +400,9 @@ EncodedJSValue DFG_OPERATION operationGetByIdProtoBuildListWithReturnAddress(Exe
J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(operationGetByIdOptimize);
EncodedJSValue DFG_OPERATION operationGetByIdOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue base, Identifier* propertyName, ReturnAddressPtr returnAddress)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue baseValue = JSValue::decode(base);
PropertySlot slot(baseValue);
JSValue result = baseValue.get(exec, *propertyName, slot);
@@ -378,28 +416,65 @@ EncodedJSValue DFG_OPERATION operationGetByIdOptimizeWithReturnAddress(ExecState
return JSValue::encode(result);
}
+EncodedJSValue DFG_OPERATION operationCallCustomGetter(ExecState* exec, JSCell* base, PropertySlot::GetValueFunc function, Identifier* ident)
+{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
+ return JSValue::encode(function(exec, asObject(base), *ident));
+}
+
+EncodedJSValue DFG_OPERATION operationCallGetter(ExecState* exec, JSCell* base, JSCell* value)
+{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
+ GetterSetter* getterSetter = asGetterSetter(value);
+ JSObject* getter = getterSetter->getter();
+ if (!getter)
+ return JSValue::encode(jsUndefined());
+ CallData callData;
+ CallType callType = getter->methodTable()->getCallData(getter, callData);
+ return JSValue::encode(call(exec, getter, callType, callData, asObject(base), ArgList()));
+}
+
void DFG_OPERATION operationPutByValStrict(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
operationPutByValInternal<true>(exec, encodedBase, encodedProperty, encodedValue);
}
void DFG_OPERATION operationPutByValNonStrict(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
operationPutByValInternal<false>(exec, encodedBase, encodedProperty, encodedValue);
}
void DFG_OPERATION operationPutByValCellStrict(ExecState* exec, JSCell* cell, EncodedJSValue encodedProperty, EncodedJSValue encodedValue)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
operationPutByValInternal<true>(exec, JSValue::encode(cell), encodedProperty, encodedValue);
}
void DFG_OPERATION operationPutByValCellNonStrict(ExecState* exec, JSCell* cell, EncodedJSValue encodedProperty, EncodedJSValue encodedValue)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
operationPutByValInternal<false>(exec, JSValue::encode(cell), encodedProperty, encodedValue);
}
void DFG_OPERATION operationPutByValBeyondArrayBounds(ExecState* exec, JSArray* array, int32_t index, EncodedJSValue encodedValue)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
// We should only get here if index is outside the existing vector.
ASSERT(!array->canSetIndex(index));
JSArray::putByIndex(array, exec, index, JSValue::decode(encodedValue));
@@ -407,42 +482,65 @@ void DFG_OPERATION operationPutByValBeyondArrayBounds(ExecState* exec, JSArray*
EncodedJSValue DFG_OPERATION operationArrayPush(ExecState* exec, EncodedJSValue encodedValue, JSArray* array)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
array->push(exec, JSValue::decode(encodedValue));
return JSValue::encode(jsNumber(array->length()));
}
EncodedJSValue DFG_OPERATION operationArrayPop(ExecState* exec, JSArray* array)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return JSValue::encode(array->pop(exec));
}
void DFG_OPERATION operationPutByIdStrict(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
PutPropertySlot slot(true);
base->methodTable()->put(base, exec, *propertyName, JSValue::decode(encodedValue), slot);
}
void DFG_OPERATION operationPutByIdNonStrict(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
PutPropertySlot slot(false);
base->methodTable()->put(base, exec, *propertyName, JSValue::decode(encodedValue), slot);
}
void DFG_OPERATION operationPutByIdDirectStrict(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
PutPropertySlot slot(true);
- JSValue(base).putDirect(exec, *propertyName, JSValue::decode(encodedValue), slot);
+ ASSERT(base->isObject());
+ asObject(base)->putDirect(exec->globalData(), *propertyName, JSValue::decode(encodedValue), slot);
}
void DFG_OPERATION operationPutByIdDirectNonStrict(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
PutPropertySlot slot(false);
- JSValue(base).putDirect(exec, *propertyName, JSValue::decode(encodedValue), slot);
+ ASSERT(base->isObject());
+ asObject(base)->putDirect(exec->globalData(), *propertyName, JSValue::decode(encodedValue), slot);
}
V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(operationPutByIdStrictOptimize);
void DFG_OPERATION operationPutByIdStrictOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName, ReturnAddressPtr returnAddress)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue value = JSValue::decode(encodedValue);
JSValue baseValue(base);
PutPropertySlot slot(true);
@@ -459,6 +557,9 @@ void DFG_OPERATION operationPutByIdStrictOptimizeWithReturnAddress(ExecState* ex
V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(operationPutByIdNonStrictOptimize);
void DFG_OPERATION operationPutByIdNonStrictOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName, ReturnAddressPtr returnAddress)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue value = JSValue::decode(encodedValue);
JSValue baseValue(base);
PutPropertySlot slot(false);
@@ -475,15 +576,18 @@ void DFG_OPERATION operationPutByIdNonStrictOptimizeWithReturnAddress(ExecState*
V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(operationPutByIdDirectStrictOptimize);
void DFG_OPERATION operationPutByIdDirectStrictOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName, ReturnAddressPtr returnAddress)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue value = JSValue::decode(encodedValue);
- JSValue baseValue(base);
PutPropertySlot slot(true);
- baseValue.putDirect(exec, *propertyName, value, slot);
+ ASSERT(base->isObject());
+ asObject(base)->putDirect(exec->globalData(), *propertyName, value, slot);
StructureStubInfo& stubInfo = exec->codeBlock()->getStubInfo(returnAddress);
if (stubInfo.seen)
- dfgRepatchPutByID(exec, baseValue, *propertyName, slot, stubInfo, Direct);
+ dfgRepatchPutByID(exec, base, *propertyName, slot, stubInfo, Direct);
else
stubInfo.seen = true;
}
@@ -491,46 +595,67 @@ void DFG_OPERATION operationPutByIdDirectStrictOptimizeWithReturnAddress(ExecSta
V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(operationPutByIdDirectNonStrictOptimize);
void DFG_OPERATION operationPutByIdDirectNonStrictOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName, ReturnAddressPtr returnAddress)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue value = JSValue::decode(encodedValue);
- JSValue baseValue(base);
PutPropertySlot slot(false);
- baseValue.putDirect(exec, *propertyName, value, slot);
+ ASSERT(base->isObject());
+ asObject(base)->putDirect(exec->globalData(), *propertyName, value, slot);
StructureStubInfo& stubInfo = exec->codeBlock()->getStubInfo(returnAddress);
if (stubInfo.seen)
- dfgRepatchPutByID(exec, baseValue, *propertyName, slot, stubInfo, Direct);
+ dfgRepatchPutByID(exec, base, *propertyName, slot, stubInfo, Direct);
else
stubInfo.seen = true;
}
size_t DFG_OPERATION operationCompareLess(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return jsLess<true>(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
}
size_t DFG_OPERATION operationCompareLessEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return jsLessEq<true>(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
}
size_t DFG_OPERATION operationCompareGreater(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return jsLess<false>(exec, JSValue::decode(encodedOp2), JSValue::decode(encodedOp1));
}
size_t DFG_OPERATION operationCompareGreaterEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return jsLessEq<false>(exec, JSValue::decode(encodedOp2), JSValue::decode(encodedOp1));
}
size_t DFG_OPERATION operationCompareEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return JSValue::equalSlowCaseInline(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
}
size_t DFG_OPERATION operationCompareStrictEqCell(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue op1 = JSValue::decode(encodedOp1);
JSValue op2 = JSValue::decode(encodedOp2);
@@ -542,6 +667,9 @@ size_t DFG_OPERATION operationCompareStrictEqCell(ExecState* exec, EncodedJSValu
size_t DFG_OPERATION operationCompareStrictEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return JSValue::strictEqual(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
}
@@ -551,6 +679,7 @@ EncodedJSValue DFG_OPERATION getHostCallReturnValueWithExecState(ExecState*);
#if CPU(X86_64)
asm (
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+HIDE_SYMBOL(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
"mov -40(%r13), %r13\n"
"mov %r13, %rdi\n"
@@ -559,6 +688,7 @@ SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
#elif CPU(X86)
asm (
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+HIDE_SYMBOL(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
"mov -40(%edi), %edi\n"
"mov %edi, 4(%esp)\n"
@@ -581,6 +711,9 @@ SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
EncodedJSValue DFG_OPERATION getHostCallReturnValueWithExecState(ExecState* exec)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return JSValue::encode(exec->globalData().hostCallReturnValue);
}
@@ -635,6 +768,8 @@ inline void* linkFor(ExecState* execCallee, ReturnAddressPtr returnAddress, Code
{
ExecState* exec = execCallee->callerFrame();
JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue calleeAsValue = execCallee->calleeAsValue();
JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
if (!calleeAsFunctionCell)
@@ -684,6 +819,9 @@ void* DFG_OPERATION operationLinkConstructWithReturnAddress(ExecState* execCalle
inline void* virtualFor(ExecState* execCallee, CodeSpecializationKind kind)
{
ExecState* exec = execCallee->callerFrame();
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue calleeAsValue = execCallee->calleeAsValue();
JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
if (UNLIKELY(!calleeAsFunctionCell))
@@ -704,7 +842,7 @@ inline void* virtualFor(ExecState* execCallee, CodeSpecializationKind kind)
}
void* DFG_OPERATION operationVirtualCall(ExecState* execCallee)
-{
+{
return virtualFor(execCallee, CodeForCall);
}
@@ -715,6 +853,9 @@ void* DFG_OPERATION operationVirtualConstruct(ExecState* execCallee)
EncodedJSValue DFG_OPERATION operationResolve(ExecState* exec, Identifier* propertyName)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
ScopeChainNode* scopeChain = exec->scopeChain();
ScopeChainIterator iter = scopeChain->begin();
ScopeChainIterator end = scopeChain->end();
@@ -732,11 +873,17 @@ EncodedJSValue DFG_OPERATION operationResolve(ExecState* exec, Identifier* prope
EncodedJSValue DFG_OPERATION operationResolveBase(ExecState* exec, Identifier* propertyName)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return JSValue::encode(resolveBase(exec, *propertyName, exec->scopeChain(), false));
}
EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState* exec, Identifier* propertyName)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue base = resolveBase(exec, *propertyName, exec->scopeChain(), true);
if (!base)
throwError(exec, createErrorForInvalidGlobalAssignment(exec, propertyName->ustring()));
@@ -745,6 +892,9 @@ EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState* exec, Iden
EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState* exec, GlobalResolveInfo* resolveInfo, Identifier* propertyName)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSGlobalObject* globalObject = exec->lexicalGlobalObject();
PropertySlot slot(globalObject);
@@ -764,16 +914,25 @@ EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState* exec, GlobalResol
EncodedJSValue DFG_OPERATION operationToPrimitive(ExecState* exec, EncodedJSValue value)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return JSValue::encode(JSValue::decode(value).toPrimitive(exec));
}
EncodedJSValue DFG_OPERATION operationStrCat(ExecState* exec, void* start, size_t size)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return JSValue::encode(jsString(exec, static_cast<Register*>(start), size));
}
EncodedJSValue DFG_OPERATION operationNewArray(ExecState* exec, void* start, size_t size)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return JSValue::encode(constructArray(exec, static_cast<JSValue*>(start), size));
}
@@ -795,6 +954,9 @@ EncodedJSValue DFG_OPERATION operationNewRegexp(ExecState* exec, void* regexpPtr
DFGHandlerEncoded DFG_OPERATION lookupExceptionHandler(ExecState* exec, uint32_t callIndex)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
JSValue exceptionValue = exec->exception();
ASSERT(exceptionValue);
@@ -806,29 +968,60 @@ DFGHandlerEncoded DFG_OPERATION lookupExceptionHandler(ExecState* exec, uint32_t
return dfgHandlerEncoded(exec, catchRoutine);
}
+DFGHandlerEncoded DFG_OPERATION lookupExceptionHandlerInStub(ExecState* exec, StructureStubInfo* stubInfo)
+{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
+ JSValue exceptionValue = exec->exception();
+ ASSERT(exceptionValue);
+
+ CodeOrigin codeOrigin = stubInfo->codeOrigin;
+ while (codeOrigin.inlineCallFrame)
+ codeOrigin = codeOrigin.inlineCallFrame->caller;
+
+ HandlerInfo* handler = exec->globalData().interpreter->throwException(exec, exceptionValue, codeOrigin.bytecodeIndex);
+
+ void* catchRoutine = handler ? handler->nativeCode.executableAddress() : (void*)ctiOpThrowNotCaught;
+ ASSERT(catchRoutine);
+ return dfgHandlerEncoded(exec, catchRoutine);
+}
+
double DFG_OPERATION dfgConvertJSValueToNumber(ExecState* exec, EncodedJSValue value)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return JSValue::decode(value).toNumber(exec);
}
size_t DFG_OPERATION dfgConvertJSValueToInt32(ExecState* exec, EncodedJSValue value)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
// toInt32/toUInt32 return the same value; we want the value zero extended to fill the register.
return JSValue::decode(value).toUInt32(exec);
}
size_t DFG_OPERATION dfgConvertJSValueToBoolean(ExecState* exec, EncodedJSValue encodedOp)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
return JSValue::decode(encodedOp).toBoolean(exec);
}
#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
-void DFG_OPERATION debugOperationPrintSpeculationFailure(ExecState*, void* debugInfoRaw)
+void DFG_OPERATION debugOperationPrintSpeculationFailure(ExecState* exec, void* debugInfoRaw)
{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
SpeculationFailureDebugInfo* debugInfo = static_cast<SpeculationFailureDebugInfo*>(debugInfoRaw);
CodeBlock* codeBlock = debugInfo->codeBlock;
CodeBlock* alternative = codeBlock->alternative();
- printf("Speculation failure in %p at @%u with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, success/fail %u/%u\n", codeBlock, debugInfo->nodeIndex, alternative ? alternative->executeCounter() : 0, alternative ? alternative->reoptimizationRetryCounter() : 0, alternative ? alternative->optimizationDelayCounter() : 0, codeBlock->speculativeSuccessCounter(), codeBlock->speculativeFailCounter());
+ printf("Speculation failure in %p at @%u with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, success/fail %u/%u\n", codeBlock, debugInfo->nodeIndex, alternative ? alternative->jitExecuteCounter() : 0, alternative ? alternative->reoptimizationRetryCounter() : 0, alternative ? alternative->optimizationDelayCounter() : 0, codeBlock->speculativeSuccessCounter(), codeBlock->speculativeFailCounter());
}
#endif
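
Nearly every operation in DFGOperations.cpp now opens with the same two lines: fetch the JSGlobalData and construct a NativeCallFrameTracer for the incoming ExecState. As far as this patch shows, the intent is to record the active frame before the operation does anything that can re-enter the VM. A self-contained sketch of that RAII shape, using stand-in types rather than the real JSC classes:

// Illustrative stand-ins only; the real NativeCallFrameTracer belongs to the
// JSC runtime and takes JSGlobalData* and ExecState*.
struct GlobalDataSketch {
    void* topCallFrame;
};

class FrameTracerSketch {
public:
    FrameTracerSketch(GlobalDataSketch* globalData, void* exec)
    {
        globalData->topCallFrame = exec; // publish the frame on entry
    }
};

void* operationSketch(GlobalDataSketch* globalData, void* exec)
{
    FrameTracerSketch tracer(globalData, exec); // same shape as the lines added above
    // ... the rest of the operation runs with the frame recorded ...
    return exec;
}
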
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h
index b4121dc21..5de9d3af3 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.h
+++ b/Source/JavaScriptCore/dfg/DFGOperations.h
@@ -104,7 +104,8 @@ EncodedJSValue DFG_OPERATION operationGetById(ExecState*, EncodedJSValue, Identi
EncodedJSValue DFG_OPERATION operationGetByIdBuildList(ExecState*, EncodedJSValue, Identifier*);
EncodedJSValue DFG_OPERATION operationGetByIdProtoBuildList(ExecState*, EncodedJSValue, Identifier*);
EncodedJSValue DFG_OPERATION operationGetByIdOptimize(ExecState*, EncodedJSValue, Identifier*);
-EncodedJSValue DFG_OPERATION operationGetMethodOptimize(ExecState*, EncodedJSValue, Identifier*);
+EncodedJSValue DFG_OPERATION operationCallCustomGetter(ExecState*, JSCell*, PropertySlot::GetValueFunc, Identifier*);
+EncodedJSValue DFG_OPERATION operationCallGetter(ExecState*, JSCell*, JSCell*);
EncodedJSValue DFG_OPERATION operationResolve(ExecState*, Identifier*);
EncodedJSValue DFG_OPERATION operationResolveBase(ExecState*, Identifier*);
EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState*, Identifier*);
@@ -181,6 +182,7 @@ inline DFGHandlerEncoded dfgHandlerEncoded(ExecState* exec, void* handler)
}
#endif
DFGHandlerEncoded DFG_OPERATION lookupExceptionHandler(ExecState*, uint32_t);
+DFGHandlerEncoded DFG_OPERATION lookupExceptionHandlerInStub(ExecState*, StructureStubInfo*);
// These operations implement the implicitly called ToInt32, ToNumber, and ToBoolean conversions from ES5.
double DFG_OPERATION dfgConvertJSValueToNumber(ExecState*, EncodedJSValue);
diff --git a/Source/JavaScriptCore/dfg/DFGPropagator.cpp b/Source/JavaScriptCore/dfg/DFGPropagator.cpp
index 631e82830..acfd2d364 100644
--- a/Source/JavaScriptCore/dfg/DFGPropagator.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPropagator.cpp
@@ -382,7 +382,7 @@ private:
if (left && right) {
if (isNumberPrediction(left) && isNumberPrediction(right)) {
- if (isInt32Prediction(mergePredictions(left, right)) && nodeCanSpeculateInteger(node.arithNodeFlags()))
+ if (m_graph.addShouldSpeculateInteger(node, m_codeBlock))
changed |= mergePrediction(PredictInt32);
else
changed |= mergePrediction(PredictDouble);
@@ -396,7 +396,19 @@ private:
}
case ArithAdd:
- case ArithSub:
+ case ArithSub: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ if (left && right) {
+ if (m_graph.addShouldSpeculateInteger(node, m_codeBlock))
+ changed |= mergePrediction(PredictInt32);
+ else
+ changed |= mergePrediction(PredictDouble);
+ }
+ break;
+ }
+
case ArithMul:
case ArithMin:
case ArithMax:
@@ -454,16 +466,22 @@ private:
bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
+ bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
- if (isArray || isString || isByteArray || isInt8Array || isInt16Array || isInt32Array || isUint8Array || isUint16Array || isUint32Array || isFloat32Array || isFloat64Array)
+ if (isArray || isString || isByteArray || isInt8Array || isInt16Array || isInt32Array || isUint8Array || isUint8ClampedArray || isUint16Array || isUint32Array || isFloat32Array || isFloat64Array)
changed |= mergePrediction(PredictInt32);
}
break;
}
+ case GetByIdFlush:
+ if (node.getHeapPrediction())
+ changed |= mergePrediction(node.getHeapPrediction());
+ break;
+
case GetByVal: {
if (m_graph[node.child1()].shouldSpeculateUint32Array() || m_graph[node.child1()].shouldSpeculateFloat32Array() || m_graph[node.child1()].shouldSpeculateFloat64Array())
changed |= mergePrediction(PredictDouble);
@@ -586,6 +604,7 @@ private:
case GetInt16ArrayLength:
case GetInt32ArrayLength:
case GetUint8ArrayLength:
+ case GetUint8ClampedArrayLength:
case GetUint16ArrayLength:
case GetUint32ArrayLength:
case GetFloat32ArrayLength:
@@ -705,7 +724,23 @@ private:
switch (node.op) {
case ValueAdd:
case ArithAdd:
- case ArithSub:
+ case ArithSub: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ VariableAccessData::Ballot ballot;
+
+ if (isNumberPrediction(left) && isNumberPrediction(right)
+ && !m_graph.addShouldSpeculateInteger(node, m_codeBlock))
+ ballot = VariableAccessData::VoteDouble;
+ else
+ ballot = VariableAccessData::VoteValue;
+
+ vote(node.child1(), ballot);
+ vote(node.child2(), ballot);
+ break;
+ }
+
case ArithMul:
case ArithMin:
case ArithMax:
@@ -822,27 +857,21 @@ private:
#endif
switch (op) {
- case ValueAdd: {
- if (!nodeCanSpeculateInteger(node.arithNodeFlags())) {
- toDouble(node.child1());
- toDouble(node.child2());
- break;
- }
-
+ case ValueAdd:
+ case ArithAdd:
+ case ArithSub: {
PredictedType left = m_graph[node.child1()].prediction();
PredictedType right = m_graph[node.child2()].prediction();
if (left && right
&& isNumberPrediction(left) && isNumberPrediction(right)
- && ((left & PredictDouble) || (right & PredictDouble))) {
+ && !m_graph.addShouldSpeculateInteger(node, m_codeBlock)) {
toDouble(node.child1());
toDouble(node.child2());
}
break;
}
- case ArithAdd:
- case ArithSub:
case ArithMul:
case ArithMin:
case ArithMax:
@@ -894,11 +923,12 @@ private:
bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
+ bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
- if (!isArray && !isString && !isByteArray && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
+ if (!isArray && !isString && !isByteArray && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint8ClampedArray && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
break;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
@@ -918,6 +948,8 @@ private:
node.op = GetInt32ArrayLength;
else if (isUint8Array)
node.op = GetUint8ArrayLength;
+ else if (isUint8ClampedArray)
+ node.op = GetUint8ClampedArrayLength;
else if (isUint16Array)
node.op = GetUint16ArrayLength;
else if (isUint32Array)
@@ -1517,6 +1549,7 @@ private:
case GetInt16ArrayLength:
case GetInt32ArrayLength:
case GetUint8ArrayLength:
+ case GetUint8ClampedArrayLength:
case GetUint16ArrayLength:
case GetUint32ArrayLength:
case GetFloat32ArrayLength:
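
Among the DFGPropagator.cpp changes above, the new ArithAdd/ArithSub double-voting case boils down to one decision: if both operand predictions are numeric but the add cannot stay on the integer path, the children are voted toward double storage; otherwise they receive the ordinary value vote. A compressed sketch of that decision (enum and parameters are stand-ins for the real VariableAccessData and Graph queries):

enum BallotSketch { VoteValue, VoteDouble };

// Stand-in for the ArithAdd/ArithSub ballot computed in the voting pass above.
BallotSketch ballotForAdd(bool leftIsNumber, bool rightIsNumber, bool addSpeculatesInteger)
{
    if (leftIsNumber && rightIsNumber && !addSpeculatesInteger)
        return VoteDouble;
    return VoteValue;
}
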
diff --git a/Source/JavaScriptCore/dfg/DFGRepatch.cpp b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
index ae4a44ffe..f2928c290 100644
--- a/Source/JavaScriptCore/dfg/DFGRepatch.cpp
+++ b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
@@ -28,6 +28,7 @@
#if ENABLE(DFG_JIT)
+#include "DFGCCallHelpers.h"
#include "DFGSpeculativeJIT.h"
#include "LinkBuffer.h"
#include "Operations.h"
@@ -149,7 +150,7 @@ static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stu
emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
- LinkBuffer patchBuffer(*globalData, &stubJit);
+ LinkBuffer patchBuffer(*globalData, &stubJit, exec->codeBlock());
linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
@@ -200,7 +201,7 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
- LinkBuffer patchBuffer(*globalData, &stubJit);
+ LinkBuffer patchBuffer(*globalData, &stubJit, codeBlock);
linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases);
@@ -227,8 +228,10 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
// Optimize self access.
if (slot.slotBase() == baseValue) {
- if ((slot.cachedPropertyType() != PropertySlot::Value) || ((slot.cachedOffset() * sizeof(JSValue)) > (unsigned)MacroAssembler::MaximumCompactPtrAlignedAddressOffset))
- return false;
+ if ((slot.cachedPropertyType() != PropertySlot::Value) || ((slot.cachedOffset() * sizeof(JSValue)) > (unsigned)MacroAssembler::MaximumCompactPtrAlignedAddressOffset)) {
+ dfgRepatchCall(codeBlock, stubInfo.callReturnLocation, operationGetByIdBuildList);
+ return true;
+ }
dfgRepatchByIdSelfAccess(codeBlock, stubInfo, structure, slot.cachedOffset(), operationGetByIdBuildList, true);
stubInfo.initGetByIdSelf(*globalData, codeBlock->ownerExecutable(), structure);
@@ -268,16 +271,22 @@ void dfgRepatchGetByID(ExecState* exec, JSValue baseValue, const Identifier& pro
dfgRepatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
}
-static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier&, const PropertySlot& slot, StructureStubInfo& stubInfo)
+static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& ident, const PropertySlot& slot, StructureStubInfo& stubInfo)
{
if (!baseValue.isCell()
|| !slot.isCacheable()
|| baseValue.asCell()->structure()->isUncacheableDictionary()
- || slot.slotBase() != baseValue
- || slot.cachedPropertyType() != PropertySlot::Value
- || (slot.cachedOffset() * sizeof(JSValue)) > (unsigned)MacroAssembler::MaximumCompactPtrAlignedAddressOffset)
+ || slot.slotBase() != baseValue)
return false;
+ if (!stubInfo.registersFlushed) {
+ // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
+ // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
+ // if registers were not flushed, don't do non-Value caching.
+ if (slot.cachedPropertyType() != PropertySlot::Value)
+ return false;
+ }
+
CodeBlock* codeBlock = exec->codeBlock();
JSCell* baseCell = baseValue.asCell();
Structure* structure = baseCell->structure();
@@ -286,12 +295,18 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
ASSERT(slot.slotBase().isObject());
PolymorphicAccessStructureList* polymorphicStructureList;
- int listIndex = 1;
+ int listIndex;
- if (stubInfo.accessType == access_get_by_id_self) {
+ if (stubInfo.accessType == access_unset) {
+ ASSERT(!stubInfo.stubRoutine);
+ polymorphicStructureList = new PolymorphicAccessStructureList();
+ stubInfo.initGetByIdSelfList(polymorphicStructureList, 0);
+ listIndex = 0;
+ } else if (stubInfo.accessType == access_get_by_id_self) {
ASSERT(!stubInfo.stubRoutine);
polymorphicStructureList = new PolymorphicAccessStructureList(*globalData, codeBlock->ownerExecutable(), MacroAssemblerCodeRef::createSelfManagedCodeRef(stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase)), stubInfo.u.getByIdSelf.baseObjectStructure.get(), true);
stubInfo.initGetByIdSelfList(polymorphicStructureList, 1);
+ listIndex = 1;
} else {
polymorphicStructureList = stubInfo.u.getByIdSelfList.structureList;
listIndex = stubInfo.u.getByIdSelfList.listSize;
@@ -305,32 +320,93 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
#endif
GPRReg resultGPR = static_cast<GPRReg>(stubInfo.valueGPR);
+ GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
- MacroAssembler stubJit;
+ CCallHelpers stubJit(globalData, codeBlock);
MacroAssembler::Jump wrongStruct = stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure));
-
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
+
+ // The strategy we use for stubs is as follows:
+ // 1) Call DFG helper that calls the getter.
+ // 2) Check if there was an exception, and if there was, call yet another
+ // helper.
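    // In the code below, step 1 corresponds to the setupArgumentsWithExecState(...)
    // call plus operationCall, and step 2 to the emitExceptionCheck followed by
    // handlerCall (linked to lookupExceptionHandlerInStub further down), after
    // which the stub jumps through GPRInfo::returnValueGPR2.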
+
+ bool isDirect = false;
+ MacroAssembler::Call operationCall;
+ MacroAssembler::Call handlerCall;
+ FunctionPtr operationFunction;
+ MacroAssembler::Jump success;
+
+ if (slot.cachedPropertyType() == PropertySlot::Getter
+ || slot.cachedPropertyType() == PropertySlot::Custom) {
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ ASSERT(baseGPR != scratchGPR);
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
#if USE(JSVALUE64)
- stubJit.loadPtr(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue)), resultGPR);
+ stubJit.loadPtr(MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue)), scratchGPR);
#elif USE(JSVALUE32_64)
- stubJit.load32(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- stubJit.load32(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+ stubJit.load32(MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratchGPR);
#endif
+ stubJit.setupArgumentsWithExecState(baseGPR, scratchGPR);
+ operationFunction = operationCallGetter;
+ } else {
+ stubJit.setupArgumentsWithExecState(
+ baseGPR,
+ MacroAssembler::TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()),
+ MacroAssembler::TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ operationFunction = operationCallCustomGetter;
+ }
+
+ // Need to make sure that whenever this call is made in the future, we remember the
+ // place that we made it from. It just so happens to be the place that we are at
+ // right now!
+ stubJit.store32(
+ MacroAssembler::TrustedImm32(exec->codeOriginIndexForDFGWithInlining()),
+ CCallHelpers::tagFor(static_cast<VirtualRegister>(RegisterFile::ArgumentCount)));
+
+ operationCall = stubJit.call();
+#if USE(JSVALUE64)
+ stubJit.move(GPRInfo::returnValueGPR, resultGPR);
+#else
+ stubJit.setupResults(resultGPR, resultTagGPR);
+#endif
+ success = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+ stubJit.setupArgumentsWithExecState(
+ MacroAssembler::TrustedImmPtr(&stubInfo));
+ handlerCall = stubJit.call();
+ stubJit.jump(GPRInfo::returnValueGPR2);
+ } else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
+#if USE(JSVALUE64)
+ stubJit.loadPtr(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue)), resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.load32(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+#endif
+ success = stubJit.jump();
+ isDirect = true;
+ }
- MacroAssembler::Jump success = stubJit.jump();
-
- LinkBuffer patchBuffer(*globalData, &stubJit);
+ LinkBuffer patchBuffer(*globalData, &stubJit, codeBlock);
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine.code());
+ CodeLocationLabel lastProtoBegin;
+ if (listIndex)
+ lastProtoBegin = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine.code());
+ else
+ lastProtoBegin = stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase);
ASSERT(!!lastProtoBegin);
patchBuffer.link(wrongStruct, lastProtoBegin);
patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone));
+ if (!isDirect) {
+ patchBuffer.link(operationCall, operationFunction);
+ patchBuffer.link(handlerCall, lookupExceptionHandlerInStub);
+ }
MacroAssemblerCodeRef stubRoutine = patchBuffer.finalizeCode();
- polymorphicStructureList->list[listIndex].set(*globalData, codeBlock->ownerExecutable(), stubRoutine, structure, true);
+ polymorphicStructureList->list[listIndex].set(*globalData, codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);
CodeLocationJump jumpLocation = stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck);
RepatchBuffer repatchBuffer(codeBlock);
@@ -538,7 +614,7 @@ static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier
} else
success = stubJit.jump();
- LinkBuffer patchBuffer(*globalData, &stubJit);
+ LinkBuffer patchBuffer(*globalData, &stubJit, codeBlock);
patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone));
if (needToRestoreScratch)
patchBuffer.link(failure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index feb705ab8..e647fb87a 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -1112,6 +1112,11 @@ void SpeculativeJIT::checkArgumentTypes()
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ArrayDescriptor().m_classInfo)));
+ } else if (isUint8ClampedArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ClampedArrayDescriptor().m_classInfo)));
} else if (isUint16ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
@@ -1174,6 +1179,12 @@ void SpeculativeJIT::checkArgumentTypes()
speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ArrayDescriptor().m_classInfo)));
+ } else if (isUint8ClampedArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ClampedArrayDescriptor().m_classInfo)));
} else if (isUint16ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
@@ -1529,6 +1540,28 @@ void SpeculativeJIT::compileUInt32ToNumber(Node& node)
integerResult(result.gpr(), m_compileIndex, op1.format());
}
+static double clampDoubleToByte(double d)
+{
+ d += 0.5;
+ if (!(d > 0))
+ d = 0;
+ else if (d > 255)
+ d = 255;
+ return d;
+}
+
+static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
+{
+ MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
+ MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
+ jit.xorPtr(result, result);
+ MacroAssembler::Jump clamped = jit.jump();
+ tooBig.link(&jit);
+ jit.move(JITCompiler::TrustedImm32(255), result);
+ clamped.link(&jit);
+ inBounds.link(&jit);
+}
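// A minimal host-side sketch of the byte-clamping semantics the two helpers
// above emit; for illustration only (this reference function and its name are
// not taken from the patch):
static inline int clampedByteReference(double d)
{
    d += 0.5;           // round half up, as clampDoubleToByte does
    if (!(d > 0))       // negatives and NaN (which compares false) clamp to 0
        return 0;
    if (d > 255)
        return 255;
    return static_cast<int>(d); // callers truncate the clamped double to an int
}
// For example: clampedByteReference(-3.2) == 0, clampedByteReference(1.5) == 2,
// clampedByteReference(300) == 255, and a NaN input yields 0.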
+
static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
{
// Unordered compare so we pick up NaN
@@ -1576,15 +1609,10 @@ void SpeculativeJIT::compilePutByValForByteArray(GPRReg base, GPRReg property, N
noResult(m_compileIndex);
return;
}
- double d = jsValue.asNumber();
- d += 0.5;
- if (!(d > 0))
- d = 0;
- else if (d > 255)
- d = 255;
+ int clampedValue = clampDoubleToByte(jsValue.asNumber());
GPRTemporary scratch(this);
GPRReg scratchReg = scratch.gpr();
- m_jit.move(Imm32((int)d), scratchReg);
+ m_jit.move(Imm32(clampedValue), scratchReg);
value.adopt(scratch);
valueGPR = scratchReg;
} else if (!at(valueIndex).shouldNotSpeculateInteger()) {
@@ -1592,14 +1620,7 @@ void SpeculativeJIT::compilePutByValForByteArray(GPRReg base, GPRReg property, N
GPRTemporary scratch(this);
GPRReg scratchReg = scratch.gpr();
m_jit.move(valueOp.gpr(), scratchReg);
- MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::BelowOrEqual, scratchReg, TrustedImm32(0xff));
- MacroAssembler::Jump tooBig = m_jit.branch32(MacroAssembler::GreaterThan, scratchReg, TrustedImm32(0xff));
- m_jit.xorPtr(scratchReg, scratchReg);
- MacroAssembler::Jump clamped = m_jit.jump();
- tooBig.link(&m_jit);
- m_jit.move(TrustedImm32(255), scratchReg);
- clamped.link(&m_jit);
- inBounds.link(&m_jit);
+ compileClampIntegerToByte(m_jit, scratchReg);
value.adopt(scratch);
valueGPR = scratchReg;
} else {
@@ -1722,7 +1743,7 @@ void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor&
}
}
-void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements, TypedArraySignedness signedness)
+void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements, TypedArraySignedness signedness, TypedArrayRounding rounding)
{
NodeIndex baseIndex = node.child1();
NodeIndex valueIndex = node.child3();
@@ -1740,9 +1761,13 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor&
return;
}
double d = jsValue.asNumber();
+ if (rounding == ClampRounding) {
+ ASSERT(elementSize == 1);
+ d = clampDoubleToByte(d);
+ }
GPRTemporary scratch(this);
GPRReg scratchReg = scratch.gpr();
- m_jit.move(Imm32((int)d), scratchReg);
+ m_jit.move(Imm32(static_cast<int>(d)), scratchReg);
value.adopt(scratch);
valueGPR = scratchReg;
} else if (!at(valueIndex).shouldNotSpeculateInteger()) {
@@ -1750,8 +1775,22 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor&
GPRTemporary scratch(this);
GPRReg scratchReg = scratch.gpr();
m_jit.move(valueOp.gpr(), scratchReg);
+ if (rounding == ClampRounding) {
+ ASSERT(elementSize == 1);
+ compileClampIntegerToByte(m_jit, scratchReg);
+ }
value.adopt(scratch);
valueGPR = scratchReg;
+ } else if (rounding == ClampRounding) {
+ ASSERT(elementSize == 1);
+ SpeculateDoubleOperand valueOp(this, valueIndex);
+ GPRTemporary result(this);
+ FPRTemporary floatScratch(this);
+ FPRReg fpr = valueOp.fpr();
+ GPRReg gpr = result.gpr();
+ compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
+ value.adopt(result);
+ valueGPR = gpr;
} else {
SpeculateDoubleOperand valueOp(this, valueIndex);
GPRTemporary result(this);
@@ -1761,17 +1800,17 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor&
m_jit.xorPtr(gpr, gpr);
MacroAssembler::Jump fixed = m_jit.jump();
notNaN.link(&m_jit);
-
+
MacroAssembler::Jump done;
if (signedness == SignedTypedArray)
done = m_jit.branchTruncateDoubleToInt32(fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful);
else
done = m_jit.branchTruncateDoubleToUint32(fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful);
-
+
silentSpillAllRegisters(gpr);
callOperation(toInt32, gpr, fpr);
silentFillAllRegisters(gpr);
-
+
done.link(&m_jit);
fixed.link(&m_jit);
value.adopt(result);
@@ -2157,6 +2196,144 @@ void SpeculativeJIT::compileSoftModulo(Node& node)
#endif
}
+void SpeculativeJIT::compileAdd(Node& node)
+{
+ if (m_jit.graph().addShouldSpeculateInteger(node, m_jit.codeBlock())) {
+ if (isNumberConstant(node.child1())) {
+ int32_t imm1 = valueOfNumberConstantAsInt32(node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op2.gpr(), result.gpr());
+ m_jit.add32(Imm32(imm1), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ return;
+ }
+
+ if (isNumberConstant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ int32_t imm2 = valueOfNumberConstantAsInt32(node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.add32(Imm32(imm2), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ return;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op1, op2);
+
+ GPRReg gpr1 = op1.gpr();
+ GPRReg gpr2 = op2.gpr();
+ GPRReg gprResult = result.gpr();
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ if (gpr1 == gprResult)
+ m_jit.add32(gpr2, gprResult);
+ else {
+ m_jit.move(gpr2, gprResult);
+ m_jit.add32(gpr1, gprResult);
+ }
+ } else {
+ MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
+
+ if (gpr1 == gprResult)
+ speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
+ else if (gpr2 == gprResult)
+ speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
+ else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, check);
+ }
+
+ integerResult(gprResult, m_compileIndex);
+ return;
+ }
+
+ if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2()))) {
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1, op2);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+ m_jit.addDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ return;
+ }
+
+ ASSERT(node.op == ValueAdd);
+ compileValueAdd(node);
+}
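// Worked example of the recovery above (illustrative values, not from the
// patch): when gprResult aliases gpr1, and gpr1 holds INT_MAX while gpr2 holds 1,
// a failed branchAdd32 leaves the wrapped sum (INT_MIN) in gprResult. The
// attached SpeculationRecovery(SpeculativeAdd, gprResult, gpr2) presumably lets
// the OSR exit subtract gpr2 again, restoring INT_MAX before baseline values are
// reconstructed. When gprResult is a fresh temporary, both operands survive the
// failed add untouched, so no recovery needs to be recorded.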
+
+void SpeculativeJIT::compileArithSub(Node& node)
+{
+ if (m_jit.graph().addShouldSpeculateInteger(node, m_jit.codeBlock())) {
+ if (isNumberConstant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ int32_t imm2 = valueOfNumberConstantAsInt32(node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.sub32(Imm32(imm2), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ return;
+ }
+
+ if (isNumberConstant(node.child1())) {
+ int32_t imm1 = valueOfNumberConstantAsInt32(node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this);
+
+ m_jit.move(Imm32(imm1), result.gpr());
+ if (nodeCanTruncateInteger(node.arithNodeFlags()))
+ m_jit.sub32(op2.gpr(), result.gpr());
+ else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ return;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.sub32(op2.gpr(), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ return;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+ m_jit.subDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+}
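// Note the asymmetry with compileAdd: there is no generic fallback here,
// presumably because '-' always coerces its operands to numbers, so once
// integer speculation is ruled out the double path above is the catch-all
// for ArithSub.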
+
void SpeculativeJIT::compileArithMul(Node& node)
{
if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
@@ -2451,6 +2628,11 @@ void SpeculativeJIT::compileGetIndexedPropertyStorage(Node& node)
if (!isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type))
speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
+ } else if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
+ const TypedArrayDescriptor& descriptor = m_jit.globalData()->uint8ClampedArrayDescriptor();
+ if (!isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type))
+            speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
} else if (at(node.child1()).shouldSpeculateUint16Array()) {
const TypedArrayDescriptor& descriptor = m_jit.globalData()->uint16ArrayDescriptor();
if (!isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type))
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
index 3b709400d..0098da3a1 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -325,18 +325,21 @@ public:
void writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
void writeBarrier(JSCell* owner, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg);
- static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg)
+ static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
{
- if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0)
+ if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0)
return GPRInfo::regT0;
- if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1)
+ if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1)
return GPRInfo::regT1;
- if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2)
+ if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2)
return GPRInfo::regT2;
- return GPRInfo::regT3;
+ if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3)
+ return GPRInfo::regT3;
+
+ return GPRInfo::regT4;
}
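    // Usage sketch (hypothetical register names, for illustration): with the
    // extra preserve slot, a call such as
    //     GPRReg scratch = selectScratchGPR(baseGPR, valueGPR, resultGPR, storageGPR);
    // returns the lowest of regT0..regT4 that does not collide with any of the
    // four preserved registers, and only falls back to regT4 when regT0..regT3
    // are all taken.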
// Called by the speculative operand types, below, to fill operand to
@@ -727,6 +730,12 @@ private:
bool isFunctionConstant(NodeIndex nodeIndex) { return m_jit.isFunctionConstant(nodeIndex); }
int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return m_jit.valueOfInt32Constant(nodeIndex); }
double valueOfNumberConstant(NodeIndex nodeIndex) { return m_jit.valueOfNumberConstant(nodeIndex); }
+ int32_t valueOfNumberConstantAsInt32(NodeIndex nodeIndex)
+ {
+ if (isInt32Constant(nodeIndex))
+ return valueOfInt32Constant(nodeIndex);
+ return JSC::toInt32(valueOfNumberConstant(nodeIndex));
+ }
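    // Illustrative values (assuming the usual ECMAScript ToInt32 behaviour of
    // JSC::toInt32): an Int32 constant is returned as-is, while a double
    // constant is truncated and wrapped, e.g. 2.7 -> 2, -2.7 -> -2,
    // 4294967301.0 (2^32 + 5) -> 5, and NaN -> 0.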
#if USE(JSVALUE32_64)
void* addressOfDoubleConstant(NodeIndex nodeIndex) { return m_jit.addressOfDoubleConstant(nodeIndex); }
#endif
@@ -872,12 +881,13 @@ private:
void nonSpeculativeValueToInt32(Node&);
void nonSpeculativeUInt32ToNumber(Node&);
+ enum SpillRegistersMode { NeedToSpill, DontSpill };
#if USE(JSVALUE64)
- JITCompiler::Call cachedGetById(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
- void cachedPutById(GPRReg base, GPRReg value, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
+ JITCompiler::Call cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#elif USE(JSVALUE32_64)
- JITCompiler::Call cachedGetById(GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
- void cachedPutById(GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
+ JITCompiler::Call cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#endif
void nonSpeculativeNonPeepholeCompareNull(NodeIndex operand, bool invert = false);
@@ -1060,457 +1070,6 @@ private:
m_generationInfo[node.virtualRegister()].initConstant(nodeIndex, node.refCount());
}
-    // These methods are used to sort arguments into the correct registers.
- // On X86 we use cdecl calling conventions, which pass all arguments on the
- // stack. On other architectures we may need to sort values into the
- // correct registers.
-#if !NUMBER_OF_ARGUMENT_REGISTERS
- unsigned m_callArgumentOffset;
- void resetCallArguments() { m_callArgumentOffset = 0; }
-
-    // These methods are used internally to implement the callOperation methods.
- void addCallArgument(GPRReg value)
- {
- m_jit.poke(value, m_callArgumentOffset++);
- }
- void addCallArgument(TrustedImm32 imm)
- {
- m_jit.poke(imm, m_callArgumentOffset++);
- }
- void addCallArgument(TrustedImmPtr pointer)
- {
- m_jit.poke(pointer, m_callArgumentOffset++);
- }
- void addCallArgument(FPRReg value)
- {
- m_jit.storeDouble(value, JITCompiler::Address(JITCompiler::stackPointerRegister, m_callArgumentOffset * sizeof(void*)));
- m_callArgumentOffset += sizeof(double) / sizeof(void*);
- }
-
- ALWAYS_INLINE void setupArguments(FPRReg arg1)
- {
- resetCallArguments();
- addCallArgument(arg1);
- }
-
- ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
- {
- resetCallArguments();
- addCallArgument(arg1);
- addCallArgument(arg2);
- }
-
- ALWAYS_INLINE void setupArgumentsExecState()
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- addCallArgument(arg5);
- }
-#endif // !NUMBER_OF_ARGUMENT_REGISTERS
- // These methods are suitable for any calling convention that provides for
- // at least 4 argument registers, e.g. X86_64, ARMv7.
-#if NUMBER_OF_ARGUMENT_REGISTERS >= 4
- template<GPRReg destA, GPRReg destB>
- void setupTwoStubArgs(GPRReg srcA, GPRReg srcB)
- {
- // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
- // (1) both are already in arg regs, the right way around.
- // (2) both are already in arg regs, the wrong way around.
- // (3) neither are currently in arg registers.
-        // (4) srcA is in its correct reg.
-        // (5) srcA is in the incorrect reg.
-        // (6) srcB is in its correct reg.
-        // (7) srcB is in the incorrect reg.
- //
- // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
-        // place (the MacroAssembler will omit redundant moves). This approach will be safe in
- // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
- // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
-
- if (srcB != destA) {
- // Handle the easy cases - two simple moves.
- m_jit.move(srcA, destA);
- m_jit.move(srcB, destB);
- } else if (srcA != destB) {
- // Handle the non-swap case - just put srcB in place first.
- m_jit.move(srcB, destB);
- m_jit.move(srcA, destA);
- } else
- m_jit.swap(destA, destB);
- }
-#if CPU(X86_64)
- template<FPRReg destA, FPRReg destB>
- void setupTwoStubArgs(FPRReg srcA, FPRReg srcB)
- {
- // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
- // (1) both are already in arg regs, the right way around.
- // (2) both are already in arg regs, the wrong way around.
- // (3) neither are currently in arg registers.
-        // (4) srcA is in its correct reg.
-        // (5) srcA is in the incorrect reg.
-        // (6) srcB is in its correct reg.
-        // (7) srcB is in the incorrect reg.
- //
- // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
-        // place (the MacroAssembler will omit redundant moves). This approach will be safe in
- // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
- // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
-
- if (srcB != destA) {
- // Handle the easy cases - two simple moves.
- m_jit.moveDouble(srcA, destA);
- m_jit.moveDouble(srcB, destB);
- return;
- }
-
- if (srcA != destB) {
- // Handle the non-swap case - just put srcB in place first.
- m_jit.moveDouble(srcB, destB);
- m_jit.moveDouble(srcA, destA);
- return;
- }
-
- ASSERT(srcB == destA && srcA == destB);
- // Need to swap; pick a temporary register.
- FPRReg temp;
-        if (destA != FPRInfo::argumentFPR3 && destB != FPRInfo::argumentFPR3)
-            temp = FPRInfo::argumentFPR3;
-        else if (destA != FPRInfo::argumentFPR2 && destB != FPRInfo::argumentFPR2)
-            temp = FPRInfo::argumentFPR2;
-        else {
-            ASSERT(destA != FPRInfo::argumentFPR1 && destB != FPRInfo::argumentFPR1);
- temp = FPRInfo::argumentFPR1;
- }
- m_jit.moveDouble(destA, temp);
- m_jit.moveDouble(destB, destA);
- m_jit.moveDouble(temp, destB);
- }
-#endif
- void setupStubArguments(GPRReg arg1, GPRReg arg2)
- {
- setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
- }
- void setupStubArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
- {
- // If neither of arg2/arg3 are in our way, then we can move arg1 into place.
- // Then we can use setupTwoStubArgs to fix arg2/arg3.
- if (arg2 != GPRInfo::argumentGPR1 && arg3 != GPRInfo::argumentGPR1) {
- m_jit.move(arg1, GPRInfo::argumentGPR1);
- setupTwoStubArgs<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3);
- return;
- }
-
- // If neither of arg1/arg3 are in our way, then we can move arg2 into place.
- // Then we can use setupTwoStubArgs to fix arg1/arg3.
- if (arg1 != GPRInfo::argumentGPR2 && arg3 != GPRInfo::argumentGPR2) {
- m_jit.move(arg2, GPRInfo::argumentGPR2);
- setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3>(arg1, arg3);
- return;
- }
-
- // If neither of arg1/arg2 are in our way, then we can move arg3 into place.
- // Then we can use setupTwoStubArgs to fix arg1/arg2.
- if (arg1 != GPRInfo::argumentGPR3 && arg2 != GPRInfo::argumentGPR3) {
- m_jit.move(arg3, GPRInfo::argumentGPR3);
- setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
- return;
- }
-
- // If we get here, we haven't been able to move any of arg1/arg2/arg3.
-        // Since all three are blocked, all three must already be in argument registers.
- // But are they in the right ones?
-
- // First, ensure arg1 is in place.
- if (arg1 != GPRInfo::argumentGPR1) {
- m_jit.swap(arg1, GPRInfo::argumentGPR1);
-
- // If arg1 wasn't in argumentGPR1, one of arg2/arg3 must be.
- ASSERT(arg2 == GPRInfo::argumentGPR1 || arg3 == GPRInfo::argumentGPR1);
- // If arg2 was in argumentGPR1 it no longer is (due to the swap).
- // Otherwise arg3 must have been. Mark him as moved.
- if (arg2 == GPRInfo::argumentGPR1)
- arg2 = arg1;
- else
- arg3 = arg1;
- }
-
- // Either arg2 & arg3 need swapping, or we're all done.
- ASSERT((arg2 == GPRInfo::argumentGPR2 || arg3 == GPRInfo::argumentGPR3)
- || (arg2 == GPRInfo::argumentGPR3 || arg3 == GPRInfo::argumentGPR2));
-
- if (arg2 != GPRInfo::argumentGPR2)
- m_jit.swap(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3);
- }
-
-#if CPU(X86_64)
- ALWAYS_INLINE void setupArguments(FPRReg arg1)
- {
- m_jit.moveDouble(arg1, FPRInfo::argumentFPR0);
- }
-
- ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
- {
- setupTwoStubArgs<FPRInfo::argumentFPR0, FPRInfo::argumentFPR1>(arg1, arg2);
- }
-#else
- ALWAYS_INLINE void setupArguments(FPRReg arg1)
- {
- m_jit.assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1);
- }
-
- ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
- {
- m_jit.assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1);
- m_jit.assembler().vmov(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, arg2);
- }
-#endif
-
- ALWAYS_INLINE void setupArgumentsExecState()
- {
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1)
- {
- m_jit.move(arg1, GPRInfo::argumentGPR1);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1)
- {
- m_jit.move(arg1, GPRInfo::argumentGPR1);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2)
- {
- setupStubArguments(arg1, arg2);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2)
- {
- m_jit.move(arg1, GPRInfo::argumentGPR1);
- m_jit.move(arg2, GPRInfo::argumentGPR2);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2)
- {
- m_jit.move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample!
- m_jit.move(arg1, GPRInfo::argumentGPR1);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2)
- {
- m_jit.move(arg1, GPRInfo::argumentGPR1);
- m_jit.move(arg2, GPRInfo::argumentGPR2);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2)
- {
- m_jit.move(arg1, GPRInfo::argumentGPR1);
- m_jit.move(arg2, GPRInfo::argumentGPR2);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3)
- {
- setupStubArguments(arg1, arg2, arg3);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3)
- {
- setupStubArguments(arg1, arg2);
- m_jit.move(arg3, GPRInfo::argumentGPR3);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3)
- {
- m_jit.move(arg1, GPRInfo::argumentGPR1);
- m_jit.move(arg2, GPRInfo::argumentGPR2);
- m_jit.move(arg3, GPRInfo::argumentGPR3);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3)
- {
- setupStubArguments(arg1, arg2);
- m_jit.move(arg3, GPRInfo::argumentGPR3);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3)
- {
- m_jit.move(arg1, GPRInfo::argumentGPR1);
- m_jit.move(arg2, GPRInfo::argumentGPR2);
- m_jit.move(arg3, GPRInfo::argumentGPR3);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
-#endif // NUMBER_OF_ARGUMENT_REGISTERS >= 4
- // These methods are suitable for any calling convention that provides for
- // exactly 4 argument registers, e.g. ARMv7.
-#if NUMBER_OF_ARGUMENT_REGISTERS == 4
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
- {
- m_jit.poke(arg4);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
- {
- m_jit.poke(arg4);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
- {
- m_jit.poke(arg4);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
- {
- m_jit.poke(arg4);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
- {
- m_jit.poke(arg5, 1);
- m_jit.poke(arg4);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-#endif // NUMBER_OF_ARGUMENT_REGISTERS == 4
-
// These methods add calls to C++ helper functions.
// These methods are broadly value representation specific (i.e.
// deal with the fact that a JSValue may be passed in one or two
@@ -1519,314 +1078,314 @@ private:
#if USE(JSVALUE64)
JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, void* pointer)
{
- setupArgumentsWithExecState(TrustedImmPtr(pointer));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1)
{
- setupArguments(arg1);
+ m_jit.setupArguments(arg1);
JITCompiler::Call call = m_jit.appendCall(operation);
m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
return call;
}
JITCompiler::Call callOperation(J_DFGOperation_EGI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
{
- setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg result, Identifier* identifier)
{
- setupArgumentsWithExecState(TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier));
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg result, GPRReg arg1)
{
- setupArgumentsWithExecState(arg1);
+ m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg result, void* pointer, size_t size)
{
- setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg result, int startConstant, int numConstants)
{
- setupArgumentsWithExecState(Imm32(startConstant), Imm32(numConstants));
+ m_jit.setupArgumentsWithExecState(Imm32(startConstant), Imm32(numConstants));
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, void* pointer)
{
- setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
{
- setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
{
- setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg result, GPRReg arg1, GPRReg arg2)
{
- setupArgumentsWithExecState(arg1, arg2);
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, GPRReg arg1)
{
- setupArgumentsWithExecState(arg1);
+ m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result)
{
- setupArgumentsExecState();
+ m_jit.setupArgumentsExecState();
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, GPRReg arg1)
{
- setupArgumentsWithExecState(arg1);
+ m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(C_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell)
{
- setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
{
- setupArgumentsWithExecState(arg1);
+ m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
{
- setupArgumentsWithExecState(arg1, arg2);
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, GPRReg arg2)
{
- setupArgumentsWithExecState(arg1, arg2);
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler::Imm32 imm)
{
- setupArgumentsWithExecState(arg1, MacroAssembler::ImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))));
+ m_jit.setupArgumentsWithExecState(arg1, MacroAssembler::ImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))));
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, MacroAssembler::Imm32 imm, GPRReg arg2)
{
- setupArgumentsWithExecState(MacroAssembler::ImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))), arg2);
+ m_jit.setupArgumentsWithExecState(MacroAssembler::ImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))), arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
{
- setupArgumentsWithExecState(arg1, arg2);
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1, GPRReg arg2, void* pointer)
{
- setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(pointer));
+ m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(pointer));
return appendCallWithExceptionCheck(operation);
}
JITCompiler::Call callOperation(V_DFGOperation_EJCI operation, GPRReg arg1, GPRReg arg2, Identifier* identifier)
{
- setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(identifier));
return appendCallWithExceptionCheck(operation);
}
JITCompiler::Call callOperation(V_DFGOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
{
- setupArgumentsWithExecState(arg1, arg2, arg3);
+ m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
return appendCallWithExceptionCheck(operation);
}
JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
{
- setupArgumentsWithExecState(arg1, arg2, arg3);
+ m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
return appendCallWithExceptionCheck(operation);
}
JITCompiler::Call callOperation(V_DFGOperation_EAZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
{
- setupArgumentsWithExecState(arg1, arg2, arg3);
+ m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
return appendCallWithExceptionCheck(operation);
}
JITCompiler::Call callOperation(V_DFGOperation_ECJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
{
- setupArgumentsWithExecState(arg1, arg2, arg3);
+ m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
return appendCallWithExceptionCheck(operation);
}
JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1)
{
- setupArgumentsWithExecState(arg1);
+ m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
{
- setupArguments(arg1, arg2);
+ m_jit.setupArguments(arg1, arg2);
return appendCallSetResult(operation, result);
}
#else
JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1)
{
- setupArguments(arg1);
+ m_jit.setupArguments(arg1);
JITCompiler::Call call = m_jit.appendCall(operation);
m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
return call;
}
JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, void* pointer)
{
- setupArgumentsWithExecState(TrustedImmPtr(pointer));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, void* pointer)
{
- setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EGI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, Identifier* identifier)
{
- setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
{
- setupArgumentsWithExecState(arg1);
+ m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier)
{
- setupArgumentsWithExecState(TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
{
- setupArgumentsWithExecState(arg1);
+ m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg resultTag, GPRReg resultPayload, void* pointer, size_t size)
{
- setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg resultTag, GPRReg resultPayload, int startConstant, int numConstants)
{
- setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
+ m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, void* pointer)
{
- setupArgumentsWithExecState(arg1Payload, arg1Tag, ImmPtr(pointer));
+ m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, ImmPtr(pointer));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
{
- setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2);
+ m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, Identifier* identifier)
{
- setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, Identifier* identifier)
{
- setupArgumentsWithExecState(arg1Payload, arg1Tag, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, TrustedImmPtr(identifier));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1Tag, GPRReg arg1Payload, Identifier* identifier)
{
- setupArgumentsWithExecState(arg1Payload, TrustedImm32(arg1Tag), TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(arg1Payload, TrustedImm32(arg1Tag), TrustedImmPtr(identifier));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
{
- setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2);
+ m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload)
{
- setupArgumentsWithExecState(arg1Payload, arg1Tag);
+ m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result)
{
- setupArgumentsExecState();
+ m_jit.setupArgumentsExecState();
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, GPRReg arg1)
{
- setupArgumentsWithExecState(arg1);
+ m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(C_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell)
{
- setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
{
- setupArgumentsWithExecState(arg1Payload, arg1Tag);
+ m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
{
- setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2Payload, arg2Tag);
+ m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
{
- setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2Payload, arg2Tag);
+ m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, MacroAssembler::Imm32 imm)
{
- setupArgumentsWithExecState(arg1Payload, arg1Tag, imm, TrustedImm32(JSValue::Int32Tag));
+ m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, imm, TrustedImm32(JSValue::Int32Tag));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, MacroAssembler::Imm32 imm, GPRReg arg2Tag, GPRReg arg2Payload)
{
- setupArgumentsWithExecState(imm, TrustedImm32(JSValue::Int32Tag), arg2Payload, arg2Tag);
+ m_jit.setupArgumentsWithExecState(imm, TrustedImm32(JSValue::Int32Tag), arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload)
{
- setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag);
+ m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, void* pointer)
{
- setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2, TrustedImmPtr(pointer));
+ m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2, TrustedImmPtr(pointer));
return appendCallWithExceptionCheck(operation);
}
JITCompiler::Call callOperation(V_DFGOperation_EJCI operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, Identifier* identifier)
{
- setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2, TrustedImmPtr(identifier));
return appendCallWithExceptionCheck(operation);
}
JITCompiler::Call callOperation(V_DFGOperation_ECJJ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload)
{
- setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag);
+ m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag);
return appendCallWithExceptionCheck(operation);
}
JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
{
- setupArgumentsWithExecState(arg1, arg2, arg3Payload, arg3Tag);
+ m_jit.setupArgumentsWithExecState(arg1, arg2, arg3Payload, arg3Tag);
return appendCallWithExceptionCheck(operation);
}
JITCompiler::Call callOperation(V_DFGOperation_EAZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
{
- setupArgumentsWithExecState(arg1, arg2, arg3Payload, arg3Tag);
+ m_jit.setupArgumentsWithExecState(arg1, arg2, arg3Payload, arg3Tag);
return appendCallWithExceptionCheck(operation);
}
JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
{
- setupArgumentsWithExecState(arg1Payload, arg1Tag);
+ m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
{
- setupArguments(arg1, arg2);
+ m_jit.setupArguments(arg1, arg2);
return appendCallSetResult(operation, result);
}
#endif
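Throughout the overloads above, the patch replaces SpeculativeJIT's private argument-setup helpers with calls into m_jit (the JITCompiler), whose call helpers now own argument marshalling; each callOperation shrinks to one setup call plus an append-with-exception-check. As a rough idea of what such a helper amounts to on a register-argument ABI, here is an illustrative sketch; the name, the register choices, and the absence of conflict handling are simplifications, not the real implementation (which lives in the new DFGCCallHelpers.h, outside this excerpt):

    // Illustrative only: a simplified idea of setupArgumentsWithExecState(payload, tag)
    // on a platform that passes C arguments in registers. The real helper also copes
    // with source/destination register conflicts and with stack-argument ABIs.
    static void setupArgumentsWithExecStateSketch(MacroAssembler& jit, GPRReg payload, GPRReg tag)
    {
        jit.move(payload, GPRInfo::argumentGPR1);                    // EncodedJSValue payload half
        jit.move(tag, GPRInfo::argumentGPR2);                        // EncodedJSValue tag half
        jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); // ExecState* is always argument 0
    }

This is why the 32-bit wrappers pass arg1Payload before arg1Tag: a boxed JSValue travels as a (payload, tag) pair.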
@@ -1834,7 +1393,11 @@ private:
// These methods add call instructions, with optional exception checks & setting results.
JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function)
{
- return m_jit.addExceptionCheck(m_jit.appendCall(function), at(m_compileIndex).codeOrigin);
+ CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
+ CallBeginToken token = m_jit.beginCall(codeOrigin);
+ JITCompiler::Call call = m_jit.appendCall(function);
+ m_jit.addExceptionCheck(call, codeOrigin, token);
+ return call;
}
JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result)
{
@@ -1842,26 +1405,10 @@ private:
m_jit.move(GPRInfo::returnValueGPR, result);
return call;
}
- void setupResults(GPRReg destA, GPRReg destB)
- {
- GPRReg srcA = GPRInfo::returnValueGPR;
- GPRReg srcB = GPRInfo::returnValueGPR2;
-
- if (srcB != destA) {
- // Handle the easy cases - two simple moves.
- m_jit.move(srcA, destA);
- m_jit.move(srcB, destB);
- } else if (srcA != destB) {
- // Handle the non-swap case - just put srcB in place first.
- m_jit.move(srcB, destB);
- m_jit.move(srcA, destA);
- } else
- m_jit.swap(destA, destB);
- }
JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result1, GPRReg result2)
{
JITCompiler::Call call = appendCallWithExceptionCheck(function);
- setupResults(result1, result2);
+ m_jit.setupResults(result1, result2);
return call;
}
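The exception-check helpers above adopt the new CallBeginToken protocol: a call site first announces itself for a given CodeOrigin, then emits the call, then hands the token back when registering the exception check or call notification, so the compiler can pair each emitted call with its origin. Condensed from the appendCallWithExceptionCheck and emitCall changes in this patch (comments are editorial):

    CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;

    // Flavor 1: nextCallBeginToken for calls recorded after the fact via notifyCall
    // (the near call on emitCall's fast path).
    CallBeginToken token = m_jit.nextCallBeginToken(codeOrigin);
    JITCompiler::Call fastCall = m_jit.nearCall();
    m_jit.notifyCall(fastCall, codeOrigin, token);

    // Flavor 2: beginCall for C calls that get an exception check attached afterwards.
    token = m_jit.beginCall(codeOrigin);
    JITCompiler::Call slowCall = m_jit.appendCall(slowCallFunction);
    m_jit.addFastExceptionCheck(slowCall, codeOrigin, token);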
#if CPU(X86)
@@ -1975,6 +1522,8 @@ private:
void compileUInt32ToNumber(Node&);
void compileGetByValOnByteArray(Node&);
void compilePutByValForByteArray(GPRReg base, GPRReg property, Node&);
+ void compileAdd(Node&);
+ void compileArithSub(Node&);
void compileArithMul(Node&);
void compileArithMod(Node&);
void compileSoftModulo(Node&);
@@ -1988,9 +1537,13 @@ private:
SignedTypedArray,
UnsignedTypedArray
};
+ enum TypedArrayRounding {
+ TruncateRounding,
+ ClampRounding
+ };
void compileGetIndexedPropertyStorage(Node&);
void compileGetByValOnIntTypedArray(const TypedArrayDescriptor&, Node&, size_t elementSize, TypedArraySpeculationRequirements, TypedArraySignedness);
- void compilePutByValForIntTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node&, size_t elementSize, TypedArraySpeculationRequirements, TypedArraySignedness);
+ void compilePutByValForIntTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node&, size_t elementSize, TypedArraySpeculationRequirements, TypedArraySignedness, TypedArrayRounding = TruncateRounding);
void compileGetByValOnFloatTypedArray(const TypedArrayDescriptor&, Node&, size_t elementSize, TypedArraySpeculationRequirements);
void compilePutByValForFloatTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node&, size_t elementSize, TypedArraySpeculationRequirements);
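The new TypedArrayRounding parameter exists for Uint8ClampedArray stores: the other integer typed arrays truncate the incoming value to its low bits, while a clamped store pins out-of-range values to [0, 255] and rounds doubles to the nearest integer (ties to even). A hedged scalar sketch of the rule the ClampRounding path has to implement; the JIT emits the equivalent in assembly, so this helper is purely illustrative:

    #include <cmath>
    #include <cstdint>

    // Illustrative scalar version of the Uint8ClampedArray store rule:
    // clamp to [0, 255] and round, instead of taking value & 0xff.
    static uint8_t clampedStoreValue(double value)
    {
        if (!(value > 0))   // catches negatives and NaN
            return 0;
        if (value > 255)
            return 255;
        return static_cast<uint8_t>(std::nearbyint(value)); // round-to-nearest-even in the default FP mode
    }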
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
index d6a82b1cc..e1f92ba9e 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
@@ -493,7 +493,7 @@ void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
}
-JITCompiler::Call SpeculativeJIT::cachedGetById(GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget)
+JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
m_jit.beginUninterruptedSequence();
JITCompiler::DataLabelPtr structureToCompare;
@@ -513,24 +513,26 @@ JITCompiler::Call SpeculativeJIT::cachedGetById(GPRReg baseTagGPROrNone, GPRReg
JITCompiler::Label slowCase = m_jit.label();
- silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ if (spillMode == NeedToSpill)
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
JITCompiler::Call functionCall;
if (baseTagGPROrNone == InvalidGPRReg)
functionCall = callOperation(operationGetByIdOptimize, resultTagGPR, resultPayloadGPR, JSValue::CellTag, basePayloadGPR, identifier(identifierNumber));
else
functionCall = callOperation(operationGetByIdOptimize, resultTagGPR, resultPayloadGPR, baseTagGPROrNone, basePayloadGPR, identifier(identifierNumber));
- silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+ if (spillMode == NeedToSpill)
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
done.link(&m_jit);
JITCompiler::Label doneLabel = m_jit.label();
- m_jit.addPropertyAccess(PropertyAccessRecord(structureToCompare, functionCall, structureCheck, tagLoadWithPatch, payloadLoadWithPatch, slowCase, doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(resultTagGPR), safeCast<int8_t>(resultPayloadGPR), safeCast<int8_t>(scratchGPR)));
+ m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, tagLoadWithPatch, payloadLoadWithPatch, slowCase, doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(resultTagGPR), safeCast<int8_t>(resultPayloadGPR), safeCast<int8_t>(scratchGPR), spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
return functionCall;
}
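cachedGetById now takes a SpillRegistersMode so the new GetByIdFlush node (added later in this file) can indicate that flushRegisters() has already run and the slow path need not spill and refill around the callOperation; the PropertyAccessRecord correspondingly records RegistersFlushed instead of RegistersInUse. The shape of that guard, condensed from the slow path above:

    // Spill/fill only when the caller has not already flushed live registers,
    // and remember which case applied so later repatching knows the register state.
    if (spillMode == NeedToSpill)
        silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
    JITCompiler::Call functionCall = callOperation(operationGetByIdOptimize, resultTagGPR, resultPayloadGPR, baseTagGPROrNone, basePayloadGPR, identifier(identifierNumber));
    if (spillMode == NeedToSpill)
        silentFillAllRegisters(resultTagGPR, resultPayloadGPR);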
-void SpeculativeJIT::cachedPutById(GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
+void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
{
m_jit.beginUninterruptedSequence();
JITCompiler::DataLabelPtr structureToCompare;
@@ -571,7 +573,7 @@ void SpeculativeJIT::cachedPutById(GPRReg basePayloadGPR, GPRReg valueTagGPR, GP
done.link(&m_jit);
JITCompiler::Label doneLabel = m_jit.label();
- m_jit.addPropertyAccess(PropertyAccessRecord(structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(tagStoreWithPatch.label()), JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(valueTagGPR), safeCast<int8_t>(valuePayloadGPR), safeCast<int8_t>(scratchGPR)));
+ m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(tagStoreWithPatch.label()), JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(valueTagGPR), safeCast<int8_t>(valuePayloadGPR), safeCast<int8_t>(scratchGPR)));
}
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(NodeIndex operand, bool invert)
@@ -950,8 +952,10 @@ void SpeculativeJIT::emitCall(Node& node)
m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
+ CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
+ CallBeginToken token = m_jit.nextCallBeginToken(codeOrigin);
JITCompiler::Call fastCall = m_jit.nearCall();
- m_jit.notifyCall(fastCall, at(m_compileIndex).codeOrigin);
+ m_jit.notifyCall(fastCall, codeOrigin, token);
JITCompiler::Jump done = m_jit.jump();
@@ -959,13 +963,17 @@ void SpeculativeJIT::emitCall(Node& node)
m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
m_jit.poke(GPRInfo::argumentGPR0);
- JITCompiler::Call slowCall = m_jit.addFastExceptionCheck(m_jit.appendCall(slowCallFunction), at(m_compileIndex).codeOrigin);
+ token = m_jit.beginCall(codeOrigin);
+ JITCompiler::Call slowCall = m_jit.appendCall(slowCallFunction);
+ m_jit.addFastExceptionCheck(slowCall, codeOrigin, token);
m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
- m_jit.notifyCall(m_jit.call(GPRInfo::returnValueGPR), at(m_compileIndex).codeOrigin);
+ token = m_jit.nextCallBeginToken(codeOrigin);
+ JITCompiler::Call theCall = m_jit.call(GPRInfo::returnValueGPR);
+ m_jit.notifyCall(theCall, codeOrigin, token);
done.link(&m_jit);
- setupResults(resultPayloadGPR, resultTagGPR);
+ m_jit.setupResults(resultPayloadGPR, resultTagGPR);
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, DataFormatJS, UseChildrenCalledExplicitly);
@@ -1878,133 +1886,17 @@ void SpeculativeJIT::compile(Node& node)
}
case ValueAdd:
- case ArithAdd: {
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
- if (isInt32Constant(node.child1())) {
- int32_t imm1 = valueOfInt32Constant(node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this);
-
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- m_jit.move(op2.gpr(), result.gpr());
- m_jit.add32(Imm32(imm1), result.gpr());
- } else
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
-
- integerResult(result.gpr(), m_compileIndex);
- break;
- }
-
- if (isInt32Constant(node.child2())) {
- SpeculateIntegerOperand op1(this, node.child1());
- int32_t imm2 = valueOfInt32Constant(node.child2());
- GPRTemporary result(this);
-
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- m_jit.move(op1.gpr(), result.gpr());
- m_jit.add32(Imm32(imm2), result.gpr());
- } else
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
-
- integerResult(result.gpr(), m_compileIndex);
- break;
- }
-
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this, op1, op2);
-
- GPRReg gpr1 = op1.gpr();
- GPRReg gpr2 = op2.gpr();
- GPRReg gprResult = result.gpr();
-
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- if (gpr1 == gprResult)
- m_jit.add32(gpr2, gprResult);
- else {
- m_jit.move(gpr2, gprResult);
- m_jit.add32(gpr1, gprResult);
- }
- } else {
- MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
-
- if (gpr1 == gprResult)
- speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
- else if (gpr2 == gprResult)
- speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
- else
- speculationCheck(Overflow, JSValueRegs(), NoNode, check);
- }
-
- integerResult(gprResult, m_compileIndex);
- break;
- }
-
- if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2()))) {
- SpeculateDoubleOperand op1(this, node.child1());
- SpeculateDoubleOperand op2(this, node.child2());
- FPRTemporary result(this, op1, op2);
-
- FPRReg reg1 = op1.fpr();
- FPRReg reg2 = op2.fpr();
- m_jit.addDouble(reg1, reg2, result.fpr());
-
- doubleResult(result.fpr(), m_compileIndex);
- break;
- }
-
- ASSERT(op == ValueAdd);
- compileValueAdd(node);
+ case ArithAdd:
+ compileAdd(node);
break;
- }
-
- case ArithSub: {
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
- if (isInt32Constant(node.child2())) {
- SpeculateIntegerOperand op1(this, node.child1());
- int32_t imm2 = valueOfInt32Constant(node.child2());
- GPRTemporary result(this);
-
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- m_jit.move(op1.gpr(), result.gpr());
- m_jit.sub32(Imm32(imm2), result.gpr());
- } else
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
-
- integerResult(result.gpr(), m_compileIndex);
- break;
- }
-
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this);
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- m_jit.move(op1.gpr(), result.gpr());
- m_jit.sub32(op2.gpr(), result.gpr());
- } else
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
-
- integerResult(result.gpr(), m_compileIndex);
- break;
- }
-
- SpeculateDoubleOperand op1(this, node.child1());
- SpeculateDoubleOperand op2(this, node.child2());
- FPRTemporary result(this, op1);
-
- FPRReg reg1 = op1.fpr();
- FPRReg reg2 = op2.fpr();
- m_jit.subDouble(reg1, reg2, result.fpr());
-
- doubleResult(result.fpr(), m_compileIndex);
+ case ArithSub:
+ compileArithSub(node);
break;
- }
- case ArithMul: {
+ case ArithMul:
compileArithMul(node);
break;
- }
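The large removed blocks above are not deleted logic: the integer and double paths for ValueAdd/ArithAdd and ArithSub presumably move into the shared compileAdd and compileArithSub helpers declared in DFGSpeculativeJIT.h earlier in this patch, so the 32-bit and 64-bit backends stop carrying duplicate copies. The heart of what moves is the overflow-checked integer add, reproduced from the removed code with its recovery hookup:

    // If the node may not truncate, the add is emitted as a branch-on-overflow, and the
    // OSR exit gets a SpeculationRecovery so the clobbered operand can be recomputed.
    MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
    if (gpr1 == gprResult)
        speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
    else if (gpr2 == gprResult)
        speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
    else
        speculationCheck(Overflow, JSValueRegs(), NoNode, check);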
case ArithDiv: {
if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
@@ -2292,7 +2184,14 @@ void SpeculativeJIT::compile(Node& node)
return;
break;
}
-
+
+ if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), node, sizeof(uint8_t), isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
if (at(node.child1()).shouldSpeculateUint16Array()) {
compileGetByValOnIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
@@ -2417,6 +2316,13 @@ void SpeculativeJIT::compile(Node& node)
break;
}
+ if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray, ClampRounding);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
if (at(node.child1()).shouldSpeculateUint16Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
@@ -2552,7 +2458,14 @@ void SpeculativeJIT::compile(Node& node)
return;
break;
}
-
+
+ if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray, ClampRounding);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
if (at(node.child1()).shouldSpeculateUint16Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
@@ -3126,7 +3039,7 @@ void SpeculativeJIT::compile(Node& node)
base.use();
- cachedGetById(InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber());
+ cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber());
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3151,7 +3064,58 @@ void SpeculativeJIT::compile(Node& node)
JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag));
- cachedGetById(baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), notCell);
+ cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), notCell);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case GetByIdFlush: {
+ if (!node.prediction()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ if (isCellPrediction(at(node.child1()).prediction())) {
+ SpeculateCellOperand base(this, node.child1());
+
+ GPRReg baseGPR = base.gpr();
+
+ GPRResult resultTag(this);
+ GPRResult2 resultPayload(this);
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ GPRReg scratchGPR = selectScratchGPR(baseGPR, resultTagGPR, resultPayloadGPR);
+
+ base.use();
+
+ flushRegisters();
+
+ cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ JSValueOperand base(this, node.child1());
+ GPRReg baseTagGPR = base.tagGPR();
+ GPRReg basePayloadGPR = base.payloadGPR();
+
+ GPRResult resultTag(this);
+ GPRResult2 resultPayload(this);
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ GPRReg scratchGPR = selectScratchGPR(baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR);
+
+ base.use();
+
+ flushRegisters();
+
+ JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag));
+
+ cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), notCell, DontSpill);
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
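The GetByIdFlush case added above follows a fixed recipe that explains why DontSpill is safe here; in outline (register names as in the code above):

    base.use();        // consume children explicitly
    flushRegisters();  // everything live is flushed before the cached access...
    cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR,
                  scratchGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill);
                       // ...so the slow path may skip silentSpill/FillAllRegisters
    jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);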
@@ -3224,6 +3188,10 @@ void SpeculativeJIT::compile(Node& node)
compileGetTypedArrayLength(m_jit.globalData()->uint8ArrayDescriptor(), node, !isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type));
break;
}
+ case GetUint8ClampedArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->uint8ClampedArrayDescriptor(), node, !isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
case GetUint16ArrayLength: {
compileGetTypedArrayLength(m_jit.globalData()->uint16ArrayDescriptor(), node, !isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type));
break;
@@ -3372,7 +3340,7 @@ void SpeculativeJIT::compile(Node& node)
base.use();
value.use();
- cachedPutById(baseGPR, valueTagGPR, valuePayloadGPR, node.child2(), scratchGPR, node.identifierNumber(), NotDirect);
+ cachedPutById(node.codeOrigin, baseGPR, valueTagGPR, valuePayloadGPR, node.child2(), scratchGPR, node.identifierNumber(), NotDirect);
noResult(m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3391,7 +3359,7 @@ void SpeculativeJIT::compile(Node& node)
base.use();
value.use();
- cachedPutById(baseGPR, valueTagGPR, valuePayloadGPR, node.child2(), scratchGPR, node.identifierNumber(), Direct);
+ cachedPutById(node.codeOrigin, baseGPR, valueTagGPR, valuePayloadGPR, node.child2(), scratchGPR, node.identifierNumber(), Direct);
noResult(m_compileIndex, UseChildrenCalledExplicitly);
break;
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
index 7e36165f3..139dedded 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
@@ -477,7 +477,7 @@ void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
jsValueResult(result.gpr(), m_compileIndex);
}
-JITCompiler::Call SpeculativeJIT::cachedGetById(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget)
+JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
JITCompiler::DataLabelPtr structureToCompare;
JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
@@ -494,23 +494,25 @@ JITCompiler::Call SpeculativeJIT::cachedGetById(GPRReg baseGPR, GPRReg resultGPR
JITCompiler::Label slowCase = m_jit.label();
- silentSpillAllRegisters(resultGPR);
+ if (spillMode == NeedToSpill)
+ silentSpillAllRegisters(resultGPR);
JITCompiler::Call functionCall = callOperation(operationGetByIdOptimize, resultGPR, baseGPR, identifier(identifierNumber));
- silentFillAllRegisters(resultGPR);
+ if (spillMode == NeedToSpill)
+ silentFillAllRegisters(resultGPR);
done.link(&m_jit);
JITCompiler::Label doneLabel = m_jit.label();
- m_jit.addPropertyAccess(PropertyAccessRecord(structureToCompare, functionCall, structureCheck, loadWithPatch, slowCase, doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(resultGPR), safeCast<int8_t>(scratchGPR)));
+ m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, loadWithPatch, slowCase, doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(resultGPR), safeCast<int8_t>(scratchGPR), spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
- if (scratchGPR != resultGPR && scratchGPR != InvalidGPRReg)
+ if (scratchGPR != resultGPR && scratchGPR != InvalidGPRReg && spillMode == NeedToSpill)
unlock(scratchGPR);
return functionCall;
}
-void SpeculativeJIT::cachedPutById(GPRReg baseGPR, GPRReg valueGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
+void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
{
JITCompiler::DataLabelPtr structureToCompare;
@@ -549,7 +551,7 @@ void SpeculativeJIT::cachedPutById(GPRReg baseGPR, GPRReg valueGPR, NodeIndex va
done.link(&m_jit);
JITCompiler::Label doneLabel = m_jit.label();
- m_jit.addPropertyAccess(PropertyAccessRecord(structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(storeWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), safeCast<int8_t>(scratchGPR)));
+ m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(storeWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), safeCast<int8_t>(scratchGPR)));
}
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(NodeIndex operand, bool invert)
@@ -951,17 +953,23 @@ void SpeculativeJIT::emitCall(Node& node)
m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
+ CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
+ CallBeginToken token = m_jit.nextCallBeginToken(codeOrigin);
JITCompiler::Call fastCall = m_jit.nearCall();
- m_jit.notifyCall(fastCall, at(m_compileIndex).codeOrigin);
+ m_jit.notifyCall(fastCall, codeOrigin, token);
JITCompiler::Jump done = m_jit.jump();
slowPath.link(&m_jit);
m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- JITCompiler::Call slowCall = m_jit.addFastExceptionCheck(m_jit.appendCall(slowCallFunction), at(m_compileIndex).codeOrigin);
+ token = m_jit.beginCall(codeOrigin);
+ JITCompiler::Call slowCall = m_jit.appendCall(slowCallFunction);
+ m_jit.addFastExceptionCheck(slowCall, codeOrigin, token);
m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
- m_jit.notifyCall(m_jit.call(GPRInfo::returnValueGPR), at(m_compileIndex).codeOrigin);
+ token = m_jit.nextCallBeginToken(codeOrigin);
+ JITCompiler::Call theCall = m_jit.call(GPRInfo::returnValueGPR);
+ m_jit.notifyCall(theCall, codeOrigin, token);
done.link(&m_jit);
@@ -1957,133 +1965,17 @@ void SpeculativeJIT::compile(Node& node)
}
case ValueAdd:
- case ArithAdd: {
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
- if (isInt32Constant(node.child1())) {
- int32_t imm1 = valueOfInt32Constant(node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this);
-
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- m_jit.move(op2.gpr(), result.gpr());
- m_jit.add32(Imm32(imm1), result.gpr());
- } else
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
-
- integerResult(result.gpr(), m_compileIndex);
- break;
- }
-
- if (isInt32Constant(node.child2())) {
- SpeculateIntegerOperand op1(this, node.child1());
- int32_t imm2 = valueOfInt32Constant(node.child2());
- GPRTemporary result(this);
-
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- m_jit.move(op1.gpr(), result.gpr());
- m_jit.add32(Imm32(imm2), result.gpr());
- } else
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
-
- integerResult(result.gpr(), m_compileIndex);
- break;
- }
-
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this, op1, op2);
-
- GPRReg gpr1 = op1.gpr();
- GPRReg gpr2 = op2.gpr();
- GPRReg gprResult = result.gpr();
-
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- if (gpr1 == gprResult)
- m_jit.add32(gpr2, gprResult);
- else {
- m_jit.move(gpr2, gprResult);
- m_jit.add32(gpr1, gprResult);
- }
- } else {
- MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
-
- if (gpr1 == gprResult)
- speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
- else if (gpr2 == gprResult)
- speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
- else
- speculationCheck(Overflow, JSValueRegs(), NoNode, check);
- }
-
- integerResult(gprResult, m_compileIndex);
- break;
- }
-
- if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2()))) {
- SpeculateDoubleOperand op1(this, node.child1());
- SpeculateDoubleOperand op2(this, node.child2());
- FPRTemporary result(this, op1, op2);
-
- FPRReg reg1 = op1.fpr();
- FPRReg reg2 = op2.fpr();
- m_jit.addDouble(reg1, reg2, result.fpr());
-
- doubleResult(result.fpr(), m_compileIndex);
- break;
- }
-
- ASSERT(op == ValueAdd);
- compileValueAdd(node);
+ case ArithAdd:
+ compileAdd(node);
break;
- }
-
- case ArithSub: {
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
- if (isInt32Constant(node.child2())) {
- SpeculateIntegerOperand op1(this, node.child1());
- int32_t imm2 = valueOfInt32Constant(node.child2());
- GPRTemporary result(this);
-
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- m_jit.move(op1.gpr(), result.gpr());
- m_jit.sub32(Imm32(imm2), result.gpr());
- } else
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
-
- integerResult(result.gpr(), m_compileIndex);
- break;
- }
-
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this);
-
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- m_jit.move(op1.gpr(), result.gpr());
- m_jit.sub32(op2.gpr(), result.gpr());
- } else
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
- integerResult(result.gpr(), m_compileIndex);
- break;
- }
-
- SpeculateDoubleOperand op1(this, node.child1());
- SpeculateDoubleOperand op2(this, node.child2());
- FPRTemporary result(this, op1);
-
- FPRReg reg1 = op1.fpr();
- FPRReg reg2 = op2.fpr();
- m_jit.subDouble(reg1, reg2, result.fpr());
-
- doubleResult(result.fpr(), m_compileIndex);
+ case ArithSub:
+ compileArithSub(node);
break;
- }
- case ArithMul: {
+ case ArithMul:
compileArithMul(node);
break;
- }
case ArithDiv: {
if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
@@ -2346,7 +2238,14 @@ void SpeculativeJIT::compile(Node& node)
return;
break;
}
-
+
+ if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), node, sizeof(uint8_t), isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
if (at(node.child1()).shouldSpeculateUint16Array()) {
compileGetByValOnIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
@@ -2458,7 +2357,12 @@ void SpeculativeJIT::compile(Node& node)
return;
break;
}
-
+
+ if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray, ClampRounding);
+ break;
+ }
+
if (at(node.child1()).shouldSpeculateUint16Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
@@ -2591,7 +2495,14 @@ void SpeculativeJIT::compile(Node& node)
return;
break;
}
-
+
+ if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray, ClampRounding);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
if (at(node.child1()).shouldSpeculateUint16Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
@@ -3123,7 +3034,7 @@ void SpeculativeJIT::compile(Node& node)
base.use();
- cachedGetById(baseGPR, resultGPR, scratchGPR, node.identifierNumber());
+ cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber());
jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3145,7 +3056,53 @@ void SpeculativeJIT::compile(Node& node)
JITCompiler::Jump notCell = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
- cachedGetById(baseGPR, resultGPR, scratchGPR, node.identifierNumber(), notCell);
+ cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber(), notCell);
+
+ jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
+
+ break;
+ }
+
+ case GetByIdFlush: {
+ if (!node.prediction()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ if (isCellPrediction(at(node.child1()).prediction())) {
+ SpeculateCellOperand base(this, node.child1());
+ GPRReg baseGPR = base.gpr();
+
+ GPRResult result(this);
+
+ GPRReg resultGPR = result.gpr();
+
+ GPRReg scratchGPR = selectScratchGPR(baseGPR, resultGPR);
+
+ base.use();
+
+ flushRegisters();
+
+ cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill);
+
+ jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ JSValueOperand base(this, node.child1());
+ GPRReg baseGPR = base.gpr();
+
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ GPRReg scratchGPR = selectScratchGPR(baseGPR, resultGPR);
+
+ base.use();
+ flushRegisters();
+
+ JITCompiler::Jump notCell = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
+
+ cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber(), notCell, DontSpill);
jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
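One detail worth noting when comparing the two GetByIdFlush implementations in this patch: the non-cell guard depends on the value representation. Both lines below are taken from the respective files, with their original register names:

    // 32-bit (tag/payload pair): not a cell if the tag word is not CellTag.
    JITCompiler::Jump notCell32 = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag));
    // 64-bit (single boxed register): not a cell if any tag-mask bits are set.
    JITCompiler::Jump notCell64 = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);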
@@ -3219,6 +3176,10 @@ void SpeculativeJIT::compile(Node& node)
compileGetTypedArrayLength(m_jit.globalData()->uint8ArrayDescriptor(), node, !isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type));
break;
}
+ case GetUint8ClampedArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->uint8ClampedArrayDescriptor(), node, !isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
case GetUint16ArrayLength: {
compileGetTypedArrayLength(m_jit.globalData()->uint16ArrayDescriptor(), node, !isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type));
break;
@@ -3359,7 +3320,7 @@ void SpeculativeJIT::compile(Node& node)
base.use();
value.use();
- cachedPutById(baseGPR, valueGPR, node.child2(), scratchGPR, node.identifierNumber(), NotDirect);
+ cachedPutById(node.codeOrigin, baseGPR, valueGPR, node.child2(), scratchGPR, node.identifierNumber(), NotDirect);
noResult(m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3377,7 +3338,7 @@ void SpeculativeJIT::compile(Node& node)
base.use();
value.use();
- cachedPutById(baseGPR, valueGPR, node.child2(), scratchGPR, node.identifierNumber(), Direct);
+ cachedPutById(node.codeOrigin, baseGPR, valueGPR, node.child2(), scratchGPR, node.identifierNumber(), Direct);
noResult(m_compileIndex, UseChildrenCalledExplicitly);
break;
diff --git a/Source/JavaScriptCore/dfg/DFGStructureSet.h b/Source/JavaScriptCore/dfg/DFGStructureSet.h
deleted file mode 100644
index 181c32910..000000000
--- a/Source/JavaScriptCore/dfg/DFGStructureSet.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGStructureSet_h
-#define DFGStructureSet_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(DFG_JIT)
-
-#include "PredictedType.h"
-#include <stdio.h>
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-class Structure;
-
-namespace DFG {
-
-class StructureAbstractValue;
-
-class StructureSet {
-public:
- StructureSet() { }
-
- StructureSet(Structure* structure)
- {
- m_structures.append(structure);
- }
-
- void clear()
- {
- m_structures.clear();
- }
-
- void add(Structure* structure)
- {
- ASSERT(!contains(structure));
- m_structures.append(structure);
- }
-
- bool addAll(const StructureSet& other)
- {
- bool changed = false;
- for (size_t i = 0; i < other.size(); ++i) {
- if (contains(other[i]))
- continue;
- add(other[i]);
- changed = true;
- }
- return changed;
- }
-
- void remove(Structure* structure)
- {
- for (size_t i = 0; i < m_structures.size(); ++i) {
- if (m_structures[i] != structure)
- continue;
-
- m_structures[i] = m_structures.last();
- m_structures.removeLast();
- return;
- }
- }
-
- bool contains(Structure* structure) const
- {
- for (size_t i = 0; i < m_structures.size(); ++i) {
- if (m_structures[i] == structure)
- return true;
- }
- return false;
- }
-
- bool isSubsetOf(const StructureSet& other) const
- {
- for (size_t i = 0; i < m_structures.size(); ++i) {
- if (!other.contains(m_structures[i]))
- return false;
- }
- return true;
- }
-
- bool isSupersetOf(const StructureSet& other) const
- {
- return other.isSubsetOf(*this);
- }
-
- size_t size() const { return m_structures.size(); }
-
- Structure* at(size_t i) const { return m_structures.at(i); }
-
- Structure* operator[](size_t i) const { return at(i); }
-
- Structure* last() const { return m_structures.last(); }
-
- PredictedType predictionFromStructures() const
- {
- PredictedType result = PredictNone;
-
- for (size_t i = 0; i < m_structures.size(); ++i)
- mergePrediction(result, predictionFromStructure(m_structures[i]));
-
- return result;
- }
-
- bool operator==(const StructureSet& other) const
- {
- if (m_structures.size() != other.m_structures.size())
- return false;
-
- for (size_t i = 0; i < m_structures.size(); ++i) {
- if (!other.contains(m_structures[i]))
- return false;
- }
-
- return true;
- }
-
-#ifndef NDEBUG
- void dump(FILE* out)
- {
- fprintf(out, "[");
- for (size_t i = 0; i < m_structures.size(); ++i) {
- if (i)
- fprintf(out, ", ");
- fprintf(out, "%p", m_structures[i]);
- }
- fprintf(out, "]");
- }
-#endif
-
-private:
- friend class StructureAbstractValue;
-
- Vector<Structure*, 2> m_structures;
-};
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGStructureSet_h
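The DFG-local StructureSet header is removed outright; where the class now lives is not visible in this excerpt. For reference, the deleted header amounted to a small vector-backed set, and typical use looked like the sketch below (structureA, structureB, structureC, otherSet, and allowedStructures are hypothetical placeholders):

    // Usage sketch based solely on the deleted header above.
    StructureSet set(structureA);                 // start with one structure
    if (!set.contains(structureB))
        set.add(structureB);                      // add() asserts the element is not already present
    bool widened = set.addAll(otherSet);          // true if anything new was added
    bool ok = set.isSubsetOf(allowedStructures);  // subset/superset queries
    PredictedType prediction = set.predictionFromStructures();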
diff --git a/Source/JavaScriptCore/dfg/DFGThunks.cpp b/Source/JavaScriptCore/dfg/DFGThunks.cpp
index fddb656cc..d7c3fab23 100644
--- a/Source/JavaScriptCore/dfg/DFGThunks.cpp
+++ b/Source/JavaScriptCore/dfg/DFGThunks.cpp
@@ -66,7 +66,7 @@ MacroAssemblerCodeRef osrExitGenerationThunkGenerator(JSGlobalData* globalData)
jit.jump(MacroAssembler::AbsoluteAddress(&globalData->osrExitJumpDestination));
- LinkBuffer patchBuffer(*globalData, &jit);
+ LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
patchBuffer.link(functionCall, compileOSRExit);