Diffstat (limited to 'Source/JavaScriptCore/dfg')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAbstractState.cpp | 766
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAbstractState.h | 87
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAbstractValue.h | 112
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAdjacencyList.h | 31
-rw-r--r--  Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp | 750
-rw-r--r--  Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.h | 49
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp | 8
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h | 43
-rw-r--r--  Source/JavaScriptCore/dfg/DFGBasicBlock.h | 33
-rw-r--r--  Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp | 329
-rw-r--r--  Source/JavaScriptCore/dfg/DFGByteCodeParser.h | 2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCCallHelpers.h | 43
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCFAPhase.cpp | 10
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCFAPhase.h | 2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp | 730
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.h | 52
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCSEPhase.cpp | 217
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCSEPhase.h | 4
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCapabilities.cpp | 55
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCapabilities.h | 64
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCommon.h | 23
-rw-r--r--  Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp | 120
-rw-r--r--  Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.h | 48
-rw-r--r--  Source/JavaScriptCore/dfg/DFGDominators.cpp | 109
-rw-r--r--  Source/JavaScriptCore/dfg/DFGDominators.h | 77
-rw-r--r--  Source/JavaScriptCore/dfg/DFGDriver.cpp | 48
-rw-r--r--  Source/JavaScriptCore/dfg/DFGDriver.h | 9
-rw-r--r--  Source/JavaScriptCore/dfg/DFGEdge.h | 4
-rw-r--r--  Source/JavaScriptCore/dfg/DFGFixupPhase.cpp | 30
-rw-r--r--  Source/JavaScriptCore/dfg/DFGFixupPhase.h | 2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGPRInfo.h | 6
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGraph.cpp | 151
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGraph.h | 210
-rw-r--r--  Source/JavaScriptCore/dfg/DFGInsertionSet.h | 2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGJITCompiler.cpp | 34
-rw-r--r--  Source/JavaScriptCore/dfg/DFGJITCompiler.h | 78
-rw-r--r--  Source/JavaScriptCore/dfg/DFGNode.h | 89
-rw-r--r--  Source/JavaScriptCore/dfg/DFGNodeFlags.cpp | 7
-rw-r--r--  Source/JavaScriptCore/dfg/DFGNodeFlags.h | 2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGNodeType.h | 24
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSREntry.cpp | 8
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExit.cpp | 11
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp | 4
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h | 5
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp | 125
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp | 87
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOperations.cpp | 103
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOperations.h | 20
-rw-r--r--  Source/JavaScriptCore/dfg/DFGPhase.cpp | 3
-rw-r--r--  Source/JavaScriptCore/dfg/DFGPhase.h | 11
-rw-r--r--  Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp | 79
-rw-r--r--  Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h | 2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp | 9
-rw-r--r--  Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.h | 2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGScoreBoard.h | 13
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h | 123
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h | 496
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp | 260
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h | 686
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp | 723
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp | 757
-rw-r--r--  Source/JavaScriptCore/dfg/DFGThunks.cpp | 15
-rw-r--r--  Source/JavaScriptCore/dfg/DFGValidate.cpp | 362
-rw-r--r--  Source/JavaScriptCore/dfg/DFGValidate.h | 51
-rw-r--r--  Source/JavaScriptCore/dfg/DFGVariableAccessData.h | 37
-rw-r--r--  Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp | 43
-rw-r--r--  Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h | 2
67 files changed, 7280 insertions(+), 1217 deletions(-)
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
index 3eb5463a7..33c058e7d 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -91,6 +91,8 @@ void AbstractState::beginBasicBlock(BasicBlock* basicBlock)
basicBlock->cfaHasVisited = true;
m_block = basicBlock;
m_isValid = true;
+ m_foundConstants = false;
+ m_branchDirection = InvalidBranchDirection;
}
void AbstractState::initialize(Graph& graph)
@@ -98,6 +100,8 @@ void AbstractState::initialize(Graph& graph)
PROFILE(FLAG_FOR_BLOCK_INITIALIZATION);
BasicBlock* root = graph.m_blocks[0].get();
root->cfaShouldRevisit = true;
+ root->cfaHasVisited = false;
+ root->cfaFoundConstants = false;
for (size_t i = 0; i < root->valuesAtHead.numberOfArguments(); ++i) {
Node& node = graph[root->variablesAtHead.argument(i)];
ASSERT(node.op() == SetArgument);
@@ -108,7 +112,7 @@ void AbstractState::initialize(Graph& graph)
continue;
}
- if (graph.argumentIsCaptured(i)) {
+ if (node.variableAccessData()->isCaptured()) {
root->valuesAtHead.argument(i).makeTop();
continue;
}
@@ -140,21 +144,46 @@ void AbstractState::initialize(Graph& graph)
root->valuesAtHead.argument(i).set(PredictFloat64Array);
else
root->valuesAtHead.argument(i).makeTop();
+
+ root->valuesAtTail.argument(i).clear();
}
for (size_t i = 0; i < root->valuesAtHead.numberOfLocals(); ++i) {
- if (!graph.localIsCaptured(i))
+ NodeIndex nodeIndex = root->variablesAtHead.local(i);
+ if (nodeIndex != NoNode && graph[nodeIndex].variableAccessData()->isCaptured())
+ root->valuesAtHead.local(i).makeTop();
+ else
+ root->valuesAtHead.local(i).clear();
+ root->valuesAtTail.local(i).clear();
+ }
+ for (BlockIndex blockIndex = 1; blockIndex < graph.m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = graph.m_blocks[blockIndex].get();
+ if (!block)
continue;
- root->valuesAtHead.local(i).makeTop();
+ if (!block->isReachable)
+ continue;
+ block->cfaShouldRevisit = false;
+ block->cfaHasVisited = false;
+ block->cfaFoundConstants = false;
+ for (size_t i = 0; i < block->valuesAtHead.numberOfArguments(); ++i) {
+ block->valuesAtHead.argument(i).clear();
+ block->valuesAtTail.argument(i).clear();
+ }
+ for (size_t i = 0; i < block->valuesAtHead.numberOfLocals(); ++i) {
+ block->valuesAtHead.local(i).clear();
+ block->valuesAtTail.local(i).clear();
+ }
}
}
-bool AbstractState::endBasicBlock(MergeMode mergeMode)
+bool AbstractState::endBasicBlock(MergeMode mergeMode, BranchDirection* branchDirectionPtr)
{
PROFILE(FLAG_FOR_BLOCK_END);
ASSERT(m_block);
BasicBlock* block = m_block; // Save the block for successor merging.
+ block->cfaFoundConstants = m_foundConstants;
+
if (!m_isValid) {
reset();
return false;
@@ -168,7 +197,8 @@ bool AbstractState::endBasicBlock(MergeMode mergeMode)
dataLog(" Merging state for argument %zu.\n", argument);
#endif
AbstractValue& destination = block->valuesAtTail.argument(argument);
- if (m_graph.argumentIsCaptured(argument)) {
+ NodeIndex nodeIndex = block->variablesAtTail.argument(argument);
+ if (nodeIndex != NoNode && m_graph[nodeIndex].variableAccessData()->isCaptured()) {
if (!destination.isTop()) {
destination.makeTop();
changed = true;
@@ -182,7 +212,8 @@ bool AbstractState::endBasicBlock(MergeMode mergeMode)
dataLog(" Merging state for local %zu.\n", local);
#endif
AbstractValue& destination = block->valuesAtTail.local(local);
- if (m_graph.localIsCaptured(local)) {
+ NodeIndex nodeIndex = block->variablesAtTail.local(local);
+ if (nodeIndex != NoNode && m_graph[nodeIndex].variableAccessData()->isCaptured()) {
if (!destination.isTop()) {
destination.makeTop();
changed = true;
@@ -194,18 +225,27 @@ bool AbstractState::endBasicBlock(MergeMode mergeMode)
ASSERT(mergeMode != DontMerge || !changed);
+ BranchDirection branchDirection = m_branchDirection;
+ if (branchDirectionPtr)
+ *branchDirectionPtr = branchDirection;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" Branch direction = %s\n", branchDirectionToString(branchDirection));
+#endif
+
reset();
if (mergeMode != MergeToSuccessors)
return changed;
- return mergeToSuccessors(m_graph, block);
+ return mergeToSuccessors(m_graph, block, branchDirection);
}
void AbstractState::reset()
{
m_block = 0;
m_isValid = false;
+ m_branchDirection = InvalidBranchDirection;
}
bool AbstractState::execute(unsigned indexInBlock)
@@ -223,41 +263,55 @@ bool AbstractState::execute(unsigned indexInBlock)
switch (node.op()) {
case JSConstant:
case WeakJSConstant: {
- JSValue value = m_graph.valueOfJSConstant(nodeIndex);
- // Have to be careful here! It's tempting to call set(value), but
- // that would be wrong, since that would constitute a proof that this
- // value will always have the same structure. The whole point of a value
- // having a structure is that it may change in the future - for example
- // between when we compile the code and when we run it.
- forNode(nodeIndex).set(predictionFromValue(value));
+ forNode(nodeIndex).set(m_graph.valueOfJSConstant(nodeIndex));
+ node.setCanExit(false);
break;
}
case GetLocal: {
- if (m_graph.isCaptured(node.local()))
+ VariableAccessData* variableAccessData = node.variableAccessData();
+ bool canExit = false;
+ canExit |= variableAccessData->prediction() == PredictNone;
+ if (variableAccessData->isCaptured())
forNode(nodeIndex).makeTop();
- else
- forNode(nodeIndex) = m_variables.operand(node.local());
+ else {
+ AbstractValue value = m_variables.operand(variableAccessData->local());
+ if (value.isClear())
+ canExit |= true;
+ forNode(nodeIndex) = value;
+ }
+ node.setCanExit(canExit);
+ break;
+ }
+
+ case GetLocalUnlinked: {
+ forNode(nodeIndex).makeTop();
+ node.setCanExit(false);
break;
}
case SetLocal: {
- if (m_graph.isCaptured(node.local()))
+ if (node.variableAccessData()->isCaptured()) {
+ node.setCanExit(false);
break;
+ }
if (node.variableAccessData()->shouldUseDoubleFormat()) {
- forNode(node.child1()).filter(PredictNumber);
+ speculateNumberUnary(node);
m_variables.operand(node.local()).set(PredictDouble);
break;
}
PredictedType predictedType = node.variableAccessData()->argumentAwarePrediction();
if (isInt32Prediction(predictedType))
- forNode(node.child1()).filter(PredictInt32);
- else if (isArrayPrediction(predictedType))
+ speculateInt32Unary(node);
+ else if (isArrayPrediction(predictedType)) {
+ node.setCanExit(!isArrayPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictArray);
- else if (isBooleanPrediction(predictedType))
- forNode(node.child1()).filter(PredictBoolean);
+ } else if (isBooleanPrediction(predictedType))
+ speculateBooleanUnary(node);
+ else
+ node.setCanExit(false);
m_variables.operand(node.local()) = forNode(node.child1());
break;
@@ -266,6 +320,7 @@ bool AbstractState::execute(unsigned indexInBlock)
case SetArgument:
// Assert that the state of arguments has been set.
ASSERT(!m_block->valuesAtHead.operand(node.local()).isClear());
+ node.setCanExit(false);
break;
case BitAnd:
@@ -273,39 +328,116 @@ bool AbstractState::execute(unsigned indexInBlock)
case BitXor:
case BitRShift:
case BitLShift:
- case BitURShift:
- forNode(node.child1()).filter(PredictInt32);
- forNode(node.child2()).filter(PredictInt32);
+ case BitURShift: {
+ JSValue left = forNode(node.child1()).value();
+ JSValue right = forNode(node.child2()).value();
+ if (left && right && left.isInt32() && right.isInt32()) {
+ int32_t a = left.asInt32();
+ int32_t b = right.asInt32();
+ switch (node.op()) {
+ case BitAnd:
+ forNode(nodeIndex).set(JSValue(a & b));
+ break;
+ case BitOr:
+ forNode(nodeIndex).set(JSValue(a | b));
+ break;
+ case BitXor:
+ forNode(nodeIndex).set(JSValue(a ^ b));
+ break;
+ case BitRShift:
+ forNode(nodeIndex).set(JSValue(a >> static_cast<uint32_t>(b)));
+ break;
+ case BitLShift:
+ forNode(nodeIndex).set(JSValue(a << static_cast<uint32_t>(b)));
+ break;
+ case BitURShift:
+ forNode(nodeIndex).set(JSValue(static_cast<uint32_t>(a) >> static_cast<uint32_t>(b)));
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
+ speculateInt32Binary(node);
forNode(nodeIndex).set(PredictInt32);
break;
+ }
- case UInt32ToNumber:
- if (!node.canSpeculateInteger())
+ case UInt32ToNumber: {
+ JSValue child = forNode(node.child1()).value();
+ if (child && child.isNumber()) {
+ ASSERT(child.isInt32());
+ forNode(nodeIndex).set(JSValue(child.asUInt32()));
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
+ if (!node.canSpeculateInteger()) {
forNode(nodeIndex).set(PredictDouble);
- else
+ node.setCanExit(false);
+ } else {
forNode(nodeIndex).set(PredictInt32);
+ node.setCanExit(true);
+ }
break;
+ }
+
- case DoubleAsInt32:
+ case DoubleAsInt32: {
+ JSValue child = forNode(node.child1()).value();
+ if (child && child.isNumber()) {
+ double asDouble = child.asNumber();
+ int32_t asInt = JSC::toInt32(asDouble);
+ if (bitwise_cast<int64_t>(static_cast<double>(asInt)) == bitwise_cast<int64_t>(asDouble)) {
+ forNode(nodeIndex).set(JSValue(asInt));
+ m_foundConstants = true;
+ break;
+ }
+ }
+ node.setCanExit(true);
forNode(node.child1()).filter(PredictNumber);
forNode(nodeIndex).set(PredictInt32);
break;
+ }
- case ValueToInt32:
+ case ValueToInt32: {
+ JSValue child = forNode(node.child1()).value();
+ if (child && child.isNumber()) {
+ if (child.isInt32())
+ forNode(nodeIndex).set(child);
+ else
+ forNode(nodeIndex).set(JSValue(JSC::toInt32(child.asDouble())));
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
if (m_graph[node.child1()].shouldSpeculateInteger())
- forNode(node.child1()).filter(PredictInt32);
+ speculateInt32Unary(node);
else if (m_graph[node.child1()].shouldSpeculateNumber())
- forNode(node.child1()).filter(PredictNumber);
+ speculateNumberUnary(node);
else if (m_graph[node.child1()].shouldSpeculateBoolean())
- forNode(node.child1()).filter(PredictBoolean);
+ speculateBooleanUnary(node);
+ else
+ node.setCanExit(false);
forNode(nodeIndex).set(PredictInt32);
break;
+ }
- case Int32ToDouble:
- forNode(node.child1()).filter(PredictNumber);
+ case Int32ToDouble: {
+ JSValue child = forNode(node.child1()).value();
+ if (child && child.isNumber()) {
+ forNode(nodeIndex).set(JSValue(JSValue::EncodeAsDouble, child.asNumber()));
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
+ speculateNumberUnary(node);
forNode(nodeIndex).set(PredictDouble);
break;
+ }
case CheckNumber:
forNode(node.child1()).filter(PredictNumber);
@@ -313,98 +445,196 @@ bool AbstractState::execute(unsigned indexInBlock)
case ValueAdd:
case ArithAdd: {
+ JSValue left = forNode(node.child1()).value();
+ JSValue right = forNode(node.child2()).value();
+ if (left && right && left.isNumber() && right.isNumber()) {
+ forNode(nodeIndex).set(JSValue(left.asNumber() + right.asNumber()));
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
if (m_graph.addShouldSpeculateInteger(node)) {
- forNode(node.child1()).filter(PredictInt32);
- forNode(node.child2()).filter(PredictInt32);
+ speculateInt32Binary(
+ node, !nodeCanTruncateInteger(node.arithNodeFlags()));
forNode(nodeIndex).set(PredictInt32);
break;
}
if (Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()])) {
- forNode(node.child1()).filter(PredictNumber);
- forNode(node.child2()).filter(PredictNumber);
+ speculateNumberBinary(node);
forNode(nodeIndex).set(PredictDouble);
break;
}
if (node.op() == ValueAdd) {
clobberStructures(indexInBlock);
forNode(nodeIndex).set(PredictString | PredictInt32 | PredictNumber);
+ node.setCanExit(false);
break;
}
// We don't handle this yet. :-(
m_isValid = false;
+ node.setCanExit(true);
break;
}
case ArithSub: {
+ JSValue left = forNode(node.child1()).value();
+ JSValue right = forNode(node.child2()).value();
+ if (left && right && left.isNumber() && right.isNumber()) {
+ forNode(nodeIndex).set(JSValue(left.asNumber() - right.asNumber()));
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
if (m_graph.addShouldSpeculateInteger(node)) {
- forNode(node.child1()).filter(PredictInt32);
- forNode(node.child2()).filter(PredictInt32);
+ speculateInt32Binary(
+ node, !nodeCanTruncateInteger(node.arithNodeFlags()));
forNode(nodeIndex).set(PredictInt32);
break;
}
- forNode(node.child1()).filter(PredictNumber);
- forNode(node.child2()).filter(PredictNumber);
+ speculateNumberBinary(node);
forNode(nodeIndex).set(PredictDouble);
break;
}
case ArithNegate: {
+ JSValue child = forNode(node.child1()).value();
+ if (child && child.isNumber()) {
+ forNode(nodeIndex).set(JSValue(-child.asNumber()));
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
if (m_graph.negateShouldSpeculateInteger(node)) {
- forNode(node.child1()).filter(PredictInt32);
+ speculateInt32Unary(
+ node, !nodeCanTruncateInteger(node.arithNodeFlags()));
forNode(nodeIndex).set(PredictInt32);
break;
}
- forNode(node.child1()).filter(PredictNumber);
+ speculateNumberUnary(node);
+ forNode(nodeIndex).set(PredictDouble);
+ break;
+ }
+
+ case ArithMul: {
+ JSValue left = forNode(node.child1()).value();
+ JSValue right = forNode(node.child2()).value();
+ if (left && right && left.isNumber() && right.isNumber()) {
+ forNode(nodeIndex).set(JSValue(left.asNumber() * right.asNumber()));
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
+ if (m_graph.mulShouldSpeculateInteger(node)) {
+ speculateInt32Binary(
+ node,
+ !nodeCanTruncateInteger(node.arithNodeFlags())
+ || !nodeCanIgnoreNegativeZero(node.arithNodeFlags()));
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
+ speculateNumberBinary(node);
forNode(nodeIndex).set(PredictDouble);
break;
}
- case ArithMul:
case ArithDiv:
case ArithMin:
case ArithMax:
case ArithMod: {
- if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]) && node.canSpeculateInteger()) {
- forNode(node.child1()).filter(PredictInt32);
- forNode(node.child2()).filter(PredictInt32);
+ JSValue left = forNode(node.child1()).value();
+ JSValue right = forNode(node.child2()).value();
+ if (left && right && left.isNumber() && right.isNumber()) {
+ double a = left.asNumber();
+ double b = right.asNumber();
+ switch (node.op()) {
+ case ArithDiv:
+ forNode(nodeIndex).set(JSValue(a / b));
+ break;
+ case ArithMin:
+ forNode(nodeIndex).set(JSValue(a < b ? a : (b <= a ? b : a + b)));
+ break;
+ case ArithMax:
+ forNode(nodeIndex).set(JSValue(a > b ? a : (b >= a ? b : a + b)));
+ break;
+ case ArithMod:
+ forNode(nodeIndex).set(JSValue(fmod(a, b)));
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
+ if (Node::shouldSpeculateInteger(
+ m_graph[node.child1()], m_graph[node.child2()])
+ && node.canSpeculateInteger()) {
+ speculateInt32Binary(node, true); // forcing can-exit, which is a bit on the conservative side.
forNode(nodeIndex).set(PredictInt32);
break;
}
- forNode(node.child1()).filter(PredictNumber);
- forNode(node.child2()).filter(PredictNumber);
+ speculateNumberBinary(node);
forNode(nodeIndex).set(PredictDouble);
break;
}
- case ArithAbs:
- if (m_graph[node.child1()].shouldSpeculateInteger() && node.canSpeculateInteger()) {
- forNode(node.child1()).filter(PredictInt32);
+ case ArithAbs: {
+ JSValue child = forNode(node.child1()).value();
+ if (child && child.isNumber()) {
+ forNode(nodeIndex).set(JSValue(fabs(child.asNumber())));
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateInteger()
+ && node.canSpeculateInteger()) {
+ speculateInt32Unary(node, true);
forNode(nodeIndex).set(PredictInt32);
break;
}
- forNode(node.child1()).filter(PredictNumber);
+ speculateNumberUnary(node);
forNode(nodeIndex).set(PredictDouble);
break;
+ }
- case ArithSqrt:
- forNode(node.child1()).filter(PredictNumber);
+ case ArithSqrt: {
+ JSValue child = forNode(node.child1()).value();
+ if (child && child.isNumber()) {
+ forNode(nodeIndex).set(JSValue(sqrt(child.asNumber())));
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
+ speculateNumberUnary(node);
forNode(nodeIndex).set(PredictDouble);
break;
+ }
case LogicalNot: {
+ JSValue childConst = forNode(node.child1()).value();
+ if (childConst) {
+ forNode(nodeIndex).set(jsBoolean(!childConst.toBoolean()));
+ node.setCanExit(false);
+ break;
+ }
Node& child = m_graph[node.child1()];
if (isBooleanPrediction(child.prediction()))
- forNode(node.child1()).filter(PredictBoolean);
- else if (child.shouldSpeculateFinalObjectOrOther())
+ speculateBooleanUnary(node);
+ else if (child.shouldSpeculateFinalObjectOrOther()) {
+ node.setCanExit(
+ !isFinalObjectOrOtherPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictFinalObject | PredictOther);
- else if (child.shouldSpeculateArrayOrOther())
+ } else if (child.shouldSpeculateArrayOrOther()) {
+ node.setCanExit(
+ !isArrayOrOtherPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictArray | PredictOther);
- else if (child.shouldSpeculateInteger())
- forNode(node.child1()).filter(PredictInt32);
+ } else if (child.shouldSpeculateInteger())
+ speculateInt32Unary(node);
else if (child.shouldSpeculateNumber())
- forNode(node.child1()).filter(PredictNumber);
+ speculateNumberUnary(node);
else
- clobberStructures(indexInBlock);
+ node.setCanExit(false);
forNode(nodeIndex).set(PredictBoolean);
break;
}
@@ -415,6 +645,34 @@ bool AbstractState::execute(unsigned indexInBlock)
case IsString:
case IsObject:
case IsFunction: {
+ node.setCanExit(false);
+ JSValue child = forNode(node.child1()).value();
+ if (child) {
+ bool foundConstant = true;
+ switch (node.op()) {
+ case IsUndefined:
+ forNode(nodeIndex).set(jsBoolean(
+ child.isCell()
+ ? child.asCell()->structure()->typeInfo().masqueradesAsUndefined()
+ : child.isUndefined()));
+ break;
+ case IsBoolean:
+ forNode(nodeIndex).set(jsBoolean(child.isBoolean()));
+ break;
+ case IsNumber:
+ forNode(nodeIndex).set(jsBoolean(child.isNumber()));
+ break;
+ case IsString:
+ forNode(nodeIndex).set(jsBoolean(isJSString(child)));
+ break;
+ default:
+ foundConstant = false;
+ break;
+ }
+ if (foundConstant) {
+ m_foundConstants = true;
+ break;
+ }
+ }
forNode(nodeIndex).set(PredictBoolean);
break;
}
@@ -424,74 +682,182 @@ bool AbstractState::execute(unsigned indexInBlock)
case CompareGreater:
case CompareGreaterEq:
case CompareEq: {
+ JSValue leftConst = forNode(node.child1()).value();
+ JSValue rightConst = forNode(node.child2()).value();
+ if (leftConst && rightConst && leftConst.isNumber() && rightConst.isNumber()) {
+ double a = leftConst.asNumber();
+ double b = rightConst.asNumber();
+ switch (node.op()) {
+ case CompareLess:
+ forNode(nodeIndex).set(jsBoolean(a < b));
+ break;
+ case CompareLessEq:
+ forNode(nodeIndex).set(jsBoolean(a <= b));
+ break;
+ case CompareGreater:
+ forNode(nodeIndex).set(jsBoolean(a > b));
+ break;
+ case CompareGreaterEq:
+ forNode(nodeIndex).set(jsBoolean(a >= b));
+ break;
+ case CompareEq:
+ forNode(nodeIndex).set(jsBoolean(a == b));
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
+
forNode(nodeIndex).set(PredictBoolean);
Node& left = m_graph[node.child1()];
Node& right = m_graph[node.child2()];
PredictedType filter;
- if (Node::shouldSpeculateInteger(left, right))
+ PredictionChecker checker;
+ if (Node::shouldSpeculateInteger(left, right)) {
filter = PredictInt32;
- else if (Node::shouldSpeculateNumber(left, right))
+ checker = isInt32Prediction;
+ } else if (Node::shouldSpeculateNumber(left, right)) {
filter = PredictNumber;
- else if (node.op() == CompareEq) {
+ checker = isNumberPrediction;
+ } else if (node.op() == CompareEq) {
if ((m_graph.isConstant(node.child1().index())
&& m_graph.valueOfJSConstant(node.child1().index()).isNull())
|| (m_graph.isConstant(node.child2().index())
&& m_graph.valueOfJSConstant(node.child2().index()).isNull())) {
// We know that this won't clobber the world. But that's all we know.
+ node.setCanExit(false);
break;
}
- if (Node::shouldSpeculateFinalObject(left, right))
+ if (Node::shouldSpeculateFinalObject(left, right)) {
filter = PredictFinalObject;
- else if (Node::shouldSpeculateArray(left, right))
+ checker = isFinalObjectPrediction;
+ } else if (Node::shouldSpeculateArray(left, right)) {
filter = PredictArray;
- else if (left.shouldSpeculateFinalObject() && right.shouldSpeculateFinalObjectOrOther()) {
+ checker = isArrayPrediction;
+ } else if (left.shouldSpeculateFinalObject() && right.shouldSpeculateFinalObjectOrOther()) {
+ node.setCanExit(
+ !isFinalObjectPrediction(forNode(node.child1()).m_type)
+ || !isFinalObjectOrOtherPrediction(forNode(node.child2()).m_type));
forNode(node.child1()).filter(PredictFinalObject);
forNode(node.child2()).filter(PredictFinalObject | PredictOther);
break;
} else if (right.shouldSpeculateFinalObject() && left.shouldSpeculateFinalObjectOrOther()) {
+ node.setCanExit(
+ !isFinalObjectOrOtherPrediction(forNode(node.child1()).m_type)
+ || !isFinalObjectPrediction(forNode(node.child2()).m_type));
forNode(node.child1()).filter(PredictFinalObject | PredictOther);
forNode(node.child2()).filter(PredictFinalObject);
break;
} else if (left.shouldSpeculateArray() && right.shouldSpeculateArrayOrOther()) {
- forNode(node.child1()).filter(PredictFinalObject);
- forNode(node.child2()).filter(PredictFinalObject | PredictOther);
+ node.setCanExit(
+ !isArrayPrediction(forNode(node.child1()).m_type)
+ || !isArrayOrOtherPrediction(forNode(node.child2()).m_type));
+ forNode(node.child1()).filter(PredictArray);
+ forNode(node.child2()).filter(PredictArray | PredictOther);
break;
} else if (right.shouldSpeculateArray() && left.shouldSpeculateArrayOrOther()) {
- forNode(node.child1()).filter(PredictFinalObject | PredictOther);
- forNode(node.child2()).filter(PredictFinalObject);
+ node.setCanExit(
+ !isArrayOrOtherPrediction(forNode(node.child1()).m_type)
+ || !isArrayPrediction(forNode(node.child2()).m_type));
+ forNode(node.child1()).filter(PredictArray | PredictOther);
+ forNode(node.child2()).filter(PredictArray);
break;
} else {
filter = PredictTop;
+ checker = isAnyPrediction;
clobberStructures(indexInBlock);
}
} else {
filter = PredictTop;
+ checker = isAnyPrediction;
clobberStructures(indexInBlock);
}
+ node.setCanExit(
+ !checker(forNode(node.child1()).m_type)
+ || !checker(forNode(node.child2()).m_type));
forNode(node.child1()).filter(filter);
forNode(node.child2()).filter(filter);
break;
}
- case CompareStrictEq:
+ case CompareStrictEq: {
+ JSValue left = forNode(node.child1()).value();
+ JSValue right = forNode(node.child2()).value();
+ if (left && right && left.isNumber() && right.isNumber()) {
+ forNode(nodeIndex).set(jsBoolean(left.asNumber() == right.asNumber()));
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
forNode(nodeIndex).set(PredictBoolean);
+ if (m_graph.isJSConstant(node.child1().index())) {
+ JSValue value = m_graph.valueOfJSConstant(node.child1().index());
+ if (!value.isNumber() && !value.isString()) {
+ node.setCanExit(false);
+ break;
+ }
+ }
+ if (m_graph.isJSConstant(node.child2().index())) {
+ JSValue value = m_graph.valueOfJSConstant(node.child2().index());
+ if (!value.isNumber() && !value.isString()) {
+ node.setCanExit(false);
+ break;
+ }
+ }
+ if (Node::shouldSpeculateInteger(
+ m_graph[node.child1()], m_graph[node.child2()])) {
+ speculateInt32Binary(node);
+ break;
+ }
+ if (Node::shouldSpeculateNumber(
+ m_graph[node.child1()], m_graph[node.child2()])) {
+ speculateNumberBinary(node);
+ break;
+ }
+ if (Node::shouldSpeculateFinalObject(
+ m_graph[node.child1()], m_graph[node.child2()])) {
+ node.setCanExit(
+ !isFinalObjectPrediction(forNode(node.child1()).m_type)
+ || !isFinalObjectPrediction(forNode(node.child2()).m_type));
+ forNode(node.child1()).filter(PredictFinalObject);
+ forNode(node.child2()).filter(PredictFinalObject);
+ break;
+ }
+ if (Node::shouldSpeculateArray(
+ m_graph[node.child1()], m_graph[node.child2()])) {
+ node.setCanExit(
+ !isArrayPrediction(forNode(node.child1()).m_type)
+ || !isArrayPrediction(forNode(node.child2()).m_type));
+ forNode(node.child1()).filter(PredictArray);
+ forNode(node.child2()).filter(PredictArray);
+ break;
+ }
+ node.setCanExit(false);
break;
+ }
case StringCharCodeAt:
+ node.setCanExit(true);
forNode(node.child1()).filter(PredictString);
forNode(node.child2()).filter(PredictInt32);
forNode(nodeIndex).set(PredictInt32);
break;
case StringCharAt:
+ node.setCanExit(true);
forNode(node.child1()).filter(PredictString);
forNode(node.child2()).filter(PredictInt32);
forNode(nodeIndex).set(PredictString);
break;
case GetByVal: {
+ node.setCanExit(true);
if (!node.prediction() || !m_graph[node.child1()].prediction() || !m_graph[node.child2()].prediction()) {
m_isValid = false;
break;
@@ -501,6 +867,12 @@ bool AbstractState::execute(unsigned indexInBlock)
forNode(nodeIndex).makeTop();
break;
}
+ if (m_graph[node.child1()].shouldSpeculateArguments()) {
+ forNode(node.child1()).filter(PredictArguments);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).makeTop();
+ break;
+ }
if (m_graph[node.child1()].prediction() == PredictString) {
forNode(node.child1()).filter(PredictString);
forNode(node.child2()).filter(PredictInt32);
@@ -574,17 +946,27 @@ bool AbstractState::execute(unsigned indexInBlock)
case PutByVal:
case PutByValAlias: {
+ node.setCanExit(true);
if (!m_graph[node.child1()].prediction() || !m_graph[node.child2()].prediction()) {
m_isValid = false;
break;
}
- if (!m_graph[node.child2()].shouldSpeculateInteger() || !isActionableMutableArrayPrediction(m_graph[node.child1()].prediction())) {
+ if (!m_graph[node.child2()].shouldSpeculateInteger() || !isActionableMutableArrayPrediction(m_graph[node.child1()].prediction())
+#if USE(JSVALUE32_64)
+ || m_graph[node.child1()].shouldSpeculateArguments()
+#endif
+ ) {
ASSERT(node.op() == PutByVal);
clobberStructures(indexInBlock);
forNode(nodeIndex).makeTop();
break;
}
+ if (m_graph[node.child1()].shouldSpeculateArguments()) {
+ forNode(node.child1()).filter(PredictArguments);
+ forNode(node.child2()).filter(PredictInt32);
+ break;
+ }
if (m_graph[node.child1()].shouldSpeculateInt8Array()) {
forNode(node.child1()).filter(PredictInt8Array);
forNode(node.child2()).filter(PredictInt32);
@@ -667,53 +1049,93 @@ bool AbstractState::execute(unsigned indexInBlock)
}
case ArrayPush:
+ node.setCanExit(true);
forNode(node.child1()).filter(PredictArray);
forNode(nodeIndex).set(PredictNumber);
break;
case ArrayPop:
+ node.setCanExit(true);
forNode(node.child1()).filter(PredictArray);
forNode(nodeIndex).makeTop();
break;
case RegExpExec:
case RegExpTest:
+ node.setCanExit(
+ !isCellPrediction(forNode(node.child1()).m_type)
+ || !isCellPrediction(forNode(node.child2()).m_type));
forNode(node.child1()).filter(PredictCell);
forNode(node.child2()).filter(PredictCell);
forNode(nodeIndex).makeTop();
break;
case Jump:
+ node.setCanExit(false);
break;
case Branch: {
- // There is probably profit to be found in doing sparse conditional constant
- // propagation, and to take it one step further, where a variable's value
- // is specialized on each direction of a branch. For now, we don't do this.
+ JSValue value = forNode(node.child1()).value();
+ if (value) {
+ bool booleanValue = value.toBoolean();
+ if (booleanValue)
+ m_branchDirection = TakeTrue;
+ else
+ m_branchDirection = TakeFalse;
+ node.setCanExit(false);
+ break;
+ }
+ // FIXME: The above handles the trivial cases of sparse conditional
+ // constant propagation, but we can do better:
+ // 1) If the abstract value does not have a concrete value but describes
+ // something that is known to evaluate true (or false) then we ought
+ // to sparse conditional that.
+ // 2) We can specialize the source variable's value on each direction of
+ // the branch.
Node& child = m_graph[node.child1()];
if (child.shouldSpeculateBoolean())
- forNode(node.child1()).filter(PredictBoolean);
- else if (child.shouldSpeculateFinalObjectOrOther())
+ speculateBooleanUnary(node);
+ else if (child.shouldSpeculateFinalObjectOrOther()) {
+ node.setCanExit(
+ !isFinalObjectOrOtherPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictFinalObject | PredictOther);
- else if (child.shouldSpeculateArrayOrOther())
+ } else if (child.shouldSpeculateArrayOrOther()) {
+ node.setCanExit(
+ !isArrayOrOtherPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictArray | PredictOther);
- else if (child.shouldSpeculateInteger())
- forNode(node.child1()).filter(PredictInt32);
+ } else if (child.shouldSpeculateInteger())
+ speculateInt32Unary(node);
else if (child.shouldSpeculateNumber())
- forNode(node.child1()).filter(PredictNumber);
+ speculateNumberUnary(node);
+ else
+ node.setCanExit(false);
+ m_branchDirection = TakeBoth;
break;
}
case Return:
+ m_isValid = false;
+ node.setCanExit(false);
+ break;
+
case Throw:
case ThrowReferenceError:
m_isValid = false;
+ node.setCanExit(true);
break;
case ToPrimitive: {
+ JSValue childConst = forNode(node.child1()).value();
+ if (childConst && childConst.isNumber()) {
+ forNode(nodeIndex).set(childConst);
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
+
Node& child = m_graph[node.child1()];
if (child.shouldSpeculateInteger()) {
- forNode(node.child1()).filter(PredictInt32);
+ speculateInt32Unary(node);
forNode(nodeIndex).set(PredictInt32);
break;
}
@@ -727,20 +1149,24 @@ bool AbstractState::execute(unsigned indexInBlock)
type |= PredictString;
}
destination.set(type);
+ node.setCanExit(false);
break;
}
case StrCat:
+ node.setCanExit(false);
forNode(nodeIndex).set(PredictString);
break;
case NewArray:
case NewArrayBuffer:
+ node.setCanExit(false);
forNode(nodeIndex).set(m_codeBlock->globalObject()->arrayStructure());
m_haveStructures = true;
break;
case NewRegexp:
+ node.setCanExit(false);
forNode(nodeIndex).set(m_codeBlock->globalObject()->regExpStructure());
m_haveStructures = true;
break;
@@ -755,9 +1181,12 @@ bool AbstractState::execute(unsigned indexInBlock)
// object, so there's nothing to do. I don't think this case will
// be hit, but then again, you never know.
destination = source;
+ node.setCanExit(false);
break;
}
+ node.setCanExit(true);
+
if (isOtherPrediction(child.prediction())) {
source.filter(PredictOther);
destination.set(PredictObjectOther);
@@ -778,6 +1207,8 @@ bool AbstractState::execute(unsigned indexInBlock)
case CreateThis: {
AbstractValue& source = forNode(node.child1());
AbstractValue& destination = forNode(nodeIndex);
+
+ node.setCanExit(!isCellPrediction(source.m_type));
source.filter(PredictFunction);
destination.set(PredictFinalObject);
@@ -785,43 +1216,107 @@ bool AbstractState::execute(unsigned indexInBlock)
}
case NewObject:
+ node.setCanExit(false);
forNode(nodeIndex).set(m_codeBlock->globalObjectFor(node.codeOrigin)->emptyObjectStructure());
m_haveStructures = true;
break;
case CreateActivation:
+ node.setCanExit(false);
forNode(nodeIndex).set(m_graph.m_globalData.activationStructure.get());
m_haveStructures = true;
break;
+ case CreateArguments:
+ node.setCanExit(false);
+ forNode(nodeIndex).set(m_codeBlock->globalObjectFor(node.codeOrigin)->argumentsStructure());
+ m_haveStructures = true;
+ break;
+
case TearOffActivation:
+ case TearOffArguments:
+ node.setCanExit(false);
// Does nothing that is user-visible.
break;
+
+ case CheckArgumentsNotCreated:
+ node.setCanExit(true);
+ break;
+
+ case GetMyArgumentsLength:
+ // We know that this executable does not escape its arguments, so we can optimize
+ // the arguments a bit. Note that this is not sufficient to force constant folding
+ // of GetMyArgumentsLength, because GetMyArgumentsLength is a clobbering operation.
+ // We perform further optimizations on this later on.
+ if (node.codeOrigin.inlineCallFrame) {
+ forNode(nodeIndex).set(jsNumber(node.codeOrigin.inlineCallFrame->arguments.size() - 1));
+ node.setCanExit(false);
+ break;
+ }
+ node.setCanExit(true);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+
+ case GetMyArgumentsLengthSafe:
+ node.setCanExit(false);
+ // This potentially clobbers all structures if the arguments object had a getter
+ // installed on the length property.
+ clobberStructures(indexInBlock);
+ // We currently make no guarantee about what this returns because it does not
+ // speculate that the length property is actually a length.
+ forNode(nodeIndex).makeTop();
+ break;
+
+ case GetMyArgumentByVal:
+ node.setCanExit(true);
+ // We know that this executable does not escape its arguments, so we can optimize
+ // the arguments a bit. Note that this ends up being further optimized by the
+ // ArgumentsSimplificationPhase.
+ forNode(node.child1()).filter(PredictInt32);
+ forNode(nodeIndex).makeTop();
+ break;
+
+ case GetMyArgumentByValSafe:
+ node.setCanExit(false);
+ // This potentially clobbers all structures if the property we're accessing has
+ // a getter. We don't speculate against this.
+ clobberStructures(indexInBlock);
+ // But we do speculate that the index is an integer.
+ forNode(node.child1()).filter(PredictInt32);
+ // And the result is unknown.
+ forNode(nodeIndex).makeTop();
+ break;
case NewFunction:
case NewFunctionExpression:
case NewFunctionNoCheck:
+ node.setCanExit(false);
forNode(nodeIndex).set(m_codeBlock->globalObjectFor(node.codeOrigin)->functionStructure());
break;
case GetCallee:
+ node.setCanExit(false);
forNode(nodeIndex).set(PredictFunction);
break;
case GetScopeChain:
+ node.setCanExit(false);
forNode(nodeIndex).set(PredictCellOther);
break;
case GetScopedVar:
+ node.setCanExit(false);
forNode(nodeIndex).makeTop();
break;
case PutScopedVar:
+ node.setCanExit(false);
clobberStructures(indexInBlock);
break;
case GetById:
case GetByIdFlush:
+ node.setCanExit(true);
if (!node.prediction()) {
m_isValid = false;
break;
@@ -833,73 +1328,102 @@ bool AbstractState::execute(unsigned indexInBlock)
break;
case GetArrayLength:
+ node.setCanExit(true);
forNode(node.child1()).filter(PredictArray);
forNode(nodeIndex).set(PredictInt32);
break;
+ case GetArgumentsLength:
+ node.setCanExit(true);
+ forNode(node.child1()).filter(PredictArguments);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+
case GetStringLength:
+ node.setCanExit(!isStringPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictString);
forNode(nodeIndex).set(PredictInt32);
break;
case GetInt8ArrayLength:
+ node.setCanExit(!isInt8ArrayPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictInt8Array);
forNode(nodeIndex).set(PredictInt32);
break;
case GetInt16ArrayLength:
+ node.setCanExit(!isInt16ArrayPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictInt16Array);
forNode(nodeIndex).set(PredictInt32);
break;
case GetInt32ArrayLength:
+ node.setCanExit(!isInt32ArrayPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictInt32Array);
forNode(nodeIndex).set(PredictInt32);
break;
case GetUint8ArrayLength:
+ node.setCanExit(!isUint8ArrayPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictUint8Array);
forNode(nodeIndex).set(PredictInt32);
break;
case GetUint8ClampedArrayLength:
+ node.setCanExit(!isUint8ClampedArrayPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictUint8ClampedArray);
forNode(nodeIndex).set(PredictInt32);
break;
case GetUint16ArrayLength:
+ node.setCanExit(!isUint16ArrayPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictUint16Array);
forNode(nodeIndex).set(PredictInt32);
break;
case GetUint32ArrayLength:
+ node.setCanExit(!isUint32ArrayPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictUint32Array);
forNode(nodeIndex).set(PredictInt32);
break;
case GetFloat32ArrayLength:
+ node.setCanExit(!isFloat32ArrayPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictFloat32Array);
forNode(nodeIndex).set(PredictInt32);
break;
case GetFloat64ArrayLength:
+ node.setCanExit(!isFloat64ArrayPrediction(forNode(node.child1()).m_type));
forNode(node.child1()).filter(PredictFloat64Array);
forNode(nodeIndex).set(PredictInt32);
break;
- case CheckStructure:
+ case CheckStructure: {
// FIXME: We should be able to propagate the structure sets of constants (i.e. prototypes).
- forNode(node.child1()).filter(node.structureSet());
+ AbstractValue& value = forNode(node.child1());
+ node.setCanExit(
+ !value.m_structure.isSubsetOf(node.structureSet())
+ || !isCellPrediction(value.m_type));
+ value.filter(node.structureSet());
m_haveStructures = true;
break;
+ }
case PutStructure:
+ node.setCanExit(false);
clobberStructures(indexInBlock);
forNode(node.child1()).set(node.structureTransitionData().newStructure);
m_haveStructures = true;
break;
case GetPropertyStorage:
+ node.setCanExit(false);
forNode(node.child1()).filter(PredictCell);
forNode(nodeIndex).clear(); // The result is not a JS value.
break;
case GetIndexedPropertyStorage: {
+ node.setCanExit(true); // Lies, but this is (almost) always followed by GetByVal, which does exit. So no point in trying to be more precise.
PredictedType basePrediction = m_graph[node.child2()].prediction();
if (!(basePrediction & PredictInt32) && basePrediction) {
forNode(nodeIndex).clear();
break;
}
+ if (m_graph[node.child1()].shouldSpeculateArguments()) {
+ ASSERT_NOT_REACHED();
+ break;
+ }
if (m_graph[node.child1()].prediction() == PredictString) {
forNode(node.child1()).filter(PredictString);
forNode(nodeIndex).clear();
@@ -956,38 +1480,46 @@ bool AbstractState::execute(unsigned indexInBlock)
break;
}
case GetByOffset:
+ node.setCanExit(false);
forNode(node.child1()).filter(PredictCell);
forNode(nodeIndex).makeTop();
break;
case PutByOffset:
+ node.setCanExit(false);
forNode(node.child1()).filter(PredictCell);
break;
case CheckFunction:
+ node.setCanExit(true); // Lies! We can do better.
forNode(node.child1()).filter(PredictFunction);
// FIXME: Should be able to propagate the fact that we know what the function is.
break;
case PutById:
case PutByIdDirect:
+ node.setCanExit(true);
forNode(node.child1()).filter(PredictCell);
clobberStructures(indexInBlock);
break;
case GetGlobalVar:
+ node.setCanExit(false);
forNode(nodeIndex).makeTop();
break;
case PutGlobalVar:
+ node.setCanExit(false);
break;
case CheckHasInstance:
+ node.setCanExit(true);
forNode(node.child1()).filter(PredictCell);
// Sadly, we don't propagate the fact that we've done CheckHasInstance
break;
case InstanceOf:
+ node.setCanExit(true);
// Again, sadly, we don't propagate the fact that we've done InstanceOf
if (!(m_graph[node.child1()].prediction() & ~PredictCell) && !(forNode(node.child1()).m_type & ~PredictCell))
forNode(node.child1()).filter(PredictCell);
@@ -997,9 +1529,11 @@ bool AbstractState::execute(unsigned indexInBlock)
case Phi:
case Flush:
+ node.setCanExit(false);
break;
case Breakpoint:
+ node.setCanExit(false);
break;
case Call:
@@ -1008,17 +1542,20 @@ bool AbstractState::execute(unsigned indexInBlock)
case ResolveBase:
case ResolveBaseStrictPut:
case ResolveGlobal:
+ node.setCanExit(true);
clobberStructures(indexInBlock);
forNode(nodeIndex).makeTop();
break;
case ForceOSRExit:
+ node.setCanExit(true);
m_isValid = false;
break;
case Phantom:
case InlineStart:
case Nop:
+ node.setCanExit(false);
break;
case LastNodeType:
@@ -1065,7 +1602,9 @@ inline bool AbstractState::mergeStateAtTail(AbstractValue& destination, Abstract
// The block transfers the value from head to tail.
source = inVariable;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Transfering from head to tail.\n");
+ dataLog(" Transfering ");
+ source.dump(WTF::dataFile());
+ dataLog(" from head to tail.\n");
#endif
break;
@@ -1073,7 +1612,9 @@ inline bool AbstractState::mergeStateAtTail(AbstractValue& destination, Abstract
// The block refines the value with additional speculations.
source = forNode(nodeIndex);
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Refining.\n");
+ dataLog(" Refining to ");
+ source.dump(WTF::dataFile());
+ dataLog("\n");
#endif
break;
@@ -1085,7 +1626,9 @@ inline bool AbstractState::mergeStateAtTail(AbstractValue& destination, Abstract
else
source = forNode(node.child1());
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Setting.\n");
+ dataLog(" Setting to ");
+ source.dump(WTF::dataFile());
+ dataLog("\n");
#endif
break;
@@ -1122,7 +1665,8 @@ inline bool AbstractState::merge(BasicBlock* from, BasicBlock* to)
for (size_t argument = 0; argument < from->variablesAtTail.numberOfArguments(); ++argument) {
AbstractValue& destination = to->valuesAtHead.argument(argument);
- if (m_graph.argumentIsCaptured(argument)) {
+ NodeIndex nodeIndex = from->variablesAtTail.argument(argument);
+ if (nodeIndex != NoNode && m_graph[nodeIndex].variableAccessData()->isCaptured()) {
if (destination.isTop())
continue;
destination.makeTop();
@@ -1134,7 +1678,8 @@ inline bool AbstractState::merge(BasicBlock* from, BasicBlock* to)
for (size_t local = 0; local < from->variablesAtTail.numberOfLocals(); ++local) {
AbstractValue& destination = to->valuesAtHead.local(local);
- if (m_graph.localIsCaptured(local)) {
+ NodeIndex nodeIndex = from->variablesAtTail.local(local);
+ if (nodeIndex != NoNode && m_graph[nodeIndex].variableAccessData()->isCaptured()) {
if (destination.isTop())
continue;
destination.makeTop();
@@ -1152,7 +1697,8 @@ inline bool AbstractState::merge(BasicBlock* from, BasicBlock* to)
return changed;
}
-inline bool AbstractState::mergeToSuccessors(Graph& graph, BasicBlock* basicBlock)
+inline bool AbstractState::mergeToSuccessors(
+ Graph& graph, BasicBlock* basicBlock, BranchDirection branchDirection)
{
PROFILE(FLAG_FOR_MERGE_TO_SUCCESSORS);
@@ -1161,16 +1707,34 @@ inline bool AbstractState::mergeToSuccessors(Graph& graph, BasicBlock* basicBloc
ASSERT(terminal.isTerminal());
switch (terminal.op()) {
- case Jump:
+ case Jump: {
+ ASSERT(branchDirection == InvalidBranchDirection);
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" Merging to block #%u.\n", terminal.takenBlockIndex());
+#endif
return merge(basicBlock, graph.m_blocks[terminal.takenBlockIndex()].get());
+ }
- case Branch:
- return merge(basicBlock, graph.m_blocks[terminal.takenBlockIndex()].get())
- | merge(basicBlock, graph.m_blocks[terminal.notTakenBlockIndex()].get());
+ case Branch: {
+ ASSERT(branchDirection != InvalidBranchDirection);
+ bool changed = false;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" Merging to block #%u.\n", terminal.takenBlockIndex());
+#endif
+ if (branchDirection != TakeFalse)
+ changed |= merge(basicBlock, graph.m_blocks[terminal.takenBlockIndex()].get());
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" Merging to block #%u.\n", terminal.notTakenBlockIndex());
+#endif
+ if (branchDirection != TakeTrue)
+ changed |= merge(basicBlock, graph.m_blocks[terminal.notTakenBlockIndex()].get());
+ return changed;
+ }
case Return:
case Throw:
case ThrowReferenceError:
+ ASSERT(branchDirection == InvalidBranchDirection);
return false;
default:
@@ -1191,7 +1755,6 @@ inline bool AbstractState::mergeVariableBetweenBlocks(AbstractValue& destination
return destination.merge(source);
}
-#ifndef NDEBUG
void AbstractState::dump(FILE* out)
{
bool first = true;
@@ -1208,7 +1771,6 @@ void AbstractState::dump(FILE* out)
value.dump(out);
}
}
-#endif
} } // namespace JSC::DFG
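The hunks above all follow one pattern: an abstract value may now carry a proven constant, pure operations fold when every input is constant, and m_foundConstants flags the block so a later constant-folding phase can rewrite it. The following is a minimal, self-contained sketch of that pattern; AbsVal and foldAdd are illustrative toy names, not JSC's types.

#include <cstdint>
#include <optional>

struct AbsVal {
    std::optional<double> constant; // proven runtime value, if any
    uint32_t typeMask = ~0u;        // conservative type information
};

// Folds "a + b" abstractly. If both operands carry a proven constant the
// result is exact and the caller can set its m_foundConstants-style flag;
// otherwise only the type information survives.
static AbsVal foldAdd(const AbsVal& a, const AbsVal& b, bool& foundConstant)
{
    AbsVal result;
    if (a.constant && b.constant) {
        result.constant = *a.constant + *b.constant; // exact: fold now
        foundConstant = true;                        // tell the folding phase
    }
    result.typeMask = 0x1; // "number" in this toy lattice
    return result;
}

int main()
{
    bool found = false;
    AbsVal two { 2.0 }, three { 3.0 };
    AbsVal five = foldAdd(two, three, found); // five.constant == 5.0, found == true
    return (found && *five.constant == 5.0) ? 0 : 1;
}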
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.h b/Source/JavaScriptCore/dfg/DFGAbstractState.h
index 3325e0703..4ce3df19b 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractState.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.h
@@ -92,6 +92,36 @@ public:
MergeToSuccessors
};
+ enum BranchDirection {
+ // This is not a branch and so there is no branch direction, or
+ // the branch direction has yet to be set.
+ InvalidBranchDirection,
+
+ // The branch takes the true case.
+ TakeTrue,
+
+ // The branch takes the false case.
+ TakeFalse,
+
+ // For all we know, the branch could go either direction, so we
+ // have to assume the worst.
+ TakeBoth
+ };
+
+ static const char* branchDirectionToString(BranchDirection branchDirection)
+ {
+ switch (branchDirection) {
+ case InvalidBranchDirection:
+ return "Invalid";
+ case TakeTrue:
+ return "TakeTrue";
+ case TakeFalse:
+ return "TakeFalse";
+ case TakeBoth:
+ return "TakeBoth";
+ }
+ }
+
AbstractState(Graph&);
~AbstractState();
@@ -139,7 +169,11 @@ public:
// A true return means that you must revisit (at least) the successor
// blocks. This also sets cfaShouldRevisit to true for basic blocks
// that must be visited next.
- bool endBasicBlock(MergeMode);
+ //
+ // If you'd like to know what direction the branch at the end of the
+ // basic block is thought to have taken, you can pass a non-0 pointer
+ // for BranchDirection.
+ bool endBasicBlock(MergeMode, BranchDirection* = 0);
// Reset the AbstractState. This throws away any results, and at this point
// you can safely call beginBasicBlock() on any basic block.
@@ -169,11 +203,9 @@ public:
// successors. Returns true if any of the successors' states changed. Note
// that this is automatically called in endBasicBlock() if MergeMode is
// MergeToSuccessors.
- bool mergeToSuccessors(Graph&, BasicBlock*);
+ bool mergeToSuccessors(Graph&, BasicBlock*, BranchDirection);
-#ifndef NDEBUG
void dump(FILE* out);
-#endif
private:
void clobberStructures(unsigned);
@@ -182,6 +214,50 @@ private:
static bool mergeVariableBetweenBlocks(AbstractValue& destination, AbstractValue& source, NodeIndex destinationNodeIndex, NodeIndex sourceNodeIndex);
+ void speculateInt32Unary(Node& node, bool forceCanExit = false)
+ {
+ AbstractValue& childValue = forNode(node.child1());
+ node.setCanExit(forceCanExit || !isInt32Prediction(childValue.m_type));
+ childValue.filter(PredictInt32);
+ }
+
+ void speculateNumberUnary(Node& node)
+ {
+ AbstractValue& childValue = forNode(node.child1());
+ node.setCanExit(!isNumberPrediction(childValue.m_type));
+ childValue.filter(PredictNumber);
+ }
+
+ void speculateBooleanUnary(Node& node)
+ {
+ AbstractValue& childValue = forNode(node.child1());
+ node.setCanExit(!isBooleanPrediction(childValue.m_type));
+ childValue.filter(PredictBoolean);
+ }
+
+ void speculateInt32Binary(Node& node, bool forceCanExit = false)
+ {
+ AbstractValue& childValue1 = forNode(node.child1());
+ AbstractValue& childValue2 = forNode(node.child2());
+ node.setCanExit(
+ forceCanExit
+ || !isInt32Prediction(childValue1.m_type)
+ || !isInt32Prediction(childValue2.m_type));
+ childValue1.filter(PredictInt32);
+ childValue2.filter(PredictInt32);
+ }
+
+ void speculateNumberBinary(Node& node)
+ {
+ AbstractValue& childValue1 = forNode(node.child1());
+ AbstractValue& childValue2 = forNode(node.child2());
+ node.setCanExit(
+ !isNumberPrediction(childValue1.m_type)
+ || !isNumberPrediction(childValue2.m_type));
+ childValue1.filter(PredictNumber);
+ childValue2.filter(PredictNumber);
+ }
+
CodeBlock* m_codeBlock;
Graph& m_graph;
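The speculate*Unary/Binary helpers above all reduce to the same rule: a speculation check (and hence a possible OSR exit) is needed only when the type proven so far is not already a subset of the speculated type, after which the filter refines the proven type. A standalone sketch of that rule, using toy bitmask names rather than JSC's PredictedType:

#include <cstdint>

static const uint32_t PredInt32  = 1u << 0;
static const uint32_t PredDouble = 1u << 1;
static const uint32_t PredNumber = PredInt32 | PredDouble;

// True if every value described by 'proven' is inside 'speculated'.
static bool isSubset(uint32_t proven, uint32_t speculated)
{
    return !(proven & ~speculated);
}

// Mirrors the shape of speculateNumberUnary: the node can exit only if the
// filter could fail, and the filter then narrows the proven type.
static bool speculateNeedsExit(uint32_t& provenType, uint32_t speculated)
{
    bool canExit = !isSubset(provenType, speculated);
    provenType &= speculated; // the filter: afterwards the type is refined
    return canExit;
}

int main()
{
    uint32_t t = PredInt32;
    bool exit1 = speculateNeedsExit(t, PredNumber); // already a number: no exit
    uint32_t u = PredNumber | (1u << 5);            // might be something else
    bool exit2 = speculateNeedsExit(u, PredNumber); // needs a check: can exit
    return (!exit1 && exit2) ? 0 : 1;
}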
@@ -189,8 +265,11 @@ private:
Operands<AbstractValue> m_variables;
BasicBlock* m_block;
bool m_haveStructures;
+ bool m_foundConstants;
bool m_isValid;
+
+ BranchDirection m_branchDirection; // This is only set for blocks that end in Branch and that execute to completion (i.e. m_isValid == true).
};
} } // namespace JSC::DFG
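BranchDirection is what lets endBasicBlock() and mergeToSuccessors() do the trivial form of sparse conditional constant propagation: when the branch condition is a proven constant, only the taken arm is merged, so the dead arm never becomes reachable or analyzed. A simplified sketch of that pruning; the Block type and flat successor indices are illustrative, not the DFG's:

#include <vector>

enum BranchDirection { InvalidBranchDirection, TakeTrue, TakeFalse, TakeBoth };

struct Block {
    int taken = -1;     // successor if the condition is true
    int notTaken = -1;  // successor if the condition is false
    bool reachable = false;
};

// Marks only the successors that the analysis proved can be taken. With
// TakeTrue or TakeFalse the other arm stays unreachable, which is the
// "sparse conditional" part of the optimization.
static void mergeToSuccessors(std::vector<Block>& blocks, int index, BranchDirection dir)
{
    Block& b = blocks[index];
    if (dir != TakeFalse && b.taken >= 0)
        blocks[b.taken].reachable = true;
    if (dir != TakeTrue && b.notTaken >= 0)
        blocks[b.notTaken].reachable = true;
}

int main()
{
    std::vector<Block> blocks(3);
    blocks[0].taken = 1;
    blocks[0].notTaken = 2;
    mergeToSuccessors(blocks, 0, TakeTrue); // condition proven true
    return (blocks[1].reachable && !blocks[2].reachable) ? 0 : 1;
}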
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractValue.h b/Source/JavaScriptCore/dfg/DFGAbstractValue.h
index 682c7a90f..c61a383eb 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractValue.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractValue.h
@@ -317,18 +317,23 @@ struct AbstractValue {
{
m_type = PredictNone;
m_structure.clear();
+ m_value = JSValue();
checkConsistency();
}
- bool isClear()
+ bool isClear() const
{
- return m_type == PredictNone && m_structure.isClear();
+ bool result = m_type == PredictNone && m_structure.isClear();
+ if (result)
+ ASSERT(!m_value);
+ return result;
}
void makeTop()
{
m_type = PredictTop;
m_structure.makeTop();
+ m_value = JSValue();
checkConsistency();
}
@@ -341,11 +346,26 @@ struct AbstractValue {
checkConsistency();
}
+ void clobberValue()
+ {
+ m_value = JSValue();
+ }
+
bool isTop() const
{
return m_type == PredictTop && m_structure.isTop();
}
+ bool valueIsTop() const
+ {
+ return !m_value && m_type;
+ }
+
+ JSValue value() const
+ {
+ return m_value;
+ }
+
static AbstractValue top()
{
AbstractValue result;
@@ -355,11 +375,19 @@ struct AbstractValue {
void set(JSValue value)
{
- m_structure.clear();
- if (value.isCell())
- m_structure.add(value.asCell()->structure());
+ if (!!value && value.isCell()) {
+ // Have to be careful here! It's tempting to set the structure to the
+ // value's structure, but that would be wrong, since that would
+ // constitute a proof that this value will always have the same
+ // structure. The whole point of a value having a structure is that
+ // it may change in the future - for example between when we compile
+ // the code and when we run it.
+ m_structure.makeTop();
+ } else
+ m_structure.clear();
m_type = predictionFromValue(value);
+ m_value = value;
checkConsistency();
}
@@ -370,6 +398,7 @@ struct AbstractValue {
m_structure.add(structure);
m_type = predictionFromStructure(structure);
+ m_value = JSValue();
checkConsistency();
}
@@ -381,18 +410,40 @@ struct AbstractValue {
else
m_structure.clear();
m_type = type;
+ m_value = JSValue();
checkConsistency();
}
bool operator==(const AbstractValue& other) const
{
- return m_type == other.m_type && m_structure == other.m_structure;
+ return m_type == other.m_type
+ && m_structure == other.m_structure
+ && m_value == other.m_value;
+ }
+ bool operator!=(const AbstractValue& other) const
+ {
+ return !(*this == other);
}
bool merge(const AbstractValue& other)
{
- bool result = mergePrediction(m_type, other.m_type) | m_structure.addAll(other.m_structure);
+#if !ASSERT_DISABLED
+ AbstractValue oldMe = *this;
+#endif
+ bool result = false;
+ if (isClear()) {
+ *this = other;
+ result = !other.isClear();
+ } else {
+ result |= mergePrediction(m_type, other.m_type);
+ result |= m_structure.addAll(other.m_structure);
+ if (m_value != other.m_value) {
+ result |= !!m_value;
+ m_value = JSValue();
+ }
+ }
checkConsistency();
+ ASSERT(result == (*this != oldMe));
return result;
}
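merge() above is a lattice join: clear joins to the other side, equal constants survive, and differing constants widen to "no constant" while the type information is unioned. A toy sketch of that join rule, with AV and join as illustrative stand-ins:

#include <cstdint>
#include <optional>

struct AV {
    uint32_t type = 0;           // 0 means "clear" (bottom) in this toy lattice
    std::optional<int> constant; // proven constant, if any
};

// Returns true if 'dst' changed, matching the convention of merge() above.
static bool join(AV& dst, const AV& src)
{
    if (dst.type == 0 && !dst.constant) { // bottom: adopt the other side
        bool changed = src.type != 0 || src.constant.has_value();
        dst = src;
        return changed;
    }
    bool changed = (dst.type | src.type) != dst.type;
    dst.type |= src.type;                // union the type information
    if (dst.constant != src.constant) {  // constants disagree: widen
        changed |= dst.constant.has_value();
        dst.constant.reset();
    }
    return changed;
}

int main()
{
    AV a { 1, 42 }, b { 2, 7 };
    join(a, b); // a.type == 3 and a.constant is empty: the join widened
    return (a.type == 3 && !a.constant) ? 0 : 1;
}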
@@ -402,6 +453,7 @@ struct AbstractValue {
if (type & PredictCell)
m_structure.makeTop();
+ m_value = JSValue();
checkConsistency();
}
@@ -417,6 +469,10 @@ struct AbstractValue {
// sure that new information gleaned from the PredictedType needs to be fed back
// into the information gleaned from the StructureSet.
m_structure.filter(m_type);
+
+ if (!!m_value && !validateIgnoringValue(m_value))
+ clear();
+
checkConsistency();
}
@@ -431,14 +487,45 @@ struct AbstractValue {
// to ensure that the structure filtering does the right thing is to filter on
// the new type (None) rather than the one passed (Array).
m_structure.filter(m_type);
+
+ if (!!m_value && !validateIgnoringValue(m_value))
+ clear();
+
checkConsistency();
}
+ bool validateIgnoringValue(JSValue value) const
+ {
+ if (isTop())
+ return true;
+
+ if (mergePredictions(m_type, predictionFromValue(value)) != m_type)
+ return false;
+
+ if (value.isEmpty()) {
+ ASSERT(m_type & PredictEmpty);
+ return true;
+ }
+
+ if (m_structure.isTop())
+ return true;
+
+ if (!!value && value.isCell()) {
+ ASSERT(m_type & PredictCell);
+ return m_structure.contains(value.asCell()->structure());
+ }
+
+ return true;
+ }
+
bool validate(JSValue value) const
{
if (isTop())
return true;
+ if (!!m_value)
+ return m_value == value;
+
if (mergePredictions(m_type, predictionFromValue(value)) != m_type)
return false;
@@ -450,7 +537,7 @@ struct AbstractValue {
if (m_structure.isTop())
return true;
- if (value.isCell()) {
+ if (!!value && value.isCell()) {
ASSERT(m_type & PredictCell);
return m_structure.contains(value.asCell()->structure());
}
@@ -463,6 +550,12 @@ struct AbstractValue {
if (!(m_type & PredictCell))
ASSERT(m_structure.isClear());
+ if (isClear())
+ ASSERT(!m_value);
+
+ if (!!m_value)
+ ASSERT(mergePredictions(m_type, predictionFromValue(m_value)) == m_type);
+
// Note that it's possible for a prediction like (Final, []). This really means that
// the value is bottom and that any code that uses the value is unreachable. But
// we don't want to get pedantic about this as it would only increase the computational
@@ -473,11 +566,14 @@ struct AbstractValue {
{
fprintf(out, "(%s, ", predictionToString(m_type));
m_structure.dump(out);
+ if (!!m_value)
+ fprintf(out, ", %s", m_value.description());
fprintf(out, ")");
}
StructureAbstractValue m_structure;
PredictedType m_type;
+ JSValue m_value;
};
} } // namespace JSC::DFG
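
The merge() added to AbstractValue above is a lattice join: a clear (bottom) value adopts the other side wholesale, equal constants survive, and differing constants widen to an unknown value. As a minimal standalone sketch of just the constant-tracking part (ToyAbstractValue and its fields are made-up stand-ins, not JSC types), the same join looks like this:

#include <cassert>
#include <optional>

// Toy stand-in for AbstractValue's m_value: nullopt means "no single known constant".
struct ToyAbstractValue {
    bool clear = true;
    std::optional<int> value;

    bool merge(const ToyAbstractValue& other)
    {
        if (clear) {
            *this = other;                 // bottom adopts the other side wholesale
            return !other.clear;
        }
        bool changed = false;
        if (value != other.value) {
            changed |= value.has_value();  // a known constant was lost
            value.reset();                 // differing constants widen to "unknown"
        }
        return changed;
    }
};

int main()
{
    ToyAbstractValue a { false, 42 };
    ToyAbstractValue b { false, 42 };
    assert(!a.merge(b));   // same constant: nothing changed
    b.value = 7;
    assert(a.merge(b));    // differing constants: widened, change reported
    assert(!a.value);      // constant is gone
}

As in merge() itself, the return value reports whether the join changed anything, which is what a fixpoint driver such as the CFA needs in order to decide when to revisit a block.
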
diff --git a/Source/JavaScriptCore/dfg/DFGAdjacencyList.h b/Source/JavaScriptCore/dfg/DFGAdjacencyList.h
index e2b096bf4..a59223b05 100644
--- a/Source/JavaScriptCore/dfg/DFGAdjacencyList.h
+++ b/Source/JavaScriptCore/dfg/DFGAdjacencyList.h
@@ -41,7 +41,9 @@ public:
Fixed,
Variable
};
-
+
+ enum { Size = 3 };
+
AdjacencyList(Kind kind)
#if !ASSERT_DISABLED
: m_kind(kind)
@@ -74,21 +76,21 @@ public:
const Edge& child(unsigned i) const
{
- ASSERT(i < 3);
+ ASSERT(i < Size);
ASSERT(m_kind == Fixed);
return m_words[i];
}
Edge& child(unsigned i)
{
- ASSERT(i < 3);
+ ASSERT(i < Size);
ASSERT(m_kind == Fixed);
return m_words[i];
}
void setChild(unsigned i, Edge nodeUse)
{
- ASSERT(i < 30);
+ ASSERT(i < Size);
ASSERT(m_kind == Fixed);
m_words[i] = nodeUse;
}
@@ -114,10 +116,27 @@ public:
child(2) = child3;
}
- void initialize(NodeIndex child1, NodeIndex child2, NodeIndex child3)
+ void initialize(NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
{
initialize(Edge(child1), Edge(child2), Edge(child3));
}
+
+ void reset()
+ {
+#if !ASSERT_DISABLED
+ m_kind = Fixed;
+#endif
+ initialize();
+ }
+
+ // Call this if you wish to remove an edge and the node treats the list of children
+ // as a "bag" - an unordered set where the index of the edge does not matter.
+ void removeEdgeFromBag(unsigned edgeIndex)
+ {
+ for (unsigned i = edgeIndex; i < Size - 1; ++i)
+ setChild(i, child(i + 1));
+ setChild(Size - 1, Edge());
+ }
unsigned firstChild() const
{
@@ -142,7 +161,7 @@ public:
}
private:
- Edge m_words[3];
+ Edge m_words[Size];
#if !ASSERT_DISABLED
Kind m_kind;
#endif
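
The new removeEdgeFromBag() compacts the fixed-width child array by shifting every later edge down one slot and clearing the last, so consumers that treat the children as an unordered bag never observe a hole. The same shift on a plain array, as a toy model with 0 standing in for an empty Edge:

#include <cassert>

enum { Size = 3 }; // mirrors AdjacencyList::Size

// Shift every later child down one slot, then clear the last slot,
// exactly like removeEdgeFromBag() but on plain ints (0 = empty Edge).
static void removeFromBag(int (&children)[Size], unsigned edgeIndex)
{
    for (unsigned i = edgeIndex; i < Size - 1; ++i)
        children[i] = children[i + 1];
    children[Size - 1] = 0;
}

int main()
{
    int children[Size] = { 10, 20, 30 };
    removeFromBag(children, 0);
    assert(children[0] == 20 && children[1] == 30 && children[2] == 0);
}
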
diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp
new file mode 100644
index 000000000..5ab515bd7
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp
@@ -0,0 +1,750 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGArgumentsSimplificationPhase.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGAbstractState.h"
+#include "DFGBasicBlock.h"
+#include "DFGGraph.h"
+#include "DFGInsertionSet.h"
+#include "DFGPhase.h"
+#include "DFGValidate.h"
+#include <wtf/HashSet.h>
+#include <wtf/HashMap.h>
+
+namespace JSC { namespace DFG {
+
+namespace {
+
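+// A null pointer is a legitimate key here (the machine code block's own frame has
+// a null InlineCallFrame*), so reserve 1, not 0, as the hash table's empty value.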
+template<typename T>
+struct NullableHashTraits : public HashTraits<T> {
+ static const bool emptyValueIsZero = false;
+ static T emptyValue() { return reinterpret_cast<T>(1); }
+};
+
+struct ArgumentsAliasingData {
+ InlineCallFrame* callContext;
+ bool callContextSet;
+ bool multipleCallContexts;
+
+ bool assignedFromArguments;
+ bool assignedFromManyThings;
+
+ bool escapes;
+
+ ArgumentsAliasingData()
+ : callContext(0)
+ , callContextSet(false)
+ , multipleCallContexts(false)
+ , assignedFromArguments(false)
+ , assignedFromManyThings(false)
+ , escapes(false)
+ {
+ }
+
+ void mergeCallContext(InlineCallFrame* newCallContext)
+ {
+ if (multipleCallContexts)
+ return;
+
+ if (!callContextSet) {
+ callContext = newCallContext;
+ callContextSet = true;
+ return;
+ }
+
+ if (callContext == newCallContext)
+ return;
+
+ multipleCallContexts = true;
+ }
+
+ bool callContextIsValid()
+ {
+ return callContextSet && !multipleCallContexts;
+ }
+
+ void mergeArgumentsAssignment()
+ {
+ assignedFromArguments = true;
+ }
+
+ void mergeNonArgumentsAssignment()
+ {
+ assignedFromManyThings = true;
+ }
+
+ bool argumentsAssignmentIsValid()
+ {
+ return assignedFromArguments && !assignedFromManyThings;
+ }
+
+ bool isValid()
+ {
+ return callContextIsValid() && argumentsAssignmentIsValid() && !escapes;
+ }
+};
+
+} // end anonymous namespace
+
+class ArgumentsSimplificationPhase : public Phase {
+public:
+ ArgumentsSimplificationPhase(Graph& graph)
+ : Phase(graph, "arguments simplification")
+ {
+ }
+
+ bool run()
+ {
+ if (!m_graph.m_hasArguments)
+ return false;
+
+ bool changed = false;
+
+ // Record which arguments are known to escape no matter what.
+ for (unsigned i = codeBlock()->inlineCallFrames().size(); i--;) {
+ InlineCallFrame* inlineCallFrame = &codeBlock()->inlineCallFrames()[i];
+ if (m_graph.m_executablesWhoseArgumentsEscaped.contains(
+ m_graph.executableFor(inlineCallFrame)))
+ m_createsArguments.add(inlineCallFrame);
+ }
+
+ // Create data for variable access datas that we will want to analyze.
+ for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
+ VariableAccessData* variableAccessData = &m_graph.m_variableAccessData[i];
+ if (!variableAccessData->isRoot())
+ continue;
+ if (variableAccessData->isCaptured())
+ continue;
+ m_argumentsAliasing.add(variableAccessData, ArgumentsAliasingData());
+ }
+
+ // Figure out which variables alias the arguments and nothing else, and are
+ // used only for GetByVal and GetArgumentsLength accesses. At the same time,
+ // identify uses of CreateArguments that are not consistent with the arguments
+ // being aliased only to variables that satisfy these constraints.
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
+ NodeIndex nodeIndex = block->at(indexInBlock);
+ Node& node = m_graph[nodeIndex];
+ if (!node.shouldGenerate())
+ continue;
+ switch (node.op()) {
+ case CreateArguments: {
+ // Ignore this op. If we see a lone CreateArguments then we want to
+ // completely ignore it because:
+ // 1) The default would be to see that the child is a GetLocal on the
+ // arguments register and conclude that we have an arguments escape.
+ // 2) The fact that a CreateArguments exists does not mean that it
+ // will continue to exist after we're done with this phase. As far
+ // as this phase is concerned, a CreateArguments only "exists" if it
+ // is used in a manner that necessitates its existence.
+ break;
+ }
+
+ case SetLocal: {
+ Node& source = m_graph[node.child1()];
+ VariableAccessData* variableAccessData = node.variableAccessData();
+ if (source.op() != CreateArguments) {
+ // Make sure that the source of the SetLocal knows that if it's
+ // a variable that we think is aliased to the arguments, then it
+ // may escape at this point. In future, we could track transitive
+ // aliasing. But not yet.
+ observeBadArgumentsUse(node.child1());
+
+ if (variableAccessData->isCaptured())
+ break;
+
+ // Make sure that if it's a variable that we think is aliased to
+ // the arguments, that we know that it might actually not be.
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->second;
+ data.mergeNonArgumentsAssignment();
+ data.mergeCallContext(node.codeOrigin.inlineCallFrame);
+ break;
+ }
+ int argumentsRegister =
+ m_graph.uncheckedArgumentsRegisterFor(node.codeOrigin);
+ if (variableAccessData->local() == argumentsRegister
+ || variableAccessData->local() ==
+ unmodifiedArgumentsRegister(argumentsRegister)) {
+ if (node.codeOrigin.inlineCallFrame == source.codeOrigin.inlineCallFrame)
+ break;
+ m_createsArguments.add(source.codeOrigin.inlineCallFrame);
+ break;
+ }
+ if (variableAccessData->isCaptured()) {
+ m_createsArguments.add(source.codeOrigin.inlineCallFrame);
+ break;
+ }
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->second;
+ data.mergeArgumentsAssignment();
+ // This ensures that the variable's uses are in the same context as
+ // the arguments it is aliasing.
+ data.mergeCallContext(node.codeOrigin.inlineCallFrame);
+ data.mergeCallContext(source.codeOrigin.inlineCallFrame);
+ break;
+ }
+
+ case GetLocal:
+ case Phi: {
+ VariableAccessData* variableAccessData = node.variableAccessData();
+ if (variableAccessData->isCaptured())
+ break;
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->second;
+ data.mergeCallContext(node.codeOrigin.inlineCallFrame);
+ break;
+ }
+
+ case Flush: {
+ VariableAccessData* variableAccessData = node.variableAccessData();
+ if (variableAccessData->isCaptured())
+ break;
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->second;
+ data.mergeCallContext(node.codeOrigin.inlineCallFrame);
+
+ // If a variable is used in a flush then by definition it escapes.
+ data.escapes = true;
+ break;
+ }
+
+ case SetArgument: {
+ VariableAccessData* variableAccessData = node.variableAccessData();
+ if (variableAccessData->isCaptured())
+ break;
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->second;
+ data.mergeNonArgumentsAssignment();
+ data.mergeCallContext(node.codeOrigin.inlineCallFrame);
+ break;
+ }
+
+ case GetByVal: {
+ if (!node.prediction()
+ || !m_graph[node.child1()].prediction()
+ || !m_graph[node.child2()].prediction()) {
+ observeBadArgumentsUses(node);
+ break;
+ }
+
+ if (!isActionableArrayPrediction(m_graph[node.child1()].prediction())
+ || !m_graph[node.child2()].shouldSpeculateInteger()) {
+ observeBadArgumentsUses(node);
+ break;
+ }
+
+ if (m_graph[node.child1()].shouldSpeculateArguments()) {
+ // If arguments is used as an index, then it's an escaping use.
+ // That's so awful and pretty much impossible since it would
+ // imply that the arguments were predicted integer, but it's
+ // good to be defensive and thorough.
+ observeBadArgumentsUse(node.child2());
+ observeProperArgumentsUse(node, node.child1());
+ break;
+ }
+
+ observeBadArgumentsUses(node);
+ break;
+ }
+
+ case GetArgumentsLength: {
+ observeProperArgumentsUse(node, node.child1());
+ break;
+ }
+
+ default:
+ observeBadArgumentsUses(node);
+ break;
+ }
+ }
+ }
+
+ // Now we know which variables are aliased to arguments. But if any of them are
+ // found to have escaped, or were otherwise invalidated, then we need to mark
+ // the arguments as requiring creation. This is a property of SetLocals to
+ // variables that are neither the correct arguments register nor are marked as
+ // being arguments-aliased.
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
+ NodeIndex nodeIndex = block->at(indexInBlock);
+ Node& node = m_graph[nodeIndex];
+ if (!node.shouldGenerate())
+ continue;
+ if (node.op() != SetLocal)
+ continue;
+ Node& source = m_graph[node.child1()];
+ if (source.op() != CreateArguments)
+ continue;
+ VariableAccessData* variableAccessData = node.variableAccessData();
+ if (variableAccessData->isCaptured()) {
+ // The captured case would have already been taken care of in the
+ // previous pass.
+ continue;
+ }
+
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->second;
+ if (data.isValid())
+ continue;
+
+ m_createsArguments.add(source.codeOrigin.inlineCallFrame);
+ }
+ }
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("Arguments aliasing states:\n");
+ for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
+ VariableAccessData* variableAccessData = &m_graph.m_variableAccessData[i];
+ if (!variableAccessData->isRoot())
+ continue;
+ dataLog(" r%d(%s): ", variableAccessData->local(), m_graph.nameOfVariableAccessData(variableAccessData));
+ if (variableAccessData->isCaptured())
+ dataLog("Captured");
+ else {
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->second;
+ bool first = true;
+ if (data.callContextIsValid()) {
+ if (!first)
+ dataLog(", ");
+ dataLog("Have Call Context: %p", data.callContext);
+ first = false;
+ if (!m_createsArguments.contains(data.callContext))
+ dataLog(" (Does Not Create Arguments)");
+ }
+ if (data.argumentsAssignmentIsValid()) {
+ if (!first)
+ dataLog(", ");
+ dataLog("Arguments Assignment Is Valid");
+ first = false;
+ }
+ if (!data.escapes) {
+ if (!first)
+ dataLog(", ");
+ dataLog("Does Not Escape");
+ first = false;
+ }
+ if (!first)
+ dataLog(", ");
+ if (data.isValid()) {
+ if (m_createsArguments.contains(data.callContext))
+ dataLog("INVALID (due to argument creation)");
+ else
+ dataLog("VALID");
+ } else
+ dataLog("INVALID (due to bad variable use)");
+ }
+ dataLog("\n");
+ }
+#endif
+
+ InsertionSet<NodeIndex> insertionSet;
+
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
+ NodeIndex nodeIndex = block->at(indexInBlock);
+ Node& node = m_graph[nodeIndex];
+ if (!node.shouldGenerate())
+ continue;
+
+ switch (node.op()) {
+ case SetLocal: {
+ Node& source = m_graph[node.child1()];
+ if (source.op() != CreateArguments)
+ break;
+
+ VariableAccessData* variableAccessData = node.variableAccessData();
+
+ // If this is a store into the arguments register for an InlineCallFrame*
+ // that does not create arguments, then kill it.
+ int argumentsRegister =
+ m_graph.uncheckedArgumentsRegisterFor(node.codeOrigin);
+ if ((variableAccessData->local() == argumentsRegister
+ || variableAccessData->local()
+ == unmodifiedArgumentsRegister(argumentsRegister))
+ && !m_createsArguments.contains(source.codeOrigin.inlineCallFrame)) {
+ // Find the Flush. It should be the next instruction.
+ Node& flush = m_graph[block->at(indexInBlock + 1)];
+ ASSERT(flush.op() == Flush);
+ ASSERT(flush.variableAccessData() == variableAccessData);
+ ASSERT(flush.child1() == nodeIndex);
+ // Be defensive in release mode.
+ if (flush.op() != Flush
+ || flush.variableAccessData() != variableAccessData
+ || flush.child1() != nodeIndex)
+ break;
+ flush.setOpAndDefaultFlags(Nop);
+ m_graph.clearAndDerefChild1(flush);
+ flush.setRefCount(0);
+ changed = true;
+ break;
+ }
+
+ if (variableAccessData->isCaptured())
+ break;
+
+ // If this is a store into a VariableAccessData* that is marked as
+ // arguments aliasing for an InlineCallFrame* that does not create
+ // arguments, then flag the VariableAccessData as being arguments-aliased.
+ // This'll let the OSR exit machinery do the right
+ // things. Note also that the SetLocal should become dead as soon as
+ // we replace all uses of this variable with GetMyArgumentsLength and
+ // GetMyArgumentByVal.
+ if (m_argumentsAliasing.find(variableAccessData)->second.isValid()
+ && !m_createsArguments.contains(source.codeOrigin.inlineCallFrame)) {
+ changed |= variableAccessData->mergeIsArgumentsAlias(true);
+ break;
+ }
+ break;
+ }
+
+ case Phantom: {
+ // It's highly likely that we will have a Phantom referencing either
+ // CreateArguments, or a local op for the arguments register, or a
+ // local op for an arguments-aliased variable. In any of those cases,
+ // we should remove the phantom reference, since:
+ // 1) Phantoms only exist to aid OSR exit. But arguments simplification
+ // has its own OSR exit story, which is to inform OSR exit to reify
+ // the arguments as necessary.
+ // 2) The Phantom may keep the CreateArguments node alive, which is
+ // precisely what we don't want.
+ for (unsigned i = 0; i < AdjacencyList::Size; ++i)
+ removeArgumentsReferencingPhantomChild(node, i);
+ break;
+ }
+
+ case GetByVal: {
+ if (!node.prediction()
+ || !m_graph[node.child1()].prediction()
+ || !m_graph[node.child2()].prediction())
+ break;
+
+ if (!isActionableArrayPrediction(m_graph[node.child1()].prediction())
+ || !m_graph[node.child2()].shouldSpeculateInteger())
+ break;
+
+ if (m_graph[node.child1()].shouldSpeculateArguments()) {
+ // This can be simplified to GetMyArgumentByVal if we know that
+ // it satisfies either condition (1) or (2):
+ // 1) Its first child is a GetLocal of a variable with valid
+ // ArgumentsAliasingData, and its InlineCallFrame* is not
+ // marked as creating arguments.
+ // 2) Its first child is CreateArguments and its InlineCallFrame*
+ // is not marked as creating arguments.
+
+ if (!isOKToOptimize(m_graph[node.child1()]))
+ break;
+
+ m_graph.deref(node.child1());
+ node.children.child1() = node.children.child2();
+ node.children.child2() = Edge();
+ node.setOpAndDefaultFlags(GetMyArgumentByVal);
+ changed = true;
+ --indexInBlock; // Force reconsideration of this op now that it's a GetMyArgumentByVal.
+ break;
+ }
+ break;
+ }
+
+ case GetArgumentsLength: {
+ if (!isOKToOptimize(m_graph[node.child1()]))
+ break;
+
+ m_graph.deref(node.child1());
+ node.children.child1() = Edge();
+ node.setOpAndDefaultFlags(GetMyArgumentsLength);
+ changed = true;
+ --indexInBlock; // Force reconsideration of this op now that it's a GetMyArgumentsLength.
+ break;
+ }
+
+ case GetMyArgumentsLength:
+ case GetMyArgumentsLengthSafe: {
+ if (m_createsArguments.contains(node.codeOrigin.inlineCallFrame)) {
+ ASSERT(node.op() == GetMyArgumentsLengthSafe);
+ break;
+ }
+ if (node.op() == GetMyArgumentsLengthSafe) {
+ node.setOp(GetMyArgumentsLength);
+ changed = true;
+ }
+ if (!node.codeOrigin.inlineCallFrame)
+ break;
+
+ // We know exactly what this will return. But only after we have checked
+ // that nobody has escaped our arguments.
+ Node check(CheckArgumentsNotCreated, node.codeOrigin);
+ check.ref();
+ NodeIndex checkIndex = m_graph.size();
+ m_graph.append(check);
+ insertionSet.append(indexInBlock, checkIndex);
+
+ m_graph.convertToConstant(
+ nodeIndex, jsNumber(node.codeOrigin.inlineCallFrame->arguments.size() - 1));
+ changed = true;
+ break;
+ }
+
+ case GetMyArgumentByVal:
+ case GetMyArgumentByValSafe: {
+ if (m_createsArguments.contains(node.codeOrigin.inlineCallFrame)) {
+ ASSERT(node.op() == GetMyArgumentByValSafe);
+ break;
+ }
+ if (node.op() == GetMyArgumentByValSafe) {
+ node.setOp(GetMyArgumentByVal);
+ changed = true;
+ }
+ if (!node.codeOrigin.inlineCallFrame)
+ break;
+ if (!m_graph[node.child1()].hasConstant())
+ break;
+ JSValue value = m_graph[node.child1()].valueOfJSConstant(codeBlock());
+ if (!value.isInt32())
+ break;
+ int32_t index = value.asInt32();
+ if (index < 0
+ || static_cast<size_t>(index + 1) >=
+ node.codeOrigin.inlineCallFrame->arguments.size())
+ break;
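+
+ // (The +1 accounts for the implicit |this| entry in the inline call frame's
+ // argument list: arguments[i] lives in caller argument slot i + 1.)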
+
+ // We know which argument this is accessing. But only after we have checked
+ // that nobody has escaped our arguments. We also need to ensure that the
+ // index is kept alive. That's somewhat pointless since it's a constant, but
+ // it's important because this is one of those invariants that we like to
+ // have in the DFG. Note finally that we use the GetLocalUnlinked opcode
+ // here, since this is being done _after_ the prediction propagation phase
+ // has run - therefore it makes little sense to link the GetLocal operation
+ // into the VariableAccessData and Phi graphs.
+
+ Node check(CheckArgumentsNotCreated, node.codeOrigin);
+ check.ref();
+
+ Node phantom(Phantom, node.codeOrigin);
+ phantom.ref();
+ phantom.children = node.children;
+
+ node.convertToGetLocalUnlinked(
+ static_cast<VirtualRegister>(
+ node.codeOrigin.inlineCallFrame->stackOffset +
+ argumentToOperand(index + 1)));
+
+ NodeIndex checkNodeIndex = m_graph.size();
+ m_graph.append(check);
+ insertionSet.append(indexInBlock, checkNodeIndex);
+ NodeIndex phantomNodeIndex = m_graph.size();
+ m_graph.append(phantom);
+ insertionSet.append(indexInBlock, phantomNodeIndex);
+
+ changed = true;
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ insertionSet.execute(*block);
+ }
+
+ if (changed)
+ m_graph.collectGarbage();
+
+ return changed;
+ }
+
+private:
+ HashSet<InlineCallFrame*,
+ DefaultHash<InlineCallFrame*>::Hash,
+ NullableHashTraits<InlineCallFrame*> > m_createsArguments;
+ HashMap<VariableAccessData*, ArgumentsAliasingData,
+ DefaultHash<VariableAccessData*>::Hash,
+ NullableHashTraits<VariableAccessData*> > m_argumentsAliasing;
+
+ void observeBadArgumentsUse(Edge edge)
+ {
+ if (!edge)
+ return;
+
+ Node& child = m_graph[edge];
+ switch (child.op()) {
+ case CreateArguments: {
+ m_createsArguments.add(child.codeOrigin.inlineCallFrame);
+ break;
+ }
+
+ case GetLocal: {
+ if (child.local() == m_graph.uncheckedArgumentsRegisterFor(child.codeOrigin)) {
+ m_createsArguments.add(child.codeOrigin.inlineCallFrame);
+ break;
+ }
+
+ VariableAccessData* variableAccessData = child.variableAccessData();
+ if (variableAccessData->isCaptured())
+ break;
+
+ ArgumentsAliasingData& data = m_argumentsAliasing.find(variableAccessData)->second;
+ data.escapes = true;
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ void observeBadArgumentsUses(Node& node)
+ {
+ for (unsigned i = m_graph.numChildren(node); i--;)
+ observeBadArgumentsUse(m_graph.child(node, i));
+ }
+
+ void observeProperArgumentsUse(Node& node, Edge edge)
+ {
+ Node& child = m_graph[edge];
+ if (child.op() != GetLocal) {
+ // When can this happen? At least two cases that I can think
+ // of:
+ //
+ // 1) Aliased use of arguments in the same basic block,
+ // like:
+ //
+ // var a = arguments;
+ // var x = arguments[i];
+ //
+ // 2) If we're accessing arguments we got from the heap!
+
+ if (child.op() == CreateArguments
+ && node.codeOrigin.inlineCallFrame
+ != child.codeOrigin.inlineCallFrame)
+ m_createsArguments.add(child.codeOrigin.inlineCallFrame);
+
+ return;
+ }
+
+ VariableAccessData* variableAccessData = child.variableAccessData();
+ if (variableAccessData->isCaptured())
+ return;
+
+ ArgumentsAliasingData& data = m_argumentsAliasing.find(variableAccessData)->second;
+ data.mergeCallContext(node.codeOrigin.inlineCallFrame);
+ }
+
+ bool isOKToOptimize(Node& source)
+ {
+ switch (source.op()) {
+ case GetLocal: {
+ VariableAccessData* variableAccessData = source.variableAccessData();
+ if (variableAccessData->isCaptured())
+ break;
+ ArgumentsAliasingData& data =
+ m_argumentsAliasing.find(variableAccessData)->second;
+ if (!data.isValid())
+ break;
+ if (m_createsArguments.contains(source.codeOrigin.inlineCallFrame))
+ break;
+
+ return true;
+ }
+
+ case CreateArguments: {
+ if (m_createsArguments.contains(source.codeOrigin.inlineCallFrame))
+ break;
+
+ return true;
+ }
+
+ default:
+ break;
+ }
+
+ return false;
+ }
+
+ void removeArgumentsReferencingPhantomChild(Node& node, unsigned edgeIndex)
+ {
+ Edge edge = node.children.child(edgeIndex);
+ if (!edge)
+ return;
+
+ Node& child = m_graph[edge];
+ switch (child.op()) {
+ case Phi: // Arises if we had CSE on a GetLocal of the arguments register.
+ case GetLocal: // Arises if we had CSE on an arguments access to a variable aliased to the arguments.
+ case SetLocal: { // Arises if we had CSE on a GetLocal of the arguments register.
+ VariableAccessData* variableAccessData = child.variableAccessData();
+ bool isDeadArgumentsRegister =
+ variableAccessData->local() ==
+ m_graph.uncheckedArgumentsRegisterFor(child.codeOrigin)
+ && !m_createsArguments.contains(child.codeOrigin.inlineCallFrame);
+ bool isAliasedArgumentsRegister =
+ !variableAccessData->isCaptured()
+ && m_argumentsAliasing.find(variableAccessData)->second.isValid()
+ && !m_createsArguments.contains(child.codeOrigin.inlineCallFrame);
+ if (!isDeadArgumentsRegister && !isAliasedArgumentsRegister)
+ break;
+ m_graph.deref(edge);
+ node.children.removeEdgeFromBag(edgeIndex);
+ break;
+ }
+
+ case CreateArguments: { // Arises if we CSE two GetLocals to the arguments register and then CSE the second use of the GetLocal to the first.
+ if (m_createsArguments.contains(child.codeOrigin.inlineCallFrame))
+ break;
+ m_graph.deref(edge);
+ node.children.removeEdgeFromBag(edgeIndex);
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+};
+
+bool performArgumentsSimplification(Graph& graph)
+{
+ return runPhase<ArgumentsSimplificationPhase>(graph);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+
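
mergeCallContext() in the phase above is a small three-state lattice: bottom (no context seen yet), a single call context, and top (multiple contexts, at which point arguments aliasing can no longer be proven). A standalone sketch of that state machine, using hypothetical names rather than JSC's types:

#include <cassert>

// Three-state lattice mirroring mergeCallContext(): bottom (nothing seen),
// a single call context, or top (conflicting contexts; aliasing unprovable).
struct CallContextLattice {
    const void* context = nullptr;
    bool set = false;
    bool multiple = false;

    void merge(const void* newContext)
    {
        if (multiple)
            return;                    // already at top: stays at top
        if (!set) {
            context = newContext;      // bottom -> single
            set = true;
            return;
        }
        if (context != newContext)
            multiple = true;           // single + different -> top
    }

    bool isValid() const { return set && !multiple; }
};

int main()
{
    CallContextLattice lattice;
    int frameA, frameB; // stand-ins for two InlineCallFrame*s
    lattice.merge(&frameA);
    lattice.merge(&frameA);
    assert(lattice.isValid());  // one context, possibly seen many times
    lattice.merge(&frameB);
    assert(!lattice.isValid()); // conflicting contexts invalidate aliasing
}
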
diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.h b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.h
new file mode 100644
index 000000000..e8a24019e
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGArgumentsSimplificationPhase_h
+#define DFGArgumentsSimplificationPhase_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+namespace JSC { namespace DFG {
+
+class Graph;
+
+// Simplifies reflective uses of the Arguments object:
+//
+// Inlined arguments.length -> constant
+// Inlined arguments[constant] -> GetLocalUnlinked
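+//
+// For example, once a callee such as
+//
+//     function first() { return arguments.length ? arguments[0] : undefined; }
+//
+// has been inlined, arguments.length folds to a constant and arguments[0]
+// becomes a direct read of the caller's argument slot, guarded by a
+// CheckArgumentsNotCreated node.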
+
+bool performArgumentsSimplification(Graph&);
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGArgumentsSimplificationPhase_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
index 15f6d19a5..7799ee505 100644
--- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
@@ -32,6 +32,14 @@ namespace JSC { namespace DFG {
const double AssemblyHelpers::twoToThe32 = (double)0x100000000ull;
+ExecutableBase* AssemblyHelpers::executableFor(const CodeOrigin& codeOrigin)
+{
+ if (!codeOrigin.inlineCallFrame)
+ return m_codeBlock->ownerExecutable();
+
+ return codeOrigin.inlineCallFrame->executable.get();
+}
+
Vector<BytecodeAndMachineOffset>& AssemblyHelpers::decodedCodeMapFor(CodeBlock* codeBlock)
{
ASSERT(codeBlock == codeBlock->baselineVersion());
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
index e7a3132f3..9087eec57 100644
--- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
@@ -134,16 +134,28 @@ public:
{
return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register));
}
+ static Address addressFor(int operand)
+ {
+ return addressFor(static_cast<VirtualRegister>(operand));
+ }
static Address tagFor(VirtualRegister virtualRegister)
{
return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
}
+ static Address tagFor(int operand)
+ {
+ return tagFor(static_cast<VirtualRegister>(operand));
+ }
static Address payloadFor(VirtualRegister virtualRegister)
{
return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
}
+ static Address payloadFor(int operand)
+ {
+ return payloadFor(static_cast<VirtualRegister>(operand));
+ }
Jump branchIfNotObject(GPRReg structureReg)
{
@@ -170,14 +182,21 @@ public:
// Add a debug call. This call has no effect on JIT code execution state.
void debugCall(V_DFGDebugOperation_EP function, void* argument)
{
- EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_globalData->scratchBufferForSize(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters)));
-
+ size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
+ ScratchBuffer* scratchBuffer = m_globalData->scratchBufferForSize(scratchSize);
+ EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
+
for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
storePtr(GPRInfo::toRegister(i), buffer + i);
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
}
+
+ // Tell GC mark phase how much of the scratch buffer is active during call.
+ move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
+ storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);
+
#if CPU(X86_64) || CPU(ARM_THUMB2)
move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
@@ -191,6 +210,10 @@ public:
#endif
move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
call(scratch);
+
+ move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
+ storePtr(TrustedImmPtr(0), GPRInfo::regT0);
+
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
@@ -308,6 +331,8 @@ public:
return codeOrigin.inlineCallFrame->callee->jsExecutable()->isStrictMode();
}
+ ExecutableBase* executableFor(const CodeOrigin& codeOrigin);
+
CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
{
return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock());
@@ -318,6 +343,20 @@ public:
return m_baselineCodeBlock;
}
+ int argumentsRegisterFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return codeBlock()->argumentsRegister();
+
+ return baselineCodeBlockForInlineCallFrame(
+ inlineCallFrame)->argumentsRegister() + inlineCallFrame->stackOffset;
+ }
+
+ int argumentsRegisterFor(const CodeOrigin& codeOrigin)
+ {
+ return argumentsRegisterFor(codeOrigin.inlineCallFrame);
+ }
+
Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);
static const double twoToThe32;
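
The interesting part of the debugCall() change is the activeLengthPtr() handshake: the JIT stores the scratch buffer's active byte count before calling out and zeroes it afterwards, so a garbage collector that scans during the call knows how much of the buffer holds live values. A toy model of that protocol (ToyScratchBuffer is a hypothetical stand-in, not JSC's ScratchBuffer):

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for JSC's ScratchBuffer: a GC would treat the first
// *activeLengthPtr() bytes of dataBuffer() as holding live values.
class ToyScratchBuffer {
public:
    explicit ToyScratchBuffer(size_t size) : m_activeLength(0), m_data(size) { }
    size_t* activeLengthPtr() { return &m_activeLength; }
    void* dataBuffer() { return m_data.data(); }
private:
    size_t m_activeLength;
    std::vector<char> m_data;
};

int main()
{
    ToyScratchBuffer buffer(256);
    *buffer.activeLengthPtr() = 256; // before the call: whole buffer is live
    // ... spill registers into dataBuffer(), call out to C++ ...
    *buffer.activeLengthPtr() = 0;   // after the call: nothing is live
    assert(*buffer.activeLengthPtr() == 0);
}
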
diff --git a/Source/JavaScriptCore/dfg/DFGBasicBlock.h b/Source/JavaScriptCore/dfg/DFGBasicBlock.h
index 92df58d09..9128f0882 100644
--- a/Source/JavaScriptCore/dfg/DFGBasicBlock.h
+++ b/Source/JavaScriptCore/dfg/DFGBasicBlock.h
@@ -44,6 +44,7 @@ struct BasicBlock : Vector<NodeIndex, 8> {
, isOSRTarget(false)
, cfaHasVisited(false)
, cfaShouldRevisit(false)
+ , cfaFoundConstants(false)
#if !ASSERT_DISABLED
, isLinked(false)
#endif
@@ -55,6 +56,10 @@ struct BasicBlock : Vector<NodeIndex, 8> {
{
}
+ ~BasicBlock()
+ {
+ }
+
void ensureLocals(unsigned newNumLocals)
{
variablesAtHead.ensureLocals(newNumLocals);
@@ -62,6 +67,33 @@ struct BasicBlock : Vector<NodeIndex, 8> {
valuesAtHead.ensureLocals(newNumLocals);
valuesAtTail.ensureLocals(newNumLocals);
}
+
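+ // Unified indexing over the whole block: indices [0, phis.size()) address the
+ // phi nodes, the rest address the body, so one loop can visit every node.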
+ size_t numNodes() const { return phis.size() + size(); }
+ NodeIndex nodeIndex(size_t i) const
+ {
+ if (i < phis.size())
+ return phis[i];
+ return at(i - phis.size());
+ }
+ bool isPhiIndex(size_t i) const { return i < phis.size(); }
+
+ bool isInPhis(NodeIndex nodeIndex) const
+ {
+ for (size_t i = 0; i < phis.size(); ++i) {
+ if (phis[i] == nodeIndex)
+ return true;
+ }
+ return false;
+ }
+
+ bool isInBlock(NodeIndex index) const
+ {
+ for (size_t i = 0; i < numNodes(); ++i) {
+ if (nodeIndex(i) == index)
+ return true;
+ }
+ return false;
+ }
// This value is used internally for block linking and OSR entry. It is mostly meaningless
// for other purposes due to inlining.
@@ -70,6 +102,7 @@ struct BasicBlock : Vector<NodeIndex, 8> {
bool isOSRTarget;
bool cfaHasVisited;
bool cfaShouldRevisit;
+ bool cfaFoundConstants;
#if !ASSERT_DISABLED
bool isLinked;
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index cc756c61e..27e198c75 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -45,8 +45,9 @@ namespace JSC { namespace DFG {
// This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser {
public:
- ByteCodeParser(Graph& graph)
- : m_globalData(&graph.m_globalData)
+ ByteCodeParser(ExecState* exec, Graph& graph)
+ : m_exec(exec)
+ , m_globalData(&graph.m_globalData)
, m_codeBlock(graph.m_codeBlock)
, m_profiledBlock(graph.m_profiledBlock)
, m_graph(graph)
@@ -97,10 +98,6 @@ private:
void prepareToParseBlock();
// Parse a single basic block of bytecode instructions.
bool parseBlock(unsigned limit);
- // Find reachable code and setup predecessor links in the graph's BasicBlocks.
- void determineReachability();
- // Enqueue a block onto the worklist, if necessary.
- void handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex, BlockIndex successor);
// Link block successors.
void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
@@ -116,11 +113,11 @@ private:
// Add spill locations to nodes.
void allocateVirtualRegisters();
- VariableAccessData* newVariableAccessData(int operand)
+ VariableAccessData* newVariableAccessData(int operand, bool isCaptured)
{
ASSERT(operand < FirstConstantRegisterIndex);
- m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand)));
+ m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand), isCaptured));
return &m_graph.m_variableAccessData.last();
}
@@ -181,6 +178,7 @@ private:
NodeIndex getLocal(unsigned operand)
{
NodeIndex nodeIndex = m_currentBlock->variablesAtTail.local(operand);
+ bool isCaptured = m_codeBlock->localIsCaptured(m_inlineStackTop->m_inlineCallFrame, operand);
if (nodeIndex != NoNode) {
Node* nodePtr = &m_graph[nodeIndex];
@@ -192,6 +190,7 @@ private:
Node& flushChild = m_graph[nodeIndex];
if (flushChild.op() == Phi) {
VariableAccessData* variableAccessData = flushChild.variableAccessData();
+ variableAccessData->mergeIsCaptured(isCaptured);
nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
return nodeIndex;
@@ -202,7 +201,9 @@ private:
ASSERT(&m_graph[nodeIndex] == nodePtr);
ASSERT(nodePtr->op() != Flush);
- if (m_graph.localIsCaptured(operand)) {
+ nodePtr->variableAccessData()->mergeIsCaptured(isCaptured);
+
+ if (isCaptured) {
// We wish to use the same variable access data as the previous access,
// but for all other purposes we want to issue a load since for all we
// know, at this stage of compilation, the local has been clobbered.
@@ -224,7 +225,7 @@ private:
// expand m_preservedVars to cover these.
m_preservedVars.set(operand);
- VariableAccessData* variableAccessData = newVariableAccessData(operand);
+ VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
@@ -237,11 +238,13 @@ private:
}
void setLocal(unsigned operand, NodeIndex value)
{
- VariableAccessData* variableAccessData = newVariableAccessData(operand);
+ bool isCaptured = m_codeBlock->localIsCaptured(m_inlineStackTop->m_inlineCallFrame, operand);
+
+ VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
- bool shouldFlush = m_graph.localIsCaptured(operand);
+ bool shouldFlush = isCaptured;
if (!shouldFlush) {
// If this is in argument position, then it should be flushed.
@@ -270,6 +273,9 @@ private:
NodeIndex getArgument(unsigned operand)
{
unsigned argument = operandToArgument(operand);
+
+ bool isCaptured = m_codeBlock->argumentIsCaptured(argument);
+
ASSERT(argument < m_numArguments);
NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument);
@@ -284,6 +290,7 @@ private:
Node& flushChild = m_graph[nodeIndex];
if (flushChild.op() == Phi) {
VariableAccessData* variableAccessData = flushChild.variableAccessData();
+ variableAccessData->mergeIsCaptured(isCaptured);
nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
return nodeIndex;
@@ -294,6 +301,8 @@ private:
ASSERT(&m_graph[nodeIndex] == nodePtr);
ASSERT(nodePtr->op() != Flush);
+ nodePtr->variableAccessData()->mergeIsCaptured(isCaptured);
+
if (nodePtr->op() == SetArgument) {
// We're getting an argument in the first basic block; link
// the GetLocal to the SetArgument.
@@ -303,7 +312,7 @@ private:
return nodeIndex;
}
- if (m_graph.argumentIsCaptured(argument)) {
+ if (isCaptured) {
if (nodePtr->op() == GetLocal)
nodeIndex = nodePtr->child1().index();
return injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
@@ -316,7 +325,7 @@ private:
return nodePtr->child1().index();
}
- VariableAccessData* variableAccessData = newVariableAccessData(operand);
+ VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
@@ -330,9 +339,11 @@ private:
void setArgument(int operand, NodeIndex value)
{
unsigned argument = operandToArgument(operand);
+ bool isCaptured = m_codeBlock->argumentIsCaptured(argument);
+
ASSERT(argument < m_numArguments);
- VariableAccessData* variableAccessData = newVariableAccessData(operand);
+ VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
InlineStackEntry* stack = m_inlineStackTop;
while (stack->m_inlineCallFrame) // find the machine stack entry.
stack = stack->m_caller;
@@ -349,6 +360,7 @@ private:
// some other local variable.
operand = m_inlineStackTop->remapOperand(operand);
+ bool isCaptured = m_codeBlock->isCaptured(m_inlineStackTop->m_inlineCallFrame, operand);
ASSERT(operand < FirstConstantRegisterIndex);
@@ -383,11 +395,12 @@ private:
// This gives us guidance to see that the variable also needs to be flushed
// for arguments, even if it already had to be flushed for other reasons.
VariableAccessData* variableAccessData = node.variableAccessData();
+ variableAccessData->mergeIsCaptured(isCaptured);
addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
return variableAccessData;
}
- VariableAccessData* variableAccessData = newVariableAccessData(operand);
+ VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
nodeIndex = addToGraph(Flush, OpInfo(variableAccessData), phi);
if (operandIsArgument(operand)) {
@@ -846,6 +859,7 @@ private:
void buildOperandMapsIfNecessary();
+ ExecState* m_exec;
JSGlobalData* m_globalData;
CodeBlock* m_codeBlock;
CodeBlock* m_profiledBlock;
@@ -984,7 +998,17 @@ private:
InlineStackEntry* m_caller;
- InlineStackEntry(ByteCodeParser*, CodeBlock*, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind);
+ InlineStackEntry(
+ ByteCodeParser*,
+ CodeBlock*,
+ CodeBlock* profiledBlock,
+ BlockIndex callsiteBlockHead,
+ VirtualRegister calleeVR,
+ JSFunction* callee,
+ VirtualRegister returnValueVR,
+ VirtualRegister inlineCallFrameStart,
+ int argumentCountIncludingThis,
+ CodeSpecializationKind);
~InlineStackEntry()
{
@@ -1052,13 +1076,29 @@ void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentIn
dataLog("not set.\n");
#endif
- if (m_graph.isFunctionConstant(callTarget))
+ if (m_graph.isFunctionConstant(callTarget)) {
callType = ConstantFunction;
- else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("Call at [@%lu, bc#%u] has a function constant: %p, exec %p.\n",
+ m_graph.size(), m_currentIndex,
+ m_graph.valueOfFunctionConstant(callTarget),
+ m_graph.valueOfFunctionConstant(callTarget)->executable());
+#endif
+ } else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
callType = LinkedFunction;
- else
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("Call at [@%lu, bc#%u] is linked to: %p, exec %p.\n",
+ m_graph.size(), m_currentIndex, callLinkStatus.callTarget(),
+ callLinkStatus.callTarget()->executable());
+#endif
+ } else {
callType = UnknownFunction;
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("Call at [@%lu, bc#%u] is has an unknown or ambiguous target.\n",
+ m_graph.size(), m_currentIndex);
+#endif
+ }
if (callType != UnknownFunction) {
int argumentCountIncludingThis = currentInstruction[2].u.operand;
int registerOffset = currentInstruction[3].u.operand;
@@ -1131,9 +1171,10 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
FunctionExecutable* executable = expectedFunction->jsExecutable();
- // Does the number of arguments we're passing match the arity of the target? We could
- // inline arity check failures, but for simplicity we currently don't.
- if (static_cast<int>(executable->parameterCount()) + 1 != argumentCountIncludingThis)
+ // Does the number of arguments we're passing match the arity of the target? We currently
+ // inline only if the number of arguments passed is greater than or equal to the
+ // number of arguments expected.
+ if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis)
return false;
// Have we exceeded inline stack depth, or are we trying to inline a recursive call?
@@ -1178,8 +1219,15 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
// FIXME: Don't flush constants!
Vector<VariableAccessData*, 8> arguments;
- for (int i = 1; i < argumentCountIncludingThis; ++i)
- arguments.append(flushArgument(registerOffset + argumentToOperand(i)));
+ for (int i = 1; i < argumentCountIncludingThis; ++i) {
+ VariableAccessData* variableAccessData =
+ flushArgument(registerOffset + argumentToOperand(i));
+ arguments.append(variableAccessData);
+
+ // Are we going to be capturing arguments? If so make sure we record this fact.
+ if (codeBlock->argumentIsCaptured(i))
+ variableAccessData->mergeIsCaptured(true);
+ }
int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;
@@ -1195,11 +1243,19 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
m_graph.m_blocks[i]->ensureLocals(newNumLocals);
}
- InlineStackEntry inlineStackEntry(this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1, (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction, (VirtualRegister)m_inlineStackTop->remapOperand(usesResult ? resultOperand : InvalidVirtualRegister), (VirtualRegister)inlineCallFrameStart, kind);
+ InlineStackEntry inlineStackEntry(
+ this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1,
+ (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction,
+ (VirtualRegister)m_inlineStackTop->remapOperand(
+ usesResult ? resultOperand : InvalidVirtualRegister),
+ (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
// Link up the argument variable access datas to their argument positions.
- for (int i = 1; i < argumentCountIncludingThis; ++i)
+ for (int i = 1; i < argumentCountIncludingThis; ++i) {
+ if (static_cast<size_t>(i) >= inlineStackEntry.m_argumentPositions.size())
+ break;
inlineStackEntry.m_argumentPositions[i]->addVariable(arguments[i - 1]);
+ }
// This is where the actual inlining really happens.
unsigned oldIndex = m_currentIndex;
@@ -1482,7 +1538,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
if (m_currentBlock == m_graph.m_blocks[0].get() && !m_inlineStackTop->m_inlineCallFrame) {
m_graph.m_arguments.resize(m_numArguments);
for (unsigned argument = 0; argument < m_numArguments; ++argument) {
- NodeIndex setArgument = addToGraph(SetArgument, OpInfo(newVariableAccessData(argumentToOperand(argument))));
+ NodeIndex setArgument = addToGraph(SetArgument, OpInfo(newVariableAccessData(argumentToOperand(argument), m_codeBlock->argumentIsCaptured(argument))));
m_graph.m_arguments[argument] = setArgument;
m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, setArgument);
m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
@@ -2014,10 +2070,20 @@ bool ByteCodeParser::parseBlock(unsigned limit)
if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
- addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
+ size_t offsetOffset;
+ NodeIndex propertyStorage;
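+ // With inline storage the property slots sit directly after the JSObject
+ // header, so the base pointer doubles as the storage pointer and the slot
+ // offset is biased by the header size, measured in EncodedJSValues.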
+ if (putByIdStatus.oldStructure()->isUsingInlineStorage()) {
+ propertyStorage = base;
+ ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
+ offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
+ } else {
+ propertyStorage = addToGraph(GetPropertyStorage, base);
+ offsetOffset = 0;
+ }
+ addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
StorageAccessData storageAccessData;
- storageAccessData.offset = putByIdStatus.offset();
+ storageAccessData.offset = putByIdStatus.offset() + offsetOffset;
storageAccessData.identifierNumber = identifierNumber;
m_graph.m_storageAccessData.append(storageAccessData);
} else if (!hasExitSite
@@ -2056,15 +2122,25 @@ bool ByteCodeParser::parseBlock(unsigned limit)
putByIdStatus.newStructure()))),
base);
+ size_t offsetOffset;
+ NodeIndex propertyStorage;
+ if (putByIdStatus.newStructure()->isUsingInlineStorage()) {
+ propertyStorage = base;
+ ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
+ offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
+ } else {
+ propertyStorage = addToGraph(GetPropertyStorage, base);
+ offsetOffset = 0;
+ }
addToGraph(
PutByOffset,
OpInfo(m_graph.m_storageAccessData.size()),
+ propertyStorage,
base,
- addToGraph(GetPropertyStorage, base),
value);
StorageAccessData storageAccessData;
- storageAccessData.offset = putByIdStatus.offset();
+ storageAccessData.offset = putByIdStatus.offset() + offsetOffset;
storageAccessData.identifierNumber = identifierNumber;
m_graph.m_storageAccessData.append(storageAccessData);
} else {
@@ -2305,8 +2381,52 @@ bool ByteCodeParser::parseBlock(unsigned limit)
handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
NEXT_OPCODE(op_construct);
+ case op_call_varargs: {
+ ASSERT(m_inlineStackTop->m_inlineCallFrame);
+ ASSERT(currentInstruction[3].u.operand == m_inlineStackTop->m_codeBlock->argumentsRegister());
+ // It would be cool to funnel this into handleCall() so that it can handle
+ // inlining. But currently that won't be profitable anyway, since none of the
+ // uses of call_varargs will be inlineable. So we set this up manually and
+ // without inline/intrinsic detection.
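+ //
+ // (This opcode reaches us from the f.apply(thisArg, arguments) forwarding
+ // pattern - the assertion above checks that the varargs operand is the
+ // arguments register - and because the frame is inlined the argument count
+ // is known statically, so it lowers to an ordinary Call node.)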
+
+ Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call_varargs);
+
+ PredictedType prediction = PredictNone;
+ if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
+ m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call_varargs);
+ prediction = getPrediction();
+ }
+
+ addToGraph(CheckArgumentsNotCreated);
+
+ unsigned argCount = m_inlineStackTop->m_inlineCallFrame->arguments.size();
+ if (RegisterFile::CallFrameHeaderSize + argCount > m_parameterSlots)
+ m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;
+
+ addVarArgChild(get(currentInstruction[1].u.operand)); // callee
+ addVarArgChild(get(currentInstruction[2].u.operand)); // this
+ for (unsigned argument = 1; argument < argCount; ++argument)
+ addVarArgChild(get(argumentToOperand(argument)));
+
+ NodeIndex call = addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction));
+ if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
+ set(putInstruction[1].u.operand, call);
+
+ NEXT_OPCODE(op_call_varargs);
+ }
+
case op_call_put_result:
NEXT_OPCODE(op_call_put_result);
+
+ case op_jneq_ptr:
+ // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
+ // support simmer for a while before making it more general, since it's
+ // already gnarly enough as it is.
+ addToGraph(
+ CheckFunction, OpInfo(currentInstruction[2].u.jsCell.get()),
+ get(currentInstruction[1].u.operand));
+ addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
+ LAST_OPCODE(op_jneq_ptr);
case op_resolve: {
PredictedType prediction = getPrediction();
@@ -2372,12 +2492,40 @@ bool ByteCodeParser::parseBlock(unsigned limit)
NEXT_OPCODE(op_create_activation);
}
+ case op_create_arguments: {
+ m_graph.m_hasArguments = true;
+ NodeIndex createArguments = addToGraph(CreateArguments, get(currentInstruction[1].u.operand));
+ set(currentInstruction[1].u.operand, createArguments);
+ set(unmodifiedArgumentsRegister(currentInstruction[1].u.operand), createArguments);
+ NEXT_OPCODE(op_create_arguments);
+ }
+
case op_tear_off_activation: {
- // This currently ignores arguments because we don't support them yet.
- addToGraph(TearOffActivation, get(currentInstruction[1].u.operand));
+ addToGraph(TearOffActivation, OpInfo(unmodifiedArgumentsRegister(currentInstruction[2].u.operand)), get(currentInstruction[1].u.operand), get(currentInstruction[2].u.operand));
NEXT_OPCODE(op_tear_off_activation);
}
+ case op_tear_off_arguments: {
+ m_graph.m_hasArguments = true;
+ addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(currentInstruction[1].u.operand)));
+ NEXT_OPCODE(op_tear_off_arguments);
+ }
+
+ case op_get_arguments_length: {
+ m_graph.m_hasArguments = true;
+ set(currentInstruction[1].u.operand, addToGraph(GetMyArgumentsLengthSafe));
+ NEXT_OPCODE(op_get_arguments_length);
+ }
+
+ case op_get_argument_by_val: {
+ m_graph.m_hasArguments = true;
+ set(currentInstruction[1].u.operand,
+ addToGraph(
+ GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
+ get(currentInstruction[3].u.operand)));
+ NEXT_OPCODE(op_get_argument_by_val);
+ }
+
case op_new_func: {
if (!currentInstruction[3].u.operand) {
set(currentInstruction[1].u.operand,
@@ -2404,8 +2552,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
ASSERT_NOT_REACHED();
return false;
}
-
- ASSERT(canCompileOpcode(opcodeID));
}
}
@@ -2413,11 +2559,17 @@ template<ByteCodeParser::PhiStackType stackType>
void ByteCodeParser::processPhiStack()
{
Vector<PhiStackEntry, 16>& phiStack = (stackType == ArgumentPhiStack) ? m_argumentPhiStack : m_localPhiStack;
-
+
while (!phiStack.isEmpty()) {
PhiStackEntry entry = phiStack.last();
phiStack.removeLast();
+ if (!entry.m_block->isReachable)
+ continue;
+
PredecessorList& predecessors = entry.m_block->m_predecessors;
unsigned varNo = entry.m_varNo;
VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();
@@ -2425,7 +2577,7 @@ void ByteCodeParser::processPhiStack()
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(" Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
#endif
-
+
for (size_t i = 0; i < predecessors.size(); ++i) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(" Dealing with predecessor block %u.\n", predecessors[i]);
@@ -2441,7 +2593,7 @@ void ByteCodeParser::processPhiStack()
dataLog(" Did not find node, adding phi.\n");
#endif
- valueInPredecessor = insertPhiNode(OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo))), predecessorBlock);
+ valueInPredecessor = insertPhiNode(OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo), false)), predecessorBlock);
var = valueInPredecessor;
if (stackType == ArgumentPhiStack)
predecessorBlock->variablesAtHead.setArgumentFirstTime(varNo, valueInPredecessor);
@@ -2558,6 +2710,7 @@ void ByteCodeParser::fixVariableAccessPredictions()
for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
VariableAccessData* data = &m_graph.m_variableAccessData[i];
data->find()->predict(data->nonUnifiedPrediction());
+ data->find()->mergeIsCaptured(data->isCaptured());
}
}
@@ -2606,41 +2759,6 @@ void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<Bl
}
}
-void ByteCodeParser::handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex blockIndex, BlockIndex successorIndex)
-{
- BasicBlock* successor = m_graph.m_blocks[successorIndex].get();
- if (!successor->isReachable) {
- successor->isReachable = true;
- worklist.append(successorIndex);
- }
-
- successor->m_predecessors.append(blockIndex);
-}
-
-void ByteCodeParser::determineReachability()
-{
- Vector<BlockIndex, 16> worklist;
- worklist.append(0);
- m_graph.m_blocks[0]->isReachable = true;
- while (!worklist.isEmpty()) {
- BlockIndex index = worklist.last();
- worklist.removeLast();
-
- BasicBlock* block = m_graph.m_blocks[index].get();
- ASSERT(block->isLinked);
-
- Node& node = m_graph[block->last()];
- ASSERT(node.isTerminal());
-
- if (node.isJump())
- handleSuccessor(worklist, index, node.takenBlockIndex());
- else if (node.isBranch()) {
- handleSuccessor(worklist, index, node.takenBlockIndex());
- handleSuccessor(worklist, index, node.notTakenBlockIndex());
- }
- }
-}
-
void ByteCodeParser::buildOperandMapsIfNecessary()
{
if (m_haveBuiltOperandMaps)
@@ -2659,7 +2777,17 @@ void ByteCodeParser::buildOperandMapsIfNecessary()
m_haveBuiltOperandMaps = true;
}
-ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParser, CodeBlock* codeBlock, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind kind)
+ByteCodeParser::InlineStackEntry::InlineStackEntry(
+ ByteCodeParser* byteCodeParser,
+ CodeBlock* codeBlock,
+ CodeBlock* profiledBlock,
+ BlockIndex callsiteBlockHead,
+ VirtualRegister calleeVR,
+ JSFunction* callee,
+ VirtualRegister returnValueVR,
+ VirtualRegister inlineCallFrameStart,
+ int argumentCountIncludingThis,
+ CodeSpecializationKind kind)
: m_byteCodeParser(byteCodeParser)
, m_codeBlock(codeBlock)
, m_profiledBlock(profiledBlock)
@@ -2678,6 +2806,12 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParse
ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
m_argumentPositions[i] = argumentPosition;
}
+
+ // Track the code-block-global exit sites.
+ if (m_exitProfile.hasExitSite(ArgumentsEscaped)) {
+ byteCodeParser->m_graph.m_executablesWhoseArgumentsEscaped.add(
+ codeBlock->ownerExecutable());
+ }
if (m_caller) {
// Inline case.
@@ -2692,8 +2826,29 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParse
inlineCallFrame.stackOffset = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize;
inlineCallFrame.callee.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
- inlineCallFrame.arguments.resize(codeBlock->numParameters()); // Set the number of arguments including this, but don't configure the value recoveries, yet.
+ inlineCallFrame.arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries yet.
inlineCallFrame.isCall = isCall(kind);
+
+ if (inlineCallFrame.caller.inlineCallFrame)
+ inlineCallFrame.capturedVars = inlineCallFrame.caller.inlineCallFrame->capturedVars;
+ else {
+ for (int i = byteCodeParser->m_codeBlock->m_numCapturedVars; i--;)
+ inlineCallFrame.capturedVars.set(i);
+ }
+
+ if (codeBlock->usesArguments() || codeBlock->needsActivation()) {
+ for (int i = argumentCountIncludingThis; i--;)
+ inlineCallFrame.capturedVars.set(argumentToOperand(i) + inlineCallFrame.stackOffset);
+ }
+ for (int i = codeBlock->m_numCapturedVars; i--;)
+ inlineCallFrame.capturedVars.set(i + inlineCallFrame.stackOffset);
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("Current captured variables: ");
+ inlineCallFrame.capturedVars.dump(WTF::dataFile());
+ dataLog("\n");
+#endif
+
byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();
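The captured-variable propagation above follows a simple rule: an inline frame inherits its caller's captured set (or, at the top of the inline stack, every captured var of the machine code block), then marks its own argument slots when the callee uses arguments or needs an activation, and finally marks the callee's captured locals, all rebased by the frame's stack offset. A sketch under those assumptions, with invented names and layout constants in place of the real InlineCallFrame interface:

    #include <bitset>

    // Stand-ins only; callers must keep all computed bit indices nonnegative
    // (i.e. stackOffset large enough), as the real virtual-register layout does.
    struct InlineFrameSketch {
        std::bitset<256> capturedVars;
        int stackOffset; // Rebases callee-relative operands into the machine frame.
    };

    static const int callFrameHeaderSize = 6; // Assumed, not the real constant.
    static int argumentToOperand(int argument) { return -callFrameHeaderSize - argument - 1; }

    void propagateCapturedVars(
        InlineFrameSketch& frame, const InlineFrameSketch* caller, int machineCapturedVars,
        bool argumentsEscape, int argumentCountIncludingThis, int calleeCapturedVars)
    {
        if (caller)
            frame.capturedVars = caller->capturedVars; // Inherit the inlining context's set.
        else {
            for (int i = machineCapturedVars; i--;)
                frame.capturedVars.set(i);             // Top level: every machine captured var.
        }
        if (argumentsEscape) {                          // usesArguments() || needsActivation()
            for (int i = argumentCountIncludingThis; i--;)
                frame.capturedVars.set(argumentToOperand(i) + frame.stackOffset);
        }
        for (int i = calleeCapturedVars; i--;)          // The callee's own captured locals.
            frame.capturedVars.set(i + frame.stackOffset);
    }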
@@ -2769,6 +2924,7 @@ void ByteCodeParser::parseCodeBlock()
codeBlock->needsFullScopeChain()?"true":"false",
codeBlock->ownerExecutable()->needsActivation()?"true":"false",
codeBlock->ownerExecutable()->isStrictMode()?"true":"false");
+ codeBlock->baselineVersion()->dump(m_exec);
#endif
for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) {
@@ -2847,12 +3003,15 @@ bool ByteCodeParser::parse()
ASSERT(m_graph.needsActivation());
#endif
- InlineStackEntry inlineStackEntry(this, m_codeBlock, m_profiledBlock, NoBlock, InvalidVirtualRegister, 0, InvalidVirtualRegister, InvalidVirtualRegister, CodeForCall);
+ InlineStackEntry inlineStackEntry(
+ this, m_codeBlock, m_profiledBlock, NoBlock, InvalidVirtualRegister, 0,
+ InvalidVirtualRegister, InvalidVirtualRegister, m_codeBlock->numParameters(),
+ CodeForCall);
parseCodeBlock();
linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
- determineReachability();
+ m_graph.determineReachability();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog("Processing local variable phis.\n");
#endif
@@ -2864,6 +3023,13 @@ bool ByteCodeParser::parse()
dataLog("Processing argument phis.\n");
#endif
processPhiStack<ArgumentPhiStack>();
+
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+ ASSERT(block);
+ if (!block->isReachable)
+ m_graph.m_blocks[blockIndex].clear();
+ }
fixVariableAccessPredictions();
@@ -2874,13 +3040,14 @@ bool ByteCodeParser::parse()
return true;
}
-bool parse(Graph& graph)
+bool parse(ExecState* exec, Graph& graph)
{
#if DFG_DEBUG_LOCAL_DISBALE
+ UNUSED_PARAM(exec);
UNUSED_PARAM(graph);
return false;
#else
- return ByteCodeParser(graph).parse();
+ return ByteCodeParser(exec, graph).parse();
#endif
}
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.h b/Source/JavaScriptCore/dfg/DFGByteCodeParser.h
index 558cf0167..f1648acf8 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.h
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.h
@@ -39,7 +39,7 @@ namespace DFG {
// Populate the Graph with a basic block of code from the CodeBlock,
// starting at the provided bytecode index.
-bool parse(Graph&);
+bool parse(ExecState*, Graph&);
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
index aa08da128..dc3af636e 100644
--- a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
@@ -115,6 +115,13 @@ public:
addCallArgument(arg1);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2)
{
resetCallArguments();
@@ -131,6 +138,22 @@ public:
addCallArgument(arg2);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2)
{
resetCallArguments();
@@ -419,6 +442,12 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2)
{
setupStubArguments(arg1, arg2);
@@ -432,6 +461,13 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, ImmPtr arg2)
{
move(arg1, GPRInfo::argumentGPR1);
@@ -446,6 +482,13 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample!
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(ImmPtr arg1, GPRReg arg2)
{
move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample!
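The register-based overloads above must order their moves so that loading one argument register does not clobber a value still waiting in another, which is what the "Move this first" comments guard against. A tiny self-contained simulation of the hazard, with plain integers standing in for GPRs and invented register indices:

    #include <cassert>

    int regs[3]; // regs[1] ~ argumentGPR1, regs[2] ~ argumentGPR2 (indices invented)

    void setupWrongOrder(int imm, int srcReg)
    {
        regs[1] = imm;          // Clobbers the incoming value if srcReg == 1.
        regs[2] = regs[srcReg];
    }

    void setupSafeOrder(int imm, int srcReg)
    {
        regs[2] = regs[srcReg]; // Move the live value out of the way first...
        regs[1] = imm;          // ...then overwriting argumentGPR1 is safe.
    }

    int main()
    {
        regs[1] = 42;           // Pretend arg2 currently lives in argumentGPR1.
        setupWrongOrder(7, 1);
        assert(regs[2] == 7);   // Trampled: the immediate overwrote arg2.

        regs[1] = 42;
        setupSafeOrder(7, 1);
        assert(regs[2] == 42 && regs[1] == 7); // Both values survive.
        return 0;
    }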
diff --git a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
index 6e69c1094..c6042448a 100644
--- a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
@@ -42,7 +42,7 @@ public:
{
}
- void run()
+ bool run()
{
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
m_count = 0;
@@ -65,12 +65,16 @@ public:
m_changed = false;
performForwardCFA();
} while (m_changed);
+
+ return true;
}
private:
void performBlockCFA(BlockIndex blockIndex)
{
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+ if (!block)
+ return;
if (!block->cfaShouldRevisit)
return;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
@@ -126,9 +130,9 @@ private:
#endif
};
-void performCFA(Graph& graph)
+bool performCFA(Graph& graph)
{
- runPhase<CFAPhase>(graph);
+ return runPhase<CFAPhase>(graph);
}
} } // namespace JSC::DFG
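Two conventions introduced in this hunk recur throughout the patch: phases return a bool saying whether they changed the graph, and every loop over m_blocks must tolerate null entries now that unreachable blocks are cleared rather than erased. The skeleton, with stand-in types:

    #include <memory>
    #include <vector>

    struct BasicBlockSketch { /* nodes, predecessors, ... */ };

    bool runOverBlocks(std::vector<std::unique_ptr<BasicBlockSketch>>& blocks)
    {
        bool changed = false;
        for (auto& block : blocks) {
            if (!block)
                continue; // Unreachable blocks are cleared to null, not removed.
            // ... transform *block, setting changed = true on any mutation ...
        }
        return changed;
    }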
diff --git a/Source/JavaScriptCore/dfg/DFGCFAPhase.h b/Source/JavaScriptCore/dfg/DFGCFAPhase.h
index 2b626c81f..cc9e6c4b4 100644
--- a/Source/JavaScriptCore/dfg/DFGCFAPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGCFAPhase.h
@@ -39,7 +39,7 @@ class Graph;
// the code block. It's also responsible for identifying dead code, and in the
// future should be used as a hook for constant propagation.
-void performCFA(Graph&);
+bool performCFA(Graph&);
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp
new file mode 100644
index 000000000..0f0a22562
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp
@@ -0,0 +1,730 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGCFGSimplificationPhase.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGAbstractState.h"
+#include "DFGBasicBlock.h"
+#include "DFGGraph.h"
+#include "DFGInsertionSet.h"
+#include "DFGPhase.h"
+#include "DFGValidate.h"
+
+namespace JSC { namespace DFG {
+
+class CFGSimplificationPhase : public Phase {
+public:
+ CFGSimplificationPhase(Graph& graph)
+ : Phase(graph, "CFG simplification")
+ {
+ }
+
+ bool run()
+ {
+ const bool extremeLogging = false;
+
+ bool outerChanged = false;
+ bool innerChanged;
+
+ do {
+ innerChanged = false;
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ ASSERT(block->isReachable);
+
+ switch (m_graph[block->last()].op()) {
+ case Jump: {
+ // Successor with one predecessor -> merge.
+ if (m_graph.m_blocks[m_graph.successor(block, 0)]->m_predecessors.size() == 1) {
+ ASSERT(m_graph.m_blocks[m_graph.successor(block, 0)]->m_predecessors[0]
+ == blockIndex);
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("CFGSimplify: Jump merge on Block #%u to Block #%u.\n",
+ blockIndex, m_graph.successor(block, 0));
+#endif
+ if (extremeLogging)
+ m_graph.dump();
+ mergeBlocks(blockIndex, m_graph.successor(block, 0), NoBlock);
+ innerChanged = outerChanged = true;
+ break;
+ } else {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("Not jump merging on Block #%u to Block #%u because predecessors = ",
+ blockIndex, m_graph.successor(block, 0));
+ for (unsigned i = 0; i < m_graph.m_blocks[m_graph.successor(block, 0)]->m_predecessors.size(); ++i) {
+ if (i)
+ dataLog(", ");
+ dataLog("#%u", m_graph.m_blocks[m_graph.successor(block, 0)]->m_predecessors[i]);
+ }
+ dataLog(".\n");
+#endif
+ }
+
+ // FIXME: Block only has a jump -> remove. This is tricky though because of
+ // liveness. What we really want is to slam in a phantom at the end of the
+ // block, after the terminal. But we can't right now. :-(
+ // Idea: what if I slam the ghosties into my successor? Nope, that's
+ // suboptimal, because if my successor has multiple predecessors then we'll
+ // be keeping alive things on other predecessor edges unnecessarily.
+ // What we really need is the notion of end-of-block ghosties!
+ break;
+ }
+
+ case Branch: {
+ // Branch on constant -> jettison the not-taken block and merge.
+ if (m_graph[m_graph[block->last()].child1()].hasConstant()) {
+ bool condition =
+ m_graph.valueOfJSConstant(m_graph[block->last()].child1().index()).toBoolean();
+ BasicBlock* targetBlock = m_graph.m_blocks[
+ m_graph.successorForCondition(block, condition)].get();
+ if (targetBlock->m_predecessors.size() == 1) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("CFGSimplify: Known condition (%s) branch merge on Block #%u to Block #%u, jettisoning Block #%u.\n",
+ condition ? "true" : "false",
+ blockIndex, m_graph.successorForCondition(block, condition),
+ m_graph.successorForCondition(block, !condition));
+#endif
+ if (extremeLogging)
+ m_graph.dump();
+ mergeBlocks(
+ blockIndex,
+ m_graph.successorForCondition(block, condition),
+ m_graph.successorForCondition(block, !condition));
+ } else {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("CFGSimplify: Known condition (%s) branch->jump conversion on Block #%u to Block #%u, jettisoning Block #%u.\n",
+ condition ? "true" : "false",
+ blockIndex, m_graph.successorForCondition(block, condition),
+ m_graph.successorForCondition(block, !condition));
+#endif
+ if (extremeLogging)
+ m_graph.dump();
+ BlockIndex takenBlockIndex = m_graph.successorForCondition(block, condition);
+ BlockIndex notTakenBlockIndex = m_graph.successorForCondition(block, !condition);
+
+ ASSERT(m_graph[block->last()].isTerminal());
+ CodeOrigin boundaryCodeOrigin = m_graph[block->last()].codeOrigin;
+ m_graph[block->last()].setOpAndDefaultFlags(Phantom);
+ ASSERT(m_graph[block->last()].refCount() == 1);
+
+ jettisonBlock(blockIndex, notTakenBlockIndex, boundaryCodeOrigin);
+
+ NodeIndex jumpNodeIndex = m_graph.size();
+ Node jump(Jump, boundaryCodeOrigin, OpInfo(takenBlockIndex));
+ jump.ref();
+ m_graph.append(jump);
+ block->append(jumpNodeIndex);
+ }
+ innerChanged = outerChanged = true;
+ break;
+ }
+
+ if (m_graph.successor(block, 0) == m_graph.successor(block, 1)) {
+ BlockIndex targetBlockIndex = m_graph.successor(block, 0);
+ BasicBlock* targetBlock = m_graph.m_blocks[targetBlockIndex].get();
+ ASSERT(targetBlock);
+ ASSERT(targetBlock->isReachable);
+ if (targetBlock->m_predecessors.size() == 1) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("CFGSimplify: Branch to same successor merge on Block #%u to Block #%u.\n",
+ blockIndex, targetBlockIndex);
+#endif
+ mergeBlocks(blockIndex, targetBlockIndex, NoBlock);
+ } else {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("CFGSimplify: Branch->jump conversion to same successor on Block #%u to Block #%u.\n",
+ blockIndex, targetBlockIndex);
+#endif
+ ASSERT(m_graph[block->last()].isTerminal());
+ Node& branch = m_graph[block->last()];
+ ASSERT(branch.isTerminal());
+ ASSERT(branch.op() == Branch);
+ branch.setOpAndDefaultFlags(Phantom);
+ ASSERT(branch.refCount() == 1);
+
+ Node jump(Jump, branch.codeOrigin, OpInfo(targetBlockIndex));
+ jump.ref();
+ NodeIndex jumpNodeIndex = m_graph.size();
+ m_graph.append(jump);
+ block->append(jumpNodeIndex);
+ }
+ innerChanged = outerChanged = true;
+ break;
+ }
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("Not branch simplifying on Block #%u because the successors differ and the condition is not known.\n",
+ blockIndex);
+#endif
+
+ // Branch to same destination -> jump.
+ // FIXME: this will currently not be hit because of the lack of jump-only
+ // block simplification.
+
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ if (innerChanged) {
+ // Here's the reason for this pass:
+ // Blocks: A, B, C, D, E, F
+ // A -> B, C
+ // B -> F
+ // C -> D, E
+ // D -> F
+ // E -> F
+ //
+ // Assume that A's branch is determined to go to B. Then the rest of this phase
+ // is smart enough to simplify down to:
+ // A -> B
+ // B -> F
+ // C -> D, E
+ // D -> F
+ // E -> F
+ //
+ // We will also merge A and B. But then we don't have any other mechanism to
+ // remove D, E as predecessors for F. Worse, the rest of this phase does not
+ // know how to fix the Phi functions of F to ensure that they no longer refer
+ // to variables in D, E. In general, we need a way to handle Phi simplification
+ // upon:
+ // 1) Removal of a predecessor due to branch simplification. The branch
+ // simplifier already does that.
+ // 2) Invalidation of a predecessor because said predecessor was rendered
+ // unreachable. We do this here.
+ //
+ // This implies that when a block is unreachable, we must inspect its
+ // successors' Phi functions to remove any references from them into the
+ // removed block.
+
+ m_graph.resetReachability();
+
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ if (block->isReachable)
+ continue;
+
+ killUnreachable(blockIndex);
+ }
+ }
+
+ validate(m_graph);
+ } while (innerChanged);
+
+ return outerChanged;
+ }
+
+private:
+ void killUnreachable(BlockIndex blockIndex)
+ {
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+
+ ASSERT(block);
+ ASSERT(!block->isReachable);
+
+ // 1) Remove references from other blocks to this block.
+ for (unsigned i = m_graph.numSuccessors(block); i--;)
+ fixPhis(blockIndex, m_graph.successor(block, i));
+
+ // 2) Kill the block
+ m_graph.m_blocks[blockIndex].clear();
+ }
+
+ void keepOperandAlive(BasicBlock* block, CodeOrigin codeOrigin, int operand)
+ {
+ NodeIndex nodeIndex = block->variablesAtTail.operand(operand);
+ if (nodeIndex == NoNode)
+ return;
+ if (m_graph[nodeIndex].variableAccessData()->isCaptured())
+ return;
+ if (m_graph[nodeIndex].op() == SetLocal)
+ nodeIndex = m_graph[nodeIndex].child1().index();
+ Node& node = m_graph[nodeIndex];
+ if (!node.shouldGenerate())
+ return;
+ ASSERT(m_graph[nodeIndex].op() != SetLocal);
+ NodeIndex phantomNodeIndex = m_graph.size();
+ Node phantom(Phantom, codeOrigin, nodeIndex);
+ m_graph.append(phantom);
+ m_graph.ref(phantomNodeIndex);
+ block->append(phantomNodeIndex);
+ }
+
+ void fixPossibleGetLocal(BasicBlock* block, Edge& edge, bool changeRef)
+ {
+ Node& child = m_graph[edge];
+ if (child.op() != GetLocal)
+ return;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" Considering GetLocal at @%u.\n", edge.index());
+#endif
+ if (child.variableAccessData()->isCaptured()) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" It's captured.\n");
+#endif
+ return;
+ }
+ NodeIndex originalNodeIndex = block->variablesAtTail.operand(child.local());
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" Dealing with original @%u.\n", originalNodeIndex);
+#endif
+ ASSERT(originalNodeIndex != NoNode);
+ Node* originalNode = &m_graph[originalNodeIndex];
+ if (changeRef)
+ ASSERT(originalNode->shouldGenerate());
+ // Possibilities:
+ // SetLocal -> the secondBlock is getting the value of something that is immediately
+ // available in the first block with a known NodeIndex.
+ // GetLocal -> the secondBlock is getting the value of something that the first
+ // block also gets.
+ // Phi -> the secondBlock is asking for keep-alive on an operand that the first block
+ // was also asking for keep-alive on.
+ // SetArgument -> the secondBlock is asking for keep-alive on an operand that the
+ // first block was keeping alive by virtue of the firstBlock being the root and
+ // the operand being an argument.
+ // Flush -> the secondBlock is asking for keep-alive on an operand that the first
+ // block was forcing to be alive, so the second block should refer to the
+ // child of the flush.
+ if (originalNode->op() == Flush) {
+ originalNodeIndex = originalNode->child1().index();
+ originalNode = &m_graph[originalNodeIndex];
+ }
+ switch (originalNode->op()) {
+ case SetLocal: {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" It's a SetLocal.\n");
+#endif
+ m_graph.changeIndex(edge, originalNode->child1().index(), changeRef);
+ break;
+ }
+ case GetLocal: {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" It's a GetLocal.\n");
+#endif
+ m_graph.changeIndex(edge, originalNodeIndex, changeRef);
+ break;
+ }
+ case Phi:
+ case SetArgument: {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" It's Phi/SetArgument.\n");
+#endif
+ // Keep the GetLocal!
+ break;
+ }
+ default:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ void jettisonBlock(BlockIndex blockIndex, BlockIndex jettisonedBlockIndex, CodeOrigin boundaryCodeOrigin)
+ {
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+ BasicBlock* jettisonedBlock = m_graph.m_blocks[jettisonedBlockIndex].get();
+
+ for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfArguments(); ++i)
+ keepOperandAlive(block, boundaryCodeOrigin, argumentToOperand(i));
+ for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfLocals(); ++i)
+ keepOperandAlive(block, boundaryCodeOrigin, i);
+
+ fixJettisonedPredecessors(blockIndex, jettisonedBlockIndex);
+ }
+
+ void fixPhis(BlockIndex sourceBlockIndex, BlockIndex destinationBlockIndex)
+ {
+ BasicBlock* sourceBlock = m_graph.m_blocks[sourceBlockIndex].get();
+ BasicBlock* destinationBlock = m_graph.m_blocks[destinationBlockIndex].get();
+ if (!destinationBlock) {
+ // If we're trying to kill off the source block and the destination block is already
+ // dead, then we're done!
+ return;
+ }
+ for (size_t i = 0; i < destinationBlock->phis.size(); ++i) {
+ NodeIndex phiNodeIndex = destinationBlock->phis[i];
+ Node& phiNode = m_graph[phiNodeIndex];
+ NodeIndex myNodeIndex = sourceBlock->variablesAtTail.operand(phiNode.local());
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("Considering removing reference from phi @%u to @%u on local r%d:",
+ phiNodeIndex, myNodeIndex, phiNode.local());
+#endif
+ if (myNodeIndex == NoNode) {
+ // This will happen if there is a phi in the destination that refers into
+ // the destination itself.
+ continue;
+ }
+ Node& myNode = m_graph[myNodeIndex];
+ if (myNode.op() == GetLocal)
+ myNodeIndex = myNode.child1().index();
+ for (unsigned j = 0; j < AdjacencyList::Size; ++j)
+ removePotentiallyDeadPhiReference(myNodeIndex, phiNode, j);
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("\n");
+#endif
+ }
+ }
+
+ void fixJettisonedPredecessors(BlockIndex blockIndex, BlockIndex jettisonedBlockIndex)
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("Fixing predecessors and phis due to jettison of Block #%u from Block #%u.\n",
+ jettisonedBlockIndex, blockIndex);
+#endif
+ BasicBlock* jettisonedBlock = m_graph.m_blocks[jettisonedBlockIndex].get();
+ for (unsigned i = 0; i < jettisonedBlock->m_predecessors.size(); ++i) {
+ if (jettisonedBlock->m_predecessors[i] != blockIndex)
+ continue;
+ jettisonedBlock->m_predecessors[i] = jettisonedBlock->m_predecessors.last();
+ jettisonedBlock->m_predecessors.removeLast();
+ break;
+ }
+
+ fixPhis(blockIndex, jettisonedBlockIndex);
+ }
+
+ void removePotentiallyDeadPhiReference(NodeIndex myNodeIndex, Node& phiNode, unsigned edgeIndex)
+ {
+ if (phiNode.children.child(edgeIndex).indexUnchecked() != myNodeIndex)
+ return;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" Removing reference at child %u.", edgeIndex);
+#endif
+ if (phiNode.shouldGenerate())
+ m_graph.deref(myNodeIndex);
+ phiNode.children.removeEdgeFromBag(edgeIndex);
+ }
+
+ struct OperandSubstitution {
+ OperandSubstitution()
+ : oldChild(NoNode)
+ , newChild(NoNode)
+ {
+ }
+
+ explicit OperandSubstitution(NodeIndex oldChild)
+ : oldChild(oldChild)
+ , newChild(oldChild)
+ {
+ }
+
+ OperandSubstitution(NodeIndex oldChild, NodeIndex newChild)
+ : oldChild(oldChild)
+ , newChild(newChild)
+ {
+ ASSERT((oldChild == NoNode) == (newChild == NoNode));
+ }
+
+ void dump(FILE* out)
+ {
+ if (oldChild == NoNode)
+ fprintf(out, "-");
+ else
+ fprintf(out, "@%u -> @%u", oldChild, newChild);
+ }
+
+ NodeIndex oldChild;
+ NodeIndex newChild;
+ };
+
+ NodeIndex skipGetLocal(NodeIndex nodeIndex)
+ {
+ if (nodeIndex == NoNode)
+ return NoNode;
+ Node& node = m_graph[nodeIndex];
+ if (node.op() == GetLocal)
+ return node.child1().index();
+ return nodeIndex;
+ }
+
+ void recordPossibleIncomingReference(
+ BasicBlock* secondBlock, Operands<OperandSubstitution>& substitutions, int operand)
+ {
+ substitutions.operand(operand) = OperandSubstitution(
+ skipGetLocal(secondBlock->variablesAtTail.operand(operand)));
+ }
+
+ void recordNewTarget(Operands<OperandSubstitution>& substitutions, int operand, NodeIndex nodeIndex)
+ {
+ ASSERT(m_graph[nodeIndex].op() == SetLocal
+ || m_graph[nodeIndex].op() == SetArgument
+ || m_graph[nodeIndex].op() == Flush
+ || m_graph[nodeIndex].op() == Phi);
+ substitutions.operand(operand).newChild = nodeIndex;
+ }
+
+ void fixTailOperand(
+ BasicBlock* firstBlock, BasicBlock* secondBlock, int operand,
+ Operands<OperandSubstitution>& substitutions)
+ {
+ NodeIndex atSecondTail = secondBlock->variablesAtTail.operand(operand);
+
+ if (atSecondTail == NoNode) {
+ // If the variable is dead at the end of the second block, then do nothing; essentially
+ // this means that we want the tail state to reflect whatever the first block did.
+ return;
+ }
+
+ Node& secondNode = m_graph[atSecondTail];
+
+ switch (secondNode.op()) {
+ case SetLocal:
+ case Flush: {
+ // The second block did interesting things to the variables, so update the tail
+ // accordingly.
+ firstBlock->variablesAtTail.operand(operand) = atSecondTail;
+ break;
+ }
+
+ case Phi: {
+ // Keep what was in the first block.
+ ASSERT(firstBlock->variablesAtTail.operand(operand) != NoNode);
+ recordNewTarget(substitutions, operand, skipGetLocal(firstBlock->variablesAtTail.operand(operand)));
+ break;
+ }
+
+ case GetLocal: {
+ // If it's a GetLocal on a captured var, then definitely keep what was
+ // in the second block. In particular, it's possible that the first
+ // block doesn't even know about this variable.
+ if (secondNode.variableAccessData()->isCaptured()) {
+ firstBlock->variablesAtTail.operand(operand) = atSecondTail;
+ recordNewTarget(substitutions, operand, secondNode.child1().index());
+ break;
+ }
+
+ // It's possible that the second block had a GetLocal and the first block
+ // had a SetArgument or a Phi. Then update the tail. Otherwise keep what was in the
+ // first block.
+ NodeIndex atFirstTail = firstBlock->variablesAtTail.operand(operand);
+ ASSERT(atFirstTail != NoNode);
+ switch (m_graph[atFirstTail].op()) {
+ case SetArgument:
+ case Phi:
+ firstBlock->variablesAtTail.operand(operand) = atSecondTail;
+ recordNewTarget(substitutions, operand, secondNode.child1().index());
+ break;
+
+ default:
+ // Keep what was in the first block, and adjust the substitution to account for
+ // the fact that successors will refer to the child of the GetLocal.
+ ASSERT(firstBlock->variablesAtTail.operand(operand) != NoNode);
+ recordNewTarget(substitutions, operand, skipGetLocal(firstBlock->variablesAtTail.operand(operand)));
+ break;
+ }
+ break;
+ }
+
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+
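The substitutions recorded above pair the node a successor used to see at the second block's tail (oldChild) with the node it should see once the blocks are merged (newChild); the Phi-fixup loop later in mergeBlocks rewrites matching child edges. A distilled sketch of that consumption step, with stand-in types and the ref-count adjustments elided:

    #include <cstddef>

    typedef unsigned SketchNodeIndex;            // Stand-in for NodeIndex.
    static const SketchNodeIndex SketchNoNode = ~0u;

    struct Substitution { SketchNodeIndex oldChild; SketchNodeIndex newChild; };

    void applySubstitution(SketchNodeIndex* phiChildren, size_t childCount, Substitution s)
    {
        if (s.oldChild == SketchNoNode)
            return;                              // Operand was dead at the merged tail.
        for (size_t i = 0; i < childCount; ++i) {
            if (phiChildren[i] == s.oldChild)
                phiChildren[i] = s.newChild;     // Redirect the Phi edge.
        }
    }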
+ void mergeBlocks(
+ BlockIndex firstBlockIndex, BlockIndex secondBlockIndex, BlockIndex jettisonedBlockIndex)
+ {
+ // This will add all of the nodes in secondBlock to firstBlock, but in so doing
+ // it will also ensure that any GetLocals from the second block that refer to
+ // SetLocals in the first block are relinked. If jettisonedBlock is not NoBlock,
+ // then Phantoms are inserted for anything that the jettisonedBlock would have
+ // kept alive.
+
+ BasicBlock* firstBlock = m_graph.m_blocks[firstBlockIndex].get();
+ BasicBlock* secondBlock = m_graph.m_blocks[secondBlockIndex].get();
+
+ // Remove the terminal of firstBlock since we don't need it anymore. Well, we don't
+ // really remove it; we actually turn it into a Phantom.
+ ASSERT(m_graph[firstBlock->last()].isTerminal());
+ CodeOrigin boundaryCodeOrigin = m_graph[firstBlock->last()].codeOrigin;
+ m_graph[firstBlock->last()].setOpAndDefaultFlags(Phantom);
+ ASSERT(m_graph[firstBlock->last()].refCount() == 1);
+
+ if (jettisonedBlockIndex != NoBlock) {
+ BasicBlock* jettisonedBlock = m_graph.m_blocks[jettisonedBlockIndex].get();
+
+ // Time to insert ghosties for things that need to be kept alive in case we OSR
+ // exit prior to hitting the firstBlock's terminal, and end up going down a
+ // different path than secondBlock.
+
+ for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfArguments(); ++i)
+ keepOperandAlive(firstBlock, boundaryCodeOrigin, argumentToOperand(i));
+ for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfLocals(); ++i)
+ keepOperandAlive(firstBlock, boundaryCodeOrigin, i);
+ }
+
+ for (size_t i = 0; i < secondBlock->phis.size(); ++i)
+ firstBlock->phis.append(secondBlock->phis[i]);
+
+ // Before we start changing the second block's graph, record what nodes would
+ // be referenced by successors of the second block.
+ Operands<OperandSubstitution> substitutions(
+ secondBlock->variablesAtTail.numberOfArguments(),
+ secondBlock->variablesAtTail.numberOfLocals());
+ for (size_t i = 0; i < secondBlock->variablesAtTail.numberOfArguments(); ++i)
+ recordPossibleIncomingReference(secondBlock, substitutions, argumentToOperand(i));
+ for (size_t i = 0; i < secondBlock->variablesAtTail.numberOfLocals(); ++i)
+ recordPossibleIncomingReference(secondBlock, substitutions, i);
+
+ for (size_t i = 0; i < secondBlock->size(); ++i) {
+ NodeIndex nodeIndex = secondBlock->at(i);
+ Node& node = m_graph[nodeIndex];
+
+ switch (node.op()) {
+ case Phantom: {
+ if (!node.child1())
+ break;
+
+ ASSERT(node.shouldGenerate());
+ Node& possibleLocalOp = m_graph[node.child1()];
+ if (possibleLocalOp.hasLocal()) {
+ NodeIndex setLocalIndex =
+ firstBlock->variablesAtTail.operand(possibleLocalOp.local());
+ Node& setLocal = m_graph[setLocalIndex];
+ if (setLocal.op() == SetLocal)
+ m_graph.changeEdge(node.children.child1(), setLocal.child1());
+ }
+ break;
+ }
+
+ case Flush:
+ case GetLocal: {
+ // A Flush could use a GetLocal, SetLocal, SetArgument, or a Phi.
+ // If it uses a GetLocal, it'll be taken care of below. If it uses a
+ // SetLocal or SetArgument, then it must be using a node from the
+ // same block. But if it uses a Phi, then we should redirect it to
+ // use whatever the first block advertised as a tail operand.
+ // Similarly for GetLocal; it could use any of those except for
+ // GetLocal. If it uses a Phi then it should be redirected to use a
+ // Phi from the tail operand.
+ if (m_graph[node.child1()].op() != Phi)
+ break;
+
+ NodeIndex atFirstIndex = firstBlock->variablesAtTail.operand(node.local());
+ m_graph.changeEdge(node.children.child1(), Edge(skipGetLocal(atFirstIndex)), node.shouldGenerate());
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ bool changeRef = node.shouldGenerate();
+
+ // If the child is a GetLocal, then we might like to fix it.
+ if (node.flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild();
+ childIdx < node.firstChild() + node.numChildren();
+ ++childIdx)
+ fixPossibleGetLocal(firstBlock, m_graph.m_varArgChildren[childIdx], changeRef);
+ } else if (!!node.child1()) {
+ fixPossibleGetLocal(firstBlock, node.children.child1(), changeRef);
+ if (!!node.child2()) {
+ fixPossibleGetLocal(firstBlock, node.children.child2(), changeRef);
+ if (!!node.child3())
+ fixPossibleGetLocal(firstBlock, node.children.child3(), changeRef);
+ }
+ }
+
+ firstBlock->append(nodeIndex);
+ }
+
+ ASSERT(m_graph[firstBlock->last()].isTerminal());
+
+ // Fix the predecessors of my new successors. This is tricky, since we are going to reset
+ // all predecessors anyway due to reachability analysis. But we need to fix the
+ // predecessors eagerly to ensure that we know what they are in case the next block we
+ // consider in this phase wishes to query the predecessors of one of the blocks we
+ // affected.
+ for (unsigned i = m_graph.numSuccessors(firstBlock); i--;) {
+ BasicBlock* successor = m_graph.m_blocks[m_graph.successor(firstBlock, i)].get();
+ for (unsigned j = 0; j < successor->m_predecessors.size(); ++j) {
+ if (successor->m_predecessors[j] == secondBlockIndex)
+ successor->m_predecessors[j] = firstBlockIndex;
+ }
+ }
+
+ // Fix the predecessors of my former successors. Again, we'd rather not do this, but it's
+ // an unfortunate necessity. See above comment.
+ if (jettisonedBlockIndex != NoBlock)
+ fixJettisonedPredecessors(firstBlockIndex, jettisonedBlockIndex);
+
+ // Fix up the variables at tail.
+ for (size_t i = 0; i < secondBlock->variablesAtHead.numberOfArguments(); ++i)
+ fixTailOperand(firstBlock, secondBlock, argumentToOperand(i), substitutions);
+ for (size_t i = 0; i < secondBlock->variablesAtHead.numberOfLocals(); ++i)
+ fixTailOperand(firstBlock, secondBlock, i, substitutions);
+
+ // Fix up the references from our new successors.
+ for (unsigned i = m_graph.numSuccessors(firstBlock); i--;) {
+ BasicBlock* successor = m_graph.m_blocks[m_graph.successor(firstBlock, i)].get();
+ for (unsigned j = 0; j < successor->phis.size(); ++j) {
+ NodeIndex phiNodeIndex = successor->phis[j];
+ Node& phiNode = m_graph[phiNodeIndex];
+ bool changeRef = phiNode.shouldGenerate();
+ OperandSubstitution substitution = substitutions.operand(phiNode.local());
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" Performing operand substitution @%u -> @%u.\n",
+ substitution.oldChild, substitution.newChild);
+#endif
+ if (!phiNode.child1())
+ continue;
+ if (phiNode.child1().index() == substitution.oldChild)
+ m_graph.changeIndex(phiNode.children.child1(), substitution.newChild, changeRef);
+ if (!phiNode.child2())
+ continue;
+ if (phiNode.child2().index() == substitution.oldChild)
+ m_graph.changeIndex(phiNode.children.child2(), substitution.newChild, changeRef);
+ if (!phiNode.child3())
+ continue;
+ if (phiNode.child3().index() == substitution.oldChild)
+ m_graph.changeIndex(phiNode.children.child3(), substitution.newChild, changeRef);
+ }
+ }
+
+ firstBlock->valuesAtTail = secondBlock->valuesAtTail;
+
+ m_graph.m_blocks[secondBlockIndex].clear();
+ }
+};
+
+bool performCFGSimplification(Graph& graph)
+{
+ return runPhase<CFGSimplificationPhase>(graph);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+
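A toy model of the branch-on-constant rule implemented above: when the condition is a proven constant, the Branch degenerates to a Jump at the taken successor and the not-taken edge is dropped, after which reachability is recomputed. The types here are invented stand-ins, and the liveness-preserving Phantom insertion is elided:

    #include <optional>
    #include <vector>

    struct ToyBlock {
        std::optional<bool> constantCondition; // Set when the condition is proven.
        int takenSuccessor = -1;
        int notTakenSuccessor = -1;            // -1 encodes "no edge" (a Jump).
    };

    bool simplifyBranches(std::vector<ToyBlock>& blocks)
    {
        bool changed = false;
        for (ToyBlock& block : blocks) {
            if (block.notTakenSuccessor < 0 || !block.constantCondition)
                continue;                      // Already a Jump, or condition unknown.
            if (!*block.constantCondition)
                block.takenSuccessor = block.notTakenSuccessor;
            block.notTakenSuccessor = -1;      // Branch -> Jump; jettisoned edge gone.
            changed = true;                    // Caller reruns reachability afterwards.
        }
        return changed;
    }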
diff --git a/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.h b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.h
new file mode 100644
index 000000000..a0f4856a4
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGCFGSimplificationPhase_h
+#define DFGCFGSimplificationPhase_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+namespace JSC { namespace DFG {
+
+class Graph;
+
+// CFG simplification:
+//
+// jump to single predecessor -> merge blocks
+// branch on constant -> jump
+// branch to same blocks -> jump
+// jump-only block -> remove
+// kill dead code
+
+bool performCFGSimplification(Graph&);
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGCFGSimplificationPhase_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
index 020b1cfd2..842bcc236 100644
--- a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
@@ -35,8 +35,9 @@ namespace JSC { namespace DFG {
class CSEPhase : public Phase {
public:
- CSEPhase(Graph& graph)
+ CSEPhase(Graph& graph, OptimizationFixpointState fixpointState)
: Phase(graph, "common subexpression elimination")
+ , m_fixpointState(fixpointState)
{
// Replacements are used to implement local common subexpression elimination.
m_replacements.resize(m_graph.size());
@@ -45,10 +46,11 @@ public:
m_replacements[i] = NoNode;
}
- void run()
+ bool run()
{
for (unsigned block = 0; block < m_graph.m_blocks.size(); ++block)
- performBlockCSE(*m_graph.m_blocks[block]);
+ performBlockCSE(m_graph.m_blocks[block].get());
+ return true; // Maybe we'll need to make this phase reason about whether it changed the graph in an actionable way?
}
private:
@@ -123,50 +125,20 @@ private:
return NoNode;
}
- bool isPredictedNumerical(Node& node)
+ NodeIndex constantCSE(Node& node)
{
- PredictedType left = m_graph[node.child1()].prediction();
- PredictedType right = m_graph[node.child2()].prediction();
- return isNumberPrediction(left) && isNumberPrediction(right);
- }
-
- bool logicalNotIsPure(Node& node)
- {
- PredictedType prediction = m_graph[node.child1()].prediction();
- return isBooleanPrediction(prediction) || !prediction;
- }
-
- bool byValIsPure(Node& node)
- {
- return m_graph[node.child2()].shouldSpeculateInteger()
- && ((node.op() == PutByVal || node.op() == PutByValAlias)
- ? isActionableMutableArrayPrediction(m_graph[node.child1()].prediction())
- : isActionableArrayPrediction(m_graph[node.child1()].prediction()));
- }
-
- bool clobbersWorld(NodeIndex nodeIndex)
- {
- Node& node = m_graph[nodeIndex];
- if (node.flags() & NodeClobbersWorld)
- return true;
- if (!(node.flags() & NodeMightClobber))
- return false;
- switch (node.op()) {
- case ValueAdd:
- case CompareLess:
- case CompareLessEq:
- case CompareGreater:
- case CompareGreaterEq:
- case CompareEq:
- return !isPredictedNumerical(node);
- case LogicalNot:
- return !logicalNotIsPure(node);
- case GetByVal:
- return !byValIsPure(node);
- default:
- ASSERT_NOT_REACHED();
- return true; // If by some oddity we hit this case in release build it's safer to have CSE assume the worst.
+ for (unsigned i = endIndexForPureCSE(); i--;) {
+ NodeIndex index = m_currentBlock->at(i);
+ Node& otherNode = m_graph[index];
+ if (otherNode.op() != JSConstant)
+ continue;
+
+ if (otherNode.constantNumber() != node.constantNumber())
+ continue;
+
+ return index;
}
+ return NoNode;
}
NodeIndex impureCSE(Node& node)
@@ -199,7 +171,7 @@ private:
}
}
}
- if (clobbersWorld(index))
+ if (m_graph.clobbersWorld(index))
break;
}
return NoNode;
@@ -222,7 +194,7 @@ private:
default:
break;
}
- if (clobbersWorld(index))
+ if (m_graph.clobbersWorld(index))
break;
}
return NoNode;
@@ -238,14 +210,14 @@ private:
Node& node = m_graph[index];
switch (node.op()) {
case GetByVal:
- if (!byValIsPure(node))
+ if (!m_graph.byValIsPure(node))
return NoNode;
if (node.child1() == child1 && canonicalize(node.child2()) == canonicalize(child2))
return index;
break;
case PutByVal:
case PutByValAlias:
- if (!byValIsPure(node))
+ if (!m_graph.byValIsPure(node))
return NoNode;
if (node.child1() == child1 && canonicalize(node.child2()) == canonicalize(child2))
return node.child3().index();
@@ -264,7 +236,7 @@ private:
// A push cannot affect previously existing elements in the array.
break;
default:
- if (clobbersWorld(index))
+ if (m_graph.clobbersWorld(index))
return NoNode;
break;
}
@@ -315,7 +287,7 @@ private:
case PutByVal:
case PutByValAlias:
- if (byValIsPure(node)) {
+ if (m_graph.byValIsPure(node)) {
// If PutByVal speculates that it's accessing an array with an
// integer index, then it's impossible for it to cause a structure
// change.
@@ -324,7 +296,7 @@ private:
return false;
default:
- if (clobbersWorld(index))
+ if (m_graph.clobbersWorld(index))
return false;
break;
}
@@ -336,7 +308,7 @@ private:
{
for (unsigned i = m_indexInBlock; i--;) {
NodeIndex index = m_currentBlock->at(i);
- if (index == child1)
+ if (index == child1)
break;
Node& node = m_graph[index];
@@ -349,7 +321,7 @@ private:
case PutByOffset:
if (m_graph.m_storageAccessData[node.storageAccessDataIndex()].identifierNumber == identifierNumber) {
- if (node.child2() == child1)
+ if (node.child1() == child1) // Must be same property storage.
return node.child3().index();
return NoNode;
}
@@ -361,7 +333,7 @@ private:
case PutByVal:
case PutByValAlias:
- if (byValIsPure(node)) {
+ if (m_graph.byValIsPure(node)) {
// If PutByVal speculates that it's accessing an array with an
// integer index, then it's impossible for it to cause a structure
// change.
@@ -370,7 +342,7 @@ private:
return NoNode;
default:
- if (clobbersWorld(index))
+ if (m_graph.clobbersWorld(index))
return NoNode;
break;
}
@@ -400,7 +372,7 @@ private:
case PutByVal:
case PutByValAlias:
- if (byValIsPure(node)) {
+ if (m_graph.byValIsPure(node)) {
// If PutByVal speculates that it's accessing an array with an
// integer index, then it's impossible for it to cause a structure
// change.
@@ -409,7 +381,7 @@ private:
return NoNode;
default:
- if (clobbersWorld(index))
+ if (m_graph.clobbersWorld(index))
return NoNode;
break;
}
@@ -445,12 +417,12 @@ private:
break;
case PutByVal:
- if (isFixedIndexedStorageObjectPrediction(m_graph[node.child1()].prediction()) && byValIsPure(node))
+ if (isFixedIndexedStorageObjectPrediction(m_graph[node.child1()].prediction()) && m_graph.byValIsPure(node))
break;
return NoNode;
default:
- if (clobbersWorld(index))
+ if (m_graph.clobbersWorld(index))
return NoNode;
break;
}
@@ -470,6 +442,44 @@ private:
return NoNode;
}
+ NodeIndex getLocalLoadElimination(VirtualRegister local, NodeIndex& relevantLocalOp)
+ {
+ relevantLocalOp = NoNode;
+
+ for (unsigned i = m_indexInBlock; i--;) {
+ NodeIndex index = m_currentBlock->at(i);
+ Node& node = m_graph[index];
+ switch (node.op()) {
+ case GetLocal:
+ if (node.local() == local) {
+ relevantLocalOp = index;
+ return index;
+ }
+ break;
+
+ case GetLocalUnlinked:
+ if (node.unlinkedLocal() == local) {
+ relevantLocalOp = index;
+ return index;
+ }
+ break;
+
+ case SetLocal:
+ if (node.local() == local) {
+ relevantLocalOp = index;
+ return node.child1().index();
+ }
+ break;
+
+ default:
+ if (m_graph.clobbersWorld(index))
+ return NoNode;
+ break;
+ }
+ }
+ return NoNode;
+ }
+
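getLocalLoadElimination above is a backwards scan within the current block: an earlier GetLocal or GetLocalUnlinked of the same local can be reused outright, a SetLocal lets the load forward the stored value, and anything that clobbers the world ends the search. The same shape, distilled into a self-contained sketch with a stand-in node representation:

    #include <vector>

    enum class Op { GetLocal, SetLocal, Clobber, Other };
    struct ToyNode { Op op; int local; };

    // Returns the index of an earlier node whose value satisfies a load of
    // `local`, or -1 when nothing before `start` provides it safely.
    int localLoadElimination(const std::vector<ToyNode>& block, int start, int local)
    {
        for (int i = start; i--;) {
            const ToyNode& node = block[i];
            if ((node.op == Op::GetLocal || node.op == Op::SetLocal) && node.local == local)
                return i;          // Reuse the load, or forward the stored value.
            if (node.op == Op::Clobber)
                return -1;         // A world-clobbering node ends the search.
        }
        return -1;
    }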
void performSubstitution(Edge& child, bool addRef = true)
{
// Check if this operand is actually unused.
@@ -491,15 +501,15 @@ private:
m_graph[child].ref();
}
- void setReplacement(NodeIndex replacement)
+ bool setReplacement(NodeIndex replacement)
{
if (replacement == NoNode)
- return;
+ return false;
// Be safe. Don't try to perform replacements if the predictions don't
// agree.
if (m_graph[m_compileIndex].prediction() != m_graph[replacement].prediction())
- return;
+ return false;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(" Replacing @%u -> @%u", m_compileIndex, replacement);
@@ -511,6 +521,8 @@ private:
// At this point we will eliminate all references to this node.
m_replacements[m_compileIndex] = replacement;
+
+ return true;
}
void eliminate()
@@ -594,9 +606,51 @@ private:
case IsObject:
case IsFunction:
case DoubleAsInt32:
+ case LogicalNot:
setReplacement(pureCSE(node));
break;
+ case GetLocal: {
+ VariableAccessData* variableAccessData = node.variableAccessData();
+ if (!variableAccessData->isCaptured())
+ break;
+ NodeIndex relevantLocalOp;
+ NodeIndex possibleReplacement = getLocalLoadElimination(variableAccessData->local(), relevantLocalOp);
+ ASSERT(relevantLocalOp == NoNode
+ || m_graph[relevantLocalOp].op() == GetLocalUnlinked
+ || m_graph[relevantLocalOp].variableAccessData() == variableAccessData);
+ NodeIndex phiIndex = node.child1().index();
+ if (!setReplacement(possibleReplacement))
+ break;
+ NodeIndex oldTailIndex = m_currentBlock->variablesAtTail.operand(
+ variableAccessData->local());
+ if (oldTailIndex == m_compileIndex) {
+ m_currentBlock->variablesAtTail.operand(variableAccessData->local()) =
+ relevantLocalOp;
+
+ // Maintain graph integrity: since we're replacing a GetLocal with a GetLocalUnlinked,
+ // make sure that the GetLocalUnlinked is now linked.
+ if (m_graph[relevantLocalOp].op() == GetLocalUnlinked) {
+ m_graph[relevantLocalOp].setOp(GetLocal);
+ m_graph[relevantLocalOp].children.child1() = Edge(phiIndex);
+ m_graph.ref(phiIndex);
+ }
+ }
+ break;
+ }
+
+ case GetLocalUnlinked: {
+ NodeIndex relevantLocalOpIgnored;
+ setReplacement(getLocalLoadElimination(node.unlinkedLocal(), relevantLocalOpIgnored));
+ break;
+ }
+
+ case JSConstant:
+ // This is strange, but necessary. Some phases will convert nodes to constants,
+ // which may result in duplicated constants. We use CSE to clean this up.
+ setReplacement(constantCSE(node));
+ break;
+
case GetArrayLength:
setReplacement(impureCSE(node));
break;
@@ -613,18 +667,9 @@ private:
case CompareGreater:
case CompareGreaterEq:
case CompareEq: {
- if (isPredictedNumerical(node)) {
- NodeIndex replacementIndex = pureCSE(node);
- if (replacementIndex != NoNode && isPredictedNumerical(m_graph[replacementIndex]))
- setReplacement(replacementIndex);
- }
- break;
- }
-
- case LogicalNot: {
- if (logicalNotIsPure(node)) {
+ if (m_graph.isPredictedNumerical(node)) {
NodeIndex replacementIndex = pureCSE(node);
- if (replacementIndex != NoNode && logicalNotIsPure(m_graph[replacementIndex]))
+ if (replacementIndex != NoNode && m_graph.isPredictedNumerical(m_graph[replacementIndex]))
setReplacement(replacementIndex);
}
break;
@@ -637,12 +682,14 @@ private:
break;
case GetByVal:
- if (byValIsPure(node))
+ if (m_graph.byValIsPure(node))
setReplacement(getByValLoadElimination(node.child1().index(), node.child2().index()));
break;
case PutByVal:
- if (byValIsPure(node) && getByValLoadElimination(node.child1().index(), node.child2().index()) != NoNode)
+ if (m_graph.byValIsPure(node)
+ && !m_graph[node.child1()].shouldSpeculateArguments()
+ && getByValLoadElimination(node.child1().index(), node.child2().index()) != NoNode)
node.setOp(PutByValAlias);
break;
@@ -682,14 +729,19 @@ private:
#endif
}
- void performBlockCSE(BasicBlock& block)
+ void performBlockCSE(BasicBlock* block)
{
- m_currentBlock = &block;
+ if (!block)
+ return;
+ if (!block->isReachable)
+ return;
+
+ m_currentBlock = block;
for (unsigned i = 0; i < LastNodeType; ++i)
m_lastSeen[i] = UINT_MAX;
- for (m_indexInBlock = 0; m_indexInBlock < block.size(); ++m_indexInBlock) {
- m_compileIndex = block[m_indexInBlock];
+ for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) {
+ m_compileIndex = block->at(m_indexInBlock);
performNodeCSE(m_graph[m_compileIndex]);
}
}
@@ -699,11 +751,12 @@ private:
unsigned m_indexInBlock;
Vector<NodeIndex, 16> m_replacements;
FixedArray<unsigned, LastNodeType> m_lastSeen;
+ OptimizationFixpointState m_fixpointState;
};
-void performCSE(Graph& graph)
+bool performCSE(Graph& graph, OptimizationFixpointState fixpointState)
{
- runPhase<CSEPhase>(graph);
+ return runPhase<CSEPhase>(graph, fixpointState);
}
} } // namespace JSC::DFG
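A plausible way the bool returns and the new OptimizationFixpointState parameter fit together at the driver level. This is an illustrative loop only, not the actual DFGDriver pipeline; the phase order and fixpoint policy here are invented:

    // Assumes the phase headers above are included and we are inside
    // namespace JSC::DFG.
    void optimizeSketch(Graph& graph)
    {
        bool changed;
        do {
            changed = false;
            performCFA(graph);                               // Recompute proofs for the folder.
            changed |= performConstantFolding(graph);
            changed |= performCFGSimplification(graph);
            changed |= performCSE(graph, FixpointNotConverged);
        } while (changed);
        performCSE(graph, FixpointConverged);                // Final pass on the stable graph.
    }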
diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.h b/Source/JavaScriptCore/dfg/DFGCSEPhase.h
index 3f13f102b..7e33c2243 100644
--- a/Source/JavaScriptCore/dfg/DFGCSEPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.h
@@ -30,6 +30,8 @@
#if ENABLE(DFG_JIT)
+#include "DFGCommon.h"
+
namespace JSC { namespace DFG {
class Graph;
@@ -39,7 +41,7 @@ class Graph;
// a wide range of subexpression similarities. It's known to produce big wins
// on a few benchmarks, and is relatively cheap to run.
-void performCSE(Graph&);
+bool performCSE(Graph&, OptimizationFixpointState);
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp
index 450a5d83e..910c3d986 100644
--- a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp
@@ -34,33 +34,56 @@ namespace JSC { namespace DFG {
#if ENABLE(DFG_JIT)
-static inline void debugFail(CodeBlock* codeBlock, OpcodeID opcodeID)
+static inline void debugFail(CodeBlock* codeBlock, OpcodeID opcodeID, bool result)
{
+ ASSERT_UNUSED(result, !result);
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("Cannot handle code block %p because of opcode %s.\n", codeBlock, opcodeNames[opcodeID]);
#else
UNUSED_PARAM(codeBlock);
UNUSED_PARAM(opcodeID);
+ UNUSED_PARAM(result);
#endif
}
-template<bool (*canHandleOpcode)(OpcodeID)>
-bool canHandleOpcodes(CodeBlock* codeBlock)
+static inline void debugFail(CodeBlock* codeBlock, OpcodeID opcodeID, CapabilityLevel result)
+{
+ ASSERT(result != CanCompile);
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ if (result == CannotCompile)
+ dataLog("Cannot handle code block %p because of opcode %s.\n", codeBlock, opcodeNames[opcodeID]);
+ else {
+ ASSERT(result == ShouldProfile);
+ dataLog("Cannot compile code block %p because of opcode %s, but inlining might be possible.\n", codeBlock, opcodeNames[opcodeID]);
+ }
+#else
+ UNUSED_PARAM(codeBlock);
+ UNUSED_PARAM(opcodeID);
+ UNUSED_PARAM(result);
+#endif
+}
+
+template<typename ReturnType, ReturnType (*canHandleOpcode)(OpcodeID, CodeBlock*, Instruction*)>
+ReturnType canHandleOpcodes(CodeBlock* codeBlock, ReturnType initialValue)
{
Interpreter* interpreter = codeBlock->globalData()->interpreter;
Instruction* instructionsBegin = codeBlock->instructions().begin();
unsigned instructionCount = codeBlock->instructions().size();
+ ReturnType result = initialValue;
for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) {
-#define DEFINE_OP(opcode, length) \
- case opcode: \
- if (!canHandleOpcode(opcode)) { \
- debugFail(codeBlock, opcode); \
- return false; \
- } \
- bytecodeOffset += length; \
- break;
+#define DEFINE_OP(opcode, length) \
+ case opcode: { \
+ ReturnType current = canHandleOpcode( \
+ opcode, codeBlock, instructionsBegin + bytecodeOffset); \
+ if (current < result) { \
+ result = current; \
+ debugFail(codeBlock, opcode, current); \
+ } \
+ bytecodeOffset += length; \
+ break; \
+ }
FOR_EACH_OPCODE_ID(DEFINE_OP)
#undef DEFINE_OP
default:
@@ -69,19 +92,19 @@ bool canHandleOpcodes(CodeBlock* codeBlock)
}
}
- return true;
+ return result;
}
-bool canCompileOpcodes(CodeBlock* codeBlock)
+CapabilityLevel canCompileOpcodes(CodeBlock* codeBlock)
{
if (!MacroAssembler::supportsFloatingPoint())
- return false;
- return canHandleOpcodes<canCompileOpcode>(codeBlock);
+ return CannotCompile;
+ return canHandleOpcodes<CapabilityLevel, canCompileOpcode>(codeBlock, CanCompile);
}
bool canInlineOpcodes(CodeBlock* codeBlock)
{
- return canHandleOpcodes<canInlineOpcode>(codeBlock);
+ return canHandleOpcodes<bool, canInlineOpcode>(codeBlock, true);
}
#endif
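The rewritten canHandleOpcodes folds over every opcode in the block, keeping the minimum capability it sees; this relies on CapabilityLevel's declaration order placing CannotCompile below ShouldProfile below CanCompile. A minimal self-contained illustration of that fold:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    enum CapabilityLevel { CannotCompile, ShouldProfile, CanCompile };

    CapabilityLevel combine(const CapabilityLevel* perOpcode, size_t count)
    {
        CapabilityLevel result = CanCompile;         // Optimistic initial value.
        for (size_t i = 0; i < count; ++i)
            result = std::min(result, perOpcode[i]); // One bad opcode caps the block.
        return result;
    }

    int main()
    {
        CapabilityLevel levels[] = { CanCompile, ShouldProfile, CanCompile };
        assert(combine(levels, 3) == ShouldProfile);
        return 0;
    }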
diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.h b/Source/JavaScriptCore/dfg/DFGCapabilities.h
index 8aae85ef7..694e886ee 100644
--- a/Source/JavaScriptCore/dfg/DFGCapabilities.h
+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,6 +27,7 @@
#define DFGCapabilities_h
#include "Intrinsic.h"
+#include "DFGCommon.h"
#include "DFGNode.h"
#include "Executable.h"
#include "Options.h"
@@ -67,7 +68,7 @@ inline bool mightInlineFunctionForConstruct(CodeBlock* codeBlock)
}
// Opcode checking.
-inline bool canCompileOpcode(OpcodeID opcodeID)
+inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instruction*)
{
switch (opcodeID) {
case op_enter:
@@ -163,16 +164,24 @@ inline bool canCompileOpcode(OpcodeID opcodeID)
case op_init_lazy_reg:
case op_create_activation:
case op_tear_off_activation:
+ case op_create_arguments:
+ case op_tear_off_arguments:
case op_new_func:
case op_new_func_exp:
- return true;
+ case op_get_argument_by_val:
+ case op_get_arguments_length:
+ case op_jneq_ptr:
+ return CanCompile;
+ case op_call_varargs:
+ return ShouldProfile;
+
default:
- return false;
+ return CannotCompile;
}
}
-inline bool canInlineOpcode(OpcodeID opcodeID)
+inline bool canInlineOpcode(OpcodeID opcodeID, CodeBlock* codeBlock, Instruction* pc)
{
switch (opcodeID) {
@@ -190,22 +199,25 @@ inline bool canInlineOpcode(OpcodeID opcodeID)
// Inlining doesn't correctly remap regular expression operands.
case op_new_regexp:
- return false;
// We don't support inlining code that creates activations or has nested functions.
- case op_init_lazy_reg:
case op_create_activation:
case op_tear_off_activation:
case op_new_func:
case op_new_func_exp:
return false;
+ // Inlining supports op_call_varargs if it's a call that just forwards the caller's
+ // arguments.
+ case op_call_varargs:
+ return codeBlock->usesArguments() && pc[3].u.operand == codeBlock->argumentsRegister();
+
default:
- return canCompileOpcode(opcodeID);
+ return canCompileOpcode(opcodeID, codeBlock, pc) == CanCompile;
}
}
-bool canCompileOpcodes(CodeBlock*);
+CapabilityLevel canCompileOpcodes(CodeBlock*);
bool canInlineOpcodes(CodeBlock*);
#else // ENABLE(DFG_JIT)
inline bool mightCompileEval(CodeBlock*) { return false; }
@@ -215,30 +227,42 @@ inline bool mightCompileFunctionForConstruct(CodeBlock*) { return false; }
inline bool mightInlineFunctionForCall(CodeBlock*) { return false; }
inline bool mightInlineFunctionForConstruct(CodeBlock*) { return false; }
-inline bool canCompileOpcode(OpcodeID) { return false; }
-inline bool canInlineOpcode(OpcodeID) { return false; }
-inline bool canCompileOpcodes(CodeBlock*) { return false; }
+inline CapabilityLevel canCompileOpcode(OpcodeID, CodeBlock*, Instruction*) { return CannotCompile; }
+inline bool canInlineOpcode(OpcodeID, CodeBlock*, Instruction*) { return false; }
+inline CapabilityLevel canCompileOpcodes(CodeBlock*) { return CannotCompile; }
inline bool canInlineOpcodes(CodeBlock*) { return false; }
#endif // ENABLE(DFG_JIT)
-inline bool canCompileEval(CodeBlock* codeBlock)
+inline CapabilityLevel canCompileEval(CodeBlock* codeBlock)
{
- return mightCompileEval(codeBlock) && canCompileOpcodes(codeBlock);
+ if (!mightCompileEval(codeBlock))
+ return CannotCompile;
+
+ return canCompileOpcodes(codeBlock);
}
-inline bool canCompileProgram(CodeBlock* codeBlock)
+inline CapabilityLevel canCompileProgram(CodeBlock* codeBlock)
{
- return mightCompileProgram(codeBlock) && canCompileOpcodes(codeBlock);
+ if (!mightCompileProgram(codeBlock))
+ return CannotCompile;
+
+ return canCompileOpcodes(codeBlock);
}
-inline bool canCompileFunctionForCall(CodeBlock* codeBlock)
+inline CapabilityLevel canCompileFunctionForCall(CodeBlock* codeBlock)
{
- return mightCompileFunctionForCall(codeBlock) && canCompileOpcodes(codeBlock);
+ if (!mightCompileFunctionForCall(codeBlock))
+ return CannotCompile;
+
+ return canCompileOpcodes(codeBlock);
}
-inline bool canCompileFunctionForConstruct(CodeBlock* codeBlock)
+inline CapabilityLevel canCompileFunctionForConstruct(CodeBlock* codeBlock)
{
- return mightCompileFunctionForConstruct(codeBlock) && canCompileOpcodes(codeBlock);
+ if (!mightCompileFunctionForConstruct(codeBlock))
+ return CannotCompile;
+
+ return canCompileOpcodes(codeBlock);
}
inline bool canInlineFunctionForCall(CodeBlock* codeBlock)
diff --git a/Source/JavaScriptCore/dfg/DFGCommon.h b/Source/JavaScriptCore/dfg/DFGCommon.h
index 828bcb2a3..b2e3bb4ee 100644
--- a/Source/JavaScriptCore/dfg/DFGCommon.h
+++ b/Source/JavaScriptCore/dfg/DFGCommon.h
@@ -49,6 +49,12 @@
#else
#define DFG_ENABLE_JIT_ASSERT 0
#endif
+// Enable validation of the graph.
+#if !ASSERT_DISABLED
+#define DFG_ENABLE_VALIDATION 1
+#else
+#define DFG_ENABLE_VALIDATION 0
+#endif
// Consistency check contents compiler data structures.
#define DFG_ENABLE_CONSISTENCY_CHECK 0
// Emit a breakpoint into the head of every generated function, to aid debugging in GDB.
@@ -71,9 +77,6 @@
#define DFG_ENABLE_SUCCESS_STATS 0
// Enable verification that the DFG is able to insert code for control flow edges.
#define DFG_ENABLE_EDGE_CODE_VERIFICATION 0
-// Pretend that all variables in the top-level code block got captured. Great
-// for testing code gen for activations.
-#define DFG_ENABLE_ALL_VARIABLES_CAPTURED 0
namespace JSC { namespace DFG {
@@ -123,9 +126,23 @@ inline bool isX86()
#endif
}
+enum SpillRegistersMode { NeedToSpill, DontSpill };
+
+enum NoResultTag { NoResult };
+
+enum OptimizationFixpointState { FixpointConverged, FixpointNotConverged };
+
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
+namespace JSC { namespace DFG {
+
+// Put things here that must be defined even if ENABLE(DFG_JIT) is false.
+
+enum CapabilityLevel { CannotCompile, ShouldProfile, CanCompile, CapabilityLevelNotSet };
+
+} } // namespace JSC::DFG
+
#endif // DFGCommon_h
diff --git a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
new file mode 100644
index 000000000..b2b74ba04
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGConstantFoldingPhase.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGAbstractState.h"
+#include "DFGBasicBlock.h"
+#include "DFGGraph.h"
+#include "DFGInsertionSet.h"
+#include "DFGPhase.h"
+
+namespace JSC { namespace DFG {
+
+class ConstantFoldingPhase : public Phase {
+public:
+ ConstantFoldingPhase(Graph& graph)
+ : Phase(graph, "constant folding")
+ {
+ }
+
+ bool run()
+ {
+ bool changed = false;
+
+ AbstractState state(m_graph);
+ InsertionSet<NodeIndex> insertionSet;
+
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ if (!block->cfaFoundConstants)
+ continue;
+ state.beginBasicBlock(block);
+ for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
+ if (!state.isValid())
+ break;
+ state.execute(indexInBlock);
+ NodeIndex nodeIndex = block->at(indexInBlock);
+ Node& node = m_graph[nodeIndex];
+ if (!node.shouldGenerate()
+ || m_graph.clobbersWorld(node)
+ || node.hasConstant())
+ continue;
+ JSValue value = state.forNode(nodeIndex).value();
+ if (!value)
+ continue;
+
+ Node phantom(Phantom, node.codeOrigin);
+
+ if (node.op() == GetLocal) {
+ ASSERT(m_graph[node.child1()].op() == Phi);
+ ASSERT(!m_graph[node.child1()].hasResult());
+
+ ASSERT(block->variablesAtHead.operand(node.local()) == nodeIndex);
+ ASSERT(block->isInPhis(node.child1().index()));
+ block->variablesAtHead.operand(node.local()) = node.child1().index();
+
+ NodeIndex tailNodeIndex = block->variablesAtTail.operand(node.local());
+ if (tailNodeIndex == nodeIndex)
+ block->variablesAtTail.operand(node.local()) = node.child1().index();
+ else {
+ ASSERT(m_graph[tailNodeIndex].op() == Flush
+ || m_graph[tailNodeIndex].op() == SetLocal);
+ }
+ }
+
+ phantom.children = node.children;
+ phantom.ref();
+
+ m_graph.convertToConstant(nodeIndex, value);
+ NodeIndex phantomNodeIndex = m_graph.size();
+ m_graph.append(phantom);
+ insertionSet.append(indexInBlock, phantomNodeIndex);
+
+ changed = true;
+ }
+ insertionSet.execute(*block);
+ state.reset();
+ }
+
+ return changed;
+ }
+};
+
+bool performConstantFolding(Graph& graph)
+{
+ return runPhase<ConstantFoldingPhase>(graph);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+
diff --git a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.h b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.h
new file mode 100644
index 000000000..cde16806c
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGConstantFoldingPhase_h
+#define DFGConstantFoldingPhase_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+namespace JSC { namespace DFG {
+
+class Graph;
+
+// CFA-based constant folding. Walks the blocks that the CFA marked as having
+// inferred constants, replaces each node proven constant with a constant node,
+// and injects Phantom nodes to keep the children alive (which is necessary for
+// OSR exit).
+
+bool performConstantFolding(Graph&);
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGConstantFoldingPhase_h
+
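To make the Phantom trick concrete, here is a schematic before/after for a single folded node. The IR is rendered informally, not as actual dump output:

    Before folding, with CFA having proven @2 always evaluates to 42:
        @2: ArithAdd(@0, @1)
    After folding:
        @2: JSConstant(42)       the constant replaces the computation in place
        @3: Phantom(@0, @1)      inserted at @2's old position so the children
                                 stay live for OSR exit state reconstruction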
diff --git a/Source/JavaScriptCore/dfg/DFGDominators.cpp b/Source/JavaScriptCore/dfg/DFGDominators.cpp
new file mode 100644
index 000000000..0b23d96a7
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGDominators.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGDominators.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGGraph.h"
+
+namespace JSC { namespace DFG {
+
+Dominators::Dominators()
+ : m_valid(false)
+{
+}
+
+Dominators::~Dominators()
+{
+}
+
+void Dominators::compute(Graph& graph)
+{
+ // This implements a naive dominator solver.
+
+ ASSERT(graph.m_blocks[0]->m_predecessors.isEmpty());
+
+ unsigned numBlocks = graph.m_blocks.size();
+
+ if (numBlocks > m_results.size()) {
+ m_results.grow(numBlocks);
+ for (unsigned i = numBlocks; i--;)
+ m_results[i].resize(numBlocks);
+ m_scratch.resize(numBlocks);
+ }
+
+ m_results[0].clearAll();
+ m_results[0].set(0);
+
+ m_scratch.clearAll();
+ for (unsigned i = numBlocks; i--;) {
+ if (!graph.m_blocks[i])
+ continue;
+ m_scratch.set(i);
+ }
+
+ for (unsigned i = numBlocks; i-- > 1;) {
+ if (!graph.m_blocks[i] || graph.m_blocks[i]->m_predecessors.isEmpty())
+ m_results[i].clearAll();
+ else
+ m_results[i].set(m_scratch);
+ }
+
+ bool changed;
+ do {
+ changed = false;
+ for (unsigned i = 1; i < numBlocks; ++i)
+ changed |= iterateForBlock(graph, i);
+ if (!changed)
+ break;
+
+ changed = false;
+ for (unsigned i = numBlocks; i-- > 1;)
+ changed |= iterateForBlock(graph, i);
+ } while (changed);
+
+ m_valid = true;
+}
+
+bool Dominators::iterateForBlock(Graph& graph, BlockIndex i)
+{
+ BasicBlock* block = graph.m_blocks[i].get();
+ if (!block)
+ return false;
+ if (block->m_predecessors.isEmpty())
+ return false;
+ m_scratch.set(m_results[block->m_predecessors[0]]);
+ for (unsigned j = block->m_predecessors.size(); j-- > 1;)
+ m_scratch.filter(m_results[block->m_predecessors[j]]);
+ m_scratch.set(i);
+ return m_results[i].setAndCheck(m_scratch);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
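The solver iterates the textbook dataflow equations for dominance to a fixed point, with iterateForBlock() computing one right-hand side per call via bit-vector intersection (filter()) and change detection (setAndCheck()):

    \mathrm{Dom}(b_{\mathrm{root}}) = \{\, b_{\mathrm{root}} \,\}
    \mathrm{Dom}(b) = \{\, b \,\} \cup \bigcap_{p \in \mathrm{pred}(b)} \mathrm{Dom}(p) \qquad (b \neq b_{\mathrm{root}})

Alternating forward and backward sweeps over the block list does not change the solution; it only speeds convergence on typical CFG shapes.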
diff --git a/Source/JavaScriptCore/dfg/DFGDominators.h b/Source/JavaScriptCore/dfg/DFGDominators.h
new file mode 100644
index 000000000..8eee3e899
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGDominators.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGDominators_h
+#define DFGDominators_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGCommon.h"
+#include <wtf/FastBitVector.h>
+
+namespace JSC { namespace DFG {
+
+class Graph;
+
+class Dominators {
+public:
+ Dominators();
+ ~Dominators();
+
+ void compute(Graph& graph);
+ void invalidate()
+ {
+ m_valid = false;
+ }
+ void computeIfNecessary(Graph& graph)
+ {
+ if (m_valid)
+ return;
+ compute(graph);
+ }
+
+ bool isValid() const { return m_valid; }
+
+ bool dominates(BlockIndex from, BlockIndex to) const
+ {
+ ASSERT(isValid());
+ return m_results[to].get(from);
+ }
+
+private:
+ bool iterateForBlock(Graph& graph, BlockIndex);
+
+ Vector<FastBitVector> m_results;
+ FastBitVector m_scratch;
+ bool m_valid;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGDominators_h
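A minimal usage sketch; the surrounding phase code and the block indices are hypothetical, only the Dominators API is from this patch:

    // Assuming m_graph is a Graph& inside some phase:
    m_graph.m_dominators.computeIfNecessary(m_graph); // no-op while still valid
    if (m_graph.m_dominators.dominates(headerIndex, blockIndex)) {
        // Every path from the root to blockIndex passes through headerIndex.
    }
    // After any CFG mutation, the cached answer must be discarded:
    m_graph.m_dominators.invalidate();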
diff --git a/Source/JavaScriptCore/dfg/DFGDriver.cpp b/Source/JavaScriptCore/dfg/DFGDriver.cpp
index f583a8d63..6ebe338f5 100644
--- a/Source/JavaScriptCore/dfg/DFGDriver.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDriver.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,32 +28,36 @@
#if ENABLE(DFG_JIT)
+#include "DFGArgumentsSimplificationPhase.h"
#include "DFGByteCodeParser.h"
#include "DFGCFAPhase.h"
+#include "DFGCFGSimplificationPhase.h"
#include "DFGCSEPhase.h"
+#include "DFGConstantFoldingPhase.h"
#include "DFGFixupPhase.h"
#include "DFGJITCompiler.h"
#include "DFGPredictionPropagationPhase.h"
#include "DFGRedundantPhiEliminationPhase.h"
+#include "DFGValidate.h"
#include "DFGVirtualRegisterAllocationPhase.h"
namespace JSC { namespace DFG {
enum CompileMode { CompileFunction, CompileOther };
-inline bool compile(CompileMode compileMode, JSGlobalData& globalData, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr* jitCodeWithArityCheck)
+inline bool compile(CompileMode compileMode, ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr* jitCodeWithArityCheck)
{
SamplingRegion samplingRegion("DFG Compilation (Driver)");
ASSERT(codeBlock);
ASSERT(codeBlock->alternative());
ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);
-
+
#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("DFG compiling code block %p(%p), number of instructions = %u.\n", codeBlock, codeBlock->alternative(), codeBlock->instructionCount());
+ dataLog("DFG compiling code block %p(%p) for executable %p, number of instructions = %u.\n", codeBlock, codeBlock->alternative(), codeBlock->ownerExecutable(), codeBlock->instructionCount());
#endif
- Graph dfg(globalData, codeBlock);
- if (!parse(dfg))
+ Graph dfg(exec->globalData(), codeBlock);
+ if (!parse(exec, dfg))
return false;
if (compileMode == CompileFunction)
@@ -65,12 +69,30 @@ inline bool compile(CompileMode compileMode, JSGlobalData& globalData, CodeBlock
// that references any of the tables directly, yet.
codeBlock->shrinkToFit(CodeBlock::EarlyShrink);
- performRedundantPhiElimination(dfg);
+ validate(dfg);
performPredictionPropagation(dfg);
performFixup(dfg);
- performCSE(dfg);
+ unsigned cnt = 1;
+ for (;; ++cnt) {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("DFG beginning optimization fixpoint iteration #%u.\n", cnt);
+#endif
+ bool changed = false;
+ performCFA(dfg);
+ changed |= performConstantFolding(dfg);
+ changed |= performArgumentsSimplification(dfg);
+ changed |= performCFGSimplification(dfg);
+ if (!changed)
+ break;
+ performCSE(dfg, FixpointNotConverged);
+ dfg.resetExitStates();
+ }
+ performCSE(dfg, FixpointConverged);
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("DFG optimization fixpoint converged in %u iterations.\n", cnt);
+#endif
+ dfg.m_dominators.compute(dfg);
performVirtualRegisterAllocation(dfg);
- performCFA(dfg);
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("Graph after optimization:\n");
@@ -93,14 +115,14 @@ inline bool compile(CompileMode compileMode, JSGlobalData& globalData, CodeBlock
return result;
}
-bool tryCompile(JSGlobalData& globalData, CodeBlock* codeBlock, JITCode& jitCode)
+bool tryCompile(ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode)
{
- return compile(CompileOther, globalData, codeBlock, jitCode, 0);
+ return compile(CompileOther, exec, codeBlock, jitCode, 0);
}
-bool tryCompileFunction(JSGlobalData& globalData, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck)
+bool tryCompileFunction(ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck)
{
- return compile(CompileFunction, globalData, codeBlock, jitCode, &jitCodeWithArityCheck);
+ return compile(CompileFunction, exec, codeBlock, jitCode, &jitCodeWithArityCheck);
}
} } // namespace JSC::DFG
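For readability, the new driver pipeline distilled to its shape (names as in the patch; logging and the iteration counter elided). CSE runs inside the loop in FixpointNotConverged mode and once more in FixpointConverged mode after the graph stops changing; resetting exit states after each in-loop CSE keeps the NodeDoesNotExit information conservative for the next iteration:

    validate(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    for (;;) {
        performCFA(dfg);
        bool changed = performConstantFolding(dfg);
        changed |= performArgumentsSimplification(dfg);
        changed |= performCFGSimplification(dfg);
        if (!changed)
            break;
        performCSE(dfg, FixpointNotConverged);
        dfg.resetExitStates();
    }
    performCSE(dfg, FixpointConverged);
    dfg.m_dominators.compute(dfg);
    performVirtualRegisterAllocation(dfg);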
diff --git a/Source/JavaScriptCore/dfg/DFGDriver.h b/Source/JavaScriptCore/dfg/DFGDriver.h
index 639b13f7a..ce798d0a6 100644
--- a/Source/JavaScriptCore/dfg/DFGDriver.h
+++ b/Source/JavaScriptCore/dfg/DFGDriver.h
@@ -26,6 +26,7 @@
#ifndef DFGDriver_h
#define DFGDriver_h
+#include "CallFrame.h"
#include <wtf/Platform.h>
namespace JSC {
@@ -38,11 +39,11 @@ class MacroAssemblerCodePtr;
namespace DFG {
#if ENABLE(DFG_JIT)
-bool tryCompile(JSGlobalData&, CodeBlock*, JITCode&);
-bool tryCompileFunction(JSGlobalData&, CodeBlock*, JITCode&, MacroAssemblerCodePtr& jitCodeWithArityCheck);
+bool tryCompile(ExecState*, CodeBlock*, JITCode&);
+bool tryCompileFunction(ExecState*, CodeBlock*, JITCode&, MacroAssemblerCodePtr& jitCodeWithArityCheck);
#else
-inline bool tryCompile(JSGlobalData&, CodeBlock*, JITCode&) { return false; }
-inline bool tryCompileFunction(JSGlobalData&, CodeBlock*, JITCode&, MacroAssemblerCodePtr&) { return false; }
+inline bool tryCompile(ExecState*, CodeBlock*, JITCode&) { return false; }
+inline bool tryCompileFunction(ExecState*, CodeBlock*, JITCode&, MacroAssemblerCodePtr&) { return false; }
#endif
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGEdge.h b/Source/JavaScriptCore/dfg/DFGEdge.h
index 7b4b5b8bf..10988bf39 100644
--- a/Source/JavaScriptCore/dfg/DFGEdge.h
+++ b/Source/JavaScriptCore/dfg/DFGEdge.h
@@ -78,6 +78,10 @@ public:
}
bool isSet() const { return indexUnchecked() != NoNode; }
+
+ typedef void* Edge::*UnspecifiedBoolType;
+ operator UnspecifiedBoolType*() const { return reinterpret_cast<UnspecifiedBoolType*>(isSet()); }
+
bool operator!() const { return !isSet(); }
bool operator==(Edge other) const
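The new conversion is the classic pre-C++11 safe-bool idiom: converting to a pointer-to-member type lets an Edge be tested in boolean contexts without also becoming implicitly convertible to integer types. A brief illustration (hypothetical usage, not from this patch):

    Edge edge = node.child1();
    if (edge) { /* ... */ }   // OK: goes through UnspecifiedBoolType*
    // int n = edge;          // would not compile: no conversion to int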
diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
index 242fdf852..e54d2cfaf 100644
--- a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
@@ -41,15 +41,19 @@ public:
{
}
- void run()
+ bool run()
{
for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex)
fixupBlock(m_graph.m_blocks[blockIndex].get());
+ return true;
}
private:
void fixupBlock(BasicBlock* block)
{
+ if (!block)
+ return;
+ ASSERT(block->isReachable);
for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) {
m_compileIndex = block->at(m_indexInBlock);
fixupNode(m_graph[m_compileIndex]);
@@ -75,6 +79,7 @@ private:
if (codeBlock()->identifier(node.identifierNumber()) != globalData().propertyNames->length)
break;
bool isArray = isArrayPrediction(m_graph[node.child1()].prediction());
+ bool isArguments = isArgumentsPrediction(m_graph[node.child1()].prediction());
bool isString = isStringPrediction(m_graph[node.child1()].prediction());
bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
@@ -85,7 +90,7 @@ private:
bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
- if (!isArray && !isString && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint8ClampedArray && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
+ if (!isArray && !isArguments && !isString && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint8ClampedArray && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
break;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
@@ -93,6 +98,8 @@ private:
#endif
if (isArray)
node.setOp(GetArrayLength);
+ else if (isArguments)
+ node.setOp(GetArgumentsLength);
else if (isString)
node.setOp(GetStringLength);
else if (isInt8Array)
@@ -123,7 +130,9 @@ private:
}
case GetIndexedPropertyStorage: {
PredictedType basePrediction = m_graph[node.child2()].prediction();
- if (!(basePrediction & PredictInt32) && basePrediction) {
+ if ((!(basePrediction & PredictInt32) && basePrediction)
+ || m_graph[node.child1()].shouldSpeculateArguments()
+ || !isActionableArrayPrediction(m_graph[node.child1()].prediction())) {
node.setOpAndDefaultFlags(Nop);
m_graph.clearAndDerefChild1(node);
m_graph.clearAndDerefChild2(node);
@@ -209,7 +218,7 @@ private:
}
case SetLocal: {
- if (m_graph.isCaptured(node.local()))
+ if (node.variableAccessData()->isCaptured())
break;
if (!node.variableAccessData()->shouldUseDoubleFormat())
break;
@@ -246,7 +255,6 @@ private:
case ArithMin:
case ArithMax:
- case ArithMul:
case ArithMod: {
if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()])
&& node.canSpeculateInteger())
@@ -256,6 +264,14 @@ private:
break;
}
+ case ArithMul: {
+ if (m_graph.mulShouldSpeculateInteger(node))
+ break;
+ fixDoubleEdge(0);
+ fixDoubleEdge(1);
+ break;
+ }
+
case ArithDiv: {
if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()])
&& node.canSpeculateInteger()) {
@@ -383,9 +399,9 @@ private:
InsertionSet<NodeIndex> m_insertionSet;
};
-void performFixup(Graph& graph)
+bool performFixup(Graph& graph)
{
- runPhase<FixupPhase>(graph);
+ return runPhase<FixupPhase>(graph);
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.h b/Source/JavaScriptCore/dfg/DFGFixupPhase.h
index 1ba85ebfe..d496d59b2 100644
--- a/Source/JavaScriptCore/dfg/DFGFixupPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.h
@@ -37,7 +37,7 @@ class Graph;
// Fix portions of the graph that are inefficient given the predictions that
// we have. This should run after prediction propagation but before CSE.
-void performFixup(Graph&);
+bool performFixup(Graph&);
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGGPRInfo.h b/Source/JavaScriptCore/dfg/DFGGPRInfo.h
index 4a250328f..bd4fa32d1 100644
--- a/Source/JavaScriptCore/dfg/DFGGPRInfo.h
+++ b/Source/JavaScriptCore/dfg/DFGGPRInfo.h
@@ -271,6 +271,8 @@ public:
// These constants provide the names for the general purpose argument & return value registers.
static const GPRReg argumentGPR0 = X86Registers::ecx; // regT2
static const GPRReg argumentGPR1 = X86Registers::edx; // regT1
+ static const GPRReg nonArgGPR0 = X86Registers::eax; // regT0
+ static const GPRReg nonArgGPR1 = X86Registers::ebx; // regT3
static const GPRReg returnValueGPR = X86Registers::eax; // regT0
static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1
static const GPRReg nonPreservedNonReturnGPR = X86Registers::ecx;
@@ -340,6 +342,8 @@ public:
static const GPRReg argumentGPR3 = X86Registers::ecx; // regT2
static const GPRReg argumentGPR4 = X86Registers::r8; // regT6
static const GPRReg argumentGPR5 = X86Registers::r9; // regT7
+ static const GPRReg nonArgGPR0 = X86Registers::eax; // regT0
+ static const GPRReg nonArgGPR1 = X86Registers::ebx; // regT3
static const GPRReg returnValueGPR = X86Registers::eax; // regT0
static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1
static const GPRReg nonPreservedNonReturnGPR = X86Registers::esi;
@@ -410,6 +414,8 @@ public:
// between the arguments being set up, and the call being made. That said,
// any change introducing a problem here is likely to be immediately apparent!
static const GPRReg argumentGPR3 = ARMRegisters::r3; // FIXME!
+ static const GPRReg nonArgGPR0 = ARMRegisters::r4; // regT3
+ static const GPRReg nonArgGPR1 = ARMRegisters::r8; // regT4
static const GPRReg returnValueGPR = ARMRegisters::r0; // regT0
static const GPRReg returnValueGPR2 = ARMRegisters::r1; // regT1
static const GPRReg nonPreservedNonReturnGPR = ARMRegisters::r2;
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp
index 3c99e5d4e..4562e30ee 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.cpp
+++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp
@@ -65,7 +65,7 @@ const char* Graph::nameOfVariableAccessData(VariableAccessData* variableAccessDa
if (!index)
return "A";
- static char buf[10];
+ static char buf[100];
BoundsCheckedPointer<char> ptr(buf, sizeof(buf));
while (index) {
@@ -73,6 +73,11 @@ const char* Graph::nameOfVariableAccessData(VariableAccessData* variableAccessDa
index /= 26;
}
+ if (variableAccessData->isCaptured())
+ *ptr++ = '*';
+
+ ptr.strcat(predictionToAbbreviatedString(variableAccessData->prediction()));
+
*ptr++ = 0;
return buf;
@@ -126,10 +131,8 @@ void Graph::dump(NodeIndex nodeIndex)
unsigned refCount = node.refCount();
bool skipped = !refCount;
bool mustGenerate = node.mustGenerate();
- if (mustGenerate) {
- ASSERT(refCount);
+ if (mustGenerate)
--refCount;
- }
printWhiteSpace((node.codeOrigin.inlineDepth() - 1) * 2);
@@ -166,7 +169,8 @@ void Graph::dump(NodeIndex nodeIndex)
dataLog("%s@%u%s",
useKindToString(m_varArgChildren[childIdx].useKind()),
m_varArgChildren[childIdx].index(),
- predictionToAbbreviatedString(at(childIdx).prediction()));
+ predictionToAbbreviatedString(
+ at(m_varArgChildren[childIdx]).prediction()));
}
} else {
if (!!node.child1()) {
@@ -278,12 +282,34 @@ void Graph::dump()
NodeIndex lastNodeIndex = NoNode;
for (size_t b = 0; b < m_blocks.size(); ++b) {
BasicBlock* block = m_blocks[b].get();
+ if (!block)
+ continue;
dataLog("Block #%u (bc#%u): %s%s\n", (int)b, block->bytecodeBegin, block->isReachable ? "" : " (skipped)", block->isOSRTarget ? " (OSR target)" : "");
+ dataLog(" Predecessors:");
+ for (size_t i = 0; i < block->m_predecessors.size(); ++i)
+ dataLog(" #%u", block->m_predecessors[i]);
+ dataLog("\n");
+ if (m_dominators.isValid()) {
+ dataLog(" Dominated by:");
+ for (size_t i = 0; i < m_blocks.size(); ++i) {
+ if (!m_dominators.dominates(i, b))
+ continue;
+ dataLog(" #%lu", static_cast<unsigned long>(i));
+ }
+ dataLog("\n");
+ dataLog(" Dominates:");
+ for (size_t i = 0; i < m_blocks.size(); ++i) {
+ if (!m_dominators.dominates(b, i))
+ continue;
+ dataLog(" #%lu", static_cast<unsigned long>(i));
+ }
+ dataLog("\n");
+ }
dataLog(" Phi Nodes:\n");
for (size_t i = 0; i < block->phis.size(); ++i) {
- // Dumping the dead Phi nodes is just annoying!
- if (at(block->phis[i]).refCount())
- dump(block->phis[i]);
+ dumpCodeOrigin(lastNodeIndex, block->phis[i]);
+ dump(block->phis[i]);
+ lastNodeIndex = block->phis[i];
}
dataLog(" vars before: ");
if (block->cfaHasVisited)
@@ -305,6 +331,9 @@ void Graph::dump()
else
dataLog("<empty>");
dataLog("\n");
+ dataLog(" var links: ");
+ dumpOperands(block->variablesAtTail, WTF::dataFile());
+ dataLog("\n");
}
}
@@ -362,6 +391,112 @@ void Graph::predictArgumentTypes()
}
}
+void Graph::handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex blockIndex, BlockIndex successorIndex)
+{
+ BasicBlock* successor = m_blocks[successorIndex].get();
+ if (!successor->isReachable) {
+ successor->isReachable = true;
+ worklist.append(successorIndex);
+ }
+
+ successor->m_predecessors.append(blockIndex);
+}
+
+void Graph::collectGarbage()
+{
+ // First reset the counts to 0 for all nodes.
+ for (unsigned i = size(); i--;)
+ at(i).setRefCount(0);
+
+ // Now find the roots: the nodes that are must-generate. Set their ref counts to
+ // 1 and put them on the worklist.
+ Vector<NodeIndex, 128> worklist;
+ for (BlockIndex blockIndex = 0; blockIndex < m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ for (unsigned indexInBlock = block->size(); indexInBlock--;) {
+ NodeIndex nodeIndex = block->at(indexInBlock);
+ Node& node = at(nodeIndex);
+ if (!(node.flags() & NodeMustGenerate))
+ continue;
+ node.setRefCount(1);
+ worklist.append(nodeIndex);
+ }
+ }
+
+ while (!worklist.isEmpty()) {
+ NodeIndex nodeIndex = worklist.last();
+ worklist.removeLast();
+ Node& node = at(nodeIndex);
+ ASSERT(node.shouldGenerate()); // It should not be on the worklist unless it's ref'ed.
+ if (node.flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild();
+ childIdx < node.firstChild() + node.numChildren();
+ ++childIdx) {
+ NodeIndex childNodeIndex = m_varArgChildren[childIdx].index();
+ if (!at(childNodeIndex).ref())
+ continue;
+ worklist.append(childNodeIndex);
+ }
+ } else if (node.child1()) {
+ if (at(node.child1()).ref())
+ worklist.append(node.child1().index());
+ if (node.child2()) {
+ if (at(node.child2()).ref())
+ worklist.append(node.child2().index());
+ if (node.child3()) {
+ if (at(node.child3()).ref())
+ worklist.append(node.child3().index());
+ }
+ }
+ }
+ }
+}
+
+void Graph::determineReachability()
+{
+ Vector<BlockIndex, 16> worklist;
+ worklist.append(0);
+ m_blocks[0]->isReachable = true;
+ while (!worklist.isEmpty()) {
+ BlockIndex index = worklist.last();
+ worklist.removeLast();
+
+ BasicBlock* block = m_blocks[index].get();
+ ASSERT(block->isLinked);
+
+ Node& node = at(block->last());
+ ASSERT(node.isTerminal());
+
+ if (node.isJump())
+ handleSuccessor(worklist, index, node.takenBlockIndex());
+ else if (node.isBranch()) {
+ handleSuccessor(worklist, index, node.takenBlockIndex());
+ handleSuccessor(worklist, index, node.notTakenBlockIndex());
+ }
+ }
+}
+
+void Graph::resetReachability()
+{
+ for (BlockIndex blockIndex = m_blocks.size(); blockIndex--;) {
+ BasicBlock* block = m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ block->isReachable = false;
+ block->m_predecessors.clear();
+ }
+
+ determineReachability();
+}
+
+void Graph::resetExitStates()
+{
+ for (unsigned i = size(); i--;)
+ at(i).setCanExit(true);
+}
+
} } // namespace JSC::DFG
#endif
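A hypothetical call site for collectGarbage(), for a phase that has just rewired local-variable edges (the phase itself is assumed, not part of this file):

    // Phi/GetLocal cycles can keep each other's ref counts above zero even
    // when nothing reachable uses them, so recompute the counts from the
    // must-generate roots instead of trusting incremental ref/deref.
    m_graph.collectGarbage();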
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
index 0c8ac2dcf..52654d23b 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -26,12 +26,15 @@
#ifndef DFGGraph_h
#define DFGGraph_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "CodeBlock.h"
#include "DFGArgumentPosition.h"
#include "DFGAssemblyHelpers.h"
#include "DFGBasicBlock.h"
+#include "DFGDominators.h"
#include "DFGNode.h"
#include "MethodOfGettingAValueProfile.h"
#include "RegisterFile.h"
@@ -77,6 +80,7 @@ public:
: m_globalData(globalData)
, m_codeBlock(codeBlock)
, m_profiledBlock(codeBlock->alternative())
+ , m_hasArguments(false)
{
ASSERT(m_profiledBlock);
}
@@ -105,6 +109,8 @@ public:
void deref(NodeIndex nodeIndex)
{
+ if (!at(nodeIndex).refCount())
+ dump();
if (at(nodeIndex).deref())
derefChildren(nodeIndex);
}
@@ -113,6 +119,24 @@ public:
deref(nodeUse.index());
}
+ void changeIndex(Edge& edge, NodeIndex newIndex, bool changeRef = true)
+ {
+ if (changeRef) {
+ ref(newIndex);
+ deref(edge.index());
+ }
+ edge.setIndex(newIndex);
+ }
+
+ void changeEdge(Edge& edge, Edge newEdge, bool changeRef = true)
+ {
+ if (changeRef) {
+ ref(newEdge);
+ deref(edge);
+ }
+ edge = newEdge;
+ }
+
void clearAndDerefChild1(Node& node)
{
if (!node.child1())
@@ -136,6 +160,22 @@ public:
deref(node.child3());
node.children.child3() = Edge();
}
+
+ // Call this if you've modified the reference counts of nodes that deal with
+ // local variables. This is necessary because local variable references can form
+ // cycles, and hence reference counting is not enough. This will reset the
+ // reference counts according to reachability.
+ void collectGarbage();
+
+ void convertToConstant(NodeIndex nodeIndex, unsigned constantNumber)
+ {
+ at(nodeIndex).convertToConstant(constantNumber);
+ }
+
+ void convertToConstant(NodeIndex nodeIndex, JSValue value)
+ {
+ convertToConstant(nodeIndex, m_codeBlock->addOrFindConstant(value));
+ }
// CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
void dump();
@@ -167,6 +207,21 @@ public:
return Node::shouldSpeculateInteger(left, right) && add.canSpeculateInteger();
}
+ bool mulShouldSpeculateInteger(Node& mul)
+ {
+ ASSERT(mul.op() == ArithMul);
+
+ Node& left = at(mul.child1());
+ Node& right = at(mul.child2());
+
+ if (left.hasConstant())
+ return mulImmediateShouldSpeculateInteger(mul, right, left);
+ if (right.hasConstant())
+ return mulImmediateShouldSpeculateInteger(mul, left, right);
+
+ return Node::shouldSpeculateInteger(left, right) && mul.canSpeculateInteger() && !nodeMayOverflow(mul.arithNodeFlags());
+ }
+
bool negateShouldSpeculateInteger(Node& negate)
{
ASSERT(negate.op() == ArithNegate);
@@ -255,11 +310,48 @@ public:
return &m_structureTransitionData.last();
}
+ ExecutableBase* executableFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_codeBlock->ownerExecutable();
+
+ return inlineCallFrame->executable.get();
+ }
+
+ ExecutableBase* executableFor(const CodeOrigin& codeOrigin)
+ {
+ return executableFor(codeOrigin.inlineCallFrame);
+ }
+
CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
{
return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
}
+ int argumentsRegisterFor(const CodeOrigin& codeOrigin)
+ {
+ if (!codeOrigin.inlineCallFrame)
+ return m_codeBlock->argumentsRegister();
+
+ return baselineCodeBlockForInlineCallFrame(
+ codeOrigin.inlineCallFrame)->argumentsRegister() +
+ codeOrigin.inlineCallFrame->stackOffset;
+ }
+
+ int uncheckedArgumentsRegisterFor(const CodeOrigin& codeOrigin)
+ {
+ if (!codeOrigin.inlineCallFrame)
+ return m_codeBlock->uncheckedArgumentsRegister();
+
+ CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(
+ codeOrigin.inlineCallFrame);
+ if (!codeBlock->usesArguments())
+ return InvalidVirtualRegister;
+
+ return codeBlock->argumentsRegister() +
+ codeOrigin.inlineCallFrame->stackOffset;
+ }
+
ValueProfile* valueProfileFor(NodeIndex nodeIndex)
{
if (nodeIndex == NoNode)
@@ -303,37 +395,86 @@ public:
bool needsActivation() const
{
-#if DFG_ENABLE(ALL_VARIABLES_CAPTURED)
- return true;
-#else
return m_codeBlock->needsFullScopeChain() && m_codeBlock->codeType() != GlobalCode;
-#endif
}
- // Pass an argument index. Currently it's ignored, but that's somewhat
- // of a bug.
- bool argumentIsCaptured(int) const
+ bool usesArguments() const
{
- return needsActivation();
+ return m_codeBlock->usesArguments();
}
- bool localIsCaptured(int operand) const
+
+ unsigned numSuccessors(BasicBlock* block)
{
-#if DFG_ENABLE(ALL_VARIABLES_CAPTURED)
- return operand < m_codeBlock->m_numVars;
-#else
- return operand < m_codeBlock->m_numCapturedVars;
-#endif
+ return at(block->last()).numSuccessors();
+ }
+ BlockIndex successor(BasicBlock* block, unsigned index)
+ {
+ return at(block->last()).successor(index);
+ }
+ BlockIndex successorForCondition(BasicBlock* block, bool condition)
+ {
+ return at(block->last()).successorForCondition(condition);
+ }
+
+ bool isPredictedNumerical(Node& node)
+ {
+ PredictedType left = at(node.child1()).prediction();
+ PredictedType right = at(node.child2()).prediction();
+ return isNumberPrediction(left) && isNumberPrediction(right);
+ }
+
+ bool byValIsPure(Node& node)
+ {
+ return at(node.child2()).shouldSpeculateInteger()
+ && ((node.op() == PutByVal || node.op() == PutByValAlias)
+ ? isActionableMutableArrayPrediction(at(node.child1()).prediction())
+ : isActionableArrayPrediction(at(node.child1()).prediction()));
}
- bool isCaptured(int operand) const
+ bool clobbersWorld(Node& node)
{
- if (operandIsArgument(operand))
- return argumentIsCaptured(operandToArgument(operand));
- return localIsCaptured(operand);
+ if (node.flags() & NodeClobbersWorld)
+ return true;
+ if (!(node.flags() & NodeMightClobber))
+ return false;
+ switch (node.op()) {
+ case ValueAdd:
+ case CompareLess:
+ case CompareLessEq:
+ case CompareGreater:
+ case CompareGreaterEq:
+ case CompareEq:
+ return !isPredictedNumerical(node);
+ case GetByVal:
+ return !byValIsPure(node);
+ default:
+ ASSERT_NOT_REACHED();
+ return true; // If by some oddity we hit this case in a release build, it's safer to have CSE assume the worst.
+ }
}
- bool isCaptured(VirtualRegister virtualRegister) const
+
+ bool clobbersWorld(NodeIndex nodeIndex)
{
- return isCaptured(static_cast<int>(virtualRegister));
+ return clobbersWorld(at(nodeIndex));
+ }
+
+ void determineReachability();
+ void resetReachability();
+
+ void resetExitStates();
+
+ unsigned numChildren(Node& node)
+ {
+ if (node.flags() & NodeHasVarArgs)
+ return node.numChildren();
+ return AdjacencyList::Size;
+ }
+
+ Edge child(Node& node, unsigned index)
+ {
+ if (node.flags() & NodeHasVarArgs)
+ return m_varArgChildren[node.firstChild() + index];
+ return node.children.child(index);
}
JSGlobalData& m_globalData;
@@ -349,11 +490,16 @@ public:
SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
SegmentedVector<StructureSet, 16> m_structureSet;
SegmentedVector<StructureTransitionData, 8> m_structureTransitionData;
+ bool m_hasArguments;
+ HashSet<ExecutableBase*> m_executablesWhoseArgumentsEscaped;
BitVector m_preservedVars;
+ Dominators m_dominators;
unsigned m_localVars;
unsigned m_parameterSlots;
private:
+ void handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex blockIndex, BlockIndex successorIndex);
+
bool addImmediateShouldSpeculateInteger(Node& add, Node& variable, Node& immediate)
{
ASSERT(immediate.hasConstant());
@@ -376,6 +522,30 @@ private:
return nodeCanTruncateInteger(add.arithNodeFlags());
}
+ bool mulImmediateShouldSpeculateInteger(Node& mul, Node& variable, Node& immediate)
+ {
+ ASSERT(immediate.hasConstant());
+
+ JSValue immediateValue = immediate.valueOfJSConstant(m_codeBlock);
+ if (!immediateValue.isInt32())
+ return false;
+
+ if (!variable.shouldSpeculateInteger())
+ return false;
+
+ int32_t intImmediate = immediateValue.asInt32();
+ // Doubles have a 53-bit mantissa, so multiplying 2^31 (the highest-magnitude
+ // int32 value) by any value of magnitude less than 2^22 cannot round: the
+ // product is exact, and hence equivalent to an integer multiplication if we
+ // truncate to int32 afterwards (which is what canSpeculateInteger() implies).
+ const int32_t twoToThe22 = 1 << 22;
+ if (intImmediate <= -twoToThe22 || intImmediate >= twoToThe22)
+ return mul.canSpeculateInteger() && !nodeMayOverflow(mul.arithNodeFlags());
+
+ return mul.canSpeculateInteger();
+ }
+
// When a node's refCount goes from 0 to 1, it must (logically) recursively ref all of its children, and vice versa.
void refChildren(NodeIndex);
void derefChildren(NodeIndex);
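Why 2^22 is the safe cutoff, as a worked bound: with |a| <= 2^31 and |b| < 2^22,

    |a \cdot b| < 2^{31} \cdot 2^{22} = 2^{53},

so the product fits in a double's 53-bit mantissa and is exactly representable; truncating it back to int32 then agrees with true integer multiplication. For larger immediates the code additionally demands that profiling never observed overflow (!nodeMayOverflow()).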
diff --git a/Source/JavaScriptCore/dfg/DFGInsertionSet.h b/Source/JavaScriptCore/dfg/DFGInsertionSet.h
index 82a6a6fa4..26ab1f28f 100644
--- a/Source/JavaScriptCore/dfg/DFGInsertionSet.h
+++ b/Source/JavaScriptCore/dfg/DFGInsertionSet.h
@@ -79,7 +79,7 @@ public:
Insertion<ElementType>& insertion = m_insertions[indexInInsertions];
size_t firstIndex = insertion.index() + indexInInsertions;
size_t indexOffset = indexInInsertions + 1;
- for (size_t i = lastIndex; i-- > firstIndex;)
+ for (size_t i = lastIndex; --i > firstIndex;)
collection[i] = collection[i - indexOffset];
collection[firstIndex] = insertion.element();
lastIndex = firstIndex;
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index 8d5b7238c..54b5aaee6 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -32,6 +32,7 @@
#include "DFGOSRExitCompiler.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
+#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSGlobalData.h"
@@ -78,9 +79,10 @@ void JITCompiler::compileBody(SpeculativeJIT& speculative)
bool compiledSpeculative = speculative.compile();
ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
+}
- linkOSRExits();
-
+void JITCompiler::compileExceptionHandlers()
+{
// Iterate over the m_calls vector, checking for jumps to link.
bool didLinkExceptionCheck = false;
for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
@@ -148,19 +150,19 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
- CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_functionCall);
+ CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->call());
info.codeOrigin = m_propertyAccesses[i].m_codeOrigin;
info.callReturnLocation = callReturnLocation;
- info.patch.dfg.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCheckImmToCall), callReturnLocation);
- info.patch.dfg.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToStructCheck));
+ info.patch.dfg.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_structureImm), callReturnLocation);
+ info.patch.dfg.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_structureCheck));
#if USE(JSVALUE64)
- info.patch.dfg.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToLoadOrStore));
+ info.patch.dfg.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_loadOrStore));
#else
- info.patch.dfg.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToTagLoadOrStore));
- info.patch.dfg.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToPayloadLoadOrStore));
+ info.patch.dfg.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_tagLoadOrStore));
+ info.patch.dfg.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_payloadLoadOrStore));
#endif
- info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToSlowCase));
- info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToDone));
+ info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->label()));
+ info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_done));
info.patch.dfg.baseGPR = m_propertyAccesses[i].m_baseGPR;
#if USE(JSVALUE64)
info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
@@ -199,6 +201,12 @@ bool JITCompiler::compile(JITCode& entry)
SpeculativeJIT speculative(*this);
compileBody(speculative);
+ // Generate slow path code.
+ speculative.runSlowPathGenerators();
+
+ compileExceptionHandlers();
+ linkOSRExits();
+
// Create OSR entry trampolines if necessary.
speculative.createOSREntries();
@@ -268,6 +276,12 @@ bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWi
move(GPRInfo::regT0, GPRInfo::callFrameRegister);
jump(fromArityCheck);
+ // Generate slow path code.
+ speculative.runSlowPathGenerators();
+
+ compileExceptionHandlers();
+ linkOSRExits();
+
// Create OSR entry trampolines if necessary.
speculative.createOSREntries();
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
index 01a1e7246..360165b24 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.h
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
@@ -28,15 +28,15 @@
#if ENABLE(DFG_JIT)
-#include <assembler/LinkBuffer.h>
-#include <assembler/MacroAssembler.h>
-#include <bytecode/CodeBlock.h>
-#include <dfg/DFGCCallHelpers.h>
-#include <dfg/DFGFPRInfo.h>
-#include <dfg/DFGGPRInfo.h>
-#include <dfg/DFGGraph.h>
-#include <dfg/DFGRegisterBank.h>
-#include <jit/JITCode.h>
+#include "CodeBlock.h"
+#include "DFGCCallHelpers.h"
+#include "DFGFPRInfo.h"
+#include "DFGGPRInfo.h"
+#include "DFGGraph.h"
+#include "DFGRegisterBank.h"
+#include "JITCode.h"
+#include "LinkBuffer.h"
+#include "MacroAssembler.h"
namespace JSC {
@@ -48,6 +48,7 @@ namespace DFG {
class JITCodeGenerator;
class NodeToRegisterMap;
+class SlowPathGenerator;
class SpeculativeJIT;
class SpeculationRecovery;
@@ -130,22 +131,43 @@ struct PropertyAccessRecord {
enum RegisterMode { RegistersFlushed, RegistersInUse };
#if USE(JSVALUE64)
- PropertyAccessRecord(CodeOrigin codeOrigin, MacroAssembler::DataLabelPtr deltaCheckImmToCall, MacroAssembler::Call functionCall, MacroAssembler::PatchableJump deltaCallToStructCheck, MacroAssembler::DataLabelCompact deltaCallToLoadOrStore, MacroAssembler::Label deltaCallToSlowCase, MacroAssembler::Label deltaCallToDone, int8_t baseGPR, int8_t valueGPR, int8_t scratchGPR, RegisterMode registerMode = RegistersInUse)
+ PropertyAccessRecord(
+ CodeOrigin codeOrigin,
+ MacroAssembler::DataLabelPtr structureImm,
+ MacroAssembler::PatchableJump structureCheck,
+ MacroAssembler::DataLabelCompact loadOrStore,
+ SlowPathGenerator* slowPathGenerator,
+ MacroAssembler::Label done,
+ int8_t baseGPR,
+ int8_t valueGPR,
+ int8_t scratchGPR,
+ RegisterMode registerMode = RegistersInUse)
#elif USE(JSVALUE32_64)
- PropertyAccessRecord(CodeOrigin codeOrigin, MacroAssembler::DataLabelPtr deltaCheckImmToCall, MacroAssembler::Call functionCall, MacroAssembler::PatchableJump deltaCallToStructCheck, MacroAssembler::DataLabelCompact deltaCallToTagLoadOrStore, MacroAssembler::DataLabelCompact deltaCallToPayloadLoadOrStore, MacroAssembler::Label deltaCallToSlowCase, MacroAssembler::Label deltaCallToDone, int8_t baseGPR, int8_t valueTagGPR, int8_t valueGPR, int8_t scratchGPR, RegisterMode registerMode = RegistersInUse)
+ PropertyAccessRecord(
+ CodeOrigin codeOrigin,
+ MacroAssembler::DataLabelPtr structureImm,
+ MacroAssembler::PatchableJump structureCheck,
+ MacroAssembler::DataLabelCompact tagLoadOrStore,
+ MacroAssembler::DataLabelCompact payloadLoadOrStore,
+ SlowPathGenerator* slowPathGenerator,
+ MacroAssembler::Label done,
+ int8_t baseGPR,
+ int8_t valueTagGPR,
+ int8_t valueGPR,
+ int8_t scratchGPR,
+ RegisterMode registerMode = RegistersInUse)
#endif
: m_codeOrigin(codeOrigin)
- , m_deltaCheckImmToCall(deltaCheckImmToCall)
- , m_functionCall(functionCall)
- , m_deltaCallToStructCheck(deltaCallToStructCheck)
+ , m_structureImm(structureImm)
+ , m_structureCheck(structureCheck)
#if USE(JSVALUE64)
- , m_deltaCallToLoadOrStore(deltaCallToLoadOrStore)
+ , m_loadOrStore(loadOrStore)
#elif USE(JSVALUE32_64)
- , m_deltaCallToTagLoadOrStore(deltaCallToTagLoadOrStore)
- , m_deltaCallToPayloadLoadOrStore(deltaCallToPayloadLoadOrStore)
+ , m_tagLoadOrStore(tagLoadOrStore)
+ , m_payloadLoadOrStore(payloadLoadOrStore)
#endif
- , m_deltaCallToSlowCase(deltaCallToSlowCase)
- , m_deltaCallToDone(deltaCallToDone)
+ , m_slowPathGenerator(slowPathGenerator)
+ , m_done(done)
, m_baseGPR(baseGPR)
#if USE(JSVALUE32_64)
, m_valueTagGPR(valueTagGPR)
@@ -157,17 +179,16 @@ struct PropertyAccessRecord {
}
CodeOrigin m_codeOrigin;
- MacroAssembler::DataLabelPtr m_deltaCheckImmToCall;
- MacroAssembler::Call m_functionCall;
- MacroAssembler::PatchableJump m_deltaCallToStructCheck;
+ MacroAssembler::DataLabelPtr m_structureImm;
+ MacroAssembler::PatchableJump m_structureCheck;
#if USE(JSVALUE64)
- MacroAssembler::DataLabelCompact m_deltaCallToLoadOrStore;
+ MacroAssembler::DataLabelCompact m_loadOrStore;
#elif USE(JSVALUE32_64)
- MacroAssembler::DataLabelCompact m_deltaCallToTagLoadOrStore;
- MacroAssembler::DataLabelCompact m_deltaCallToPayloadLoadOrStore;
+ MacroAssembler::DataLabelCompact m_tagLoadOrStore;
+ MacroAssembler::DataLabelCompact m_payloadLoadOrStore;
#endif
- MacroAssembler::Label m_deltaCallToSlowCase;
- MacroAssembler::Label m_deltaCallToDone;
+ SlowPathGenerator* m_slowPathGenerator;
+ MacroAssembler::Label m_done;
int8_t m_baseGPR;
#if USE(JSVALUE32_64)
int8_t m_valueTagGPR;
@@ -193,7 +214,7 @@ public:
, m_currentCodeOriginIndex(0)
{
}
-
+
bool compile(JITCode& entry);
bool compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck);
@@ -316,6 +337,7 @@ private:
void link(LinkBuffer&);
void exitSpeculativeWithOSR(const OSRExit&, SpeculationRecovery*);
+ void compileExceptionHandlers();
void linkOSRExits();
// The dataflow graph currently being generated.
diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h
index f79a93a69..1dbfccb8a 100644
--- a/Source/JavaScriptCore/dfg/DFGNode.h
+++ b/Source/JavaScriptCore/dfg/DFGNode.h
@@ -75,7 +75,7 @@ struct OpInfo {
// Node represents a single operation in the data flow graph.
struct Node {
enum VarArgTag { VarArg };
-
+
// Construct a node with up to 3 children, no immediate value.
Node(NodeType op, CodeOrigin codeOrigin, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
: codeOrigin(codeOrigin)
@@ -144,6 +144,7 @@ struct Node {
bool mergeFlags(NodeFlags flags)
{
+ ASSERT(!(flags & NodeDoesNotExit));
NodeFlags newFlags = m_flags | flags;
if (newFlags == m_flags)
return false;
@@ -153,6 +154,7 @@ struct Node {
bool filterFlags(NodeFlags flags)
{
+ ASSERT(flags & NodeDoesNotExit);
NodeFlags newFlags = m_flags & flags;
if (newFlags == m_flags)
return false;
@@ -175,7 +177,20 @@ struct Node {
{
return m_flags & NodeMustGenerate;
}
-
+
+ void setCanExit(bool exits)
+ {
+ if (exits)
+ m_flags &= ~NodeDoesNotExit;
+ else
+ m_flags |= NodeDoesNotExit;
+ }
+
+ bool canExit()
+ {
+ return !(m_flags & NodeDoesNotExit);
+ }
+
bool isConstant()
{
return op() == JSConstant;
@@ -197,6 +212,26 @@ struct Node {
return m_opInfo;
}
+ void convertToConstant(unsigned constantNumber)
+ {
+ m_op = JSConstant;
+ if (m_flags & NodeMustGenerate)
+ m_refCount--;
+ m_flags &= ~(NodeMustGenerate | NodeMightClobber | NodeClobbersWorld);
+ m_opInfo = constantNumber;
+ children.reset();
+ }
+
+ void convertToGetLocalUnlinked(VirtualRegister local)
+ {
+ m_op = GetLocalUnlinked;
+ if (m_flags & NodeMustGenerate)
+ m_refCount--;
+ m_flags &= ~(NodeMustGenerate | NodeMightClobber | NodeClobbersWorld);
+ m_opInfo = local;
+ children.reset();
+ }
+
JSCell* weakConstant()
{
return bitwise_cast<JSCell*>(m_opInfo);
@@ -264,6 +299,18 @@ struct Node {
return variableAccessData()->local();
}
+ VirtualRegister unmodifiedArgumentsRegister()
+ {
+ ASSERT(op() == TearOffActivation);
+ return static_cast<VirtualRegister>(m_opInfo);
+ }
+
+ VirtualRegister unlinkedLocal()
+ {
+ ASSERT(op() == GetLocalUnlinked);
+ return static_cast<VirtualRegister>(m_opInfo);
+ }
+
bool hasIdentifier()
{
switch (op()) {
@@ -458,12 +505,45 @@ struct Node {
return m_opInfo2;
}
+ unsigned numSuccessors()
+ {
+ switch (op()) {
+ case Jump:
+ return 1;
+ case Branch:
+ return 2;
+ default:
+ return 0;
+ }
+ }
+
+ BlockIndex successor(unsigned index)
+ {
+ switch (index) {
+ case 0:
+ return takenBlockIndex();
+ case 1:
+ return notTakenBlockIndex();
+ default:
+ ASSERT_NOT_REACHED();
+ return NoBlock;
+ }
+ }
+
+ BlockIndex successorForCondition(bool condition)
+ {
+ ASSERT(isBranch());
+ return condition ? takenBlockIndex() : notTakenBlockIndex();
+ }
+
bool hasHeapPrediction()
{
switch (op()) {
case GetById:
case GetByIdFlush:
case GetByVal:
+ case GetMyArgumentByVal:
+ case GetMyArgumentByValSafe:
case Call:
case Construct:
case GetByOffset:
@@ -700,6 +780,11 @@ struct Node {
return isArrayPrediction(prediction());
}
+ bool shouldSpeculateArguments()
+ {
+ return isArgumentsPrediction(prediction());
+ }
+
bool shouldSpeculateInt8Array()
{
return isInt8ArrayPrediction(prediction());
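The successor accessors on Node (mirrored by Graph::numSuccessors() and Graph::successor()) allow generic CFG walks without switching on the terminal's opcode. A sketch, with graph, block, and visit assumed to be in scope:

    for (unsigned i = graph.numSuccessors(block); i--;)
        visit(graph.successor(block, i));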
diff --git a/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp b/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp
index 54e6b69b7..ca6257401 100644
--- a/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp
+++ b/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp
@@ -130,6 +130,13 @@ const char* nodeFlagsAsString(NodeFlags flags)
hasPrinted = true;
}
+ if (!(flags & NodeDoesNotExit)) {
+ if (hasPrinted)
+ ptr.strcat("|");
+ ptr.strcat("CanExit");
+ hasPrinted = true;
+ }
+
*ptr++ = 0;
return description;
diff --git a/Source/JavaScriptCore/dfg/DFGNodeFlags.h b/Source/JavaScriptCore/dfg/DFGNodeFlags.h
index 16d76655e..a897d0c4f 100644
--- a/Source/JavaScriptCore/dfg/DFGNodeFlags.h
+++ b/Source/JavaScriptCore/dfg/DFGNodeFlags.h
@@ -59,6 +59,8 @@ namespace JSC { namespace DFG {
#define NodeUsedAsValue (NodeUsedAsNumber | NodeNeedsNegZero)
#define NodeUsedAsInt 0x1000 // The result of this computation is known to be used in a context that prefers, but does not require, integer values.
+#define NodeDoesNotExit 0x2000 // Stored negated so that the natural default (no flags set) means a node can exit.
+
typedef uint16_t NodeFlags;
static inline bool nodeUsedAsNumber(NodeFlags flags)
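Because the bit is stored inverted, a freshly constructed node with zeroed flags is conservatively assumed to exit. The effect of the accessors, spelled out (mirroring setCanExit()/canExit() in DFGNode.h):

    NodeFlags flags = 0;                          // default: node can exit
    bool canExit = !(flags & NodeDoesNotExit);    // true
    flags |= NodeDoesNotExit;                     // proven non-exiting
    canExit = !(flags & NodeDoesNotExit);         // false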
diff --git a/Source/JavaScriptCore/dfg/DFGNodeType.h b/Source/JavaScriptCore/dfg/DFGNodeType.h
index 8a3828c31..091f96c6f 100644
--- a/Source/JavaScriptCore/dfg/DFGNodeType.h
+++ b/Source/JavaScriptCore/dfg/DFGNodeType.h
@@ -48,7 +48,9 @@ namespace JSC { namespace DFG {
macro(CreateThis, NodeResultJS) /* Note this is not MustGenerate since we're returning it anyway. */ \
macro(GetCallee, NodeResultJS) \
\
- /* Nodes for local variable access. */\
+ /* Nodes for local variable access. These nodes are linked together using Phi nodes. */\
+ /* Any two nodes that are part of the same Phi graph will share the same */\
+ /* VariableAccessData, and thus will share predictions. */\
macro(GetLocal, NodeResultJS) \
macro(SetLocal, 0) \
macro(Phantom, NodeMustGenerate) \
@@ -56,7 +58,12 @@ namespace JSC { namespace DFG {
macro(Phi, 0) \
macro(Flush, NodeMustGenerate) \
\
- /* Marker for arguments being set. */\
+ /* Get the value of a local variable, without linking into the VariableAccessData */\
+ /* network. This is only valid for variable accesses whose predictions originated */\
+ /* as something other than a local access, and thus had their own profiling. */\
+ macro(GetLocalUnlinked, NodeResultJS) \
+ \
+ /* Marker for an argument being set at the prologue of a function. */\
macro(SetArgument, 0) \
\
/* Hint that inlining begins here. No code is generated for this node. It's only */\
@@ -117,6 +124,7 @@ namespace JSC { namespace DFG {
macro(GetByOffset, NodeResultJS) \
macro(PutByOffset, NodeMustGenerate | NodeClobbersWorld) \
macro(GetArrayLength, NodeResultInt32) \
+ macro(GetArgumentsLength, NodeResultInt32) \
macro(GetStringLength, NodeResultInt32) \
macro(GetInt8ArrayLength, NodeResultInt32) \
macro(GetInt16ArrayLength, NodeResultInt32) \
@@ -180,7 +188,7 @@ namespace JSC { namespace DFG {
macro(IsString, NodeResultBoolean) \
macro(IsObject, NodeResultBoolean) \
macro(IsFunction, NodeResultBoolean) \
- macro(LogicalNot, NodeResultBoolean | NodeMightClobber) \
+ macro(LogicalNot, NodeResultBoolean) \
macro(ToPrimitive, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
macro(StrCat, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
\
@@ -190,6 +198,16 @@ namespace JSC { namespace DFG {
macro(CreateActivation, NodeResultJS) \
macro(TearOffActivation, NodeMustGenerate) \
\
+ /* Nodes used for arguments. Similar to activation support, only it makes even less */\
+ /* sense. */\
+ macro(CreateArguments, NodeResultJS) \
+ macro(TearOffArguments, NodeMustGenerate) \
+ macro(GetMyArgumentsLength, NodeResultJS | NodeMustGenerate) \
+ macro(GetMyArgumentByVal, NodeResultJS | NodeMustGenerate) \
+ macro(GetMyArgumentsLengthSafe, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(GetMyArgumentByValSafe, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(CheckArgumentsNotCreated, NodeMustGenerate) \
+ \
/* Nodes for creating functions. */\
macro(NewFunctionNoCheck, NodeResultJS) \
macro(NewFunction, NodeResultJS) \
diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
index 21c76c6fe..9a7bc96cc 100644
--- a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
@@ -42,7 +42,6 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
ASSERT(codeBlock->alternative());
ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);
ASSERT(!codeBlock->jitCodeMap());
- ASSERT(codeBlock->numberOfDFGOSREntries());
#if ENABLE(JIT_VERBOSE_OSR)
dataLog("OSR in %p(%p) from bc#%u\n", codeBlock, codeBlock->alternative(), bytecodeIndex);
@@ -51,6 +50,13 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
JSGlobalData* globalData = &exec->globalData();
OSREntryData* entry = codeBlock->dfgOSREntryDataForBytecodeIndex(bytecodeIndex);
+ if (!entry) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ dataLog(" OSR failed because the entrypoint was optimized out.\n");
+#endif
+ return 0;
+ }
+
ASSERT(entry->m_bytecodeIndex == bytecodeIndex);
// The code below checks if it is safe to perform OSR entry. It may find
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
index 844be2a7c..bcb98a1ed 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
@@ -78,7 +78,16 @@ bool OSRExit::considerAddingAsFrequentExitSiteSlow(CodeBlock* dfgCodeBlock, Code
if (static_cast<double>(m_count) / dfgCodeBlock->speculativeFailCounter() <= Options::osrExitProminenceForFrequentExitSite)
return false;
- return baselineCodeBlockForOriginAndBaselineCodeBlock(m_codeOriginForExitProfile, profiledCodeBlock)->addFrequentExitSite(FrequentExitSite(m_codeOriginForExitProfile.bytecodeIndex, m_kind));
+ FrequentExitSite exitSite;
+
+ if (m_kind == ArgumentsEscaped) {
+ // Count this one globally. It doesn't matter where in the code block the arguments escaped;
+ // the fact that they did is not associated with any particular instruction.
+ exitSite = FrequentExitSite(m_kind);
+ } else
+ exitSite = FrequentExitSite(m_codeOriginForExitProfile.bytecodeIndex, m_kind);
+
+ return baselineCodeBlockForOriginAndBaselineCodeBlock(m_codeOrigin, profiledCodeBlock)->addFrequentExitSite(exitSite);
}
} } // namespace JSC::DFG
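The change above keys ArgumentsEscaped exits to the whole code block rather than to a bytecode index, since escape is a one-time property of the frame. A simplified stand-in for that keying decision (the sentinel constant is an assumption of this sketch, not the real FrequentExitSite encoding):

    #include <cstdint>
    #include <cstdio>

    enum SketchExitKind { SketchArgumentsEscaped, SketchBadCache };

    // Simplified stand-in for FrequentExitSite: a (bytecodeIndex, kind) pair,
    // where a sentinel index means "counted once for the whole code block".
    struct SketchExitSite {
        uint32_t bytecodeIndex;
        SketchExitKind kind;
    };

    static const uint32_t wholeCodeBlock = UINT32_MAX; // sentinel, an assumption

    SketchExitSite keyForExit(SketchExitKind kind, uint32_t bytecodeIndex)
    {
        if (kind == SketchArgumentsEscaped)
            return { wholeCodeBlock, kind }; // escape is a property of the block
        return { bytecodeIndex, kind };      // everything else stays per-bytecode
    }

    int main()
    {
        SketchExitSite site = keyForExit(SketchArgumentsEscaped, 42);
        std::printf("index=%u kind=%d\n", site.bytecodeIndex, site.kind);
        return 0;
    }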
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
index a63f671bc..888a4a2c5 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
@@ -60,7 +60,7 @@ void compileOSRExit(ExecState* exec)
for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
static_cast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())
->baselineCodeBlockFor(codeOrigin.inlineCallFrame->isCall ? CodeForCall : CodeForConstruct)
- ->jitCompile(*globalData);
+ ->jitCompile(exec);
}
SpeculationRecovery* recovery = 0;
@@ -72,7 +72,7 @@ void compileOSRExit(ExecState* exec)
#endif
{
- AssemblyHelpers jit(globalData, codeBlock);
+ CCallHelpers jit(globalData, codeBlock);
OSRExitCompiler exitCompiler(jit);
jit.jitAssertHasValidCallFrame();
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h
index 523644982..86345b0eb 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h
@@ -31,6 +31,7 @@
#if ENABLE(DFG_JIT)
#include "DFGAssemblyHelpers.h"
+#include "DFGCCallHelpers.h"
#include "DFGOSRExit.h"
#include "DFGOperations.h"
@@ -42,7 +43,7 @@ namespace DFG {
class OSRExitCompiler {
public:
- OSRExitCompiler(AssemblyHelpers& jit)
+ OSRExitCompiler(CCallHelpers& jit)
: m_jit(jit)
{
}
@@ -72,7 +73,7 @@ private:
void handleExitCounts(const OSRExit&);
- AssemblyHelpers& m_jit;
+ CCallHelpers& m_jit;
Vector<unsigned> m_poisonScratchIndices;
};
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
index 3c7f27579..d773cb4ac 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
@@ -90,13 +90,14 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
GPRReg scratch = GPRInfo::regT0;
if (scratch == exit.m_jsValueSource.base())
scratch = GPRInfo::regT1;
- EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(uint32_t)));
- m_jit.store32(scratch, scratchBuffer);
+ ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(uint32_t));
+ EncodedJSValue* scratchDataBuffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
+ m_jit.store32(scratch, scratchDataBuffer);
m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
- m_jit.load32(scratchBuffer, scratch);
+ m_jit.load32(scratchDataBuffer, scratch);
} else if (exit.m_jsValueSource.hasKnownTag()) {
m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
@@ -130,6 +131,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
bool haveFPRs = false;
bool haveConstants = false;
bool haveUndefined = false;
+ bool haveArguments = false;
for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
const ValueRecovery& recovery = exit.valueRecovery(index);
@@ -193,13 +195,18 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
haveUndefined = true;
break;
+ case ArgumentsThatWereNotCreated:
+ haveArguments = true;
+ break;
+
default:
break;
}
}
unsigned scratchBufferLengthBeforeUInt32s = numberOfPoisonedVirtualRegisters + ((numberOfDisplacedVirtualRegisters * 2) <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters);
- EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * (scratchBufferLengthBeforeUInt32s + (haveUInt32s ? 2 : 0))));
+ ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * (scratchBufferLengthBeforeUInt32s + (haveUInt32s ? 2 : 0)));
+ EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
// From here on, the code assumes that it is profitable to maximize the distance
// between when something is computed and when it is stored.
@@ -243,7 +250,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
case UnboxedInt32InGPR:
case UnboxedBooleanInGPR:
if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
- m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
currentPoisonIndex++;
} else {
@@ -260,8 +267,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
break;
case InPair:
if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
- m_jit.store32(recovery.tagGPR(), reinterpret_cast<char*>(scratchBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- m_jit.store32(recovery.payloadGPR(), reinterpret_cast<char*>(scratchBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ m_jit.store32(recovery.tagGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ m_jit.store32(recovery.payloadGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
currentPoisonIndex++;
} else {
@@ -270,7 +277,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
}
break;
case UInt32InGPR: {
- EncodedJSValue* myScratch = scratchBuffer + scratchBufferLengthBeforeUInt32s;
+ EncodedJSValue* myScratch = scratchDataBuffer + scratchBufferLengthBeforeUInt32s;
GPRReg addressGPR = GPRInfo::regT0;
if (addressGPR == recovery.gpr())
@@ -285,7 +292,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
- m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchBuffer + currentPoisonIndex), addressGPR);
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + currentPoisonIndex), addressGPR);
m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
} else
m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor((VirtualRegister)operand));
@@ -295,8 +302,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
positive.link(&m_jit);
if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
- m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
- m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(scratchBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
} else {
m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
@@ -327,7 +334,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
if (recovery.technique() != InFPR)
continue;
if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
- m_jit.storeDouble(recovery.fpr(), scratchBuffer + currentPoisonIndex);
+ m_jit.storeDouble(recovery.fpr(), scratchDataBuffer + currentPoisonIndex);
m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
currentPoisonIndex++;
} else
@@ -413,15 +420,15 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
case DisplacedInRegisterFile:
m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::regT1);
- m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
- m_jit.store32(GPRInfo::regT1, reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ m_jit.store32(GPRInfo::regT1, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
scratchIndex++;
break;
case Int32DisplacedInRegisterFile:
case CellDisplacedInRegisterFile:
case BooleanDisplacedInRegisterFile:
m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
- m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
break;
default:
break;
@@ -433,24 +440,24 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
const ValueRecovery& recovery = exit.valueRecovery(index);
switch (recovery.technique()) {
case DisplacedInRegisterFile:
- m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
- m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
+ m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
scratchIndex++;
break;
case Int32DisplacedInRegisterFile:
- m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
break;
case CellDisplacedInRegisterFile:
- m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
break;
case BooleanDisplacedInRegisterFile:
- m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
break;
@@ -475,7 +482,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
case InGPR:
case UnboxedInt32InGPR:
case UnboxedBooleanInGPR: {
- m_jit.load32(reinterpret_cast<char*>(scratchBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
uint32_t tag = JSValue::EmptyValueTag;
if (recovery.technique() == InGPR)
@@ -491,8 +498,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
case InFPR:
case InPair:
case UInt32InGPR:
- m_jit.load32(reinterpret_cast<char*>(scratchBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
- m_jit.load32(reinterpret_cast<char*>(scratchBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
+ m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
break;
@@ -526,7 +533,71 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
}
}
- // 11) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+ // 11) Create arguments if necessary and place them into the appropriate aliased
+ // registers.
+
+ if (haveArguments) {
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != ArgumentsThatWereNotCreated)
+ continue;
+ int operand = exit.operandForIndex(index);
+ // Find the right inline call frame.
+ InlineCallFrame* inlineCallFrame = 0;
+ for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
+ current;
+ current = current->caller.inlineCallFrame) {
+ if (current->stackOffset <= operand) {
+ inlineCallFrame = current;
+ break;
+ }
+ }
+ int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame);
+
+ m_jit.load32(AssemblyHelpers::payloadFor(argumentsRegister), GPRInfo::regT0);
+ AssemblyHelpers::Jump haveArguments = m_jit.branch32(
+ AssemblyHelpers::NotEqual,
+ AssemblyHelpers::tagFor(argumentsRegister),
+ AssemblyHelpers::TrustedImm32(JSValue::EmptyValueTag));
+
+ if (inlineCallFrame) {
+ m_jit.setupArgumentsWithExecState(
+ AssemblyHelpers::TrustedImmPtr(inlineCallFrame));
+ m_jit.move(
+ AssemblyHelpers::TrustedImmPtr(
+ bitwise_cast<void*>(operationCreateInlinedArguments)),
+ GPRInfo::nonArgGPR0);
+ } else {
+ m_jit.setupArgumentsExecState();
+ m_jit.move(
+ AssemblyHelpers::TrustedImmPtr(
+ bitwise_cast<void*>(operationCreateArguments)),
+ GPRInfo::nonArgGPR0);
+ }
+ m_jit.call(GPRInfo::nonArgGPR0);
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue::CellTag),
+ AssemblyHelpers::tagFor(argumentsRegister));
+ m_jit.store32(
+ GPRInfo::returnValueGPR,
+ AssemblyHelpers::payloadFor(argumentsRegister));
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue::CellTag),
+ AssemblyHelpers::tagFor(unmodifiedArgumentsRegister(argumentsRegister)));
+ m_jit.store32(
+ GPRInfo::returnValueGPR,
+ AssemblyHelpers::payloadFor(unmodifiedArgumentsRegister(argumentsRegister)));
+ m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
+
+ haveArguments.link(&m_jit);
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue::CellTag),
+ AssemblyHelpers::tagFor(operand));
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(operand));
+ }
+ }
+
+ // 12) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
// that all new calls into this code will go to the new JIT, so the execute
// counter only affects call frames that performed OSR exit and call frames
// that were still executing the old JIT at the time of another call frame's
@@ -564,14 +635,14 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
handleExitCounts(exit);
- // 12) Load the result of the last bytecode operation into regT0.
+ // 13) Load the result of the last bytecode operation into regT0.
if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) {
m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
}
- // 13) Fix call frame (s).
+ // 14) Fix call frame(s).
ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock));
@@ -610,7 +681,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
if (exit.m_codeOrigin.inlineCallFrame)
m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);
- // 14) Jump into the corresponding baseline JIT code.
+ // 15) Jump into the corresponding baseline JIT code.
CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);
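Both exit compilers recover ArgumentsThatWereNotCreated operands the same way: walk the inline call frame chain from the exit origin outward and take the first frame whose stack offset covers the operand. A standalone model of that search, using simplified frame records rather than the real InlineCallFrame/CodeOrigin types:

    #include <cstdio>

    // Inline call frames form a chain from the exit origin outward; the first
    // frame whose stackOffset is <= the operand owns that operand's arguments.
    struct SketchInlineFrame {
        int stackOffset;
        SketchInlineFrame* caller;
    };

    SketchInlineFrame* frameForOperand(SketchInlineFrame* origin, int operand)
    {
        for (SketchInlineFrame* current = origin; current; current = current->caller) {
            if (current->stackOffset <= operand)
                return current; // innermost frame containing the operand
        }
        return nullptr; // operand belongs to the machine (outermost) frame
    }

    int main()
    {
        SketchInlineFrame outer = { 8, nullptr };
        SketchInlineFrame inner = { 24, &outer };
        std::printf("%d\n", frameForOperand(&inner, 16)->stackOffset); // prints 8
        return 0;
    }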
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
index 86d47b90e..22b236115 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
@@ -127,6 +127,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
bool haveConstants = false;
bool haveUndefined = false;
bool haveUInt32s = false;
+ bool haveArguments = false;
for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
const ValueRecovery& recovery = exit.valueRecovery(index);
@@ -184,6 +185,10 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
haveUndefined = true;
break;
+ case ArgumentsThatWereNotCreated:
+ haveArguments = true;
+ break;
+
default:
break;
}
@@ -210,7 +215,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
dataLog(" ");
#endif
- EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * std::max(haveUInt32s ? 2u : 0u, numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters))));
+ ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * std::max(haveUInt32s ? 2u : 0u, numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters)));
+ EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
// From here on, the code assumes that it is profitable to maximize the distance
// between when something is computed and when it is stored.
@@ -245,8 +251,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
if (addressGPR == recovery.gpr())
addressGPR = GPRInfo::regT1;
- m_jit.storePtr(addressGPR, scratchBuffer);
- m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchBuffer + 1), addressGPR);
+ m_jit.storePtr(addressGPR, scratchDataBuffer);
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + 1), addressGPR);
m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));
@@ -264,7 +270,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
done.link(&m_jit);
m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
- m_jit.loadPtr(scratchBuffer, addressGPR);
+ m_jit.loadPtr(scratchDataBuffer, addressGPR);
break;
}
@@ -289,7 +295,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
case UnboxedInt32InGPR:
case UInt32InGPR:
if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
- m_jit.storePtr(recovery.gpr(), scratchBuffer + currentPoisonIndex);
+ m_jit.storePtr(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
currentPoisonIndex++;
} else
@@ -323,7 +329,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
continue;
GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
- m_jit.storePtr(gpr, scratchBuffer + currentPoisonIndex);
+ m_jit.storePtr(gpr, scratchDataBuffer + currentPoisonIndex);
m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
currentPoisonIndex++;
} else
@@ -422,20 +428,20 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
switch (recovery.technique()) {
case DisplacedInRegisterFile:
m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, scratchBuffer + scratchIndex++);
+ m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
break;
case Int32DisplacedInRegisterFile: {
m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
m_jit.orPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, scratchBuffer + scratchIndex++);
+ m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
break;
}
case DoubleDisplacedInRegisterFile: {
m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
m_jit.subPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, scratchBuffer + scratchIndex++);
+ m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
break;
}
@@ -451,7 +457,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
case DisplacedInRegisterFile:
case Int32DisplacedInRegisterFile:
case DoubleDisplacedInRegisterFile:
- m_jit.loadPtr(scratchBuffer + scratchIndex++, GPRInfo::regT0);
+ m_jit.loadPtr(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
break;
@@ -477,7 +483,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
case UnboxedInt32InGPR:
case UInt32InGPR:
case InFPR:
- m_jit.loadPtr(scratchBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
+ m_jit.loadPtr(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
break;
@@ -505,7 +511,58 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
}
}
- // 13) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+ // 13) Create arguments if necessary and place them into the appropriate aliased
+ // registers.
+
+ if (haveArguments) {
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != ArgumentsThatWereNotCreated)
+ continue;
+ int operand = exit.operandForIndex(index);
+ // Find the right inline call frame.
+ InlineCallFrame* inlineCallFrame = 0;
+ for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
+ current;
+ current = current->caller.inlineCallFrame) {
+ if (current->stackOffset <= operand) {
+ inlineCallFrame = current;
+ break;
+ }
+ }
+ int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame);
+
+ m_jit.loadPtr(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
+ AssemblyHelpers::Jump haveArguments = m_jit.branchTestPtr(
+ AssemblyHelpers::NonZero, GPRInfo::regT0);
+
+ if (inlineCallFrame) {
+ m_jit.setupArgumentsWithExecState(
+ AssemblyHelpers::TrustedImmPtr(inlineCallFrame));
+ m_jit.move(
+ AssemblyHelpers::TrustedImmPtr(
+ bitwise_cast<void*>(operationCreateInlinedArguments)),
+ GPRInfo::nonArgGPR0);
+ } else {
+ m_jit.setupArgumentsExecState();
+ m_jit.move(
+ AssemblyHelpers::TrustedImmPtr(
+ bitwise_cast<void*>(operationCreateArguments)),
+ GPRInfo::nonArgGPR0);
+ }
+ m_jit.call(GPRInfo::nonArgGPR0);
+ m_jit.storePtr(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
+ m_jit.storePtr(
+ GPRInfo::returnValueGPR,
+ AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
+ m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
+
+ haveArguments.link(&m_jit);
+ m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
+ }
+ }
+
+ // 14) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
// that all new calls into this code will go to the new JIT, so the execute
// counter only affects call frames that performed OSR exit and call frames
// that were still executing the old JIT at the time of another call frame's
@@ -543,12 +600,12 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
handleExitCounts(exit);
- // 14) Load the result of the last bytecode operation into regT0.
+ // 15) Load the result of the last bytecode operation into regT0.
if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
m_jit.loadPtr(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
- // 15) Fix call frame(s).
+ // 16) Fix call frame(s).
ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock));
@@ -584,7 +641,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
if (exit.m_codeOrigin.inlineCallFrame)
m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);
- // 16) Jump into the corresponding baseline JIT code.
+ // 17) Jump into the corresponding baseline JIT code.
CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);
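Note the difference between the two ports in how "arguments not yet created" is detected: the 32_64 compiler compares the tag word against JSValue::EmptyValueTag, while this 64-bit compiler relies on an empty JSValue encoding as all-zero bits, so a single branchTestPtr(NonZero, ...) suffices. A sketch under those assumptions (the 32-bit tag constant below is a placeholder, not the real value):

    #include <cstdint>
    #include <cstdio>

    static const uint32_t sketchEmptyValueTag = 0xFFFFFFFA; // placeholder value

    struct SketchValue32 { uint32_t tag; uint32_t payload; };

    // 32-bit: dedicated tag compare.
    bool isEmpty32(const SketchValue32& v) { return v.tag == sketchEmptyValueTag; }

    // 64-bit: empty encodes as zero, so one test of the whole word is enough.
    bool isEmpty64(uint64_t bits) { return !bits; }

    int main()
    {
        SketchValue32 v = { sketchEmptyValueTag, 0 };
        std::printf("%d %d\n", isEmpty32(v), isEmpty64(0));
        return 0;
    }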
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp
index 376902d97..f95b993d7 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp
@@ -26,6 +26,7 @@
#include "config.h"
#include "DFGOperations.h"
+#include "Arguments.h"
#include "CodeBlock.h"
#include "DFGOSRExit.h"
#include "DFGRepatch.h"
@@ -36,6 +37,7 @@
#include "JSActivation.h"
#include "JSGlobalData.h"
#include "JSStaticScopeObject.h"
+#include "NameInstance.h"
#include "Operations.h"
#if ENABLE(DFG_JIT)
@@ -139,19 +141,19 @@
#endif
#define P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
-void* DFG_OPERATION function##WithReturnAddress(ExecState*, ReturnAddressPtr); \
+void* DFG_OPERATION function##WithReturnAddress(ExecState*, ReturnAddressPtr) REFERENCED_FROM_ASM; \
FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
#define J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
-EncodedJSValue DFG_OPERATION function##WithReturnAddress(ExecState*, JSCell*, Identifier*, ReturnAddressPtr); \
+EncodedJSValue DFG_OPERATION function##WithReturnAddress(ExecState*, JSCell*, Identifier*, ReturnAddressPtr) REFERENCED_FROM_ASM; \
FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function)
#define J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
-EncodedJSValue DFG_OPERATION function##WithReturnAddress(ExecState*, EncodedJSValue, Identifier*, ReturnAddressPtr); \
+EncodedJSValue DFG_OPERATION function##WithReturnAddress(ExecState*, EncodedJSValue, Identifier*, ReturnAddressPtr) REFERENCED_FROM_ASM; \
FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function)
#define V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function) \
-void DFG_OPERATION function##WithReturnAddress(ExecState*, EncodedJSValue, JSCell*, Identifier*, ReturnAddressPtr); \
+void DFG_OPERATION function##WithReturnAddress(ExecState*, EncodedJSValue, JSCell*, Identifier*, ReturnAddressPtr) REFERENCED_FROM_ASM; \
FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function)
namespace JSC { namespace DFG {
@@ -200,6 +202,11 @@ ALWAYS_INLINE static void DFG_OPERATION operationPutByValInternal(ExecState* exe
}
}
+ if (isName(property)) {
+ PutPropertySlot slot(strict);
+ baseValue.put(exec, jsCast<NameInstance*>(property.asCell())->privateName(), value, slot);
+ return;
+ }
// Don't put to an object if toString throws an exception.
Identifier ident(exec, property.toString(exec)->value(exec));
@@ -307,6 +314,9 @@ EncodedJSValue DFG_OPERATION operationGetByVal(ExecState* exec, EncodedJSValue e
}
}
+ if (isName(property))
+ return JSValue::encode(baseValue.get(exec, jsCast<NameInstance*>(property.asCell())->privateName()));
+
Identifier ident(exec, property.toString(exec)->value(exec));
return JSValue::encode(baseValue.get(exec, ident));
}
@@ -330,6 +340,9 @@ EncodedJSValue DFG_OPERATION operationGetByValCell(ExecState* exec, JSCell* base
return JSValue::encode(result);
}
+ if (isName(property))
+ return JSValue::encode(JSValue(base).get(exec, jsCast<NameInstance*>(property.asCell())->privateName()));
+
Identifier ident(exec, property.toString(exec)->value(exec));
return JSValue::encode(JSValue(base).get(exec, ident));
}
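All three hunks above insert the same guard: if the property is a private name (a NameInstance), look it up by its privateName() directly instead of funneling through the generic path, whose toString() conversion can run arbitrary user code or throw. A stand-in sketch of that dispatch order, with toy types in place of the real JSC ones:

    #include <iostream>
    #include <string>

    struct SketchProperty {
        bool isPrivateName;
        std::string text;
    };

    std::string lookupKey(const SketchProperty& p)
    {
        if (p.isPrivateName)
            return "private:" + p.text; // direct, no observable side effects
        return "public:" + p.text;      // models the toString() -> Identifier path
    }

    int main()
    {
        std::cout << lookupKey({ true, "sym" }) << "\n";
        return 0;
    }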
@@ -979,20 +992,20 @@ EncodedJSValue DFG_OPERATION operationToPrimitive(ExecState* exec, EncodedJSValu
return JSValue::encode(JSValue::decode(value).toPrimitive(exec));
}
-EncodedJSValue DFG_OPERATION operationStrCat(ExecState* exec, void* start, size_t size)
+EncodedJSValue DFG_OPERATION operationStrCat(ExecState* exec, void* buffer, size_t size)
{
JSGlobalData* globalData = &exec->globalData();
NativeCallFrameTracer tracer(globalData, exec);
-
- return JSValue::encode(jsString(exec, static_cast<Register*>(start), size));
+
+ return JSValue::encode(jsString(exec, static_cast<Register*>(buffer), size));
}
-EncodedJSValue DFG_OPERATION operationNewArray(ExecState* exec, void* start, size_t size)
+EncodedJSValue DFG_OPERATION operationNewArray(ExecState* exec, void* buffer, size_t size)
{
JSGlobalData* globalData = &exec->globalData();
NativeCallFrameTracer tracer(globalData, exec);
-
- return JSValue::encode(constructArray(exec, static_cast<JSValue*>(start), size));
+
+ return JSValue::encode(constructArray(exec, static_cast<JSValue*>(buffer), size));
}
EncodedJSValue DFG_OPERATION operationNewArrayBuffer(ExecState* exec, size_t start, size_t size)
@@ -1025,13 +1038,73 @@ JSCell* DFG_OPERATION operationCreateActivation(ExecState* exec)
return activation;
}
-void DFG_OPERATION operationTearOffActivation(ExecState* exec, JSCell* activation)
+JSCell* DFG_OPERATION operationCreateArguments(ExecState* exec)
+{
+ // NB: This needs to be exceedingly careful with top call frame tracking, since it
+ // may be called from OSR exit, while the state of the call stack is bizarre.
+ Arguments* result = Arguments::create(exec->globalData(), exec);
+ ASSERT(!exec->globalData().exception);
+ return result;
+}
+
+JSCell* DFG_OPERATION operationCreateInlinedArguments(
+ ExecState* exec, InlineCallFrame* inlineCallFrame)
+{
+ // NB: This needs to be exceedingly careful with top call frame tracking, since it
+ // may be called from OSR exit, while the state of the call stack is bizarre.
+ Arguments* result = Arguments::create(exec->globalData(), exec, inlineCallFrame);
+ ASSERT(!exec->globalData().exception);
+ return result;
+}
+
+void DFG_OPERATION operationTearOffActivation(ExecState* exec, JSCell* activationCell, int32_t unmodifiedArgumentsRegister)
{
- ASSERT(activation);
- ASSERT(activation->inherits(&JSActivation::s_info));
JSGlobalData& globalData = exec->globalData();
NativeCallFrameTracer tracer(&globalData, exec);
- jsCast<JSActivation*>(activation)->tearOff(exec->globalData());
+ if (!activationCell) {
+ if (JSValue v = exec->uncheckedR(unmodifiedArgumentsRegister).jsValue()) {
+ if (!exec->codeBlock()->isStrictMode())
+ asArguments(v)->tearOff(exec);
+ }
+ return;
+ }
+ JSActivation* activation = jsCast<JSActivation*>(activationCell);
+ activation->tearOff(exec->globalData());
+ if (JSValue v = exec->uncheckedR(unmodifiedArgumentsRegister).jsValue())
+ asArguments(v)->didTearOffActivation(exec->globalData(), activation);
+}
+
+
+void DFG_OPERATION operationTearOffArguments(ExecState* exec, JSCell* argumentsCell)
+{
+ ASSERT(exec->codeBlock()->usesArguments());
+ ASSERT(!exec->codeBlock()->needsFullScopeChain());
+ asArguments(argumentsCell)->tearOff(exec);
+}
+
+void DFG_OPERATION operationTearOffInlinedArguments(
+ ExecState* exec, JSCell* argumentsCell, InlineCallFrame* inlineCallFrame)
+{
+ // This should only be called when the inline code block uses arguments but does not
+ // need a full scope chain. We could assert it, except that the assertion would be
+ // rather expensive and may cause side effects that would greatly diverge debug-mode
+ // behavior from release-mode behavior, since getting the code block of an inline
+ // call frame implies call frame reification.
+ asArguments(argumentsCell)->tearOff(exec, inlineCallFrame);
+}
+
+EncodedJSValue DFG_OPERATION operationGetArgumentsLength(ExecState* exec, int32_t argumentsRegister)
+{
+ Identifier ident(&exec->globalData(), "length");
+ JSValue baseValue = exec->uncheckedR(argumentsRegister).jsValue();
+ PropertySlot slot(baseValue);
+ return JSValue::encode(baseValue.get(exec, ident, slot));
+}
+
+EncodedJSValue DFG_OPERATION operationGetArgumentByVal(ExecState* exec, int32_t argumentsRegister, int32_t index)
+{
+ return JSValue::encode(
+ exec->uncheckedR(argumentsRegister).jsValue().get(exec, index));
}
JSCell* DFG_OPERATION operationNewFunction(ExecState* exec, JSCell* functionExecutable)
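operationTearOffActivation above now distinguishes three cases: an arguments object with no activation (tear off the arguments alone, and only in non-strict code), an activation alone, and both (tear off the activation, then redirect the arguments object at it). A toy model of that control flow, using hypothetical classes rather than the real JSActivation/Arguments API:

    #include <cstdio>

    struct SketchArguments { bool tornOff = false; };
    struct SketchActivation { bool tornOff = false; };

    void tearOffActivation(SketchActivation* activation, SketchArguments* args, bool strictMode)
    {
        if (!activation) {
            // Only an arguments object to tear off; strict-mode arguments do
            // not alias the frame's slots, so nothing needs copying there.
            if (args && !strictMode)
                args->tornOff = true;
            return;
        }
        activation->tornOff = true;
        if (args)
            args->tornOff = true; // real code: didTearOffActivation(globalData, activation)
    }

    int main()
    {
        SketchArguments args;
        tearOffActivation(nullptr, &args, false);
        std::printf("%d\n", args.tornOff); // prints 1
        return 0;
    }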
@@ -1129,7 +1202,7 @@ size_t DFG_OPERATION dfgConvertJSValueToBoolean(ExecState* exec, EncodedJSValue
JSGlobalData* globalData = &exec->globalData();
NativeCallFrameTracer tracer(globalData, exec);
- return JSValue::decode(encodedOp).toBoolean(exec);
+ return JSValue::decode(encodedOp).toBoolean();
}
#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h
index 601ed7665..03f198e9d 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.h
+++ b/Source/JavaScriptCore/dfg/DFGOperations.h
@@ -60,6 +60,7 @@ extern "C" {
I: Identifier*
G: GlobalResolveInfo*
*/
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_E)(ExecState*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EA)(ExecState*, JSArray*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECC)(ExecState*, JSCell*, JSCell*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECI)(ExecState*, JSCell*, Identifier*);
@@ -75,9 +76,12 @@ typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EP)(ExecState*, void*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EPP)(ExecState*, void*, void*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EPS)(ExecState*, void*, size_t);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ESS)(ExecState*, size_t, size_t);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EZ)(ExecState*, int32_t);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EZZ)(ExecState*, int32_t, int32_t);
typedef JSCell* DFG_OPERATION (*C_DFGOperation_E)(ExecState*);
typedef JSCell* DFG_OPERATION (*C_DFGOperation_EC)(ExecState*, JSCell*);
typedef JSCell* DFG_OPERATION (*C_DFGOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef JSCell* DFG_OPERATION (*C_DFGOperation_EIcf)(ExecState*, InlineCallFrame*);
typedef double DFG_OPERATION (*D_DFGOperation_DD)(double, double);
typedef double DFG_OPERATION (*D_DFGOperation_ZZ)(int32_t, int32_t);
typedef double DFG_OPERATION (*D_DFGOperation_EJ)(ExecState*, EncodedJSValue);
@@ -87,12 +91,14 @@ typedef size_t DFG_OPERATION (*S_DFGOperation_EJ)(ExecState*, EncodedJSValue);
typedef size_t DFG_OPERATION (*S_DFGOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
typedef size_t DFG_OPERATION (*S_DFGOperation_J)(EncodedJSValue);
typedef void DFG_OPERATION (*V_DFGOperation_EAZJ)(ExecState*, JSArray*, int32_t, EncodedJSValue);
+typedef void DFG_OPERATION (*V_DFGOperation_EC)(ExecState*, JSCell*);
+typedef void DFG_OPERATION (*V_DFGOperation_ECIcf)(ExecState*, JSCell*, InlineCallFrame*);
typedef void DFG_OPERATION (*V_DFGOperation_ECJJ)(ExecState*, JSCell*, EncodedJSValue, EncodedJSValue);
+typedef void DFG_OPERATION (*V_DFGOperation_ECZ)(ExecState*, JSCell*, int32_t);
typedef void DFG_OPERATION (*V_DFGOperation_EJCI)(ExecState*, EncodedJSValue, JSCell*, Identifier*);
typedef void DFG_OPERATION (*V_DFGOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
typedef void DFG_OPERATION (*V_DFGOperation_EJPP)(ExecState*, EncodedJSValue, EncodedJSValue, void*);
typedef void DFG_OPERATION (*V_DFGOperation_EPZJ)(ExecState*, void*, int32_t, EncodedJSValue);
-typedef void DFG_OPERATION (V_DFGOperation_EC)(ExecState*, JSCell*);
typedef void* DFG_OPERATION (*P_DFGOperation_E)(ExecState*);
// These routines provide callbacks out to C++ implementations of operations too complex to JIT.
@@ -114,8 +120,8 @@ EncodedJSValue DFG_OPERATION operationResolveBase(ExecState*, Identifier*);
EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState*, Identifier*);
EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState*, GlobalResolveInfo*, Identifier*);
EncodedJSValue DFG_OPERATION operationToPrimitive(ExecState*, EncodedJSValue);
-EncodedJSValue DFG_OPERATION operationStrCat(ExecState*, void* start, size_t);
-EncodedJSValue DFG_OPERATION operationNewArray(ExecState*, void* start, size_t);
+EncodedJSValue DFG_OPERATION operationStrCat(ExecState*, void*, size_t);
+EncodedJSValue DFG_OPERATION operationNewArray(ExecState*, void*, size_t);
EncodedJSValue DFG_OPERATION operationNewArrayBuffer(ExecState*, size_t, size_t);
EncodedJSValue DFG_OPERATION operationNewRegexp(ExecState*, void*);
void DFG_OPERATION operationPutByValStrict(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue);
@@ -153,7 +159,13 @@ void* DFG_OPERATION operationLinkCall(ExecState*);
void* DFG_OPERATION operationVirtualConstruct(ExecState*);
void* DFG_OPERATION operationLinkConstruct(ExecState*);
JSCell* DFG_OPERATION operationCreateActivation(ExecState*);
-void DFG_OPERATION operationTearOffActivation(ExecState*, JSCell*);
+JSCell* DFG_OPERATION operationCreateArguments(ExecState*);
+JSCell* DFG_OPERATION operationCreateInlinedArguments(ExecState*, InlineCallFrame*);
+void DFG_OPERATION operationTearOffActivation(ExecState*, JSCell*, int32_t unmodifiedArgumentsRegister);
+void DFG_OPERATION operationTearOffArguments(ExecState*, JSCell*);
+void DFG_OPERATION operationTearOffInlinedArguments(ExecState*, JSCell*, InlineCallFrame*);
+EncodedJSValue DFG_OPERATION operationGetArgumentsLength(ExecState*, int32_t);
+EncodedJSValue DFG_OPERATION operationGetArgumentByVal(ExecState*, int32_t, int32_t);
JSCell* DFG_OPERATION operationNewFunction(ExecState*, JSCell*);
JSCell* DFG_OPERATION operationNewFunctionExpression(ExecState*, JSCell*);
double DFG_OPERATION operationFModOnInts(int32_t, int32_t);
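The typedef names are systematic: the letter before the underscore gives the return type (J = EncodedJSValue, C = JSCell*, V = void, P = void*, and so on), and the tokens after DFGOperation_ spell the argument list per the legend near the top of the header; the new J_DFGOperation_EZZ is exactly the shape of operationGetArgumentByVal. A self-contained restatement of three of the new typedefs, with the DFG_OPERATION calling-convention macro dropped so the sketch compiles on its own:

    #include <cstdint>

    // Stand-in forward declarations so the naming scheme can be shown in isolation.
    struct ExecState;
    struct InlineCallFrame;
    struct JSCell;
    typedef int64_t EncodedJSValue;

    // Return-type letter, then one token per argument, per the header's legend:
    typedef EncodedJSValue (*J_DFGOperation_EZZ)(ExecState*, int32_t, int32_t); // E, Z, Z
    typedef JSCell* (*C_DFGOperation_EIcf)(ExecState*, InlineCallFrame*);       // E, Icf
    typedef void (*V_DFGOperation_ECZ)(ExecState*, JSCell*, int32_t);           // E, C, Z

    int main() { return 0; }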
diff --git a/Source/JavaScriptCore/dfg/DFGPhase.cpp b/Source/JavaScriptCore/dfg/DFGPhase.cpp
index bae12b1cc..ecf669704 100644
--- a/Source/JavaScriptCore/dfg/DFGPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPhase.cpp
@@ -28,6 +28,8 @@
#if ENABLE(DFG_JIT)
+#include "DFGValidate.h"
+
namespace JSC { namespace DFG {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
@@ -40,6 +42,7 @@ void Phase::beginPhase()
void Phase::endPhase()
{
+ validate(m_graph, DumpGraph);
}
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGPhase.h b/Source/JavaScriptCore/dfg/DFGPhase.h
index 1d344c0c3..6d13bcd25 100644
--- a/Source/JavaScriptCore/dfg/DFGPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGPhase.h
@@ -73,10 +73,17 @@ private:
};
template<typename PhaseType>
-void runPhase(Graph& graph)
+bool runPhase(Graph& graph)
{
PhaseType phase(graph);
- phase.run();
+ return phase.run();
+}
+
+template<typename PhaseType, typename ArgumentType1>
+bool runPhase(Graph& graph, ArgumentType1 arg1)
+{
+ PhaseType phase(graph, arg1);
+ return phase.run();
}
} } // namespace JSC::DFG
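With run() now returning bool, runPhase<> lets a driver iterate a phase until nothing changes. A generic sketch of the shape a phase must have to fit the template (a hypothetical phase, not the real driver loop):

    #include <cstdio>

    struct Graph { int work = 3; };

    // Minimal phase matching the runPhase<> contract: constructible from
    // Graph&, with run() reporting whether anything changed.
    struct ShrinkPhase {
        Graph& m_graph;
        explicit ShrinkPhase(Graph& g) : m_graph(g) {}
        bool run() { if (!m_graph.work) return false; --m_graph.work; return true; }
    };

    template<typename PhaseType>
    bool runPhase(Graph& graph)
    {
        PhaseType phase(graph);
        return phase.run();
    }

    int main()
    {
        Graph g;
        while (runPhase<ShrinkPhase>(g)) { } // iterate to a fixpoint
        std::printf("%d\n", g.work); // prints 0
        return 0;
    }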
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
index 53174604a..de01adb1f 100644
--- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
@@ -40,7 +40,7 @@ public:
{
}
- void run()
+ bool run()
{
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
m_count = 0;
@@ -77,6 +77,8 @@ public:
doRoundOfDoubleVoting();
propagateBackward();
} while (m_changed);
+
+ return true;
}
private:
@@ -329,8 +331,29 @@ private:
changed |= m_graph[node.child2()].mergeFlags(flags);
break;
}
+
+ case ArithMul: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ if (left && right) {
+ if (m_graph.mulShouldSpeculateInteger(node))
+ changed |= mergePrediction(PredictInt32);
+ else
+ changed |= mergePrediction(PredictDouble);
+ }
+
+ // As soon as a multiply happens, we can easily end up in the part
+ // of the double domain where the point at which you do truncation
+ // can change the outcome. So, ArithMul always checks for overflow
+ // no matter what, and always forces its inputs to check as well.
+
+ flags |= NodeUsedAsNumber | NodeNeedsNegZero;
+ changed |= m_graph[node.child1()].mergeFlags(flags);
+ changed |= m_graph[node.child2()].mergeFlags(flags);
+ break;
+ }
- case ArithMul:
case ArithDiv: {
PredictedType left = m_graph[node.child1()].prediction();
PredictedType right = m_graph[node.child2()].prediction();
@@ -414,6 +437,17 @@ private:
break;
}
+ case GetMyArgumentByValSafe: {
+ changed |= mergePrediction(node.getHeapPrediction());
+ changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
+ break;
+ }
+
+ case GetMyArgumentsLengthSafe: {
+ changed |= setPrediction(PredictInt32);
+ break;
+ }
+
case GetPropertyStorage:
case GetIndexedPropertyStorage: {
changed |= setPrediction(PredictOther);
@@ -553,6 +587,13 @@ private:
break;
}
+ case CreateArguments: {
+ // At this stage we don't try to predict whether the arguments are ours or
+ // someone else's. We could, but we don't, yet.
+ changed |= setPrediction(PredictArguments);
+ break;
+ }
+
case NewFunction:
case NewFunctionNoCheck:
case NewFunctionExpression: {
@@ -562,6 +603,7 @@ private:
case PutByValAlias:
case GetArrayLength:
+ case GetArgumentsLength:
case GetInt8ArrayLength:
case GetInt16ArrayLength:
case GetInt32ArrayLength:
@@ -573,7 +615,10 @@ private:
case GetFloat64ArrayLength:
case GetStringLength:
case Int32ToDouble:
- case DoubleAsInt32: {
+ case DoubleAsInt32:
+ case GetLocalUnlinked:
+ case GetMyArgumentsLength:
+ case GetMyArgumentByVal: {
// This node should never be visible at this stage of compilation. It is
// inserted by fixup(), which follows this phase.
ASSERT_NOT_REACHED();
@@ -619,7 +664,9 @@ private:
case CheckFunction:
case PutStructure:
case TearOffActivation:
+ case TearOffArguments:
case CheckNumber:
+ case CheckArgumentsNotCreated:
changed |= mergeDefaultFlags(node);
break;
@@ -751,7 +798,23 @@ private:
break;
}
- case ArithMul:
+ case ArithMul: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ VariableAccessData::Ballot ballot;
+
+ if (isNumberPrediction(left) && isNumberPrediction(right)
+ && !m_graph.mulShouldSpeculateInteger(node))
+ ballot = VariableAccessData::VoteDouble;
+ else
+ ballot = VariableAccessData::VoteValue;
+
+ vote(node.child1(), ballot);
+ vote(node.child2(), ballot);
+ break;
+ }
+
case ArithMin:
case ArithMax:
case ArithMod:
@@ -807,7 +870,7 @@ private:
if (!variableAccessData->isRoot())
continue;
if (operandIsArgument(variableAccessData->local())
- || m_graph.isCaptured(variableAccessData->local()))
+ || variableAccessData->isCaptured())
continue;
m_changed |= variableAccessData->tallyVotesForShouldUseDoubleFormat();
}
@@ -818,7 +881,7 @@ private:
if (!variableAccessData->isRoot())
continue;
if (operandIsArgument(variableAccessData->local())
- || m_graph.isCaptured(variableAccessData->local()))
+ || variableAccessData->isCaptured())
continue;
m_changed |= variableAccessData->makePredictionForDoubleFormat();
}
@@ -832,9 +895,9 @@ private:
#endif
};
-void performPredictionPropagation(Graph& graph)
+bool performPredictionPropagation(Graph& graph)
{
- runPhase<PredictionPropagationPhase>(graph);
+ return runPhase<PredictionPropagationPhase>(graph);
}
} } // namespace JSC::DFG
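The ArithMul comment above is easy to see with concrete numbers: the product of two comfortably small int32 values can still need more than 32 bits, and a double would represent it exactly, so whether truncation happens before or after the multiply changes the observable result. A quick demonstration:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        int32_t a = 65536, b = 65536;           // both fit easily in int32
        int64_t wide = int64_t(a) * int64_t(b); // 2^32: does not fit in int32
        int32_t truncated = int32_t(wide);      // wraps to 0
        std::printf("%lld vs %d\n", (long long)wide, truncated);
        // A double holds 2^32 exactly, so the point at which truncation is
        // performed changes the outcome -- hence ArithMul's unconditional
        // overflow check.
        return 0;
    }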
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h
index fe127136a..ae025cdeb 100644
--- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h
@@ -43,7 +43,7 @@ class Graph;
// arithmetic nodes) do not qualify for any of these categories. But after running
// this phase, we'll have full information for the expected type of each node.
-void performPredictionPropagation(Graph&);
+bool performPredictionPropagation(Graph&);
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp b/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp
index b16a72a7e..5453469fe 100644
--- a/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp
@@ -39,7 +39,7 @@ public:
{
}
- void run()
+ bool run()
{
bool changed = false;
do {
@@ -63,7 +63,8 @@ public:
break;
}
}
-
+
+ return true;
}
private:
@@ -166,9 +167,9 @@ private:
};
-void performRedundantPhiElimination(Graph& graph)
+bool performRedundantPhiElimination(Graph& graph)
{
- runPhase<RedundantPhiEliminationPhase>(graph);
+ return runPhase<RedundantPhiEliminationPhase>(graph);
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.h b/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.h
index 202ab4441..fd6634a88 100644
--- a/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.h
@@ -39,7 +39,7 @@ class Graph;
// We inserted many can-be-redundant Phi nodes when building the graph.
// This phase will just remove them.
-void performRedundantPhiElimination(Graph&);
+bool performRedundantPhiElimination(Graph&);
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGScoreBoard.h b/Source/JavaScriptCore/dfg/DFGScoreBoard.h
index 578f2b147..430bdf552 100644
--- a/Source/JavaScriptCore/dfg/DFGScoreBoard.h
+++ b/Source/JavaScriptCore/dfg/DFGScoreBoard.h
@@ -120,12 +120,25 @@ public:
// Clear the use count & add to the free list.
m_used[index] = 0;
m_free.append(index);
+ } else {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" Virtual register %u is at %u/%u uses.", index, m_used[index], node.refCount());
+#endif
}
}
void use(Edge child)
{
use(child.indexUnchecked());
}
+
+ void useIfHasResult(Edge child)
+ {
+ if (!child)
+ return;
+ if (!m_graph[child].hasResult())
+ return;
+ use(child);
+ }
unsigned highWatermark()
{
diff --git a/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h b/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h
new file mode 100644
index 000000000..ab99b014d
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGSilentRegisterSavePlan_h
+#define DFGSilentRegisterSavePlan_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGCommon.h"
+#include "DFGFPRInfo.h"
+#include "DFGGPRInfo.h"
+
+namespace JSC { namespace DFG {
+
+enum SilentSpillAction {
+ DoNothingForSpill,
+ Store32Tag,
+ Store32Payload,
+ StorePtr,
+ StoreDouble
+};
+
+enum SilentFillAction {
+ DoNothingForFill,
+ SetInt32Constant,
+ SetBooleanConstant,
+ SetCellConstant,
+ SetTrustedJSConstant,
+ SetJSConstant,
+ SetJSConstantTag,
+ SetJSConstantPayload,
+ SetInt32Tag,
+ SetCellTag,
+ SetBooleanTag,
+ SetDoubleConstant,
+ Load32Tag,
+ Load32Payload,
+ Load32PayloadBoxInt,
+ LoadPtr,
+ LoadDouble,
+ LoadDoubleBoxDouble,
+ LoadJSUnboxDouble
+};
+
+class SilentRegisterSavePlan {
+public:
+ SilentRegisterSavePlan()
+ : m_spillAction(DoNothingForSpill)
+ , m_fillAction(DoNothingForFill)
+ , m_register(-1)
+ , m_nodeIndex(NoNode)
+ {
+ }
+
+ SilentRegisterSavePlan(
+ SilentSpillAction spillAction,
+ SilentFillAction fillAction,
+ NodeIndex nodeIndex,
+ GPRReg gpr)
+ : m_spillAction(spillAction)
+ , m_fillAction(fillAction)
+ , m_register(gpr)
+ , m_nodeIndex(nodeIndex)
+ {
+ }
+
+ SilentRegisterSavePlan(
+ SilentSpillAction spillAction,
+ SilentFillAction fillAction,
+ NodeIndex nodeIndex,
+ FPRReg fpr)
+ : m_spillAction(spillAction)
+ , m_fillAction(fillAction)
+ , m_register(fpr)
+ , m_nodeIndex(nodeIndex)
+ {
+ }
+
+ SilentSpillAction spillAction() const { return static_cast<SilentSpillAction>(m_spillAction); }
+ SilentFillAction fillAction() const { return static_cast<SilentFillAction>(m_fillAction); }
+
+ NodeIndex nodeIndex() const { return m_nodeIndex; }
+
+ GPRReg gpr() const { return static_cast<GPRReg>(m_register); }
+ FPRReg fpr() const { return static_cast<FPRReg>(m_register); }
+
+private:
+ int8_t m_spillAction;
+ int8_t m_fillAction;
+ int8_t m_register;
+ NodeIndex m_nodeIndex;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGSilentRegisterSavePlan_h
+
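The three int8_t fields in SilentRegisterSavePlan are deliberate: with a 32-bit NodeIndex (an assumption in this sketch), a plan packs into 8 bytes, which keeps the Vector<SilentRegisterSavePlan, 2> inline storage used by the call slow paths cheap to copy around. A quick check of that layout:

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t SketchNodeIndex; // assumption: NodeIndex is 32-bit

    struct SketchPlan {
        int8_t spillAction;
        int8_t fillAction;
        int8_t reg;
        SketchNodeIndex nodeIndex; // 4-byte aligned, giving 8 bytes total
    };

    int main()
    {
        std::printf("%zu\n", sizeof(SketchPlan)); // typically prints 8
        return 0;
    }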
diff --git a/Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h b/Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h
new file mode 100644
index 000000000..fa1f888e0
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGSlowPathGenerator_h
+#define DFGSlowPathGenerator_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGCommon.h"
+#include "DFGSilentRegisterSavePlan.h"
+#include "DFGSpeculativeJIT.h"
+#include <wtf/FastAllocBase.h>
+#include <wtf/PassOwnPtr.h>
+
+namespace JSC { namespace DFG {
+
+class SlowPathGenerator {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ SlowPathGenerator(SpeculativeJIT* jit)
+ : m_compileIndex(jit->m_compileIndex)
+ {
+ }
+ virtual ~SlowPathGenerator() { }
+ void generate(SpeculativeJIT* jit)
+ {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("Generating slow path %p at offset 0x%x\n", this, jit->m_jit.debugOffset());
+#endif
+ m_label = jit->m_jit.label();
+ jit->m_compileIndex = m_compileIndex;
+ generateInternal(jit);
+#if !ASSERT_DISABLED
+ jit->m_jit.breakpoint(); // make sure that the generator jumps back somewhere
+#endif
+ }
+ MacroAssembler::Label label() const { return m_label; }
+ virtual MacroAssembler::Call call() const
+ {
+ ASSERT_NOT_REACHED(); // By default slow path generators don't have a call.
+ return MacroAssembler::Call();
+ }
+protected:
+ virtual void generateInternal(SpeculativeJIT*) = 0;
+ MacroAssembler::Label m_label;
+ NodeIndex m_compileIndex;
+};
+
+template<typename JumpType>
+class JumpingSlowPathGenerator : public SlowPathGenerator {
+public:
+ JumpingSlowPathGenerator(JumpType from, SpeculativeJIT* jit)
+ : SlowPathGenerator(jit)
+ , m_from(from)
+ , m_to(jit->m_jit.label())
+ {
+ }
+
+protected:
+ void linkFrom(SpeculativeJIT* jit)
+ {
+ m_from.link(&jit->m_jit);
+ }
+
+ void jumpTo(SpeculativeJIT* jit)
+ {
+ jit->m_jit.jump().linkTo(m_to, &jit->m_jit);
+ }
+
+ JumpType m_from;
+ MacroAssembler::Label m_to;
+};
+
+template<typename JumpType, typename FunctionType, typename ResultType>
+class CallSlowPathGenerator : public JumpingSlowPathGenerator<JumpType> {
+public:
+ CallSlowPathGenerator(
+ JumpType from, SpeculativeJIT* jit, FunctionType function,
+ SpillRegistersMode spillMode, ResultType result)
+ : JumpingSlowPathGenerator<JumpType>(from, jit)
+ , m_function(function)
+ , m_spillMode(spillMode)
+ , m_result(result)
+ {
+ if (m_spillMode == NeedToSpill)
+ jit->silentSpillAllRegistersImpl(false, m_plans, result);
+ }
+
+ virtual MacroAssembler::Call call() const
+ {
+ return m_call;
+ }
+
+protected:
+ void setUp(SpeculativeJIT* jit)
+ {
+ this->linkFrom(jit);
+ if (m_spillMode == NeedToSpill) {
+ for (unsigned i = 0; i < m_plans.size(); ++i)
+ jit->silentSpill(m_plans[i]);
+ }
+ }
+
+ void recordCall(MacroAssembler::Call call)
+ {
+ m_call = call;
+ }
+
+ void tearDown(SpeculativeJIT* jit)
+ {
+ if (m_spillMode == NeedToSpill) {
+ GPRReg canTrample = SpeculativeJIT::pickCanTrample(m_result);
+ for (unsigned i = m_plans.size(); i--;)
+ jit->silentFill(m_plans[i], canTrample);
+ }
+ this->jumpTo(jit);
+ }
+
+ FunctionType m_function;
+ SpillRegistersMode m_spillMode;
+ ResultType m_result;
+ MacroAssembler::Call m_call;
+ Vector<SilentRegisterSavePlan, 2> m_plans;
+};
+
+template<typename JumpType, typename FunctionType, typename ResultType>
+class CallResultAndNoArgumentsSlowPathGenerator
+ : public CallSlowPathGenerator<JumpType, FunctionType, ResultType> {
+public:
+ CallResultAndNoArgumentsSlowPathGenerator(
+ JumpType from, SpeculativeJIT* jit, FunctionType function,
+ SpillRegistersMode spillMode, ResultType result)
+ : CallSlowPathGenerator<JumpType, FunctionType, ResultType>(
+ from, jit, function, spillMode, result)
+ {
+ }
+
+protected:
+ void generateInternal(SpeculativeJIT* jit)
+ {
+ this->setUp(jit);
+ this->recordCall(jit->callOperation(this->m_function, this->m_result));
+ this->tearDown(jit);
+ }
+};
+
+template<
+ typename JumpType, typename FunctionType, typename ResultType,
+ typename ArgumentType1>
+class CallResultAndOneArgumentSlowPathGenerator
+ : public CallSlowPathGenerator<JumpType, FunctionType, ResultType> {
+public:
+ CallResultAndOneArgumentSlowPathGenerator(
+ JumpType from, SpeculativeJIT* jit, FunctionType function,
+ SpillRegistersMode spillMode, ResultType result, ArgumentType1 argument1)
+ : CallSlowPathGenerator<JumpType, FunctionType, ResultType>(
+ from, jit, function, spillMode, result)
+ , m_argument1(argument1)
+ {
+ }
+
+protected:
+ void generateInternal(SpeculativeJIT* jit)
+ {
+ this->setUp(jit);
+ this->recordCall(jit->callOperation(this->m_function, this->m_result, m_argument1));
+ this->tearDown(jit);
+ }
+
+ ArgumentType1 m_argument1;
+};
+
+template<
+ typename JumpType, typename FunctionType, typename ResultType,
+ typename ArgumentType1, typename ArgumentType2>
+class CallResultAndTwoArgumentsSlowPathGenerator
+ : public CallSlowPathGenerator<JumpType, FunctionType, ResultType> {
+public:
+ CallResultAndTwoArgumentsSlowPathGenerator(
+ JumpType from, SpeculativeJIT* jit, FunctionType function,
+ SpillRegistersMode spillMode, ResultType result, ArgumentType1 argument1,
+ ArgumentType2 argument2)
+ : CallSlowPathGenerator<JumpType, FunctionType, ResultType>(
+ from, jit, function, spillMode, result)
+ , m_argument1(argument1)
+ , m_argument2(argument2)
+ {
+ }
+
+protected:
+ void generateInternal(SpeculativeJIT* jit)
+ {
+ this->setUp(jit);
+ this->recordCall(jit->callOperation(this->m_function, this->m_result, m_argument1, m_argument2));
+ this->tearDown(jit);
+ }
+
+ ArgumentType1 m_argument1;
+ ArgumentType2 m_argument2;
+};
+
+template<
+ typename JumpType, typename FunctionType, typename ResultType,
+ typename ArgumentType1, typename ArgumentType2, typename ArgumentType3>
+class CallResultAndThreeArgumentsSlowPathGenerator
+ : public CallSlowPathGenerator<JumpType, FunctionType, ResultType> {
+public:
+ CallResultAndThreeArgumentsSlowPathGenerator(
+ JumpType from, SpeculativeJIT* jit, FunctionType function,
+ SpillRegistersMode spillMode, ResultType result, ArgumentType1 argument1,
+ ArgumentType2 argument2, ArgumentType3 argument3)
+ : CallSlowPathGenerator<JumpType, FunctionType, ResultType>(
+ from, jit, function, spillMode, result)
+ , m_argument1(argument1)
+ , m_argument2(argument2)
+ , m_argument3(argument3)
+ {
+ }
+
+protected:
+ void generateInternal(SpeculativeJIT* jit)
+ {
+ this->setUp(jit);
+ this->recordCall(
+ jit->callOperation(
+ this->m_function, this->m_result, m_argument1, m_argument2,
+ m_argument3));
+ this->tearDown(jit);
+ }
+
+ ArgumentType1 m_argument1;
+ ArgumentType2 m_argument2;
+ ArgumentType3 m_argument3;
+};
+
+template<
+ typename JumpType, typename FunctionType, typename ResultType,
+ typename ArgumentType1, typename ArgumentType2, typename ArgumentType3,
+ typename ArgumentType4>
+class CallResultAndFourArgumentsSlowPathGenerator
+ : public CallSlowPathGenerator<JumpType, FunctionType, ResultType> {
+public:
+ CallResultAndFourArgumentsSlowPathGenerator(
+ JumpType from, SpeculativeJIT* jit, FunctionType function,
+ SpillRegistersMode spillMode, ResultType result, ArgumentType1 argument1,
+ ArgumentType2 argument2, ArgumentType3 argument3, ArgumentType4 argument4)
+ : CallSlowPathGenerator<JumpType, FunctionType, ResultType>(
+ from, jit, function, spillMode, result)
+ , m_argument1(argument1)
+ , m_argument2(argument2)
+ , m_argument3(argument3)
+ , m_argument4(argument4)
+ {
+ }
+
+protected:
+ void generateInternal(SpeculativeJIT* jit)
+ {
+ this->setUp(jit);
+ this->recordCall(
+ jit->callOperation(
+ this->m_function, this->m_result, m_argument1, m_argument2,
+ m_argument3, m_argument4));
+ this->tearDown(jit);
+ }
+
+ ArgumentType1 m_argument1;
+ ArgumentType2 m_argument2;
+ ArgumentType3 m_argument3;
+ ArgumentType4 m_argument4;
+};
+
+template<
+ typename JumpType, typename FunctionType, typename ResultType,
+ typename ArgumentType1, typename ArgumentType2, typename ArgumentType3,
+ typename ArgumentType4, typename ArgumentType5>
+class CallResultAndFiveArgumentsSlowPathGenerator
+ : public CallSlowPathGenerator<JumpType, FunctionType, ResultType> {
+public:
+ CallResultAndFiveArgumentsSlowPathGenerator(
+ JumpType from, SpeculativeJIT* jit, FunctionType function,
+ SpillRegistersMode spillMode, ResultType result, ArgumentType1 argument1,
+ ArgumentType2 argument2, ArgumentType3 argument3, ArgumentType4 argument4,
+ ArgumentType5 argument5)
+ : CallSlowPathGenerator<JumpType, FunctionType, ResultType>(
+ from, jit, function, spillMode, result)
+ , m_argument1(argument1)
+ , m_argument2(argument2)
+ , m_argument3(argument3)
+ , m_argument4(argument4)
+ , m_argument5(argument5)
+ {
+ }
+
+protected:
+ void generateInternal(SpeculativeJIT* jit)
+ {
+ this->setUp(jit);
+ this->recordCall(
+ jit->callOperation(
+ this->m_function, this->m_result, m_argument1, m_argument2,
+ m_argument3, m_argument4, m_argument5));
+ this->tearDown(jit);
+ }
+
+ ArgumentType1 m_argument1;
+ ArgumentType2 m_argument2;
+ ArgumentType3 m_argument3;
+ ArgumentType4 m_argument4;
+ ArgumentType5 m_argument5;
+};
+
+template<typename JumpType, typename FunctionType, typename ResultType>
+inline PassOwnPtr<SlowPathGenerator> slowPathCall(
+ JumpType from, SpeculativeJIT* jit, FunctionType function,
+ ResultType result, SpillRegistersMode spillMode = NeedToSpill)
+{
+ return adoptPtr(
+ new CallResultAndNoArgumentsSlowPathGenerator<
+ JumpType, FunctionType, ResultType>(
+ from, jit, function, spillMode, result));
+}
+
+template<
+ typename JumpType, typename FunctionType, typename ResultType,
+ typename ArgumentType1>
+inline PassOwnPtr<SlowPathGenerator> slowPathCall(
+ JumpType from, SpeculativeJIT* jit, FunctionType function,
+ ResultType result, ArgumentType1 argument1,
+ SpillRegistersMode spillMode = NeedToSpill)
+{
+ return adoptPtr(
+ new CallResultAndOneArgumentSlowPathGenerator<
+ JumpType, FunctionType, ResultType, ArgumentType1>(
+ from, jit, function, spillMode, result, argument1));
+}
+
+template<
+ typename JumpType, typename FunctionType, typename ResultType,
+ typename ArgumentType1, typename ArgumentType2>
+inline PassOwnPtr<SlowPathGenerator> slowPathCall(
+ JumpType from, SpeculativeJIT* jit, FunctionType function,
+ ResultType result, ArgumentType1 argument1, ArgumentType2 argument2,
+ SpillRegistersMode spillMode = NeedToSpill)
+{
+ return adoptPtr(
+ new CallResultAndTwoArgumentsSlowPathGenerator<
+ JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2>(
+ from, jit, function, spillMode, result, argument1, argument2));
+}
+
+template<
+ typename JumpType, typename FunctionType, typename ResultType,
+ typename ArgumentType1, typename ArgumentType2, typename ArgumentType3>
+inline PassOwnPtr<SlowPathGenerator> slowPathCall(
+ JumpType from, SpeculativeJIT* jit, FunctionType function,
+ ResultType result, ArgumentType1 argument1, ArgumentType2 argument2,
+ ArgumentType3 argument3, SpillRegistersMode spillMode = NeedToSpill)
+{
+ return adoptPtr(
+ new CallResultAndThreeArgumentsSlowPathGenerator<
+ JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2,
+ ArgumentType3>(
+ from, jit, function, spillMode, result, argument1, argument2,
+ argument3));
+}
+
+template<
+ typename JumpType, typename FunctionType, typename ResultType,
+ typename ArgumentType1, typename ArgumentType2, typename ArgumentType3,
+ typename ArgumentType4>
+inline PassOwnPtr<SlowPathGenerator> slowPathCall(
+ JumpType from, SpeculativeJIT* jit, FunctionType function,
+ ResultType result, ArgumentType1 argument1, ArgumentType2 argument2,
+ ArgumentType3 argument3, ArgumentType4 argument4,
+ SpillRegistersMode spillMode = NeedToSpill)
+{
+ return adoptPtr(
+ new CallResultAndFourArgumentsSlowPathGenerator<
+ JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2,
+ ArgumentType3, ArgumentType4>(
+ from, jit, function, spillMode, result, argument1, argument2,
+ argument3, argument4));
+}
+
+template<
+ typename JumpType, typename FunctionType, typename ResultType,
+ typename ArgumentType1, typename ArgumentType2, typename ArgumentType3,
+ typename ArgumentType4, typename ArgumentType5>
+inline PassOwnPtr<SlowPathGenerator> slowPathCall(
+ JumpType from, SpeculativeJIT* jit, FunctionType function,
+ ResultType result, ArgumentType1 argument1, ArgumentType2 argument2,
+ ArgumentType3 argument3, ArgumentType4 argument4, ArgumentType5 argument5,
+ SpillRegistersMode spillMode = NeedToSpill)
+{
+ return adoptPtr(
+ new CallResultAndFiveArgumentsSlowPathGenerator<
+ JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2,
+ ArgumentType3, ArgumentType4, ArgumentType5>(
+ from, jit, function, spillMode, result, argument1, argument2,
+ argument3, argument4, argument5));
+}
+
+template<typename JumpType, typename DestinationType, typename SourceType, unsigned numberOfAssignments>
+class AssigningSlowPathGenerator : public JumpingSlowPathGenerator<JumpType> {
+public:
+ AssigningSlowPathGenerator(
+ JumpType from, SpeculativeJIT* jit,
+ DestinationType destination[numberOfAssignments],
+ SourceType source[numberOfAssignments])
+ : JumpingSlowPathGenerator<JumpType>(from, jit)
+ {
+ for (unsigned i = numberOfAssignments; i--;) {
+ m_destination[i] = destination[i];
+ m_source[i] = source[i];
+ }
+ }
+
+protected:
+ virtual void generateInternal(SpeculativeJIT* jit)
+ {
+ this->linkFrom(jit);
+ for (unsigned i = numberOfAssignments; i--;)
+ jit->m_jit.move(m_source[i], m_destination[i]);
+ this->jumpTo(jit);
+ }
+
+private:
+ DestinationType m_destination[numberOfAssignments];
+ SourceType m_source[numberOfAssignments];
+};
+
+template<typename JumpType, typename DestinationType, typename SourceType, unsigned numberOfAssignments>
+inline PassOwnPtr<SlowPathGenerator> slowPathMove(
+ JumpType from, SpeculativeJIT* jit, SourceType source[numberOfAssignments], DestinationType destination[numberOfAssignments])
+{
+ return adoptPtr(
+ new AssigningSlowPathGenerator<
+ JumpType, DestinationType, SourceType, numberOfAssignments>(
+ from, jit, destination, source));
+}
+
+template<typename JumpType, typename DestinationType, typename SourceType>
+inline PassOwnPtr<SlowPathGenerator> slowPathMove(
+ JumpType from, SpeculativeJIT* jit, SourceType source, DestinationType destination)
+{
+ SourceType sourceArray[1] = { source };
+ DestinationType destinationArray[1] = { destination };
+ return adoptPtr(
+ new AssigningSlowPathGenerator<
+ JumpType, DestinationType, SourceType, 1>(
+ from, jit, destinationArray, sourceArray));
+}
+
+template<typename JumpType, typename DestinationType, typename SourceType>
+inline PassOwnPtr<SlowPathGenerator> slowPathMove(
+ JumpType from, SpeculativeJIT* jit, SourceType source1, DestinationType destination1, SourceType source2, DestinationType destination2)
+{
+ SourceType sourceArray[2] = { source1, source2 };
+ DestinationType destinationArray[2] = { destination1, destination2 };
+ return adoptPtr(
+ new AssigningSlowPathGenerator<
+ JumpType, DestinationType, SourceType, 2>(
+ from, jit, destinationArray, sourceArray));
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGSlowPathGenerator_h
+
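A minimal usage sketch of the machinery above (modeled on the compileValueToInt32 call site in the next diff; fpr and gpr stand for the operand and result registers already allocated at the call site): a client emits only the fast-path branch inline and defers the spill/call/fill sequence to a generator that runs after the main body.

    // Inside a SpeculativeJIT compile method (sketch):
    JITCompiler::Jump notTruncated = m_jit.branchTruncateDoubleToInt32(
        fpr, gpr, JITCompiler::BranchIfTruncateFailed);
    // slowPathCall() records a silent-spill plan now; the actual
    // spill/call(toInt32)/fill/jump-back code is emitted later, out of
    // line, when runSlowPathGenerators() drains the queue.
    addSlowPathGenerator(slowPathCall(notTruncated, this, toInt32, gpr, fpr));
    integerResult(gpr, m_compileIndex);

Note that the CallSlowPathGenerator constructor captures the silent-spill plan at queueing time, so the register state saved and restored is the one live at the branch, even though the slow-path code is emitted much later.
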
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index 0bcee7510..db71fc01f 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -28,10 +28,46 @@
#if ENABLE(DFG_JIT)
+#include "Arguments.h"
+#include "DFGSlowPathGenerator.h"
#include "LinkBuffer.h"
namespace JSC { namespace DFG {
+SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
+ : m_compileOkay(true)
+ , m_jit(jit)
+ , m_compileIndex(0)
+ , m_indexInBlock(0)
+ , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
+ , m_blockHeads(jit.graph().m_blocks.size())
+ , m_arguments(jit.codeBlock()->numParameters())
+ , m_variables(jit.graph().m_localVars)
+ , m_lastSetOperand(std::numeric_limits<int>::max())
+ , m_state(m_jit.graph())
+ , m_isCheckingArgumentTypes(false)
+{
+}
+
+SpeculativeJIT::~SpeculativeJIT()
+{
+ WTF::deleteAllValues(m_slowPathGenerators);
+}
+
+void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
+{
+ m_slowPathGenerators.append(slowPathGenerator.leakPtr());
+}
+
+void SpeculativeJIT::runSlowPathGenerators()
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("Running %lu slow path generators.\n", m_slowPathGenerators.size());
+#endif
+ for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
+ m_slowPathGenerators[i]->generate(this);
+}
+
// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (i.e. they point to Thumb code).
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
@@ -768,6 +804,9 @@ void ValueSource::dump(FILE* out) const
case DoubleInRegisterFile:
fprintf(out, "Double");
break;
+ case ArgumentsSource:
+ fprintf(out, "Arguments");
+ break;
case HaveNode:
fprintf(out, "Node(%d)", m_nodeIndex);
break;
@@ -795,7 +834,7 @@ void SpeculativeJIT::compilePeepHoleObjectEquality(Node& node, NodeIndex branchN
MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
condition = MacroAssembler::NotEqual;
BlockIndex tmp = taken;
taken = notTaken;
@@ -825,7 +864,7 @@ void SpeculativeJIT::compilePeepHoleIntegerBranch(Node& node, NodeIndex branchNo
// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
condition = JITCompiler::invert(condition);
BlockIndex tmp = taken;
taken = notTaken;
@@ -939,7 +978,7 @@ void SpeculativeJIT::compile(BasicBlock& block)
ASSERT(m_arguments.size() == block.variablesAtHead.numberOfArguments());
for (size_t i = 0; i < m_arguments.size(); ++i) {
NodeIndex nodeIndex = block.variablesAtHead.argument(i);
- if (nodeIndex == NoNode || m_jit.graph().argumentIsCaptured(i))
+ if (nodeIndex == NoNode || m_jit.codeBlock()->argumentIsCaptured(i))
m_arguments[i] = ValueSource(ValueInRegisterFile);
else
m_arguments[i] = ValueSource::forPrediction(at(nodeIndex).variableAccessData()->prediction());
@@ -951,10 +990,16 @@ void SpeculativeJIT::compile(BasicBlock& block)
ASSERT(m_variables.size() == block.variablesAtHead.numberOfLocals());
for (size_t i = 0; i < m_variables.size(); ++i) {
NodeIndex nodeIndex = block.variablesAtHead.local(i);
- if ((nodeIndex == NoNode || !at(nodeIndex).refCount()) && !m_jit.graph().localIsCaptured(i))
- m_variables[i] = ValueSource(SourceIsDead);
- else if (m_jit.graph().localIsCaptured(i))
+ // FIXME: Use the variable access data, not the first node in the block.
+ // https://bugs.webkit.org/show_bug.cgi?id=87205
+ if (m_jit.codeBlock()->localIsCaptured(at(block[0]).codeOrigin.inlineCallFrame, i))
m_variables[i] = ValueSource(ValueInRegisterFile);
+ else if (nodeIndex == NoNode)
+ m_variables[i] = ValueSource(SourceIsDead);
+ else if (at(nodeIndex).variableAccessData()->isArgumentsAlias())
+ m_variables[i] = ValueSource(ArgumentsSource);
+ else if (!at(nodeIndex).refCount())
+ m_variables[i] = ValueSource(SourceIsDead);
else if (at(nodeIndex).variableAccessData()->shouldUseDoubleFormat())
m_variables[i] = ValueSource(DoubleInRegisterFile);
else
@@ -1082,6 +1127,7 @@ void SpeculativeJIT::compile(BasicBlock& block)
void SpeculativeJIT::checkArgumentTypes()
{
ASSERT(!m_compileIndex);
+ m_isCheckingArgumentTypes = true;
m_codeOriginForOSR = CodeOrigin(0);
for (size_t i = 0; i < m_arguments.size(); ++i)
@@ -1231,6 +1277,7 @@ void SpeculativeJIT::checkArgumentTypes()
}
#endif
}
+ m_isCheckingArgumentTypes = false;
}
bool SpeculativeJIT::compile()
@@ -1241,8 +1288,11 @@ bool SpeculativeJIT::compile()
m_jit.move(TrustedImm32(0), GPRInfo::regT0);
ASSERT(!m_compileIndex);
- for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block)
- compile(*m_jit.graph().m_blocks[m_block]);
+ for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block) {
+ BasicBlock* block = m_jit.graph().m_blocks[m_block].get();
+ if (block)
+ compile(*block);
+ }
linkBranches();
return true;
}
@@ -1250,8 +1300,10 @@ bool SpeculativeJIT::compile()
void SpeculativeJIT::createOSREntries()
{
for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
- BasicBlock& block = *m_jit.graph().m_blocks[blockIndex];
- if (!block.isOSRTarget)
+ BasicBlock* block = m_jit.graph().m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ if (!block->isOSRTarget)
continue;
// Currently we only need to create OSR entry trampolines when using edge code
@@ -1273,9 +1325,12 @@ void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
{
unsigned osrEntryIndex = 0;
for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
- BasicBlock& block = *m_jit.graph().m_blocks[blockIndex];
- if (block.isOSRTarget)
- m_jit.noticeOSREntry(block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
+ BasicBlock* block = m_jit.graph().m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ if (!block->isOSRTarget)
+ continue;
+ m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
}
ASSERT(osrEntryIndex == m_osrEntryHeads.size());
}
@@ -1300,13 +1355,18 @@ ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSo
case DoubleInRegisterFile:
return ValueRecovery::alreadyInRegisterFileAsUnboxedDouble();
+
+ case ArgumentsSource:
+ return ValueRecovery::argumentsThatWereNotCreated();
case HaveNode: {
if (isConstant(valueSource.nodeIndex()))
return ValueRecovery::constant(valueOfJSConstant(valueSource.nodeIndex()));
-
+
Node* nodePtr = &at(valueSource.nodeIndex());
if (!nodePtr->shouldGenerate()) {
+ if (nodePtr->op() == CreateArguments)
+ return ValueRecovery::argumentsThatWereNotCreated();
// It's legitimately dead. As in, nobody will ever use this node, or operand,
// ever. Set it to Undefined to make the GC happy after the OSR.
return ValueRecovery::constant(jsUndefined());
@@ -1591,13 +1651,10 @@ void SpeculativeJIT::compileValueToInt32(Node& node)
DoubleOperand op1(this, node.child1());
FPRReg fpr = op1.fpr();
GPRReg gpr = result.gpr();
- JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful);
-
- silentSpillAllRegisters(gpr);
- callOperation(toInt32, gpr, fpr);
- silentFillAllRegisters(gpr);
+ JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
+
+ addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
- truncatedToInteger.link(&m_jit);
integerResult(gpr, m_compileIndex);
return;
}
@@ -1613,7 +1670,8 @@ void SpeculativeJIT::compileValueToInt32(Node& node)
JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
- speculationCheck(BadType, JSValueRegs(gpr), node.child1().index(), m_jit.branchTestPtr(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
+ if (!isNumberPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(gpr), node.child1().index(), m_jit.branchTestPtr(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
// First, if we get here we have a double encoded as a JSValue
m_jit.move(gpr, resultGpr);
@@ -1649,7 +1707,8 @@ void SpeculativeJIT::compileValueToInt32(Node& node)
JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
- speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), node.child1().index(), m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
+ if (!isNumberPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), node.child1().index(), m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
@@ -2008,17 +2067,14 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor&
MacroAssembler::Jump fixed = m_jit.jump();
notNaN.link(&m_jit);
- MacroAssembler::Jump done;
+ MacroAssembler::Jump failed;
if (signedness == SignedTypedArray)
- done = m_jit.branchTruncateDoubleToInt32(fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful);
+ failed = m_jit.branchTruncateDoubleToInt32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
else
- done = m_jit.branchTruncateDoubleToUint32(fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful);
-
- silentSpillAllRegisters(gpr);
- callOperation(toInt32, gpr, fpr);
- silentFillAllRegisters(gpr);
+ failed = m_jit.branchTruncateDoubleToUint32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
+
+ addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
- done.link(&m_jit);
fixed.link(&m_jit);
value.adopt(result);
valueGPR = gpr;
@@ -2553,7 +2609,7 @@ void SpeculativeJIT::compileArithNegate(Node& node)
void SpeculativeJIT::compileArithMul(Node& node)
{
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
+ if (m_jit.graph().mulShouldSpeculateInteger(node)) {
SpeculateIntegerOperand op1(this, node.child1());
SpeculateIntegerOperand op2(this, node.child2());
GPRTemporary result(this);
@@ -2561,15 +2617,17 @@ void SpeculativeJIT::compileArithMul(Node& node)
GPRReg reg1 = op1.gpr();
GPRReg reg2 = op2.gpr();
- // What is unfortunate is that we cannot take advantage of nodeCanTruncateInteger()
- // here. A multiply on integers performed in the double domain and then truncated to
- // an integer will give a different result than a multiply performed in the integer
- // domain and then truncated, if the integer domain result would have resulted in
- // something bigger than what a 32-bit integer can hold. JavaScript mandates that
- // the semantics are always as if the multiply had been performed in the double
- // domain.
-
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
+ // We can perform truncated multiplications if we get to this point, because if the
+ // fixup phase could not prove that it would be safe, it would have turned us into
+ // a double multiplication.
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(reg1, result.gpr());
+ m_jit.mul32(reg2, result.gpr());
+ } else {
+ speculationCheck(
+ Overflow, JSValueRegs(), NoNode,
+ m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
+ }
// Check for negative zero, if the users of this node care about such things.
if (!nodeCanIgnoreNegativeZero(node.arithNodeFlags())) {
@@ -2772,7 +2830,7 @@ bool SpeculativeJIT::compileStrictEqForConstant(Node& node, Edge value, JSValue
// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
condition = MacroAssembler::NotEqual;
BlockIndex tmp = taken;
taken = notTaken;
@@ -2940,7 +2998,9 @@ void SpeculativeJIT::compileGetIndexedPropertyStorage(Node& node)
GPRTemporary storage(this);
GPRReg storageReg = storage.gpr();
- if (at(node.child1()).prediction() == PredictString) {
+ if (at(node.child1()).shouldSpeculateArguments()) {
+ ASSERT_NOT_REACHED();
+ } else if (at(node.child1()).prediction() == PredictString) {
if (!isStringPrediction(m_state.forNode(node.child1()).m_type))
speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info)));
@@ -3003,6 +3063,120 @@ void SpeculativeJIT::compileGetIndexedPropertyStorage(Node& node)
storageResult(storageReg, m_compileIndex);
}
+void SpeculativeJIT::compileGetByValOnArguments(Node& node)
+{
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+ GPRTemporary result(this);
+#if USE(JSVALUE32_64)
+ GPRTemporary resultTag(this);
+#endif
+ GPRTemporary scratch(this);
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg resultReg = result.gpr();
+#if USE(JSVALUE32_64)
+ GPRReg resultTagReg = resultTag.gpr();
+#endif
+ GPRReg scratchReg = scratch.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ if (!isArgumentsPrediction(m_state.forNode(node.child1()).m_type)) {
+ speculationCheck(
+ BadType, JSValueSource::unboxedCell(baseReg), node.child1(),
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(baseReg, JSCell::classInfoOffset()),
+ MacroAssembler::TrustedImmPtr(&Arguments::s_info)));
+ }
+
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, Arguments::offsetOfData()),
+ scratchReg);
+
+ // Two checks: the index must be within numArguments, and no arguments may have been deleted.
+ speculationCheck(
+ Uncountable, JSValueSource(), NoNode,
+ m_jit.branchPtr(
+ MacroAssembler::AboveOrEqual, propertyReg,
+ MacroAssembler::Address(scratchReg, OBJECT_OFFSETOF(ArgumentsData, numArguments))));
+ speculationCheck(
+ Uncountable, JSValueSource(), NoNode,
+ m_jit.branchTestPtr(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(
+ scratchReg, OBJECT_OFFSETOF(ArgumentsData, deletedArguments))));
+
+ m_jit.move(propertyReg, resultReg);
+ m_jit.neg32(resultReg);
+ m_jit.signExtend32ToPtr(resultReg, resultReg);
+ m_jit.loadPtr(
+ MacroAssembler::Address(scratchReg, OBJECT_OFFSETOF(ArgumentsData, registers)),
+ scratchReg);
+
+#if USE(JSVALUE32_64)
+ m_jit.load32(
+ MacroAssembler::BaseIndex(
+ scratchReg, resultReg, MacroAssembler::TimesEight,
+ CallFrame::thisArgumentOffset() * sizeof(Register) - sizeof(Register) +
+ OBJECT_OFFSETOF(JSValue, u.asBits.tag)),
+ resultTagReg);
+ m_jit.load32(
+ MacroAssembler::BaseIndex(
+ scratchReg, resultReg, MacroAssembler::TimesEight,
+ CallFrame::thisArgumentOffset() * sizeof(Register) - sizeof(Register) +
+ OBJECT_OFFSETOF(JSValue, u.asBits.payload)),
+ resultReg);
+ jsValueResult(resultTagReg, resultReg, m_compileIndex);
+#else
+ m_jit.loadPtr(
+ MacroAssembler::BaseIndex(
+ scratchReg, resultReg, MacroAssembler::TimesEight,
+ CallFrame::thisArgumentOffset() * sizeof(Register) - sizeof(Register)),
+ resultReg);
+ jsValueResult(resultReg, m_compileIndex);
+#endif
+}
+
+void SpeculativeJIT::compileGetArgumentsLength(Node& node)
+{
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary result(this, base);
+
+ GPRReg baseReg = base.gpr();
+ GPRReg resultReg = result.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ if (!isArgumentsPrediction(m_state.forNode(node.child1()).m_type)) {
+ speculationCheck(
+ BadType, JSValueSource::unboxedCell(baseReg), node.child1(),
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(baseReg, JSCell::classInfoOffset()),
+ MacroAssembler::TrustedImmPtr(&Arguments::s_info)));
+ }
+
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, Arguments::offsetOfData()),
+ resultReg);
+
+ speculationCheck(
+ Uncountable, JSValueSource(), NoNode,
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(resultReg, OBJECT_OFFSETOF(ArgumentsData, overrodeLength))));
+
+ m_jit.load32(
+ MacroAssembler::Address(resultReg, OBJECT_OFFSETOF(ArgumentsData, numArguments)),
+ resultReg);
+ integerResult(resultReg, m_compileIndex);
+}
+
void SpeculativeJIT::compileNewFunctionNoCheck(Node& node)
{
GPRResult result(this);
@@ -3038,7 +3212,7 @@ bool SpeculativeJIT::compileRegExpExec(Node& node)
BlockIndex notTaken = branchNode.notTakenBlockIndex();
bool invert = false;
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
invert = true;
BlockIndex tmp = taken;
taken = notTaken;
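
The motivating transformation is easiest to see at the toInt32 truncation sites above, where the removed and added lines pair up as follows (identifiers as in the diff; a reassembled sketch, not contiguous source):

    // Before: the operation call sat on the main path, bracketed by an
    // eager spill and fill of every live register.
    JITCompiler::Jump truncated = m_jit.branchTruncateDoubleToInt32(
        fpr, gpr, JITCompiler::BranchIfTruncateSuccessful);
    silentSpillAllRegisters(gpr);
    callOperation(toInt32, gpr, fpr);
    silentFillAllRegisters(gpr);
    truncated.link(&m_jit);

    // After: only the (rarely taken) branch remains inline; the
    // spill/call/fill moves to a deferred generator, keeping the fast
    // path free of call overhead.
    JITCompiler::Jump notTruncated = m_jit.branchTruncateDoubleToInt32(
        fpr, gpr, JITCompiler::BranchIfTruncateFailed);
    addSlowPathGenerator(slowPathCall(notTruncated, this, toInt32, gpr, fpr));
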
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
index 6f8dc1156..912078a79 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -33,12 +33,14 @@
#include "DFGJITCompiler.h"
#include "DFGOSRExit.h"
#include "DFGOperations.h"
+#include "DFGSilentRegisterSavePlan.h"
#include "MarkedAllocator.h"
#include "ValueRecovery.h"
namespace JSC { namespace DFG {
class JSValueOperand;
+class SlowPathGenerator;
class SpeculativeJIT;
class SpeculateIntegerOperand;
class SpeculateStrictInt32Operand;
@@ -54,6 +56,7 @@ enum ValueSourceKind {
CellInRegisterFile,
BooleanInRegisterFile,
DoubleInRegisterFile,
+ ArgumentsSource,
SourceIsDead,
HaveNode
};
@@ -176,6 +179,7 @@ private:
public:
SpeculativeJIT(JITCompiler&);
+ ~SpeculativeJIT();
bool compile();
void createOSREntries();
@@ -190,6 +194,16 @@ public:
return at(nodeUse.index());
}
+ BlockIndex nextBlock()
+ {
+ for (BlockIndex result = m_block + 1; ; result++) {
+ if (result >= m_jit.graph().m_blocks.size())
+ return NoBlock;
+ if (m_jit.graph().m_blocks[result])
+ return result;
+ }
+ }
+
GPRReg fillInteger(NodeIndex, DataFormat& returnFormat);
FPRReg fillDouble(NodeIndex);
#if USE(JSVALUE64)
@@ -304,7 +318,10 @@ public:
// Called on an operand once it has been consumed by a parent node.
void use(NodeIndex nodeIndex)
{
- VirtualRegister virtualRegister = at(nodeIndex).virtualRegister();
+ Node& node = at(nodeIndex);
+ if (!node.hasResult())
+ return;
+ VirtualRegister virtualRegister = node.virtualRegister();
GenerationInfo& info = m_generationInfo[virtualRegister];
// use() returns true when the value becomes dead, and any
@@ -355,7 +372,9 @@ public:
GPRReg fillSpeculateBoolean(NodeIndex);
GeneratedOperandType checkGeneratedTypeForToInt32(NodeIndex);
-private:
+ void addSlowPathGenerator(PassOwnPtr<SlowPathGenerator>);
+ void runSlowPathGenerators();
+
void compile(Node&);
void compileMovHint(Node&);
void compile(BasicBlock&);
@@ -369,243 +388,355 @@ private:
// they spill all live values to the appropriate
// slots in the RegisterFile without changing any state
// in the GenerationInfo.
- void silentSpillGPR(VirtualRegister spillMe, GPRReg source)
+ SilentRegisterSavePlan silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
GenerationInfo& info = m_generationInfo[spillMe];
- ASSERT(info.registerFormat() != DataFormatNone);
- ASSERT(info.registerFormat() != DataFormatDouble);
-
- if (!info.needsSpill())
- return;
-
+ NodeIndex nodeIndex = info.nodeIndex();
+ Node& node = at(nodeIndex);
DataFormat registerFormat = info.registerFormat();
-
-#if USE(JSVALUE64)
- ASSERT(info.gpr() == source);
- if (registerFormat == DataFormatInteger)
- m_jit.store32(source, JITCompiler::addressFor(spillMe));
+ ASSERT(registerFormat != DataFormatNone);
+ ASSERT(registerFormat != DataFormatDouble);
+
+ SilentSpillAction spillAction;
+ SilentFillAction fillAction;
+
+ if (!info.needsSpill())
+ spillAction = DoNothingForSpill;
else {
- ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell || registerFormat == DataFormatStorage);
- m_jit.storePtr(source, JITCompiler::addressFor(spillMe));
- }
-#elif USE(JSVALUE32_64)
- if (registerFormat & DataFormatJS) {
- ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
- m_jit.store32(source, source == info.tagGPR() ? JITCompiler::tagFor(spillMe) : JITCompiler::payloadFor(spillMe));
- } else {
+#if USE(JSVALUE64)
ASSERT(info.gpr() == source);
- m_jit.store32(source, JITCompiler::payloadFor(spillMe));
- }
+ if (registerFormat == DataFormatInteger)
+ spillAction = Store32Payload;
+ else {
+ ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell || registerFormat == DataFormatStorage);
+ spillAction = StorePtr;
+ }
+#elif USE(JSVALUE32_64)
+ if (registerFormat & DataFormatJS) {
+ ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
+ spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
+ } else {
+ ASSERT(info.gpr() == source);
+ spillAction = Store32Payload;
+ }
#endif
- }
- void silentSpillFPR(VirtualRegister spillMe, FPRReg source)
- {
- GenerationInfo& info = m_generationInfo[spillMe];
- ASSERT(info.registerFormat() == DataFormatDouble);
-
- if (!info.needsSpill()) {
- // it's either a constant or it's already been spilled
- ASSERT(at(info.nodeIndex()).hasConstant() || info.spillFormat() != DataFormatNone);
- return;
}
- // it's neither a constant nor has it been spilled.
- ASSERT(!at(info.nodeIndex()).hasConstant());
- ASSERT(info.spillFormat() == DataFormatNone);
- ASSERT(info.fpr() == source);
-
- m_jit.storeDouble(source, JITCompiler::addressFor(spillMe));
- }
-
- void silentFillGPR(VirtualRegister spillMe, GPRReg target)
- {
- GenerationInfo& info = m_generationInfo[spillMe];
-
- NodeIndex nodeIndex = info.nodeIndex();
- Node& node = at(nodeIndex);
- ASSERT(info.registerFormat() != DataFormatNone);
- ASSERT(info.registerFormat() != DataFormatDouble);
- DataFormat registerFormat = info.registerFormat();
-
if (registerFormat == DataFormatInteger) {
- ASSERT(info.gpr() == target);
+ ASSERT(info.gpr() == source);
ASSERT(isJSInteger(info.registerFormat()));
if (node.hasConstant()) {
ASSERT(isInt32Constant(nodeIndex));
- m_jit.move(Imm32(valueOfInt32Constant(nodeIndex)), target);
+ fillAction = SetInt32Constant;
} else
- m_jit.load32(JITCompiler::payloadFor(spillMe), target);
- return;
- }
-
- if (registerFormat == DataFormatBoolean) {
+ fillAction = Load32Payload;
+ } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
ASSERT_NOT_REACHED();
+ fillAction = DoNothingForFill;
#elif USE(JSVALUE32_64)
- ASSERT(info.gpr() == target);
+ ASSERT(info.gpr() == source);
if (node.hasConstant()) {
ASSERT(isBooleanConstant(nodeIndex));
- m_jit.move(TrustedImm32(valueOfBooleanConstant(nodeIndex)), target);
+ fillAction = SetBooleanConstant;
} else
- m_jit.load32(JITCompiler::payloadFor(spillMe), target);
+ fillAction = Load32Payload;
#endif
- return;
- }
-
- if (registerFormat == DataFormatCell) {
- ASSERT(info.gpr() == target);
+ } else if (registerFormat == DataFormatCell) {
+ ASSERT(info.gpr() == source);
if (node.hasConstant()) {
JSValue value = valueOfJSConstant(nodeIndex);
- ASSERT(value.isCell());
- m_jit.move(TrustedImmPtr(value.asCell()), target);
- } else
- m_jit.loadPtr(JITCompiler::payloadFor(spillMe), target);
- return;
- }
-
- if (registerFormat == DataFormatStorage) {
- ASSERT(info.gpr() == target);
- m_jit.loadPtr(JITCompiler::addressFor(spillMe), target);
- return;
- }
-
- ASSERT(registerFormat & DataFormatJS);
+ ASSERT_UNUSED(value, value.isCell());
+ fillAction = SetCellConstant;
+ } else {
#if USE(JSVALUE64)
- ASSERT(info.gpr() == target);
- if (node.hasConstant()) {
- if (valueOfJSConstant(nodeIndex).isCell())
- m_jit.move(valueOfJSConstantAsImmPtr(nodeIndex).asTrustedImmPtr(), target);
- else
- m_jit.move(valueOfJSConstantAsImmPtr(nodeIndex), target);
- } else if (info.spillFormat() == DataFormatInteger) {
- ASSERT(registerFormat == DataFormatJSInteger);
- m_jit.load32(JITCompiler::payloadFor(spillMe), target);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, target);
- } else if (info.spillFormat() == DataFormatDouble) {
- ASSERT(registerFormat == DataFormatJSDouble);
- m_jit.loadPtr(JITCompiler::addressFor(spillMe), target);
- m_jit.subPtr(GPRInfo::tagTypeNumberRegister, target);
- } else
- m_jit.loadPtr(JITCompiler::addressFor(spillMe), target);
+ fillAction = LoadPtr;
#else
- ASSERT(info.tagGPR() == target || info.payloadGPR() == target);
- if (node.hasConstant()) {
- JSValue v = valueOfJSConstant(nodeIndex);
- m_jit.move(info.tagGPR() == target ? Imm32(v.tag()) : Imm32(v.payload()), target);
- } else if (info.payloadGPR() == target)
- m_jit.load32(JITCompiler::payloadFor(spillMe), target);
- else { // Fill the Tag
- switch (info.spillFormat()) {
- case DataFormatInteger:
+ fillAction = Load32Payload;
+#endif
+ }
+ } else if (registerFormat == DataFormatStorage) {
+ ASSERT(info.gpr() == source);
+ fillAction = LoadPtr;
+ } else {
+ ASSERT(registerFormat & DataFormatJS);
+#if USE(JSVALUE64)
+ ASSERT(info.gpr() == source);
+ if (node.hasConstant()) {
+ if (valueOfJSConstant(nodeIndex).isCell())
+ fillAction = SetTrustedJSConstant;
+ else
+ fillAction = SetJSConstant;
+ } else if (info.spillFormat() == DataFormatInteger) {
ASSERT(registerFormat == DataFormatJSInteger);
- m_jit.move(TrustedImm32(JSValue::Int32Tag), target);
- break;
- case DataFormatCell:
- ASSERT(registerFormat == DataFormatJSCell);
- m_jit.move(TrustedImm32(JSValue::CellTag), target);
- break;
- case DataFormatBoolean:
- ASSERT(registerFormat == DataFormatJSBoolean);
- m_jit.move(TrustedImm32(JSValue::BooleanTag), target);
- break;
- default:
- m_jit.load32(JITCompiler::tagFor(spillMe), target);
- break;
+ fillAction = Load32PayloadBoxInt;
+ } else if (info.spillFormat() == DataFormatDouble) {
+ ASSERT(registerFormat == DataFormatJSDouble);
+ fillAction = LoadDoubleBoxDouble;
+ } else
+ fillAction = LoadPtr;
+#else
+ ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
+ if (node.hasConstant())
+ fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
+ else if (info.payloadGPR() == source)
+ fillAction = Load32Payload;
+ else { // Fill the Tag
+ switch (info.spillFormat()) {
+ case DataFormatInteger:
+ ASSERT(registerFormat == DataFormatJSInteger);
+ fillAction = SetInt32Tag;
+ break;
+ case DataFormatCell:
+ ASSERT(registerFormat == DataFormatJSCell);
+ fillAction = SetCellTag;
+ break;
+ case DataFormatBoolean:
+ ASSERT(registerFormat == DataFormatJSBoolean);
+ fillAction = SetBooleanTag;
+ break;
+ default:
+ fillAction = Load32Tag;
+ break;
+ }
}
- }
#endif
+ }
+
+ return SilentRegisterSavePlan(spillAction, fillAction, nodeIndex, source);
}
-
- void silentFillFPR(VirtualRegister spillMe, GPRReg canTrample, FPRReg target)
+
+ SilentRegisterSavePlan silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
GenerationInfo& info = m_generationInfo[spillMe];
- ASSERT(info.fpr() == target);
-
NodeIndex nodeIndex = info.nodeIndex();
Node& node = at(nodeIndex);
-#if USE(JSVALUE64)
ASSERT(info.registerFormat() == DataFormatDouble);
- if (node.hasConstant()) {
- ASSERT(isNumberConstant(nodeIndex));
- m_jit.move(ImmPtr(bitwise_cast<void*>(valueOfNumberConstant(nodeIndex))), canTrample);
- m_jit.movePtrToDouble(canTrample, target);
- return;
+ SilentSpillAction spillAction;
+ SilentFillAction fillAction;
+
+ if (!info.needsSpill())
+ spillAction = DoNothingForSpill;
+ else {
+ ASSERT(!at(info.nodeIndex()).hasConstant());
+ ASSERT(info.spillFormat() == DataFormatNone);
+ ASSERT(info.fpr() == source);
+ spillAction = StoreDouble;
}
- if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
+#if USE(JSVALUE64)
+ if (node.hasConstant()) {
+ ASSERT(isNumberConstant(nodeIndex));
+ fillAction = SetDoubleConstant;
+ } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
// it was already spilled previously and not as a double, which means we need unboxing.
ASSERT(info.spillFormat() & DataFormatJS);
- m_jit.loadPtr(JITCompiler::addressFor(spillMe), canTrample);
- unboxDouble(canTrample, target);
- return;
- }
-
- m_jit.loadDouble(JITCompiler::addressFor(spillMe), target);
+ fillAction = LoadJSUnboxDouble;
+ } else
+ fillAction = LoadDouble;
#elif USE(JSVALUE32_64)
- UNUSED_PARAM(canTrample);
ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
if (node.hasConstant()) {
ASSERT(isNumberConstant(nodeIndex));
- m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), target);
+ fillAction = SetDoubleConstant;
} else
- m_jit.loadDouble(JITCompiler::addressFor(spillMe), target);
+ fillAction = LoadDouble;
#endif
- }
- void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg)
+ return SilentRegisterSavePlan(spillAction, fillAction, nodeIndex, source);
+ }
+
+ void silentSpill(const SilentRegisterSavePlan& plan)
{
- for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
- GPRReg gpr = iter.regID();
- if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2)
- silentSpillGPR(iter.name(), gpr);
+ switch (plan.spillAction()) {
+ case DoNothingForSpill:
+ break;
+ case Store32Tag:
+ m_jit.store32(plan.gpr(), JITCompiler::tagFor(at(plan.nodeIndex()).virtualRegister()));
+ break;
+ case Store32Payload:
+ m_jit.store32(plan.gpr(), JITCompiler::payloadFor(at(plan.nodeIndex()).virtualRegister()));
+ break;
+ case StorePtr:
+ m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()));
+ break;
+ case StoreDouble:
+ m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()));
+ break;
+ default:
+ ASSERT_NOT_REACHED();
}
- for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister)
- silentSpillFPR(iter.name(), iter.regID());
+ }
+
+ void silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
+ {
+#if USE(JSVALUE32_64)
+ UNUSED_PARAM(canTrample);
+#endif
+ switch (plan.fillAction()) {
+ case DoNothingForFill:
+ break;
+ case SetInt32Constant:
+ m_jit.move(Imm32(valueOfInt32Constant(plan.nodeIndex())), plan.gpr());
+ break;
+ case SetBooleanConstant:
+ m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.nodeIndex())), plan.gpr());
+ break;
+ case SetCellConstant:
+ m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.nodeIndex()).asCell()), plan.gpr());
+ break;
+#if USE(JSVALUE64)
+ case SetTrustedJSConstant:
+ m_jit.move(valueOfJSConstantAsImmPtr(plan.nodeIndex()).asTrustedImmPtr(), plan.gpr());
+ break;
+ case SetJSConstant:
+ m_jit.move(valueOfJSConstantAsImmPtr(plan.nodeIndex()), plan.gpr());
+ break;
+ case SetDoubleConstant:
+ m_jit.move(ImmPtr(bitwise_cast<void*>(valueOfNumberConstant(plan.nodeIndex()))), canTrample);
+ m_jit.movePtrToDouble(canTrample, plan.fpr());
+ break;
+ case Load32PayloadBoxInt:
+ m_jit.load32(JITCompiler::payloadFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, plan.gpr());
+ break;
+ case LoadDoubleBoxDouble:
+ m_jit.loadPtr(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
+ m_jit.subPtr(GPRInfo::tagTypeNumberRegister, plan.gpr());
+ break;
+ case LoadJSUnboxDouble:
+ m_jit.loadPtr(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), canTrample);
+ unboxDouble(canTrample, plan.fpr());
+ break;
+#else
+ case SetJSConstantTag:
+ m_jit.move(Imm32(valueOfJSConstant(plan.nodeIndex()).tag()), plan.gpr());
+ break;
+ case SetJSConstantPayload:
+ m_jit.move(Imm32(valueOfJSConstant(plan.nodeIndex()).payload()), plan.gpr());
+ break;
+ case SetInt32Tag:
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
+ break;
+ case SetCellTag:
+ m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
+ break;
+ case SetBooleanTag:
+ m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
+ break;
+ case SetDoubleConstant:
+ m_jit.loadDouble(addressOfDoubleConstant(plan.nodeIndex()), plan.fpr());
+ break;
+#endif
+ case Load32Tag:
+ m_jit.load32(JITCompiler::tagFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
+ break;
+ case Load32Payload:
+ m_jit.load32(JITCompiler::payloadFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
+ break;
+ case LoadPtr:
+ m_jit.loadPtr(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
+ break;
+ case LoadDouble:
+ m_jit.loadDouble(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), plan.fpr());
+ break;
+ default:
+ ASSERT_NOT_REACHED();
}
}
- void silentSpillAllRegisters(FPRReg exclude)
+
+ template<typename CollectionType>
+ void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
{
+ ASSERT(plans.isEmpty());
for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister)
- silentSpillGPR(iter.name(), iter.regID());
+ GPRReg gpr = iter.regID();
+ if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2) {
+ SilentRegisterSavePlan plan = silentSavePlanForGPR(iter.name(), gpr);
+ if (doSpill)
+ silentSpill(plan);
+ plans.append(plan);
+ }
}
for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
- FPRReg fpr = iter.regID();
- if (iter.name() != InvalidVirtualRegister && fpr != exclude)
- silentSpillFPR(iter.name(), fpr);
+ if (iter.name() != InvalidVirtualRegister && iter.regID() != fprExclude) {
+ SilentRegisterSavePlan plan = silentSavePlanForFPR(iter.name(), iter.regID());
+ if (doSpill)
+ silentSpill(plan);
+ plans.append(plan);
+ }
}
}
-
- void silentFillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg)
+ template<typename CollectionType>
+ void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, NoResultTag)
{
- GPRReg canTrample = GPRInfo::regT0;
- if (exclude == GPRInfo::regT0)
- canTrample = GPRInfo::regT1;
-
- for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister)
- silentFillFPR(iter.name(), canTrample, iter.regID());
- }
- for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
- GPRReg gpr = iter.regID();
- if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2)
- silentFillGPR(iter.name(), gpr);
+ silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, InvalidFPRReg);
+ }
+ template<typename CollectionType>
+ void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, FPRReg exclude)
+ {
+ silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, exclude);
+ }
+#if USE(JSVALUE32_64)
+ template<typename CollectionType>
+ void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, JSValueRegs exclude)
+ {
+ silentSpillAllRegistersImpl(doSpill, plans, exclude.tagGPR(), exclude.payloadGPR());
+ }
+#endif
+
+ void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
+ {
+ silentSpillAllRegistersImpl(true, m_plans, exclude, exclude2, fprExclude);
+ }
+ void silentSpillAllRegisters(FPRReg exclude)
+ {
+ silentSpillAllRegisters(InvalidGPRReg, InvalidGPRReg, exclude);
+ }
+
+ static GPRReg pickCanTrample(GPRReg exclude)
+ {
+ GPRReg result = GPRInfo::regT0;
+ if (result == exclude)
+ result = GPRInfo::regT1;
+ return result;
+ }
+ static GPRReg pickCanTrample(FPRReg)
+ {
+ return GPRInfo::regT0;
+ }
+ static GPRReg pickCanTrample(NoResultTag)
+ {
+ return GPRInfo::regT0;
+ }
+
+#if USE(JSVALUE32_64)
+ static GPRReg pickCanTrample(JSValueRegs exclude)
+ {
+ GPRReg result = GPRInfo::regT0;
+ if (result == exclude.tagGPR()) {
+ result = GPRInfo::regT1;
+ if (result == exclude.payloadGPR())
+ result = GPRInfo::regT2;
+ } else if (result == exclude.payloadGPR()) {
+ result = GPRInfo::regT1;
+ if (result == exclude.tagGPR())
+ result = GPRInfo::regT2;
}
+ return result;
}
- void silentFillAllRegisters(FPRReg exclude)
+#endif
+
+ template<typename RegisterType>
+ void silentFillAllRegisters(RegisterType exclude)
{
- GPRReg canTrample = GPRInfo::regT0;
+ GPRReg canTrample = pickCanTrample(exclude);
- for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
- FPRReg fpr = iter.regID();
- if (iter.name() != InvalidVirtualRegister && fpr != exclude)
- silentFillFPR(iter.name(), canTrample, fpr);
- }
- for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister)
- silentFillGPR(iter.name(), iter.regID());
+ while (!m_plans.isEmpty()) {
+ SilentRegisterSavePlan& plan = m_plans.last();
+ silentFill(plan, canTrample);
+ m_plans.removeLast();
}
}
@@ -887,12 +1018,11 @@ private:
void nonSpeculativeValueToInt32(Node&);
void nonSpeculativeUInt32ToNumber(Node&);
- enum SpillRegistersMode { NeedToSpill, DontSpill };
#if USE(JSVALUE64)
- JITCompiler::Call cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#elif USE(JSVALUE32_64)
- JITCompiler::Call cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#endif
@@ -1082,6 +1212,11 @@ private:
// machine registers, and delegate the calling convention specific
// decision as to how to fill the registers to setupArguments* methods.
#if USE(JSVALUE64)
+ JITCompiler::Call callOperation(J_DFGOperation_E operation, GPRReg result)
+ {
+ m_jit.setupArgumentsExecState();
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, void* pointer)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
@@ -1144,6 +1279,21 @@ private:
m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(J_DFGOperation_EZ operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EZ operation, GPRReg result, int32_t arg1)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImm32(arg1));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EZZ operation, GPRReg result, int32_t arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result)
{
m_jit.setupArgumentsExecState();
@@ -1164,6 +1314,11 @@ private:
m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(C_DFGOperation_EIcf operation, GPRReg result, InlineCallFrame* inlineCallFrame)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(inlineCallFrame));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
JITCompiler::Call callOperation(S_DFGOperation_J operation, GPRReg result, GPRReg arg1)
{
m_jit.setupArguments(arg1);
@@ -1214,6 +1369,11 @@ private:
m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheck(operation);
}
+ JITCompiler::Call callOperation(V_DFGOperation_ECIcf operation, GPRReg arg1, InlineCallFrame* arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2));
+ return appendCallWithExceptionCheck(operation);
+ }
JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1, GPRReg arg2, void* pointer)
{
m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(pointer));
@@ -1244,6 +1404,26 @@ private:
m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
return appendCallWithExceptionCheck(operation);
}
+ JITCompiler::Call callOperation(V_DFGOperation_ECZ operation, GPRReg arg1, int arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
+ return appendCallWithExceptionCheck(operation);
+ }
+ template<typename FunctionType, typename ArgumentType1>
+ JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1)
+ {
+ return callOperation(operation, arg1);
+ }
+ template<typename FunctionType, typename ArgumentType1, typename ArgumentType2>
+ JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2)
+ {
+ return callOperation(operation, arg1, arg2);
+ }
+ template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3>
+ JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3)
+ {
+ return callOperation(operation, arg1, arg2, arg3);
+ }
JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1)
{
m_jit.setupArgumentsWithExecState(arg1);
@@ -1277,6 +1457,11 @@ private:
m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
return call;
}
+ JITCompiler::Call callOperation(J_DFGOperation_E operation, GPRReg resultTag, GPRReg resultPayload)
+ {
+ m_jit.setupArgumentsExecState();
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, void* pointer)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
@@ -1352,6 +1537,21 @@ private:
m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
+ JITCompiler::Call callOperation(J_DFGOperation_EZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImm32(arg1));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EZZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result)
{
m_jit.setupArgumentsExecState();
@@ -1372,6 +1572,11 @@ private:
m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(C_DFGOperation_EIcf operation, GPRReg result, InlineCallFrame* inlineCallFrame)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(inlineCallFrame));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
JITCompiler::Call callOperation(S_DFGOperation_J operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
{
m_jit.setupArguments(arg1Payload, arg1Tag);
@@ -1422,6 +1627,11 @@ private:
m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheck(operation);
}
+ JITCompiler::Call callOperation(V_DFGOperation_ECIcf operation, GPRReg arg1, InlineCallFrame* inlineCallFrame)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(inlineCallFrame));
+ return appendCallWithExceptionCheck(operation);
+ }
JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, void* pointer)
{
m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2, TrustedImmPtr(pointer));
@@ -1437,6 +1647,11 @@ private:
m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag);
return appendCallWithExceptionCheck(operation);
}
+ JITCompiler::Call callOperation(V_DFGOperation_ECZ operation, GPRReg arg1, int arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
+ return appendCallWithExceptionCheck(operation);
+ }
JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
{
m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
@@ -1447,6 +1662,26 @@ private:
m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
return appendCallWithExceptionCheck(operation);
}
+ template<typename FunctionType, typename ArgumentType1>
+ JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1)
+ {
+ return callOperation(operation, arg1);
+ }
+ template<typename FunctionType, typename ArgumentType1, typename ArgumentType2>
+ JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2)
+ {
+ return callOperation(operation, arg1, arg2);
+ }
+ template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4>
+ JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3, ArgumentType4 arg4)
+ {
+ return callOperation(operation, arg1, arg2, arg3, arg4);
+ }
+ template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4, typename ArgumentType5>
+ JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3, ArgumentType4 arg4, ArgumentType5 arg5)
+ {
+ return callOperation(operation, arg1, arg2, arg3, arg4, arg5);
+ }
JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
{
@@ -1466,10 +1701,56 @@ private:
}
#undef EABI_32BIT_DUMMY_ARG
-
+
+ template<typename FunctionType>
+ JITCompiler::Call callOperation(
+ FunctionType operation, JSValueRegs result)
+ {
+ return callOperation(operation, result.tagGPR(), result.payloadGPR());
+ }
+ template<typename FunctionType, typename ArgumentType1>
+ JITCompiler::Call callOperation(
+ FunctionType operation, JSValueRegs result, ArgumentType1 arg1)
+ {
+ return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1);
+ }
+ template<typename FunctionType, typename ArgumentType1, typename ArgumentType2>
+ JITCompiler::Call callOperation(
+ FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2)
+ {
+ return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2);
+ }
+ template<
+ typename FunctionType, typename ArgumentType1, typename ArgumentType2,
+ typename ArgumentType3>
+ JITCompiler::Call callOperation(
+ FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
+ ArgumentType3 arg3)
+ {
+ return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3);
+ }
+ template<
+ typename FunctionType, typename ArgumentType1, typename ArgumentType2,
+ typename ArgumentType3, typename ArgumentType4>
+ JITCompiler::Call callOperation(
+ FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
+ ArgumentType3 arg3, ArgumentType4 arg4)
+ {
+ return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3, arg4);
+ }
+ template<
+ typename FunctionType, typename ArgumentType1, typename ArgumentType2,
+ typename ArgumentType3, typename ArgumentType4, typename ArgumentType5>
+ JITCompiler::Call callOperation(
+ FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
+ ArgumentType3 arg3, ArgumentType4 arg4, ArgumentType5 arg5)
+ {
+ return callOperation(
+ operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3, arg4, arg5);
+ }
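
The JSValueRegs adapters exist so newer call sites (notably the slow path generators introduced by this patch) can treat a tag/payload pair as one logical result register; each overload just splices result.tagGPR() and result.payloadGPR() back into the older split signatures. The per-arity expansion is hand-written because the codebase predates variadic templates; in C++11 the whole family would collapse to something like:

    // C++11 equivalent of the overload family above (sketch).
    template<typename FunctionType, typename... Arguments>
    JITCompiler::Call callOperation(FunctionType operation, JSValueRegs result, Arguments... args)
    {
        return callOperation(operation, result.tagGPR(), result.payloadGPR(), args...);
    }
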
#endif
-#ifndef NDEBUG
+#if !defined(NDEBUG) && !CPU(ARM_THUMB2)
void prepareForExternalCall()
{
for (unsigned i = 0; i < sizeof(void*) / 4; i++)
@@ -1689,7 +1970,7 @@ private:
{
if (haveEdgeCodeToEmit(destination))
emitEdgeCode(destination);
- if (destination == m_block + 1
+ if (destination == nextBlock()
&& fallThroughMode == AtFallThroughPoint)
return;
addBranch(m_jit.jump(), destination);
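
The fall-through test switches from m_block + 1 to nextBlock(): once CFG simplification can knock blocks out of the graph, the lexically next slot may hold a block that will never be emitted, so "is this branch a fall-through?" has to skip dead entries. A hypothetical sketch of what such a helper does (the real one lives elsewhere in this class):

    // Hypothetical sketch: find the next block that will actually be emitted.
    BlockIndex nextBlock()
    {
        for (BlockIndex result = m_block + 1; ; result++) {
            if (result >= m_jit.graph().m_blocks.size())
                return NoBlock; // fell off the end of the block list
            if (m_jit.graph().m_blocks[result]) // removed blocks are null
                return result;
        }
    }
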
@@ -1774,6 +2055,10 @@ private:
void compileGetCharCodeAt(Node&);
void compileGetByValOnString(Node&);
+
+ void compileGetByValOnArguments(Node&);
+ void compileGetArgumentsLength(Node&);
+
void compileValueToInt32(Node&);
void compileUInt32ToNumber(Node&);
void compileDoubleAsInt32(Node&);
@@ -1863,21 +2148,25 @@ private:
{
if (!m_compileOkay)
return;
+ ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this));
}
void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
+ ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
speculationCheck(kind, jsValueSource, nodeUse.index(), jumpToFail);
}
// Add a set of speculation checks without additional recovery.
void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail)
{
+ ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
Vector<MacroAssembler::Jump, 16> jumpVector = jumpsToFail.jumps();
for (unsigned i = 0; i < jumpVector.size(); ++i)
speculationCheck(kind, jsValueSource, nodeIndex, jumpVector[i]);
}
void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::JumpList& jumpsToFail)
{
+ ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
speculationCheck(kind, jsValueSource, nodeUse.index(), jumpsToFail);
}
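
Every speculationCheck variant gains the same assertion: an OSR exit is only sound at a node the graph has marked as a legal exit site (canExit()), with one exemption for the argument-type checks emitted at function entry, which run before any node is current. The m_isCheckingArgumentTypes member added further down is presumably scoped around that entry sequence by a function along these lines (name assumed):

    // Assumed scoping of the flag around entry checks (sketch; only the
    // flag itself appears in the surrounding diff).
    void checkArgumentTypes()
    {
        m_isCheckingArgumentTypes = true;
        // ... emit speculationCheck()s for each argument's predicted type;
        // canExit() cannot be consulted because no node is being compiled ...
        m_isCheckingArgumentTypes = false;
    }
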
// Add a speculation check with additional recovery.
@@ -1885,15 +2174,18 @@ private:
{
if (!m_compileOkay)
return;
+ ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
m_jit.codeBlock()->appendSpeculationRecovery(recovery);
m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this, m_jit.codeBlock()->numberOfSpeculationRecoveries()));
}
void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
+ ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
speculationCheck(kind, jsValueSource, nodeUse.index(), jumpToFail, recovery);
}
void forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
{
+ ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
speculationCheck(kind, jsValueSource, nodeIndex, jumpToFail);
unsigned setLocalIndexInBlock = m_indexInBlock + 1;
@@ -1925,6 +2217,7 @@ private:
}
void forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
{
+ ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
Vector<MacroAssembler::Jump, 16> jumpVector = jumpsToFail.jumps();
for (unsigned i = 0; i < jumpVector.size(); ++i)
forwardSpeculationCheck(kind, jsValueSource, nodeIndex, jumpVector[i], valueRecovery);
@@ -1933,6 +2226,7 @@ private:
// Called when we statically determine that a speculation will fail.
void terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, NodeIndex nodeIndex)
{
+ ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("SpeculativeJIT was terminated.\n");
#endif
@@ -1943,6 +2237,7 @@ private:
}
void terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
+ ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.index());
}
@@ -1985,8 +2280,8 @@ private:
return m_variables[operand];
}
- // The JIT, while also provides MacroAssembler functionality.
JITCompiler& m_jit;
+
// The current node being generated.
BlockIndex m_block;
NodeIndex m_compileIndex;
@@ -2018,6 +2313,11 @@ private:
AbstractState m_state;
+ bool m_isCheckingArgumentTypes;
+
+ Vector<SlowPathGenerator*, 8> m_slowPathGenerators; // raw pointers rather than OwnPtr<>, to avoid including DFGSlowPathGenerator.h here
+ Vector<SilentRegisterSavePlan> m_plans;
+
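
These two members carry the commit's central mechanism: instead of emitting each slow path inline, branching around it and silently spilling and filling registers at every call, the fast path records a SlowPathGenerator and continues; all recorded generators are emitted in one batch after the main paths, keeping hot code straight-line, while m_plans evidently captures how each live register is to be saved and restored (the SilentRegisterSavePlan this patch adds). The driver this implies is roughly:

    // Assumed shape of the deferred-emission driver (the real definitions
    // are in DFGSpeculativeJIT.cpp and DFGSlowPathGenerator.h).
    void addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> generator)
    {
        m_slowPathGenerators.append(generator.leakPtr()); // vector owns it now
    }

    void runSlowPathGenerators()
    {
        for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
            m_slowPathGenerators[i]->generate(this); // link jumps, call, jump back
    }
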
ValueRecovery computeValueRecoveryFor(const ValueSource&);
ValueRecovery computeValueRecoveryFor(int operand)
@@ -2637,20 +2937,6 @@ private:
GPRReg m_gprOrInvalid;
};
-inline SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
- : m_compileOkay(true)
- , m_jit(jit)
- , m_compileIndex(0)
- , m_indexInBlock(0)
- , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
- , m_blockHeads(jit.graph().m_blocks.size())
- , m_arguments(jit.codeBlock()->numParameters())
- , m_variables(jit.graph().m_localVars)
- , m_lastSetOperand(std::numeric_limits<int>::max())
- , m_state(m_jit.graph())
-{
-}
-
} } // namespace JSC::DFG
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
index 05c418d1e..637e335a3 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
@@ -29,6 +29,8 @@
#if ENABLE(DFG_JIT)
+#include "DFGSlowPathGenerator.h"
+
namespace JSC { namespace DFG {
#if USE(JSVALUE32_64)
@@ -356,6 +358,33 @@ bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& pa
return true;
}
+class ValueToNumberSlowPathGenerator
+ : public CallSlowPathGenerator<MacroAssembler::Jump, D_DFGOperation_EJ, JSValueRegs> {
+public:
+ ValueToNumberSlowPathGenerator(
+ MacroAssembler::Jump from, SpeculativeJIT* jit,
+ GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg jsValueTagGPR, GPRReg jsValuePayloadGPR)
+ : CallSlowPathGenerator<MacroAssembler::Jump, D_DFGOperation_EJ, JSValueRegs>(
+ from, jit, dfgConvertJSValueToNumber, NeedToSpill, JSValueRegs(resultTagGPR, resultPayloadGPR))
+ , m_jsValueTagGPR(jsValueTagGPR)
+ , m_jsValuePayloadGPR(jsValuePayloadGPR)
+ {
+ }
+
+protected:
+ virtual void generateInternal(SpeculativeJIT* jit)
+ {
+ setUp(jit);
+ recordCall(jit->callOperation(dfgConvertJSValueToNumber, FPRInfo::returnValueFPR, m_jsValueTagGPR, m_jsValuePayloadGPR));
+ jit->boxDouble(FPRInfo::returnValueFPR, m_result.tagGPR(), m_result.payloadGPR());
+ tearDown(jit);
+ }
+
+private:
+ GPRReg m_jsValueTagGPR;
+ GPRReg m_jsValuePayloadGPR;
+};
+
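
ValueToNumberSlowPathGenerator shows the subclassing pattern for slow paths the stock slowPathCall() helpers cannot express, here because the double returned in an FPR must be boxed into a tag/payload pair. generateInternal() brackets its custom body with the inherited setUp() (link the entry jump, spill live registers) and tearDown() (refill and jump back), and recordCall() keeps the call visible to exception-check bookkeeping. The control shape, as a self-contained analogue:

    // Template-method analogue of the generator lifecycle (illustrative only).
    #include <cstdio>

    class SlowPathSketch {
    public:
        virtual ~SlowPathSketch() { }
        void generate() { generateInternal(); }
    protected:
        virtual void generateInternal() = 0; // custom per-call-site body
        void setUp() { std::printf("link entry jumps; silent-spill registers\n"); }
        void tearDown() { std::printf("silent-fill registers; jump to fast path\n"); }
    };

    class BoxDoubleSlowPath : public SlowPathSketch {
    protected:
        virtual void generateInternal()
        {
            setUp();
            std::printf("call helper; box FPR result into tag/payload\n");
            tearDown();
        }
    };
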
void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node)
{
if (isKnownNumeric(node.child1().index())) {
@@ -391,23 +420,12 @@ void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node)
JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
JITCompiler::Jump nonNumeric = m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag));
- // First, if we get here we have a double encoded as a JSValue
- JITCompiler::Jump hasUnboxedDouble = m_jit.jump();
-
- // Next handle cells (& other JS immediates)
- nonNumeric.link(&m_jit);
- silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
- callOperation(dfgConvertJSValueToNumber, FPRInfo::returnValueFPR, tagGPR, payloadGPR);
- boxDouble(FPRInfo::returnValueFPR, resultTagGPR, resultPayloadGPR);
- silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
- JITCompiler::Jump hasCalledToNumber = m_jit.jump();
-
- // Finally, handle integers.
isInteger.link(&m_jit);
- hasUnboxedDouble.link(&m_jit);
m_jit.move(tagGPR, resultTagGPR);
m_jit.move(payloadGPR, resultPayloadGPR);
- hasCalledToNumber.link(&m_jit);
+
+ addSlowPathGenerator(adoptPtr(new ValueToNumberSlowPathGenerator(nonNumeric, this, resultTagGPR, resultPayloadGPR, tagGPR, payloadGPR)));
+
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
}
@@ -430,13 +448,10 @@ void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node)
FPRReg fpr = op1.fpr();
GPRReg gpr = result.gpr();
op1.use();
- JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful);
+ JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
- silentSpillAllRegisters(gpr);
- callOperation(toInt32, gpr, fpr);
- silentFillAllRegisters(gpr);
+ addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
- truncatedToInteger.link(&m_jit);
integerResult(gpr, m_compileIndex, UseChildrenCalledExplicitly);
return;
}
@@ -448,19 +463,12 @@ void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node)
GPRReg resultGPR = result.gpr();
op1.use();
- JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
+ JITCompiler::Jump isNotInteger = m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag));
- // First handle non-integers
- silentSpillAllRegisters(resultGPR);
- callOperation(dfgConvertJSValueToInt32, GPRInfo::returnValueGPR, tagGPR, payloadGPR);
- m_jit.move(GPRInfo::returnValueGPR, resultGPR);
- silentFillAllRegisters(resultGPR);
- JITCompiler::Jump hasCalledToInt32 = m_jit.jump();
-
- // Then handle integers.
- isInteger.link(&m_jit);
m_jit.move(payloadGPR, resultGPR);
- hasCalledToInt32.link(&m_jit);
+
+ addSlowPathGenerator(slowPathCall(isNotInteger, this, dfgConvertJSValueToInt32, resultGPR, tagGPR, payloadGPR));
+
integerResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
}
@@ -491,7 +499,7 @@ void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
}
-JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
+void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
JITCompiler::DataLabelPtr structureToCompare;
JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
@@ -500,32 +508,50 @@ JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg ba
JITCompiler::DataLabelCompact tagLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
JITCompiler::DataLabelCompact payloadLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
- JITCompiler::Jump done = m_jit.jump();
-
- structureCheck.m_jump.link(&m_jit);
-
- if (slowPathTarget.isSet())
- slowPathTarget.link(&m_jit);
-
- JITCompiler::Label slowCase = m_jit.label();
-
- if (spillMode == NeedToSpill)
- silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
- JITCompiler::Call functionCall;
- if (baseTagGPROrNone == InvalidGPRReg)
- functionCall = callOperation(operationGetByIdOptimize, resultTagGPR, resultPayloadGPR, JSValue::CellTag, basePayloadGPR, identifier(identifierNumber));
- else
- functionCall = callOperation(operationGetByIdOptimize, resultTagGPR, resultPayloadGPR, baseTagGPROrNone, basePayloadGPR, identifier(identifierNumber));
- if (spillMode == NeedToSpill)
- silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
-
- done.link(&m_jit);
-
JITCompiler::Label doneLabel = m_jit.label();
- m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, tagLoadWithPatch, payloadLoadWithPatch, slowCase, doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(resultTagGPR), safeCast<int8_t>(resultPayloadGPR), safeCast<int8_t>(scratchGPR), spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
-
- return functionCall;
+ OwnPtr<SlowPathGenerator> slowPath;
+ if (baseTagGPROrNone == InvalidGPRReg) {
+ if (!slowPathTarget.isSet()) {
+ slowPath = slowPathCall(
+ structureCheck.m_jump, this, operationGetByIdOptimize,
+ JSValueRegs(resultTagGPR, resultPayloadGPR),
+ static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
+ identifier(identifierNumber));
+ } else {
+ JITCompiler::JumpList slowCases;
+ slowCases.append(structureCheck.m_jump);
+ slowCases.append(slowPathTarget);
+ slowPath = slowPathCall(
+ slowCases, this, operationGetByIdOptimize,
+ JSValueRegs(resultTagGPR, resultPayloadGPR),
+ static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
+ identifier(identifierNumber));
+ }
+ } else {
+ if (!slowPathTarget.isSet()) {
+ slowPath = slowPathCall(
+ structureCheck.m_jump, this, operationGetByIdOptimize,
+ JSValueRegs(resultTagGPR, resultPayloadGPR), baseTagGPROrNone, basePayloadGPR,
+ identifier(identifierNumber));
+ } else {
+ JITCompiler::JumpList slowCases;
+ slowCases.append(structureCheck.m_jump);
+ slowCases.append(slowPathTarget);
+ slowPath = slowPathCall(
+ slowCases, this, operationGetByIdOptimize,
+ JSValueRegs(resultTagGPR, resultPayloadGPR), baseTagGPROrNone, basePayloadGPR,
+ identifier(identifierNumber));
+ }
+ }
+ m_jit.addPropertyAccess(
+ PropertyAccessRecord(
+ codeOrigin, structureToCompare, structureCheck,
+ tagLoadWithPatch, payloadLoadWithPatch, slowPath.get(), doneLabel,
+ safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(resultTagGPR),
+ safeCast<int8_t>(resultPayloadGPR), safeCast<int8_t>(scratchGPR),
+ spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
+ addSlowPathGenerator(slowPath.release());
}
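
cachedGetById no longer returns the slow-path Call; the PropertyAccessRecord now stores the SlowPathGenerator itself (slowPath.get(), stashed before slowPath.release() hands ownership to addSlowPathGenerator()) so the patching machinery can still find the out-of-line stub. The four near-identical branches are forced by two compile-time dimensions: Jump versus JumpList picks a slowPathCall() overload, and an immediate CellTag versus a tag register picks a callOperation() overload. Only the jump dimension can be merged, e.g.:

    // Sketch: fold the Jump/JumpList split (the tag split must remain,
    // since int32_t and GPRReg select different callOperation overloads).
    JITCompiler::JumpList slowCases;
    slowCases.append(structureCheck.m_jump);
    if (slowPathTarget.isSet())
        slowCases.append(slowPathTarget);
    OwnPtr<SlowPathGenerator> slowPath;
    if (baseTagGPROrNone == InvalidGPRReg) {
        slowPath = slowPathCall(
            slowCases, this, operationGetByIdOptimize,
            JSValueRegs(resultTagGPR, resultPayloadGPR),
            static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
            identifier(identifierNumber));
    } else {
        slowPath = slowPathCall(
            slowCases, this, operationGetByIdOptimize,
            JSValueRegs(resultTagGPR, resultPayloadGPR),
            baseTagGPROrNone, basePayloadGPR, identifier(identifierNumber));
    }
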
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
@@ -539,16 +565,7 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR,
JITCompiler::DataLabel32 tagStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valueTagGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
JITCompiler::DataLabel32 payloadStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valuePayloadGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- JITCompiler::Jump done = m_jit.jump();
-
- structureCheck.m_jump.link(&m_jit);
-
- if (slowPathTarget.isSet())
- slowPathTarget.link(&m_jit);
-
- JITCompiler::Label slowCase = m_jit.label();
-
- silentSpillAllRegisters(InvalidGPRReg);
+ JITCompiler::Label doneLabel = m_jit.label();
V_DFGOperation_EJCI optimizedCall;
if (m_jit.strictModeFor(at(m_compileIndex).codeOrigin)) {
if (putKind == Direct)
@@ -561,13 +578,28 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR,
else
optimizedCall = operationPutByIdNonStrictOptimize;
}
- JITCompiler::Call functionCall = callOperation(optimizedCall, valueTagGPR, valuePayloadGPR, basePayloadGPR, identifier(identifierNumber));
- silentFillAllRegisters(InvalidGPRReg);
-
- done.link(&m_jit);
- JITCompiler::Label doneLabel = m_jit.label();
-
- m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(tagStoreWithPatch.label()), JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(valueTagGPR), safeCast<int8_t>(valuePayloadGPR), safeCast<int8_t>(scratchGPR)));
+ OwnPtr<SlowPathGenerator> slowPath;
+ if (!slowPathTarget.isSet()) {
+ slowPath = slowPathCall(
+ structureCheck.m_jump, this, optimizedCall, NoResult, valueTagGPR, valuePayloadGPR,
+ basePayloadGPR, identifier(identifierNumber));
+ } else {
+ JITCompiler::JumpList slowCases;
+ slowCases.append(structureCheck.m_jump);
+ slowCases.append(slowPathTarget);
+ slowPath = slowPathCall(
+ slowCases, this, optimizedCall, NoResult, valueTagGPR, valuePayloadGPR,
+ basePayloadGPR, identifier(identifierNumber));
+ }
+ m_jit.addPropertyAccess(
+ PropertyAccessRecord(
+ codeOrigin, structureToCompare, structureCheck,
+ JITCompiler::DataLabelCompact(tagStoreWithPatch.label()),
+ JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()),
+ slowPath.get(), doneLabel, safeCast<int8_t>(basePayloadGPR),
+ safeCast<int8_t>(valueTagGPR), safeCast<int8_t>(valuePayloadGPR),
+ safeCast<int8_t>(scratchGPR)));
+ addSlowPathGenerator(slowPath.release());
}
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert)
@@ -608,7 +640,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex br
BlockIndex taken = branchNode.takenBlockIndex();
BlockIndex notTaken = branchNode.notTakenBlockIndex();
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
invert = !invert;
BlockIndex tmp = taken;
taken = notTaken;
@@ -677,7 +709,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
cond = JITCompiler::invert(cond);
callResultCondition = JITCompiler::Zero;
BlockIndex tmp = taken;
@@ -738,6 +770,42 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
m_compileIndex = branchNodeIndex;
}
+template<typename JumpType>
+class CompareAndBoxBooleanSlowPathGenerator
+ : public CallSlowPathGenerator<JumpType, S_DFGOperation_EJJ, GPRReg> {
+public:
+ CompareAndBoxBooleanSlowPathGenerator(
+ JumpType from, SpeculativeJIT* jit,
+ S_DFGOperation_EJJ function, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload,
+ GPRReg arg2Tag, GPRReg arg2Payload)
+ : CallSlowPathGenerator<JumpType, S_DFGOperation_EJJ, GPRReg>(
+ from, jit, function, NeedToSpill, result)
+ , m_arg1Tag(arg1Tag)
+ , m_arg1Payload(arg1Payload)
+ , m_arg2Tag(arg2Tag)
+ , m_arg2Payload(arg2Payload)
+ {
+ }
+
+protected:
+ virtual void generateInternal(SpeculativeJIT* jit)
+ {
+ this->setUp(jit);
+ this->recordCall(
+ jit->callOperation(
+ this->m_function, this->m_result, m_arg1Tag, m_arg1Payload, m_arg2Tag,
+ m_arg2Payload));
+ jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);
+ this->tearDown(jit);
+ }
+
+private:
+ GPRReg m_arg1Tag;
+ GPRReg m_arg1Payload;
+ GPRReg m_arg2Tag;
+ GPRReg m_arg2Payload;
+};
+
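
Unlike the ValueToNumberSlowPathGenerator above, this class is itself a template (over JumpType, so one definition serves both Jump and JumpList entries), which makes its base a dependent type; that is why every inherited name is spelled this->setUp, this->m_result, and so on, since unqualified names from a dependent base are invisible to first-phase lookup. Minimal reproduction:

    // Why the this-> qualifications: dependent-base name lookup.
    template<typename T> struct Base {
        void setUp() { }
        int m_result;
    };

    template<typename T> struct Derived : Base<T> {
        void go()
        {
            // setUp();     // error: not found, Base<T> is dependent
            this->setUp();  // OK: lookup deferred to instantiation
            this->m_result = 1;
        }
    };
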
void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
JSValueOperand arg1(this, node.child1());
@@ -775,17 +843,10 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler
m_jit.compare32(cond, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR);
if (!isKnownInteger(node.child1().index()) || !isKnownInteger(node.child2().index())) {
- JITCompiler::Jump haveResult = m_jit.jump();
-
- slowPath.link(&m_jit);
-
- silentSpillAllRegisters(resultPayloadGPR);
- callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
- silentFillAllRegisters(resultPayloadGPR);
-
- m_jit.andPtr(TrustedImm32(1), resultPayloadGPR);
-
- haveResult.link(&m_jit);
+ addSlowPathGenerator(adoptPtr(
+ new CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>(
+ slowPath, this, helperFunction, resultPayloadGPR, arg1TagGPR,
+ arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR)));
}
booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
@@ -800,7 +861,7 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branch
// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
invert = !invert;
BlockIndex tmp = taken;
taken = notTaken;
@@ -861,6 +922,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
if (isKnownCell(node.child1().index()) && isKnownCell(node.child2().index())) {
// see if we get lucky: if the arguments are cells and they reference the same
// cell, then they must be strictly equal.
+ // FIXME: this should flush registers instead of silent spill/fill.
JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1PayloadGPR, arg2PayloadGPR);
m_jit.move(JITCompiler::TrustedImm32(!invert), resultPayloadGPR);
@@ -989,6 +1051,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat&
return allocate();
}
+ PredictedType type = m_state.forNode(nodeIndex).m_type;
Node& node = at(nodeIndex);
VirtualRegister virtualRegister = node.virtualRegister();
GenerationInfo& info = m_generationInfo[virtualRegister];
@@ -1007,10 +1070,10 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat&
}
DataFormat spillFormat = info.spillFormat();
- ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);
+ ASSERT_UNUSED(spillFormat, (spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);
// If we know this was spilled as an integer we can fill without checking.
- if (spillFormat != DataFormatJSInteger && spillFormat != DataFormatInteger)
+ if (!isInt32Prediction(type))
speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
GPRReg gpr = allocate();
@@ -1028,7 +1091,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat&
GPRReg payloadGPR = info.payloadGPR();
m_gprs.lock(tagGPR);
m_gprs.lock(payloadGPR);
- if (info.registerFormat() != DataFormatJSInteger)
+ if (!isInt32Prediction(type))
speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag)));
m_gprs.unlock(tagGPR);
m_gprs.release(tagGPR);
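
This and the following fill hunks replace data-format tests with type tests against m_state, the abstract interpreter's per-node knowledge: if the CFA already proved the value an Int32 (or Number, Cell, Boolean below), the tag check and its OSR exit are elided entirely, not merely when the spill format happened to record the fact. The predicates are presumably bit-set membership tests in the spirit of:

    // Assumed shape of the prediction predicates (illustrative bit values;
    // see the PredictedType definitions in the JSC sources).
    #include <cstdint>

    typedef uint32_t PredictedType;
    static const PredictedType PredictInt32  = 0x01;
    static const PredictedType PredictDouble = 0x02;
    static const PredictedType PredictNumber = PredictInt32 | PredictDouble;

    inline bool isInt32Prediction(PredictedType value) { return value == PredictInt32; }
    inline bool isNumberPrediction(PredictedType value)
    {
        return !!value && !(value & ~PredictNumber); // nonempty subset of Number
    }
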
@@ -1084,6 +1147,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
return fprAllocate();
}
+ PredictedType type = m_state.forNode(nodeIndex).m_type;
Node& node = at(nodeIndex);
VirtualRegister virtualRegister = node.virtualRegister();
GenerationInfo& info = m_generationInfo[virtualRegister];
@@ -1121,7 +1185,8 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
if (spillFormat != DataFormatJSInteger && spillFormat != DataFormatInteger) {
JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag));
- speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::AboveOrEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::LowestTag)));
+ if (!isNumberPrediction(type))
+ speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::AboveOrEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::LowestTag)));
m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
hasUnboxedDouble = m_jit.jump();
@@ -1154,7 +1219,8 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
if (info.registerFormat() != DataFormatJSInteger) {
FPRTemporary scratch(this);
JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
- speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
+ if (!isNumberPrediction(type))
+ speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
hasUnboxedDouble = m_jit.jump();
isInteger.link(&m_jit);
@@ -1214,6 +1280,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
return allocate();
}
+ PredictedType type = m_state.forNode(nodeIndex).m_type;
Node& node = at(nodeIndex);
VirtualRegister virtualRegister = node.virtualRegister();
GenerationInfo& info = m_generationInfo[virtualRegister];
@@ -1232,7 +1299,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
}
ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatCell);
- if (info.spillFormat() != DataFormatJSCell && info.spillFormat() != DataFormatCell)
+ if (!isCellPrediction(type))
speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
GPRReg gpr = allocate();
m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
@@ -1253,7 +1320,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
GPRReg payloadGPR = info.payloadGPR();
m_gprs.lock(tagGPR);
m_gprs.lock(payloadGPR);
- if (info.spillFormat() != DataFormatJSCell)
+ if (!isCellPrediction(type))
speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::CellTag)));
m_gprs.unlock(tagGPR);
m_gprs.release(tagGPR);
@@ -1280,8 +1347,9 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("SpecBool@%d ", nodeIndex);
+ dataLog("SpecBool@%d ", nodeIndex);
#endif
+ PredictedType type = m_state.forNode(nodeIndex).m_type;
Node& node = m_jit.graph()[nodeIndex];
VirtualRegister virtualRegister = node.virtualRegister();
GenerationInfo& info = m_generationInfo[virtualRegister];
@@ -1306,7 +1374,7 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatBoolean);
- if (info.spillFormat() != DataFormatJSBoolean && info.spillFormat() != DataFormatBoolean)
+ if (!isBooleanPrediction(type))
speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
GPRReg gpr = allocate();
@@ -1328,7 +1396,7 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
GPRReg payloadGPR = info.payloadGPR();
m_gprs.lock(tagGPR);
m_gprs.lock(payloadGPR);
- if (info.registerFormat() != DataFormatJSBoolean)
+ if (!isBooleanPrediction(type))
speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::BooleanTag)));
m_gprs.unlock(tagGPR);
@@ -1658,17 +1726,15 @@ void SpeculativeJIT::compileLogicalNot(Node& node)
arg1.use();
- JITCompiler::Jump fastCase = m_jit.branch32(JITCompiler::Equal, arg1TagGPR, TrustedImm32(JSValue::BooleanTag));
-
- silentSpillAllRegisters(resultPayloadGPR);
- callOperation(dfgConvertJSValueToBoolean, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR);
- silentFillAllRegisters(resultPayloadGPR);
- JITCompiler::Jump doNot = m_jit.jump();
-
- fastCase.link(&m_jit);
+ JITCompiler::Jump slowCase = m_jit.branch32(JITCompiler::NotEqual, arg1TagGPR, TrustedImm32(JSValue::BooleanTag));
+
m_jit.move(arg1PayloadGPR, resultPayloadGPR);
- doNot.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCase, this, dfgConvertJSValueToBoolean, resultPayloadGPR, arg1TagGPR,
+ arg1PayloadGPR));
+
m_jit.xor32(TrustedImm32(1), resultPayloadGPR);
booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
}
@@ -1709,7 +1775,7 @@ void SpeculativeJIT::emitBranch(Node& node)
SpeculateBooleanOperand value(this, node.child1());
MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
condition = MacroAssembler::Zero;
BlockIndex tmp = taken;
taken = notTaken;
@@ -1728,7 +1794,7 @@ void SpeculativeJIT::emitBranch(Node& node)
if (at(node.child1()).shouldSpeculateInteger()) {
bool invert = false;
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
invert = true;
BlockIndex tmp = taken;
taken = notTaken;
@@ -1795,12 +1861,19 @@ void SpeculativeJIT::compile(Node& node)
AbstractValue& value = block()->valuesAtHead.operand(node.local());
// If we have no prediction for this local, then don't attempt to compile.
- if (prediction == PredictNone || value.isClear()) {
+ if (prediction == PredictNone) {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
- if (!m_jit.graph().isCaptured(node.local())) {
+ if (!node.variableAccessData()->isCaptured()) {
+ // If the CFA is tracking this variable and it found that the variable
+ // cannot have been assigned, then don't attempt to proceed.
+ if (value.isClear()) {
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
+ break;
+ }
+
if (node.variableAccessData()->shouldUseDoubleFormat()) {
FPRTemporary result(this);
m_jit.loadDouble(JITCompiler::addressFor(node.local()), result.fpr());
@@ -1860,13 +1933,22 @@ void SpeculativeJIT::compile(Node& node)
DataFormat format;
if (isCellPrediction(value.m_type)
- && !m_jit.graph().isCaptured(node.local()))
+ && !node.variableAccessData()->isCaptured())
format = DataFormatJSCell;
else
format = DataFormatJS;
m_generationInfo[virtualRegister].initJSValue(m_compileIndex, node.refCount(), tag.gpr(), result.gpr(), format);
break;
}
+
+ case GetLocalUnlinked: {
+ GPRTemporary payload(this);
+ GPRTemporary tag(this);
+ m_jit.load32(JITCompiler::payloadFor(node.unlinkedLocal()), payload.gpr());
+ m_jit.load32(JITCompiler::tagFor(node.unlinkedLocal()), tag.gpr());
+ jsValueResult(tag.gpr(), payload.gpr(), m_compileIndex);
+ break;
+ }
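
GetLocalUnlinked is a new node that reads a stack slot with no VariableAccessData attached, presumably so the new simplification passes can load a local whose linkage they have dissolved; its 32_64 codegen is just two frame-relative loads. The slot addressing that payloadFor/tagFor imply, as arithmetic:

    // Frame-slot addressing sketch (assumes the usual little-endian
    // JSVALUE32_64 layout: 8-byte slots, payload in the low word).
    #include <cstddef>

    const std::ptrdiff_t SlotSize = 8;
    const std::ptrdiff_t PayloadOffset = 0; // low 32 bits
    const std::ptrdiff_t TagOffset = 4;     // high 32 bits

    std::ptrdiff_t payloadOffsetFor(int virtualRegister)
    {
        return virtualRegister * SlotSize + PayloadOffset;
    }
    std::ptrdiff_t tagOffsetFor(int virtualRegister)
    {
        return virtualRegister * SlotSize + TagOffset;
    }
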
case SetLocal: {
// SetLocal doubles as a hint as to where a node will be stored and
@@ -1905,7 +1987,7 @@ void SpeculativeJIT::compile(Node& node)
// OSR exit, would not be visible to the old JIT in any way.
m_codeOriginForOSR = nextNode->codeOrigin;
- if (!m_jit.graph().isCaptured(node.local())) {
+ if (!node.variableAccessData()->isCaptured()) {
if (node.variableAccessData()->shouldUseDoubleFormat()) {
SpeculateDoubleOperand value(this, node.child1());
m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local()));
@@ -2258,6 +2340,13 @@ void SpeculativeJIT::compile(Node& node)
jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
break;
}
+
+ if (at(node.child1()).shouldSpeculateArguments()) {
+ compileGetByValOnArguments(node);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
if (at(node.child1()).prediction() == PredictString) {
compileGetByValOnString(node);
@@ -2369,7 +2458,9 @@ void SpeculativeJIT::compile(Node& node)
break;
}
- if (!at(node.child2()).shouldSpeculateInteger() || !isActionableMutableArrayPrediction(at(node.child1()).prediction())) {
+ if (!at(node.child2()).shouldSpeculateInteger()
+ || !isActionableMutableArrayPrediction(at(node.child1()).prediction())
+ || at(node.child1()).shouldSpeculateArguments()) {
SpeculateCellOperand base(this, node.child1()); // Save a register, speculate cell. We'll probably be right.
JSValueOperand property(this, node.child2());
JSValueOperand value(this, node.child3());
@@ -2477,15 +2568,7 @@ void SpeculativeJIT::compile(Node& node)
property.use();
value.use();
- MacroAssembler::Jump withinArrayBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset()));
-
- // Code to handle put beyond array bounds.
- silentSpillAllRegisters(scratchReg);
- callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict, baseReg, propertyReg, valueTagReg, valuePayloadReg);
- silentFillAllRegisters(scratchReg);
- JITCompiler::Jump wasBeyondArrayBounds = m_jit.jump();
-
- withinArrayBounds.link(&m_jit);
+ MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset()));
// Get the array storage.
GPRReg storageReg = scratchReg;
@@ -2507,8 +2590,12 @@ void SpeculativeJIT::compile(Node& node)
// Store the value to the array.
m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
-
- wasBeyondArrayBounds.link(&m_jit);
+
+ addSlowPathGenerator(
+ slowPathCall(
+ beyondArrayBounds, this,
+ m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
+ NoResult, baseReg, propertyReg, valueTagReg, valuePayloadReg));
noResult(m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -2694,15 +2781,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR);
- MacroAssembler::Jump done = m_jit.jump();
-
- slowPath.link(&m_jit);
-
- silentSpillAllRegisters(storageGPR, storageLengthGPR);
- callOperation(operationArrayPush, storageGPR, storageLengthGPR, valueTagGPR, valuePayloadGPR, baseGPR);
- silentFillAllRegisters(storageGPR, storageLengthGPR);
-
- done.link(&m_jit);
+ addSlowPathGenerator(slowPathCall(slowPath, this, operationArrayPush, JSValueRegs(storageGPR, storageLengthGPR), valueTagGPR, valuePayloadGPR, baseGPR));
jsValueResult(storageGPR, storageLengthGPR, m_compileIndex);
break;
@@ -2727,7 +2806,8 @@ void SpeculativeJIT::compile(Node& node)
m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR);
m_jit.load32(MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), storageLengthGPR);
- MacroAssembler::Jump emptyArrayCase = m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
+ JITCompiler::JumpList setUndefinedCases;
+ setUndefinedCases.append(m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR));
m_jit.sub32(TrustedImm32(1), storageLengthGPR);
@@ -2738,30 +2818,23 @@ void SpeculativeJIT::compile(Node& node)
m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)));
- MacroAssembler::Jump holeCase = m_jit.branch32(MacroAssembler::Equal, TrustedImm32(JSValue::EmptyValueTag), valueTagGPR);
+ setUndefinedCases.append(m_jit.branch32(MacroAssembler::Equal, TrustedImm32(JSValue::EmptyValueTag), valueTagGPR));
m_jit.store32(TrustedImm32(JSValue::EmptyValueTag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
m_jit.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
- MacroAssembler::JumpList done;
-
- done.append(m_jit.jump());
-
- holeCase.link(&m_jit);
- emptyArrayCase.link(&m_jit);
- m_jit.move(MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR);
- m_jit.move(MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR);
- done.append(m_jit.jump());
-
- slowCase.link(&m_jit);
-
- silentSpillAllRegisters(valueTagGPR, valuePayloadGPR);
- callOperation(operationArrayPop, valueTagGPR, valuePayloadGPR, baseGPR);
- silentFillAllRegisters(valueTagGPR, valuePayloadGPR);
-
- done.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathMove(
+ setUndefinedCases, this,
+ MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR,
+ MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR));
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCase, this, operationArrayPop,
+ JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR));
+
jsValueResult(valueTagGPR, valuePayloadGPR, m_compileIndex);
break;
}
@@ -2782,7 +2855,7 @@ void SpeculativeJIT::compile(Node& node)
MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
condition = MacroAssembler::Zero;
BlockIndex tmp = taken;
taken = notTaken;
@@ -2877,22 +2950,17 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(op1TagGPR, resultTagGPR);
m_jit.move(op1PayloadGPR, resultPayloadGPR);
} else {
- MacroAssembler::JumpList alreadyPrimitive;
-
- alreadyPrimitive.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, TrustedImm32(JSValue::CellTag)));
- alreadyPrimitive.append(m_jit.branchPtr(MacroAssembler::Equal, MacroAssembler::Address(op1PayloadGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info)));
-
- silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
- callOperation(operationToPrimitive, resultTagGPR, resultPayloadGPR, op1TagGPR, op1PayloadGPR);
- silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
-
- MacroAssembler::Jump done = m_jit.jump();
+ MacroAssembler::Jump alreadyPrimitive = m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, TrustedImm32(JSValue::CellTag));
+ MacroAssembler::Jump notPrimitive = m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1PayloadGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info));
alreadyPrimitive.link(&m_jit);
m_jit.move(op1TagGPR, resultTagGPR);
m_jit.move(op1PayloadGPR, resultPayloadGPR);
- done.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(
+ notPrimitive, this, operationToPrimitive,
+ JSValueRegs(resultTagGPR, resultPayloadGPR), op1TagGPR, op1PayloadGPR));
}
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
@@ -2917,8 +2985,10 @@ void SpeculativeJIT::compile(Node& node)
// probably has the best balance of performance and sensibility in the sense
// that it does not increase the complexity of the DFG JIT just to make StrCat
// fast and pretty.
-
- EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * node.numChildren()));
+
+ size_t scratchSize = sizeof(EncodedJSValue) * node.numChildren();
+ ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(scratchSize);
+ EncodedJSValue* buffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
for (unsigned operandIdx = 0; operandIdx < node.numChildren(); ++operandIdx) {
JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node.firstChild() + operandIdx]);
@@ -2931,11 +3001,26 @@ void SpeculativeJIT::compile(Node& node)
}
flushRegisters();
-
+
+ if (scratchSize) {
+ GPRTemporary scratch(this);
+
+ // Tell GC mark phase how much of the scratch buffer is active during call.
+ m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratch.gpr());
+ m_jit.storePtr(TrustedImmPtr(scratchSize), scratch.gpr());
+ }
+
GPRResult resultPayload(this);
GPRResult2 resultTag(this);
- callOperation(op == StrCat ? operationStrCat : operationNewArray, resultTag.gpr(), resultPayload.gpr(), buffer, node.numChildren());
+ callOperation(op == StrCat ? operationStrCat : operationNewArray, resultTag.gpr(), resultPayload.gpr(), static_cast<void*>(buffer), node.numChildren());
+
+ if (scratchSize) {
+ GPRTemporary scratch(this);
+
+ m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratch.gpr());
+ m_jit.storePtr(TrustedImmPtr(0), scratch.gpr());
+ }
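
The scratch buffer holding the spilled StrCat/NewArray operands is invisible to the collector, so the code publishes its live length through activeLengthPtr() before the call and zeroes it after; the mark phase can then conservatively scan the buffer exactly while the call might trigger a collection. (storePtr with a register destination stores through the address held in that register.) The handshake in isolation, a sketch of the emitted sequence rather than standalone code:

    // Publish/retire handshake around the call (names as in the hunk above).
    m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
    m_jit.storePtr(TrustedImmPtr(scratchSize), scratchGPR); // publish: live
    // ... callOperation(...) may allocate and GC; buffer gets scanned ...
    m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
    m_jit.storePtr(TrustedImmPtr(0), scratchGPR);           // retire: dead
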
// FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag.
cellResult(resultPayload.gpr(), m_compileIndex, UseChildrenCalledExplicitly);
@@ -3044,15 +3129,7 @@ void SpeculativeJIT::compile(Node& node)
emitAllocateJSFinalObject(scratchGPR, resultGPR, scratchGPR, slowPath);
- MacroAssembler::Jump done = m_jit.jump();
-
- slowPath.link(&m_jit);
-
- silentSpillAllRegisters(resultGPR);
- callOperation(operationCreateThis, resultGPR, calleeGPR);
- silentFillAllRegisters(resultGPR);
-
- done.link(&m_jit);
+ addSlowPathGenerator(slowPathCall(slowPath, this, operationCreateThis, resultGPR, calleeGPR));
cellResult(resultGPR, m_compileIndex);
break;
@@ -3069,15 +3146,7 @@ void SpeculativeJIT::compile(Node& node)
emitAllocateJSFinalObject(MacroAssembler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)->emptyObjectStructure()), resultGPR, scratchGPR, slowPath);
- MacroAssembler::Jump done = m_jit.jump();
-
- slowPath.link(&m_jit);
-
- silentSpillAllRegisters(resultGPR);
- callOperation(operationNewObject, resultGPR);
- silentFillAllRegisters(resultGPR);
-
- done.link(&m_jit);
+ addSlowPathGenerator(slowPathCall(slowPath, this, operationNewObject, resultGPR));
cellResult(resultGPR, m_compileIndex);
break;
@@ -3261,6 +3330,11 @@ void SpeculativeJIT::compile(Node& node)
integerResult(resultGPR, m_compileIndex);
break;
}
+
+ case GetArgumentsLength: {
+ compileGetArgumentsLength(node);
+ break;
+ }
case GetStringLength: {
SpeculateCellOperand base(this, node.child1());
@@ -3323,7 +3397,9 @@ void SpeculativeJIT::compile(Node& node)
}
case CheckStructure: {
- if (m_state.forNode(node.child1()).m_structure.isSubsetOf(node.structureSet())) {
+ AbstractValue& value = m_state.forNode(node.child1());
+ if (value.m_structure.isSubsetOf(node.structureSet())
+ && isCellPrediction(value.m_type)) {
noResult(m_compileIndex);
break;
}
@@ -3411,9 +3487,9 @@ void SpeculativeJIT::compile(Node& node)
case PutByOffset: {
#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
- SpeculateCellOperand base(this, node.child1());
+ SpeculateCellOperand base(this, node.child2());
#endif
- StorageOperand storage(this, node.child2());
+ StorageOperand storage(this, node.child1());
JSValueOperand value(this, node.child3());
GPRReg storageGPR = storage.gpr();
@@ -3671,14 +3747,11 @@ void SpeculativeJIT::compile(Node& node)
m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
- JITCompiler::Jump wasFast = m_jit.jump();
-
- structuresNotMatch.link(&m_jit);
- silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
- callOperation(operationResolveGlobal, resultTagGPR, resultPayloadGPR, resolveInfoGPR, &m_jit.codeBlock()->identifier(data.identifierNumber));
- silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
-
- wasFast.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(
+ structuresNotMatch, this, operationResolveGlobal,
+ JSValueRegs(resultTagGPR, resultPayloadGPR), resolveInfoGPR,
+ &m_jit.codeBlock()->identifier(data.identifierNumber)));
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
break;
@@ -3694,36 +3767,260 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(valuePayloadGPR, resultGPR);
- JITCompiler::Jump alreadyCreated = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
-
- silentSpillAllRegisters(resultGPR);
- callOperation(operationCreateActivation, resultGPR);
- silentFillAllRegisters(resultGPR);
+ JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
- alreadyCreated.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(notCreated, this, operationCreateActivation, resultGPR));
cellResult(resultGPR, m_compileIndex);
break;
}
- case TearOffActivation: {
+ case CreateArguments: {
JSValueOperand value(this, node.child1());
+ GPRTemporary result(this, value, false);
GPRReg valueTagGPR = value.tagGPR();
GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.move(valuePayloadGPR, resultGPR);
JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
- silentSpillAllRegisters(InvalidGPRReg);
- callOperation(operationTearOffActivation, valuePayloadGPR);
- silentFillAllRegisters(InvalidGPRReg);
+ if (node.codeOrigin.inlineCallFrame) {
+ addSlowPathGenerator(
+ slowPathCall(
+ notCreated, this, operationCreateInlinedArguments, resultGPR,
+ node.codeOrigin.inlineCallFrame));
+ } else {
+ addSlowPathGenerator(
+ slowPathCall(notCreated, this, operationCreateArguments, resultGPR));
+ }
- notCreated.link(&m_jit);
+ cellResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case TearOffActivation: {
+ JSValueOperand activationValue(this, node.child1());
+ JSValueOperand argumentsValue(this, node.child2());
+
+ GPRReg activationValueTagGPR = activationValue.tagGPR();
+ GPRReg activationValuePayloadGPR = activationValue.payloadGPR();
+ GPRReg argumentsValueTagGPR = argumentsValue.tagGPR();
+
+ JITCompiler::JumpList created;
+ created.append(m_jit.branch32(JITCompiler::NotEqual, activationValueTagGPR, TrustedImm32(JSValue::EmptyValueTag)));
+ created.append(m_jit.branch32(JITCompiler::NotEqual, argumentsValueTagGPR, TrustedImm32(JSValue::EmptyValueTag)));
+
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationTearOffActivation, NoResult, activationValuePayloadGPR,
+ static_cast<int32_t>(node.unmodifiedArgumentsRegister())));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case TearOffArguments: {
+ JSValueOperand argumentsValue(this, node.child1());
+ GPRReg argumentsValueTagGPR = argumentsValue.tagGPR();
+ GPRReg argumentsValuePayloadGPR = argumentsValue.payloadGPR();
+
+ JITCompiler::Jump created = m_jit.branch32(
+ JITCompiler::NotEqual, argumentsValueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
+
+ if (node.codeOrigin.inlineCallFrame) {
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationTearOffInlinedArguments, NoResult,
+ argumentsValuePayloadGPR, node.codeOrigin.inlineCallFrame));
+ } else {
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationTearOffArguments, NoResult,
+ argumentsValuePayloadGPR));
+ }
noResult(m_compileIndex);
break;
}
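
Activations and arguments objects are created lazily, so their reserved stack slots hold the empty value until something materializes them; tear-off therefore probes the tag and takes the slow call only when an object exists. (TearOffActivation checks both its children because a torn-off activation must also fix up a live arguments object, hence the extra unmodifiedArgumentsRegister operand; the C++ operation presumably re-checks what actually needs tearing off.) The probe idiom in isolation, names as above:

    // "Was it ever created?" probe (sketch, not standalone).
    JITCompiler::Jump created = m_jit.branch32(
        JITCompiler::NotEqual, argumentsValueTagGPR,
        TrustedImm32(JSValue::EmptyValueTag));
    // Fast path falls through with nothing to do; the slow call runs only
    // when `created` fires.
    addSlowPathGenerator(
        slowPathCall(
            created, this, operationTearOffArguments, NoResult,
            argumentsValuePayloadGPR));
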
+ case CheckArgumentsNotCreated: {
+ speculationCheck(
+ Uncountable, JSValueRegs(), NoNode,
+ m_jit.branch32(
+ JITCompiler::NotEqual,
+ JITCompiler::tagFor(m_jit.argumentsRegisterFor(node.codeOrigin)),
+ TrustedImm32(JSValue::EmptyValueTag)));
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case GetMyArgumentsLength: {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ speculationCheck(
+ ArgumentsEscaped, JSValueRegs(), NoNode,
+ m_jit.branch32(
+ JITCompiler::NotEqual,
+ JITCompiler::tagFor(m_jit.argumentsRegisterFor(node.codeOrigin)),
+ TrustedImm32(JSValue::EmptyValueTag)));
+
+ ASSERT(!node.codeOrigin.inlineCallFrame);
+ m_jit.load32(JITCompiler::payloadFor(RegisterFile::ArgumentCount), resultGPR);
+ m_jit.sub32(TrustedImm32(1), resultGPR);
+ integerResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case GetMyArgumentsLengthSafe: {
+ GPRTemporary resultPayload(this);
+ GPRTemporary resultTag(this);
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+
+ JITCompiler::Jump created = m_jit.branch32(
+ JITCompiler::NotEqual,
+ JITCompiler::tagFor(m_jit.argumentsRegisterFor(node.codeOrigin)),
+ TrustedImm32(JSValue::EmptyValueTag));
+
+ if (node.codeOrigin.inlineCallFrame) {
+ m_jit.move(
+ Imm32(node.codeOrigin.inlineCallFrame->arguments.size() - 1),
+ resultPayloadGPR);
+ } else {
+ m_jit.load32(JITCompiler::payloadFor(RegisterFile::ArgumentCount), resultPayloadGPR);
+ m_jit.sub32(TrustedImm32(1), resultPayloadGPR);
+ }
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR);
+
+ // FIXME: the slow path generator should perform a forward speculation that the
+ // result is an integer. For now we postpone the speculation by having this return
+ // a JSValue.
+
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationGetArgumentsLength,
+ JSValueRegs(resultTagGPR, resultPayloadGPR),
+ m_jit.argumentsRegisterFor(node.codeOrigin)));
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
+ break;
+ }
+
+ case GetMyArgumentByVal: {
+ SpeculateStrictInt32Operand index(this, node.child1());
+ GPRTemporary resultPayload(this);
+ GPRTemporary resultTag(this);
+ GPRReg indexGPR = index.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+
+ speculationCheck(
+ ArgumentsEscaped, JSValueRegs(), NoNode,
+ m_jit.branch32(
+ JITCompiler::NotEqual,
+ JITCompiler::tagFor(m_jit.argumentsRegisterFor(node.codeOrigin)),
+ TrustedImm32(JSValue::EmptyValueTag)));
+
+ m_jit.add32(TrustedImm32(1), indexGPR, resultPayloadGPR);
+
+ if (node.codeOrigin.inlineCallFrame) {
+ speculationCheck(
+ Uncountable, JSValueRegs(), NoNode,
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultPayloadGPR,
+ Imm32(node.codeOrigin.inlineCallFrame->arguments.size())));
+ } else {
+ speculationCheck(
+ Uncountable, JSValueRegs(), NoNode,
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultPayloadGPR,
+ JITCompiler::payloadFor(RegisterFile::ArgumentCount)));
+ }
+
+ m_jit.neg32(resultPayloadGPR);
+
+ size_t baseOffset =
+ ((node.codeOrigin.inlineCallFrame
+ ? node.codeOrigin.inlineCallFrame->stackOffset
+ : 0) + CallFrame::argumentOffsetIncludingThis(0)) * sizeof(Register);
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+ baseOffset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+ resultTagGPR);
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+ baseOffset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
+ resultPayloadGPR);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
+ break;
+ }
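
GetMyArgumentByVal reads an argument straight out of the frame, exploiting that under this layout arguments sit at negative slot offsets: the index is bumped by one to step past `this`, bounds-checked against the argument count, negated, and then used as a scaled index against a constant base. The effective address the BaseIndex loads compute, worked symbolically:

    // Effective-address arithmetic for argument i (derived from the code
    // above; argumentOffsetIncludingThis(0) is the slot of `this`).
    //
    //   resultPayloadGPR = -(i + 1)
    //   base             = (stackOffset + argumentOffsetIncludingThis(0))
    //                        * sizeof(Register)
    //   address          = callFrameRegister
    //                        + resultPayloadGPR * 8        // TimesEight
    //                        + base
    //                    = cfr + (stackOffset
    //                             + argumentOffsetIncludingThis(0)
    //                             - (i + 1)) * sizeof(Register)
    //
    // So named argument i lives (i + 1) slots below `this`, shifted by the
    // inline call frame's stackOffset when the access was inlined.
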
+ case GetMyArgumentByValSafe: {
+ SpeculateStrictInt32Operand index(this, node.child1());
+ GPRTemporary resultPayload(this);
+ GPRTemporary resultTag(this);
+ GPRReg indexGPR = index.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+
+ JITCompiler::JumpList slowPath;
+ slowPath.append(
+ m_jit.branch32(
+ JITCompiler::NotEqual,
+ JITCompiler::tagFor(m_jit.argumentsRegisterFor(node.codeOrigin)),
+ TrustedImm32(JSValue::EmptyValueTag)));
+
+ m_jit.add32(TrustedImm32(1), indexGPR, resultPayloadGPR);
+ if (node.codeOrigin.inlineCallFrame) {
+ slowPath.append(
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultPayloadGPR,
+ Imm32(node.codeOrigin.inlineCallFrame->arguments.size())));
+ } else {
+ slowPath.append(
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultPayloadGPR,
+ JITCompiler::payloadFor(RegisterFile::ArgumentCount)));
+ }
+
+ m_jit.neg32(resultPayloadGPR);
+
+ size_t baseOffset =
+ ((node.codeOrigin.inlineCallFrame
+ ? node.codeOrigin.inlineCallFrame->stackOffset
+ : 0) + CallFrame::argumentOffsetIncludingThis(0)) * sizeof(Register);
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+ baseOffset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+ resultTagGPR);
+ m_jit.load32(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+ baseOffset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
+ resultPayloadGPR);
+
+ addSlowPathGenerator(
+ slowPathCall(
+ slowPath, this, operationGetArgumentByVal,
+ JSValueRegs(resultTagGPR, resultPayloadGPR),
+ m_jit.argumentsRegisterFor(node.codeOrigin), indexGPR));
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
+ break;
+ }
+
case NewFunctionNoCheck:
compileNewFunctionNoCheck(node);
break;
@@ -3738,14 +4035,12 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(valuePayloadGPR, resultGPR);
- JITCompiler::Jump alreadyCreated = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
-
- silentSpillAllRegisters(resultGPR);
- callOperation(
- operationNewFunction, resultGPR, m_jit.codeBlock()->functionDecl(node.functionDeclIndex()));
- silentFillAllRegisters(resultGPR);
+ JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
- alreadyCreated.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(
+ notCreated, this, operationNewFunction, resultGPR,
+ m_jit.codeBlock()->functionDecl(node.functionDeclIndex())));
cellResult(resultGPR, m_compileIndex);
break;
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
index 08e7d966d..543e2b913 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
@@ -28,6 +28,9 @@
#if ENABLE(DFG_JIT)
+#include "Arguments.h"
+#include "DFGSlowPathGenerator.h"
+
namespace JSC { namespace DFG {
#if USE(JSVALUE64)
@@ -356,6 +359,31 @@ GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex)
return InvalidGPRReg;
}
+class ValueToNumberSlowPathGenerator
+ : public CallSlowPathGenerator<MacroAssembler::Jump, D_DFGOperation_EJ, GPRReg> {
+public:
+ ValueToNumberSlowPathGenerator(
+ MacroAssembler::Jump from, SpeculativeJIT* jit,
+ GPRReg resultGPR, GPRReg jsValueGPR)
+ : CallSlowPathGenerator<MacroAssembler::Jump, D_DFGOperation_EJ, GPRReg>(
+ from, jit, dfgConvertJSValueToNumber, NeedToSpill, resultGPR)
+ , m_jsValueGPR(jsValueGPR)
+ {
+ }
+
+protected:
+ virtual void generateInternal(SpeculativeJIT* jit)
+ {
+ setUp(jit);
+ recordCall(jit->callOperation(dfgConvertJSValueToNumber, FPRInfo::returnValueFPR, m_jsValueGPR));
+ jit->boxDouble(FPRInfo::returnValueFPR, m_result);
+ tearDown(jit);
+ }
+
+private:
+ GPRReg m_jsValueGPR;
+};
+
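
ValueToNumberSlowPathGenerator is a bespoke subclass because the call's double result must be boxed before rejoining the fast path, which the generic slowPathCall() helpers cannot express. The underlying pattern is simply deferred emission: the fast path records an object now, and all recorded objects emit their out-of-line code after the main pass. A minimal sketch of that pattern, with illustrative names rather than the JSC classes:

#include <cstdio>
#include <memory>
#include <vector>

// Deferred slow-path emission in miniature: fast paths stay straight-line,
// and cold code is generated in one batch at the end.
struct SlowPath {
    virtual ~SlowPath() {}
    virtual void generate() = 0; // emit the out-of-line code
};

struct CallSlowPath : SlowPath {
    const char* op;
    explicit CallSlowPath(const char* op) : op(op) {}
    void generate() override { std::printf("  emit out-of-line call to %s, jump back\n", op); }
};

struct Emitter {
    std::vector<std::unique_ptr<SlowPath>> slowPaths;
    void addSlowPathGenerator(std::unique_ptr<SlowPath> path) { slowPaths.push_back(std::move(path)); }
    void finish()
    {
        std::printf("main pass done; emitting deferred slow paths:\n");
        for (auto& path : slowPaths)
            path->generate();
    }
};

int main()
{
    Emitter jit;
    std::printf("emit fast path for ValueToNumber\n");
    jit.addSlowPathGenerator(std::unique_ptr<SlowPath>(new CallSlowPath("dfgConvertJSValueToNumber")));
    jit.finish();
    return 0;
}
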
void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node)
{
if (isKnownNumeric(node.child1().index())) {
@@ -383,19 +411,12 @@ void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node)
m_jit.move(jsValueGpr, gpr);
JITCompiler::Jump hasUnboxedDouble = m_jit.jump();
- // Next handle cells (& other JS immediates)
- nonNumeric.link(&m_jit);
- silentSpillAllRegisters(gpr);
- callOperation(dfgConvertJSValueToNumber, FPRInfo::returnValueFPR, jsValueGpr);
- boxDouble(FPRInfo::returnValueFPR, gpr);
- silentFillAllRegisters(gpr);
- JITCompiler::Jump hasCalledToNumber = m_jit.jump();
-
// Finally, handle integers.
isInteger.link(&m_jit);
m_jit.orPtr(GPRInfo::tagTypeNumberRegister, jsValueGpr, gpr);
hasUnboxedDouble.link(&m_jit);
- hasCalledToNumber.link(&m_jit);
+
+ addSlowPathGenerator(adoptPtr(new ValueToNumberSlowPathGenerator(nonNumeric, this, gpr, jsValueGpr)));
jsValueResult(result.gpr(), m_compileIndex, UseChildrenCalledExplicitly);
}
@@ -419,13 +440,11 @@ void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node)
FPRReg fpr = op1.fpr();
GPRReg gpr = result.gpr();
op1.use();
- JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful);
+ JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
- silentSpillAllRegisters(gpr);
- callOperation(toInt32, gpr, fpr);
- silentFillAllRegisters(gpr);
-
- truncatedToInteger.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
+
integerResult(gpr, m_compileIndex, UseChildrenCalledExplicitly);
return;
}
@@ -436,18 +455,13 @@ void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node)
GPRReg resultGPR = result.gpr();
op1.use();
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
-
- // First handle non-integers
- silentSpillAllRegisters(resultGPR);
- callOperation(dfgConvertJSValueToInt32, resultGPR, jsValueGpr);
- silentFillAllRegisters(resultGPR);
- JITCompiler::Jump hasCalledToInt32 = m_jit.jump();
+ JITCompiler::Jump isNotInteger = m_jit.branchPtr(MacroAssembler::Below, jsValueGpr, GPRInfo::tagTypeNumberRegister);
- // Then handle integers.
- isInteger.link(&m_jit);
m_jit.zeroExtend32ToPtr(jsValueGpr, resultGPR);
- hasCalledToInt32.link(&m_jit);
+
+ addSlowPathGenerator(
+ slowPathCall(isNotInteger, this, dfgConvertJSValueToInt32, resultGPR, jsValueGpr));
+
integerResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
}
@@ -475,7 +489,7 @@ void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
jsValueResult(result.gpr(), m_compileIndex);
}
-JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
+void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
JITCompiler::DataLabelPtr structureToCompare;
JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
@@ -483,31 +497,32 @@ JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg ba
m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
JITCompiler::DataLabelCompact loadWithPatch = m_jit.loadPtrWithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR, 0), resultGPR);
- JITCompiler::Jump done = m_jit.jump();
+ JITCompiler::Label doneLabel = m_jit.label();
- structureCheck.m_jump.link(&m_jit);
-
- if (slowPathTarget.isSet())
- slowPathTarget.link(&m_jit);
-
- JITCompiler::Label slowCase = m_jit.label();
+ OwnPtr<SlowPathGenerator> slowPath;
+ if (!slowPathTarget.isSet()) {
+ slowPath = slowPathCall(
+ structureCheck.m_jump, this, operationGetByIdOptimize, resultGPR, baseGPR,
+ identifier(identifierNumber), spillMode);
+ } else {
+ JITCompiler::JumpList slowCases;
+ slowCases.append(structureCheck.m_jump);
+ slowCases.append(slowPathTarget);
+ slowPath = slowPathCall(
+ slowCases, this, operationGetByIdOptimize, resultGPR, baseGPR,
+ identifier(identifierNumber), spillMode);
+ }
+ m_jit.addPropertyAccess(
+ PropertyAccessRecord(
+ codeOrigin, structureToCompare, structureCheck, loadWithPatch, slowPath.get(),
+ doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(resultGPR),
+ safeCast<int8_t>(scratchGPR),
+ spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
+ addSlowPathGenerator(slowPath.release());
- if (spillMode == NeedToSpill)
- silentSpillAllRegisters(resultGPR);
- JITCompiler::Call functionCall = callOperation(operationGetByIdOptimize, resultGPR, baseGPR, identifier(identifierNumber));
- if (spillMode == NeedToSpill)
- silentFillAllRegisters(resultGPR);
-
- done.link(&m_jit);
-
- JITCompiler::Label doneLabel = m_jit.label();
- m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, loadWithPatch, slowCase, doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(resultGPR), safeCast<int8_t>(scratchGPR), spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
-
if (scratchGPR != resultGPR && scratchGPR != InvalidGPRReg && spillMode == NeedToSpill)
unlock(scratchGPR);
-
- return functionCall;
}
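
Note the ownership choreography in the rewritten cachedGetById(): the PropertyAccessRecord keeps only a raw pointer to the slow path, since the patching machinery later needs its labels, while addSlowPathGenerator() takes ownership via release(). A sketch of the same handoff, with std::unique_ptr standing in for WTF::OwnPtr:

#include <cstdio>
#include <memory>

// The record borrows; the deferred-emission list owns.
struct SlowPathStub { int label; };
struct PropertyAccessRecordSketch { SlowPathStub* slowPath; }; // non-owning

int main()
{
    std::unique_ptr<SlowPathStub> slowPath(new SlowPathStub{42});
    PropertyAccessRecordSketch record = { slowPath.get() };  // borrow
    std::unique_ptr<SlowPathStub> owner(slowPath.release()); // transfer ownership
    std::printf("record still sees label %d\n", record.slowPath->label);
    return 0;
}
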
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
@@ -521,16 +536,8 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
JITCompiler::DataLabel32 storeWithPatch = m_jit.storePtrWithAddressOffsetPatch(valueGPR, JITCompiler::Address(scratchGPR, 0));
- JITCompiler::Jump done = m_jit.jump();
-
- structureCheck.m_jump.link(&m_jit);
-
- if (slowPathTarget.isSet())
- slowPathTarget.link(&m_jit);
-
- JITCompiler::Label slowCase = m_jit.label();
-
- silentSpillAllRegisters(InvalidGPRReg);
+ JITCompiler::Label doneLabel = m_jit.label();
+
V_DFGOperation_EJCI optimizedCall;
if (m_jit.strictModeFor(at(m_compileIndex).codeOrigin)) {
if (putKind == Direct)
@@ -543,13 +550,21 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
else
optimizedCall = operationPutByIdNonStrictOptimize;
}
- JITCompiler::Call functionCall = callOperation(optimizedCall, valueGPR, baseGPR, identifier(identifierNumber));
- silentFillAllRegisters(InvalidGPRReg);
-
- done.link(&m_jit);
- JITCompiler::Label doneLabel = m_jit.label();
-
- m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(storeWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), safeCast<int8_t>(scratchGPR)));
+ OwnPtr<SlowPathGenerator> slowPath;
+ if (!slowPathTarget.isSet()) {
+ slowPath = slowPathCall(
+ structureCheck.m_jump, this, optimizedCall, NoResult, valueGPR, baseGPR,
+ identifier(identifierNumber));
+ } else {
+ JITCompiler::JumpList slowCases;
+ slowCases.append(structureCheck.m_jump);
+ slowCases.append(slowPathTarget);
+ slowPath = slowPathCall(
+ slowCases, this, optimizedCall, NoResult, valueGPR, baseGPR,
+ identifier(identifierNumber));
+ }
+ m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, structureCheck, JITCompiler::DataLabelCompact(storeWithPatch.label()), slowPath.get(), doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), safeCast<int8_t>(scratchGPR)));
+ addSlowPathGenerator(slowPath.release());
}
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert)
@@ -590,7 +605,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex br
BlockIndex taken = branchNode.takenBlockIndex();
BlockIndex notTaken = branchNode.notTakenBlockIndex();
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
invert = !invert;
BlockIndex tmp = taken;
taken = notTaken;
@@ -657,7 +672,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
cond = JITCompiler::invert(cond);
callResultCondition = JITCompiler::Zero;
BlockIndex tmp = taken;
@@ -716,6 +731,35 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
m_compileIndex = branchNodeIndex;
}
+template<typename JumpType>
+class CompareAndBoxBooleanSlowPathGenerator
+ : public CallSlowPathGenerator<JumpType, S_DFGOperation_EJJ, GPRReg> {
+public:
+ CompareAndBoxBooleanSlowPathGenerator(
+ JumpType from, SpeculativeJIT* jit,
+ S_DFGOperation_EJJ function, GPRReg result, GPRReg arg1, GPRReg arg2)
+ : CallSlowPathGenerator<JumpType, S_DFGOperation_EJJ, GPRReg>(
+ from, jit, function, NeedToSpill, result)
+ , m_arg1(arg1)
+ , m_arg2(arg2)
+ {
+ }
+
+protected:
+ virtual void generateInternal(SpeculativeJIT* jit)
+ {
+ this->setUp(jit);
+ this->recordCall(jit->callOperation(this->m_function, this->m_result, m_arg1, m_arg2));
+ jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);
+ jit->m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), this->m_result);
+ this->tearDown(jit);
+ }
+
+private:
+ GPRReg m_arg1;
+ GPRReg m_arg2;
+};
+
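
The and32/or32 pair in generateInternal() boxes the helper's raw 0/1 result as a JSValue boolean. A sketch under the assumed JSVALUE64 encoding, where false and true are adjacent small constants (the values below are illustrative, not quoted from JSValue):

#include <cassert>
#include <cstdint>

static const int32_t ValueFalseSketch = 0x06; // assumed encoding
static const int32_t ValueTrueSketch  = 0x07; // assumed encoding

int32_t boxBoolean(int32_t helperResult)
{
    helperResult &= 1;                // and32(TrustedImm32(1), result)
    helperResult |= ValueFalseSketch; // or32(TrustedImm32(ValueFalse), result)
    return helperResult;
}

int main()
{
    assert(boxBoolean(0) == ValueFalseSketch);
    assert(boxBoolean(1) == ValueTrueSketch);
    return 0;
}
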
void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
JSValueOperand arg1(this, node.child1());
@@ -750,23 +794,14 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler
slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
m_jit.compare32(cond, arg1GPR, arg2GPR, resultGPR);
-
- if (!isKnownInteger(node.child1().index()) || !isKnownInteger(node.child2().index())) {
- JITCompiler::Jump haveResult = m_jit.jump();
-
- slowPath.link(&m_jit);
-
- silentSpillAllRegisters(resultGPR);
- callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
- silentFillAllRegisters(resultGPR);
-
- m_jit.andPtr(TrustedImm32(1), resultGPR);
-
- haveResult.link(&m_jit);
- }
-
m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
+ if (!isKnownInteger(node.child1().index()) || !isKnownInteger(node.child2().index())) {
+ addSlowPathGenerator(adoptPtr(
+ new CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>(
+ slowPath, this, helperFunction, resultGPR, arg1GPR, arg2GPR)));
+ }
+
jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
}
}
@@ -779,7 +814,7 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branch
// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
invert = !invert;
BlockIndex tmp = taken;
taken = notTaken;
@@ -854,6 +889,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
if (isKnownCell(node.child1().index()) && isKnownCell(node.child2().index())) {
// see if we get lucky: if the arguments are cells and they reference the same
// cell, then they must be strictly equal.
+ // FIXME: this should flush registers instead of silent spill/fill.
JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1GPR, arg2GPR);
m_jit.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert))), resultGPR);
@@ -873,41 +909,34 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
} else {
m_jit.orPtr(arg1GPR, arg2GPR, resultGPR);
+ JITCompiler::JumpList slowPathCases;
+
JITCompiler::Jump twoCellsCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
JITCompiler::Jump leftOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
- JITCompiler::Jump leftDouble = m_jit.branchTestPtr(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister);
+ slowPathCases.append(m_jit.branchTestPtr(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister));
leftOK.link(&m_jit);
JITCompiler::Jump rightOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
- JITCompiler::Jump rightDouble = m_jit.branchTestPtr(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister);
+ slowPathCases.append(m_jit.branchTestPtr(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister));
rightOK.link(&m_jit);
m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR);
+ m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
- JITCompiler::Jump done1 = m_jit.jump();
+ JITCompiler::Jump done = m_jit.jump();
twoCellsCase.link(&m_jit);
- JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1GPR, arg2GPR);
+ slowPathCases.append(m_jit.branchPtr(JITCompiler::NotEqual, arg1GPR, arg2GPR));
m_jit.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert))), resultGPR);
- JITCompiler::Jump done2 = m_jit.jump();
+ addSlowPathGenerator(
+ adoptPtr(
+ new CompareAndBoxBooleanSlowPathGenerator<MacroAssembler::JumpList>(
+ slowPathCases, this, operationCompareStrictEq, resultGPR, arg1GPR,
+ arg2GPR)));
- leftDouble.link(&m_jit);
- rightDouble.link(&m_jit);
- notEqualCase.link(&m_jit);
-
- silentSpillAllRegisters(resultGPR);
- callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR);
- silentFillAllRegisters(resultGPR);
-
- m_jit.andPtr(JITCompiler::TrustedImm32(1), resultGPR);
-
- done1.link(&m_jit);
-
- m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
-
- done2.link(&m_jit);
+ done.link(&m_jit);
}
jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
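
The restructured strict-equality fast path leans on two JSVALUE64 encoding facts: OR-ing the operands and testing against the tag mask detects "both are cells", and an unsigned compare against the number tag detects boxed int32s; anything number-tagged but below that threshold is a boxed double and takes the slow path. A sketch with illustrative tag constants (the real ones live in JSValue and the reserved tag registers):

#include <cassert>
#include <cstdint>

static const uint64_t TagTypeNumberSketch = 0xffff000000000000ull;      // assumed
static const uint64_t TagMaskSketch       = TagTypeNumberSketch | 0x6;  // assumed

bool bothCells(uint64_t a, uint64_t b)
{
    return !((a | b) & TagMaskSketch); // branchTestPtr(Zero, a|b, tagMaskRegister)
}

bool isBoxedInt32(uint64_t v)
{
    return v >= TagTypeNumberSketch; // branchPtr(AboveOrEqual, v, tagTypeNumberRegister)
}

bool needsSlowPath(uint64_t v)
{
    return !isBoxedInt32(v) && (v & TagTypeNumberSketch); // a boxed double
}

int main()
{
    assert(isBoxedInt32(TagTypeNumberSketch | 5) && !needsSlowPath(TagTypeNumberSketch | 5));
    assert(bothCells(0x1000, 0x2000));            // two fake cell pointers
    assert(needsSlowPath(0x0010000000000000ull)); // an encoded double, illustrative
    return 0;
}
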
@@ -935,8 +964,8 @@ void SpeculativeJIT::emitCall(Node& node)
GPRReg calleeGPR = callee.gpr();
use(calleeEdge);
- // The call instruction's first child is either the function (normal call) or the
- // receiver (method call). subsequent children are the arguments.
+ // The call instruction's first child is the function; the subsequent children are the
+ // arguments.
int numPassedArgs = node.numChildren() - 1;
m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(RegisterFile::ArgumentCount));
@@ -999,6 +1028,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat&
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("SpecInt@%d ", nodeIndex);
#endif
+ PredictedType type = m_state.forNode(nodeIndex).m_type;
Node& node = at(nodeIndex);
VirtualRegister virtualRegister = node.virtualRegister();
GenerationInfo& info = m_generationInfo[virtualRegister];
@@ -1056,7 +1086,8 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat&
// Check the value is an integer.
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
- speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchPtr(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
+ if (!isInt32Prediction(type))
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchPtr(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
info.fillJSValue(gpr, DataFormatJSInteger);
// If !strict we're done, return.
if (!strict) {
@@ -1144,6 +1175,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("SpecDouble@%d ", nodeIndex);
#endif
+ PredictedType type = m_state.forNode(nodeIndex).m_type;
Node& node = at(nodeIndex);
VirtualRegister virtualRegister = node.virtualRegister();
GenerationInfo& info = m_generationInfo[virtualRegister];
@@ -1228,7 +1260,8 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
- speculationCheck(BadType, JSValueRegs(jsValueGpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister));
+ if (!isNumberPrediction(type))
+ speculationCheck(BadType, JSValueRegs(jsValueGpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister));
// First, if we get here we have a double encoded as a JSValue
m_jit.move(jsValueGpr, tempGpr);
@@ -1295,6 +1328,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("SpecCell@%d ", nodeIndex);
#endif
+ PredictedType type = m_state.forNode(nodeIndex).m_type;
Node& node = at(nodeIndex);
VirtualRegister virtualRegister = node.virtualRegister();
GenerationInfo& info = m_generationInfo[virtualRegister];
@@ -1324,7 +1358,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(gpr, DataFormatJS);
- if (info.spillFormat() != DataFormatJSCell)
+ if (!isCellPrediction(type))
speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
info.fillJSValue(gpr, DataFormatJSCell);
return gpr;
@@ -1340,7 +1374,8 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
case DataFormatJS: {
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
- speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
+ if (!isCellPrediction(type))
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
info.fillJSValue(gpr, DataFormatJSCell);
return gpr;
}
@@ -1368,6 +1403,7 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("SpecBool@%d ", nodeIndex);
#endif
+ PredictedType type = m_state.forNode(nodeIndex).m_type;
Node& node = at(nodeIndex);
VirtualRegister virtualRegister = node.virtualRegister();
GenerationInfo& info = m_generationInfo[virtualRegister];
@@ -1397,7 +1433,7 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(gpr, DataFormatJS);
- if (info.spillFormat() != DataFormatJSBoolean) {
+ if (!isBooleanPrediction(type)) {
m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
@@ -1416,9 +1452,11 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
case DataFormatJS: {
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
- speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ if (!isBooleanPrediction(type)) {
+ m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
+ m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ }
info.fillJSValue(gpr, DataFormatJSBoolean);
return gpr;
}
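
The dynamic check that fillSpeculateBoolean() now elides under a boolean prediction works by XOR: with the assumed encoding above, value ^ ValueFalse maps a boolean to 0 or 1, so any bit outside the low bit flags a non-boolean, and a second XOR restores the original value. A sketch of the check:

#include <cassert>
#include <cstdint>

static const int64_t ValueFalseSketch = 0x06; // assumed encoding

bool passesBooleanCheck(int64_t bits)
{
    bits ^= ValueFalseSketch; // xorPtr(ValueFalse, gpr)
    bool ok = !(bits & ~1LL); // branchTestPtr(NonZero, gpr, ~1) must not fire
    bits ^= ValueFalseSketch; // xorPtr(ValueFalse, gpr) restores the value
    return ok;
}

int main()
{
    assert(passesBooleanCheck(0x06));  // false
    assert(passesBooleanCheck(0x07));  // true
    assert(!passesBooleanCheck(0x0a)); // some non-boolean immediate
    return 0;
}
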
@@ -1764,13 +1802,10 @@ void SpeculativeJIT::compileLogicalNot(Node& node)
m_jit.move(arg1GPR, resultGPR);
m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), resultGPR);
- JITCompiler::Jump fastCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, TrustedImm32(static_cast<int32_t>(~1)));
-
- silentSpillAllRegisters(resultGPR);
- callOperation(dfgConvertJSValueToBoolean, resultGPR, arg1GPR);
- silentFillAllRegisters(resultGPR);
+ JITCompiler::Jump slowCase = m_jit.branchTestPtr(JITCompiler::NonZero, resultGPR, TrustedImm32(static_cast<int32_t>(~1)));
- fastCase.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(slowCase, this, dfgConvertJSValueToBoolean, resultGPR, arg1GPR));
m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR);
jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
@@ -1813,7 +1848,7 @@ void SpeculativeJIT::emitBranch(Node& node)
if (at(node.child1()).shouldSpeculateInteger()) {
bool invert = false;
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
invert = true;
BlockIndex tmp = taken;
taken = notTaken;
@@ -1841,7 +1876,7 @@ void SpeculativeJIT::emitBranch(Node& node)
if (isBooleanPrediction(m_state.forNode(node.child1()).m_type)) {
MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
condition = MacroAssembler::Zero;
BlockIndex tmp = taken;
taken = notTaken;
@@ -1902,12 +1937,19 @@ void SpeculativeJIT::compile(Node& node)
AbstractValue& value = block()->valuesAtHead.operand(node.local());
// If we have no prediction for this local, then don't attempt to compile.
- if (prediction == PredictNone || value.isClear()) {
+ if (prediction == PredictNone) {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
- if (!m_jit.graph().isCaptured(node.local())) {
+ if (!node.variableAccessData()->isCaptured()) {
+ // If the CFA is tracking this variable and it found that the variable
+ // cannot have been assigned, then don't attempt to proceed.
+ if (value.isClear()) {
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
+ break;
+ }
+
if (node.variableAccessData()->shouldUseDoubleFormat()) {
FPRTemporary result(this);
m_jit.loadDouble(JITCompiler::addressFor(node.local()), result.fpr());
@@ -1939,7 +1981,7 @@ void SpeculativeJIT::compile(Node& node)
m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
DataFormat format;
- if (m_jit.graph().isCaptured(node.local()))
+ if (node.variableAccessData()->isCaptured())
format = DataFormatJS;
else if (isCellPrediction(value.m_type))
format = DataFormatJSCell;
@@ -1952,6 +1994,15 @@ void SpeculativeJIT::compile(Node& node)
break;
}
+ case GetLocalUnlinked: {
+ GPRTemporary result(this);
+
+ m_jit.loadPtr(JITCompiler::addressFor(node.unlinkedLocal()), result.gpr());
+
+ jsValueResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
case SetLocal: {
// SetLocal doubles as a hint as to where a node will be stored and
// as a speculation point. So before we speculate make sure that we
@@ -1989,7 +2040,7 @@ void SpeculativeJIT::compile(Node& node)
// OSR exit, would not be visible to the old JIT in any way.
m_codeOriginForOSR = nextNode->codeOrigin;
- if (!m_jit.graph().isCaptured(node.local())) {
+ if (!node.variableAccessData()->isCaptured()) {
if (node.variableAccessData()->shouldUseDoubleFormat()) {
SpeculateDoubleOperand value(this, node.child1());
m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local()));
@@ -2332,6 +2383,13 @@ void SpeculativeJIT::compile(Node& node)
break;
}
+ if (at(node.child1()).shouldSpeculateArguments()) {
+ compileGetByValOnArguments(node);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
if (at(node.child1()).prediction() == PredictString) {
compileGetByValOnString(node);
if (!m_compileOkay)
@@ -2453,6 +2511,65 @@ void SpeculativeJIT::compile(Node& node)
SpeculateCellOperand base(this, node.child1());
SpeculateStrictInt32Operand property(this, node.child2());
+ if (at(node.child1()).shouldSpeculateArguments()) {
+ JSValueOperand value(this, node.child3());
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+ GPRTemporary scratch(this);
+ GPRTemporary scratch2(this);
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg valueReg = value.gpr();
+ GPRReg scratchReg = scratch.gpr();
+ GPRReg scratch2Reg = scratch2.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ if (!isArgumentsPrediction(m_state.forNode(node.child1()).m_type)) {
+ speculationCheck(
+ BadType, JSValueSource::unboxedCell(baseReg), node.child1(),
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(baseReg, JSCell::classInfoOffset()),
+ MacroAssembler::TrustedImmPtr(&Arguments::s_info)));
+ }
+
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, Arguments::offsetOfData()),
+ scratchReg);
+
+            // Two checks: the index must be below the argument count, and no arguments may have been deleted.
+ speculationCheck(
+ Uncountable, JSValueSource(), NoNode,
+ m_jit.branchPtr(
+ MacroAssembler::AboveOrEqual, propertyReg,
+ MacroAssembler::Address(scratchReg, OBJECT_OFFSETOF(ArgumentsData, numArguments))));
+ speculationCheck(
+ Uncountable, JSValueSource(), NoNode,
+ m_jit.branchTestPtr(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(
+ scratchReg, OBJECT_OFFSETOF(ArgumentsData, deletedArguments))));
+
+ m_jit.move(propertyReg, scratch2Reg);
+ m_jit.neg32(scratch2Reg);
+ m_jit.signExtend32ToPtr(scratch2Reg, scratch2Reg);
+ m_jit.loadPtr(
+ MacroAssembler::Address(scratchReg, OBJECT_OFFSETOF(ArgumentsData, registers)),
+ scratchReg);
+
+ m_jit.storePtr(
+ valueReg,
+ MacroAssembler::BaseIndex(
+ scratchReg, scratch2Reg, MacroAssembler::TimesEight,
+ CallFrame::thisArgumentOffset() * sizeof(Register) - sizeof(Register)));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
if (at(node.child1()).shouldSpeculateInt8Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
@@ -2539,15 +2656,7 @@ void SpeculativeJIT::compile(Node& node)
property.use();
value.use();
- MacroAssembler::Jump withinArrayBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset()));
-
- // Code to handle put beyond array bounds.
- silentSpillAllRegisters(scratchReg);
- callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict, baseReg, propertyReg, valueReg);
- silentFillAllRegisters(scratchReg);
- JITCompiler::Jump wasBeyondArrayBounds = m_jit.jump();
-
- withinArrayBounds.link(&m_jit);
+ MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset()));
// Get the array storage.
GPRReg storageReg = scratchReg;
@@ -2569,7 +2678,11 @@ void SpeculativeJIT::compile(Node& node)
// Store the value to the array.
m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
- wasBeyondArrayBounds.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(
+ beyondArrayBounds, this,
+ m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
+ NoResult, baseReg, propertyReg, valueReg));
noResult(m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -2751,15 +2864,10 @@ void SpeculativeJIT::compile(Node& node)
m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
- MacroAssembler::Jump done = m_jit.jump();
-
- slowPath.link(&m_jit);
-
- silentSpillAllRegisters(storageLengthGPR);
- callOperation(operationArrayPush, storageLengthGPR, valueGPR, baseGPR);
- silentFillAllRegisters(storageLengthGPR);
-
- done.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(
+                slowPath, this, operationArrayPush, storageLengthGPR,
+ valueGPR, baseGPR));
jsValueResult(storageLengthGPR, m_compileIndex);
break;
@@ -2782,7 +2890,8 @@ void SpeculativeJIT::compile(Node& node)
m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR);
m_jit.load32(MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), storageLengthGPR);
- MacroAssembler::Jump emptyArrayCase = m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
+ JITCompiler::JumpList setUndefinedCases;
+ setUndefinedCases.append(m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR));
m_jit.sub32(TrustedImm32(1), storageLengthGPR);
@@ -2792,28 +2901,20 @@ void SpeculativeJIT::compile(Node& node)
m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)));
- MacroAssembler::Jump holeCase = m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR);
+ setUndefinedCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR));
m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
- MacroAssembler::JumpList done;
-
- done.append(m_jit.jump());
-
- holeCase.link(&m_jit);
- emptyArrayCase.link(&m_jit);
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR);
- done.append(m_jit.jump());
-
- slowCase.link(&m_jit);
-
- silentSpillAllRegisters(valueGPR);
- callOperation(operationArrayPop, valueGPR, baseGPR);
- silentFillAllRegisters(valueGPR);
-
- done.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathMove(
+ setUndefinedCases, this,
+ MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR));
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCase, this, operationArrayPop, valueGPR, baseGPR));
+
jsValueResult(valueGPR, m_compileIndex);
break;
}
@@ -2834,7 +2935,7 @@ void SpeculativeJIT::compile(Node& node)
MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
condition = MacroAssembler::Zero;
BlockIndex tmp = taken;
taken = notTaken;
@@ -2913,21 +3014,14 @@ void SpeculativeJIT::compile(Node& node)
if (!(m_state.forNode(node.child1()).m_type & ~(PredictNumber | PredictBoolean)))
m_jit.move(op1GPR, resultGPR);
else {
- MacroAssembler::JumpList alreadyPrimitive;
-
- alreadyPrimitive.append(m_jit.branchTestPtr(MacroAssembler::NonZero, op1GPR, GPRInfo::tagMaskRegister));
- alreadyPrimitive.append(m_jit.branchPtr(MacroAssembler::Equal, MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info)));
-
- silentSpillAllRegisters(resultGPR);
- callOperation(operationToPrimitive, resultGPR, op1GPR);
- silentFillAllRegisters(resultGPR);
-
- MacroAssembler::Jump done = m_jit.jump();
+ MacroAssembler::Jump alreadyPrimitive = m_jit.branchTestPtr(MacroAssembler::NonZero, op1GPR, GPRInfo::tagMaskRegister);
+ MacroAssembler::Jump notPrimitive = m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info));
alreadyPrimitive.link(&m_jit);
m_jit.move(op1GPR, resultGPR);
- done.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(notPrimitive, this, operationToPrimitive, resultGPR, op1GPR));
}
jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
@@ -2952,8 +3046,10 @@ void SpeculativeJIT::compile(Node& node)
// probably has the best balance of performance and sensibility in the sense
// that it does not increase the complexity of the DFG JIT just to make StrCat
// fast and pretty.
-
- EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * node.numChildren()));
+
+ size_t scratchSize = sizeof(EncodedJSValue) * node.numChildren();
+ ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(scratchSize);
+ EncodedJSValue* buffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
for (unsigned operandIdx = 0; operandIdx < node.numChildren(); ++operandIdx) {
JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node.firstChild() + operandIdx]);
@@ -2964,11 +3060,26 @@ void SpeculativeJIT::compile(Node& node)
}
flushRegisters();
-
+
+ if (scratchSize) {
+ GPRTemporary scratch(this);
+
+            // Tell the GC mark phase how much of the scratch buffer is active during the call.
+ m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratch.gpr());
+ m_jit.storePtr(TrustedImmPtr(scratchSize), scratch.gpr());
+ }
+
GPRResult result(this);
- callOperation(op == StrCat ? operationStrCat : operationNewArray, result.gpr(), buffer, node.numChildren());
-
+    callOperation(op == StrCat ? operationStrCat : operationNewArray, result.gpr(), static_cast<void*>(buffer), node.numChildren());
+
+ if (scratchSize) {
+ GPRTemporary scratch(this);
+
+ m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratch.gpr());
+ m_jit.storePtr(TrustedImmPtr(0), scratch.gpr());
+ }
+
cellResult(result.gpr(), m_compileIndex, UseChildrenCalledExplicitly);
break;
}
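
The ScratchBuffer dance around the call is a GC protocol: publish the number of live bytes before the call so a conservatively scanning collector treats the buffer contents as roots, then zero the count once the call returns. A minimal sketch of the protocol, with an illustrative class rather than the JSC one:

#include <cstdio>
#include <cstddef>

struct ScratchBufferSketch {
    size_t activeLength;
    char data[256];
    size_t* activeLengthPtr() { return &activeLength; }
    void* dataBuffer() { return data; }
};

int main()
{
    ScratchBufferSketch buffer = {};
    size_t scratchSize = 3 * sizeof(void*);

    *buffer.activeLengthPtr() = scratchSize; // storePtr(scratchSize, activeLengthPtr)
    std::printf("GC may scan %zu bytes during the call\n", *buffer.activeLengthPtr());
    // ... the operation call happens here ...
    *buffer.activeLengthPtr() = 0;           // storePtr(0, activeLengthPtr)
    std::printf("buffer inactive again (%zu bytes)\n", *buffer.activeLengthPtr());
    return 0;
}
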
@@ -3068,15 +3179,7 @@ void SpeculativeJIT::compile(Node& node)
emitAllocateJSFinalObject(scratchGPR, resultGPR, scratchGPR, slowPath);
- MacroAssembler::Jump done = m_jit.jump();
-
- slowPath.link(&m_jit);
-
- silentSpillAllRegisters(resultGPR);
- callOperation(operationCreateThis, resultGPR, calleeGPR);
- silentFillAllRegisters(resultGPR);
-
- done.link(&m_jit);
+ addSlowPathGenerator(slowPathCall(slowPath, this, operationCreateThis, resultGPR, calleeGPR));
cellResult(resultGPR, m_compileIndex);
break;
@@ -3093,15 +3196,7 @@ void SpeculativeJIT::compile(Node& node)
emitAllocateJSFinalObject(MacroAssembler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)->emptyObjectStructure()), resultGPR, scratchGPR, slowPath);
- MacroAssembler::Jump done = m_jit.jump();
-
- slowPath.link(&m_jit);
-
- silentSpillAllRegisters(resultGPR);
- callOperation(operationNewObject, resultGPR);
- silentFillAllRegisters(resultGPR);
-
- done.link(&m_jit);
+ addSlowPathGenerator(slowPathCall(slowPath, this, operationNewObject, resultGPR));
cellResult(resultGPR, m_compileIndex);
break;
@@ -3271,6 +3366,11 @@ void SpeculativeJIT::compile(Node& node)
integerResult(resultGPR, m_compileIndex);
break;
}
+
+ case GetArgumentsLength: {
+ compileGetArgumentsLength(node);
+ break;
+ }
case GetStringLength: {
SpeculateCellOperand base(this, node.child1());
@@ -3331,7 +3431,9 @@ void SpeculativeJIT::compile(Node& node)
break;
}
case CheckStructure: {
- if (m_state.forNode(node.child1()).m_structure.isSubsetOf(node.structureSet())) {
+ AbstractValue& value = m_state.forNode(node.child1());
+ if (value.m_structure.isSubsetOf(node.structureSet())
+ && isCellPrediction(value.m_type)) {
noResult(m_compileIndex);
break;
}
@@ -3416,9 +3518,9 @@ void SpeculativeJIT::compile(Node& node)
case PutByOffset: {
#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
- SpeculateCellOperand base(this, node.child1());
+ SpeculateCellOperand base(this, node.child2());
#endif
- StorageOperand storage(this, node.child2());
+ StorageOperand storage(this, node.child1());
JSValueOperand value(this, node.child3());
GPRReg storageGPR = storage.gpr();
@@ -3660,27 +3762,26 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(JITCompiler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectGPR);
m_jit.move(JITCompiler::TrustedImmPtr(resolveInfoAddress), resolveInfoGPR);
m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), resultGPR);
- JITCompiler::Jump structuresMatch = m_jit.branchPtr(JITCompiler::Equal, resultGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset()));
-
- silentSpillAllRegisters(resultGPR);
- callOperation(operationResolveGlobal, resultGPR, resolveInfoGPR, &m_jit.codeBlock()->identifier(data.identifierNumber));
- silentFillAllRegisters(resultGPR);
-
- JITCompiler::Jump wasSlow = m_jit.jump();
+ JITCompiler::Jump structuresDontMatch = m_jit.branchPtr(JITCompiler::NotEqual, resultGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset()));
// Fast case
- structuresMatch.link(&m_jit);
m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR);
m_jit.loadPtr(JITCompiler::BaseIndex(resultGPR, resolveInfoGPR, JITCompiler::ScalePtr), resultGPR);
- wasSlow.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(
+ structuresDontMatch, this, operationResolveGlobal,
+ resultGPR, resolveInfoGPR,
+ &m_jit.codeBlock()->identifier(data.identifierNumber)));
jsValueResult(resultGPR, m_compileIndex);
break;
}
case CreateActivation: {
+ ASSERT(!node.codeOrigin.inlineCallFrame);
+
JSValueOperand value(this, node.child1());
GPRTemporary result(this, value);
@@ -3689,34 +3790,240 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(valueGPR, resultGPR);
- JITCompiler::Jump alreadyCreated = m_jit.branchTestPtr(JITCompiler::NonZero, resultGPR);
+ JITCompiler::Jump notCreated = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR);
- silentSpillAllRegisters(resultGPR);
- callOperation(operationCreateActivation, resultGPR);
- silentFillAllRegisters(resultGPR);
-
- alreadyCreated.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(notCreated, this, operationCreateActivation, resultGPR));
cellResult(resultGPR, m_compileIndex);
break;
}
- case TearOffActivation: {
+ case CreateArguments: {
JSValueOperand value(this, node.child1());
+ GPRTemporary result(this, value);
+
GPRReg valueGPR = value.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.move(valueGPR, resultGPR);
+
+ JITCompiler::Jump notCreated = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR);
+
+ if (node.codeOrigin.inlineCallFrame) {
+ addSlowPathGenerator(
+ slowPathCall(
+ notCreated, this, operationCreateInlinedArguments, resultGPR,
+ node.codeOrigin.inlineCallFrame));
+ } else {
+ addSlowPathGenerator(
+ slowPathCall(notCreated, this, operationCreateArguments, resultGPR));
+ }
+
+ cellResult(resultGPR, m_compileIndex);
+ break;
+ }
- JITCompiler::Jump notCreated = m_jit.branchTestPtr(JITCompiler::Zero, valueGPR);
+ case TearOffActivation: {
+ ASSERT(!node.codeOrigin.inlineCallFrame);
+
+ JSValueOperand activationValue(this, node.child1());
+ JSValueOperand argumentsValue(this, node.child2());
+ GPRReg activationValueGPR = activationValue.gpr();
+ GPRReg argumentsValueGPR = argumentsValue.gpr();
+
+ JITCompiler::JumpList created;
+ created.append(m_jit.branchTestPtr(JITCompiler::NonZero, activationValueGPR));
+ created.append(m_jit.branchTestPtr(JITCompiler::NonZero, argumentsValueGPR));
+
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationTearOffActivation, NoResult, activationValueGPR,
+ static_cast<int32_t>(node.unmodifiedArgumentsRegister())));
+
+ noResult(m_compileIndex);
+ break;
+ }
- silentSpillAllRegisters(InvalidGPRReg);
- callOperation(operationTearOffActivation, valueGPR);
- silentFillAllRegisters(InvalidGPRReg);
+ case TearOffArguments: {
+ JSValueOperand argumentsValue(this, node.child1());
+ GPRReg argumentsValueGPR = argumentsValue.gpr();
- notCreated.link(&m_jit);
+ JITCompiler::Jump created = m_jit.branchTestPtr(JITCompiler::NonZero, argumentsValueGPR);
+
+ if (node.codeOrigin.inlineCallFrame) {
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationTearOffInlinedArguments, NoResult,
+ argumentsValueGPR, node.codeOrigin.inlineCallFrame));
+ } else {
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationTearOffArguments, NoResult, argumentsValueGPR));
+ }
noResult(m_compileIndex);
break;
}
+ case GetMyArgumentsLength: {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ speculationCheck(
+ ArgumentsEscaped, JSValueRegs(), NoNode,
+ m_jit.branchTestPtr(
+ JITCompiler::NonZero,
+ JITCompiler::addressFor(
+ m_jit.argumentsRegisterFor(node.codeOrigin))));
+
+ ASSERT(!node.codeOrigin.inlineCallFrame);
+ m_jit.load32(JITCompiler::payloadFor(RegisterFile::ArgumentCount), resultGPR);
+ m_jit.sub32(TrustedImm32(1), resultGPR);
+ integerResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case GetMyArgumentsLengthSafe: {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ JITCompiler::Jump created = m_jit.branchTestPtr(
+ JITCompiler::NonZero,
+ JITCompiler::addressFor(
+ m_jit.argumentsRegisterFor(node.codeOrigin)));
+
+ if (node.codeOrigin.inlineCallFrame) {
+ m_jit.move(
+ ImmPtr(
+ bitwise_cast<void*>(
+ JSValue::encode(
+ jsNumber(node.codeOrigin.inlineCallFrame->arguments.size() - 1)))),
+ resultGPR);
+ } else {
+ m_jit.load32(JITCompiler::payloadFor(RegisterFile::ArgumentCount), resultGPR);
+ m_jit.sub32(TrustedImm32(1), resultGPR);
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, resultGPR);
+ }
+
+ // FIXME: the slow path generator should perform a forward speculation that the
+ // result is an integer. For now we postpone the speculation by having this return
+ // a JSValue.
+
+ addSlowPathGenerator(
+ slowPathCall(
+ created, this, operationGetArgumentsLength, resultGPR,
+ m_jit.argumentsRegisterFor(node.codeOrigin)));
+
+ jsValueResult(resultGPR, m_compileIndex);
+ break;
+ }
+
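
GetMyArgumentsLengthSafe boxes its result because, per the FIXME above, the slow path returns a JSValue: ArgumentCount includes 'this', so the fast path computes count - 1 and ORs in the number tag. A sketch with an assumed tag constant:

#include <cassert>
#include <cstdint>

static const uint64_t TagTypeNumberSketch = 0xffff000000000000ull; // assumed

uint64_t boxedArgumentsLength(uint32_t argumentCountIncludingThis)
{
    uint32_t length = argumentCountIncludingThis - 1; // sub32(TrustedImm32(1), ...)
    return TagTypeNumberSketch | length;              // orPtr(tagTypeNumberRegister, ...)
}

int main()
{
    assert((boxedArgumentsLength(3) & 0xffffffffu) == 2);
    assert(boxedArgumentsLength(3) >= TagTypeNumberSketch); // reads back as a boxed int32
    return 0;
}
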
+ case GetMyArgumentByVal: {
+ SpeculateStrictInt32Operand index(this, node.child1());
+ GPRTemporary result(this);
+ GPRReg indexGPR = index.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ speculationCheck(
+ ArgumentsEscaped, JSValueRegs(), NoNode,
+ m_jit.branchTestPtr(
+ JITCompiler::NonZero,
+ JITCompiler::addressFor(
+ m_jit.argumentsRegisterFor(node.codeOrigin))));
+
+ m_jit.add32(TrustedImm32(1), indexGPR, resultGPR);
+ if (node.codeOrigin.inlineCallFrame) {
+ speculationCheck(
+ Uncountable, JSValueRegs(), NoNode,
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultGPR,
+ Imm32(node.codeOrigin.inlineCallFrame->arguments.size())));
+ } else {
+ speculationCheck(
+ Uncountable, JSValueRegs(), NoNode,
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultGPR,
+ JITCompiler::payloadFor(RegisterFile::ArgumentCount)));
+ }
+
+ m_jit.neg32(resultGPR);
+ m_jit.signExtend32ToPtr(resultGPR, resultGPR);
+
+ m_jit.loadPtr(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight,
+ ((node.codeOrigin.inlineCallFrame
+ ? node.codeOrigin.inlineCallFrame->stackOffset
+ : 0) + CallFrame::argumentOffsetIncludingThis(0)) * sizeof(Register)),
+ resultGPR);
+
+ jsValueResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case GetMyArgumentByValSafe: {
+ SpeculateStrictInt32Operand index(this, node.child1());
+ GPRTemporary result(this);
+ GPRReg indexGPR = index.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ JITCompiler::JumpList slowPath;
+ slowPath.append(
+ m_jit.branchTestPtr(
+ JITCompiler::NonZero,
+ JITCompiler::addressFor(
+ m_jit.argumentsRegisterFor(node.codeOrigin))));
+
+ m_jit.add32(TrustedImm32(1), indexGPR, resultGPR);
+ if (node.codeOrigin.inlineCallFrame) {
+ slowPath.append(
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultGPR,
+ Imm32(node.codeOrigin.inlineCallFrame->arguments.size())));
+ } else {
+ slowPath.append(
+ m_jit.branch32(
+ JITCompiler::AboveOrEqual,
+ resultGPR,
+ JITCompiler::payloadFor(RegisterFile::ArgumentCount)));
+ }
+
+ m_jit.neg32(resultGPR);
+ m_jit.signExtend32ToPtr(resultGPR, resultGPR);
+
+ m_jit.loadPtr(
+ JITCompiler::BaseIndex(
+ GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight,
+ ((node.codeOrigin.inlineCallFrame
+ ? node.codeOrigin.inlineCallFrame->stackOffset
+ : 0) + CallFrame::argumentOffsetIncludingThis(0)) * sizeof(Register)),
+ resultGPR);
+
+ addSlowPathGenerator(
+ slowPathCall(
+ slowPath, this, operationGetArgumentByVal, resultGPR,
+ m_jit.argumentsRegisterFor(node.codeOrigin),
+ indexGPR));
+
+ jsValueResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case CheckArgumentsNotCreated: {
+ speculationCheck(
+ ArgumentsEscaped, JSValueRegs(), NoNode,
+ m_jit.branchTestPtr(
+ JITCompiler::NonZero,
+ JITCompiler::addressFor(
+ m_jit.argumentsRegisterFor(node.codeOrigin))));
+ noResult(m_compileIndex);
+ break;
+ }
+
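
CheckArgumentsNotCreated and its siblings decide whether the lazily created arguments object exists by inspecting the reserved local. A sketch of the two flavors, under these assumptions: on 64-bit the empty JSValue encodes as all-zero bits, so one branchTestPtr on the slot suffices, while the 32-bit version earlier in this patch compares the tag word against JSValue::EmptyValueTag (the value below is a placeholder):

#include <cassert>
#include <cstdint>

static const int32_t EmptyValueTagSketch = -6; // placeholder

bool argumentsCreated64(uint64_t slotBits) { return slotBits != 0; }
bool argumentsCreated32(int32_t tagWord)   { return tagWord != EmptyValueTagSketch; }

int main()
{
    assert(!argumentsCreated64(0));
    assert(argumentsCreated64(0x1000)); // a cell pointer: arguments were made
    assert(!argumentsCreated32(EmptyValueTagSketch));
    return 0;
}
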
case NewFunctionNoCheck:
compileNewFunctionNoCheck(node);
break;
@@ -3730,14 +4037,12 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(valueGPR, resultGPR);
- JITCompiler::Jump alreadyCreated = m_jit.branchTestPtr(JITCompiler::NonZero, resultGPR);
-
- silentSpillAllRegisters(resultGPR);
- callOperation(
- operationNewFunction, resultGPR, m_jit.codeBlock()->functionDecl(node.functionDeclIndex()));
- silentFillAllRegisters(resultGPR);
+ JITCompiler::Jump notCreated = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR);
- alreadyCreated.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(
+ notCreated, this, operationNewFunction,
+ resultGPR, m_jit.codeBlock()->functionDecl(node.functionDeclIndex())));
cellResult(resultGPR, m_compileIndex);
break;
diff --git a/Source/JavaScriptCore/dfg/DFGThunks.cpp b/Source/JavaScriptCore/dfg/DFGThunks.cpp
index d7c3fab23..1ed46c11f 100644
--- a/Source/JavaScriptCore/dfg/DFGThunks.cpp
+++ b/Source/JavaScriptCore/dfg/DFGThunks.cpp
@@ -39,7 +39,9 @@ MacroAssemblerCodeRef osrExitGenerationThunkGenerator(JSGlobalData* globalData)
{
MacroAssembler jit;
- EncodedJSValue* buffer = static_cast<EncodedJSValue*>(globalData->scratchBufferForSize(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters)));
+ size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
+ ScratchBuffer* scratchBuffer = globalData->scratchBufferForSize(scratchSize);
+ EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
jit.storePtr(GPRInfo::toRegister(i), buffer + i);
@@ -48,15 +50,22 @@ MacroAssemblerCodeRef osrExitGenerationThunkGenerator(JSGlobalData* globalData)
jit.storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
}
+    // Tell the GC mark phase how much of the scratch buffer is active during the call.
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(scratchSize), GPRInfo::regT0);
+
// Set up one argument.
#if CPU(X86)
jit.poke(GPRInfo::callFrameRegister, 0);
#else
jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#endif
-
+
MacroAssembler::Call functionCall = jit.call();
-
+
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(0), GPRInfo::regT0);
+
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
jit.move(MacroAssembler::TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
jit.loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
diff --git a/Source/JavaScriptCore/dfg/DFGValidate.cpp b/Source/JavaScriptCore/dfg/DFGValidate.cpp
new file mode 100644
index 000000000..2b26123d8
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGValidate.cpp
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGValidate.h"
+
+#if ENABLE(DFG_JIT)
+
+#include <wtf/Assertions.h>
+#include <wtf/BitVector.h>
+
+namespace JSC { namespace DFG {
+
+#if DFG_ENABLE(VALIDATION)
+
+class Validate {
+public:
+ Validate(Graph& graph, GraphDumpMode graphDumpMode)
+ : m_graph(graph)
+ , m_graphDumpMode(graphDumpMode)
+ {
+ }
+
+ #define VALIDATE(context, assertion) do { \
+ if (!(assertion)) { \
+ dataLog("\n\n\nAt "); \
+ reportValidationContext context; \
+ dataLog(": validation %s (%s:%d) failed.\n", #assertion, __FILE__, __LINE__); \
+ dumpGraphIfAppropriate(); \
+ WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
+ CRASH(); \
+ } \
+ } while (0)
+
+ #define V_EQUAL(context, left, right) do { \
+ if (left != right) { \
+ dataLog("\n\n\nAt "); \
+ reportValidationContext context; \
+ dataLog(": validation (%s = ", #left); \
+ dumpData(left); \
+ dataLog(") == (%s = ", #right); \
+ dumpData(right); \
+ dataLog(") (%s:%d) failed.\n", __FILE__, __LINE__); \
+ dumpGraphIfAppropriate(); \
+ WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #left " == " #right); \
+ CRASH(); \
+ } \
+ } while (0)
+
+ #define notSet (static_cast<size_t>(-1))
+
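
The VALIDATE and V_EQUAL macros follow a classic pattern: stringize the assertion, print the failing context, and crash immediately so a broken invariant is caught next to the phase that introduced it. A standalone sketch of the same pattern, simplified so that a printf-style context replaces the overloaded reportValidationContext and abort() replaces the graph dump and WTF reporting hooks:

#include <cstdio>
#include <cstdlib>

#define VALIDATE_SKETCH(contextFmt, context, assertion) do { \
        if (!(assertion)) { \
            std::fprintf(stderr, "\nAt " contextFmt, context); \
            std::fprintf(stderr, ": validation %s (%s:%d) failed.\n", \
                #assertion, __FILE__, __LINE__); \
            std::abort(); \
        } \
    } while (0)

int main()
{
    unsigned myRefCount = 2, claimedRefCount = 2;
    VALIDATE_SKETCH("@%u", 13u, myRefCount == claimedRefCount); // passes silently
    return 0;
}
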
+ void validate()
+ {
+ // NB. This code is not written for performance, since it is not intended to run
+ // in release builds.
+
+ // Validate ref counts and uses.
+ Vector<unsigned> myRefCounts;
+ myRefCounts.fill(0, m_graph.size());
+ BitVector acceptableNodeIndices;
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ if (!block->isReachable)
+ continue;
+ for (size_t i = 0; i < block->numNodes(); ++i) {
+ NodeIndex nodeIndex = block->nodeIndex(i);
+ acceptableNodeIndices.set(nodeIndex);
+ Node& node = m_graph[nodeIndex];
+ if (!node.shouldGenerate())
+ continue;
+ for (unsigned j = 0; j < m_graph.numChildren(node); ++j) {
+ Edge edge = m_graph.child(node, j);
+ if (!edge)
+ continue;
+
+ myRefCounts[edge.index()]++;
+
+ // Unless I'm a Flush, Phantom, GetLocal, or Phi, my children should hasResult().
+                    // Unless I'm a Flush, Phantom, GetLocal, or Phi, each of my children should satisfy hasResult().
+ case Flush:
+ case Phantom:
+ case GetLocal:
+ case Phi:
+ break;
+ default:
+ VALIDATE((nodeIndex, edge), m_graph[edge].hasResult());
+ break;
+ }
+ }
+ }
+ }
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ if (!block->isReachable)
+ continue;
+
+ BitVector phisInThisBlock;
+ BitVector nodesInThisBlock;
+
+ for (size_t i = 0; i < block->numNodes(); ++i) {
+ NodeIndex nodeIndex = block->nodeIndex(i);
+ Node& node = m_graph[nodeIndex];
+ nodesInThisBlock.set(nodeIndex);
+ if (block->isPhiIndex(i))
+ phisInThisBlock.set(nodeIndex);
+ V_EQUAL((nodeIndex), myRefCounts[nodeIndex], node.adjustedRefCount());
+ for (unsigned j = 0; j < m_graph.numChildren(node); ++j) {
+ Edge edge = m_graph.child(node, j);
+ if (!edge)
+ continue;
+ VALIDATE((nodeIndex, edge), acceptableNodeIndices.get(edge.index()));
+ }
+ }
+
+ for (size_t i = 0; i < block->phis.size(); ++i) {
+ NodeIndex nodeIndex = block->phis[i];
+ Node& node = m_graph[nodeIndex];
+ ASSERT(phisInThisBlock.get(nodeIndex));
+ VALIDATE((nodeIndex), node.op() == Phi);
+ VirtualRegister local = node.local();
+ for (unsigned j = 0; j < m_graph.numChildren(node); ++j) {
+ Edge edge = m_graph.child(node, j);
+ if (!edge)
+ continue;
+
+ VALIDATE((nodeIndex, edge),
+ m_graph[edge].op() == SetLocal
+ || m_graph[edge].op() == SetArgument
+ || m_graph[edge].op() == Flush
+ || m_graph[edge].op() == Phi);
+
+ if (phisInThisBlock.get(edge.index()))
+ continue;
+
+ if (nodesInThisBlock.get(edge.index())) {
+ VALIDATE((nodeIndex, edge),
+ m_graph[edge].op() == SetLocal
+ || m_graph[edge].op() == SetArgument
+ || m_graph[edge].op() == Flush);
+
+ continue;
+ }
+
+ // There must exist a predecessor block that has this node index in
+ // its tail variables.
+ bool found = false;
+ for (unsigned k = 0; k < block->m_predecessors.size(); ++k) {
+ BasicBlock* prevBlock = m_graph.m_blocks[block->m_predecessors[k]].get();
+ VALIDATE((Block, block->m_predecessors[k]), prevBlock);
+ VALIDATE((Block, block->m_predecessors[k]), prevBlock->isReachable);
+ NodeIndex prevNodeIndex = prevBlock->variablesAtTail.operand(local);
+ // If we have a Phi that is not referring to *this* block then all predecessors
+ // must have that local available.
+ VALIDATE((local, blockIndex, Block, block->m_predecessors[k]), prevNodeIndex != NoNode);
+ Node* prevNode = &m_graph[prevNodeIndex];
+ if (prevNode->op() == GetLocal) {
+ prevNodeIndex = prevNode->child1().index();
+ prevNode = &m_graph[prevNodeIndex];
+ }
+ if (node.shouldGenerate()) {
+ VALIDATE((local, block->m_predecessors[k], prevNodeIndex),
+ prevNode->shouldGenerate());
+ }
+ VALIDATE((local, block->m_predecessors[k], prevNodeIndex),
+ prevNode->op() == SetLocal
+ || prevNode->op() == SetArgument
+ || prevNode->op() == Flush
+ || prevNode->op() == Phi);
+ if (prevNodeIndex == edge.index()) {
+ found = true;
+ break;
+ }
+ // At this point it cannot refer into this block.
+ VALIDATE((local, block->m_predecessors[k], prevNodeIndex), !prevBlock->isInBlock(edge.index()));
+ }
+
+ VALIDATE((nodeIndex, edge), found);
+ }
+ }
+
+ Operands<size_t> getLocalPositions(
+ block->variablesAtHead.numberOfArguments(),
+ block->variablesAtHead.numberOfLocals());
+ Operands<size_t> setLocalPositions(
+ block->variablesAtHead.numberOfArguments(),
+ block->variablesAtHead.numberOfLocals());
+
+ for (size_t i = 0; i < block->variablesAtHead.numberOfArguments(); ++i) {
+ getLocalPositions.argument(i) = notSet;
+ setLocalPositions.argument(i) = notSet;
+ }
+ for (size_t i = 0; i < block->variablesAtHead.numberOfLocals(); ++i) {
+ getLocalPositions.local(i) = notSet;
+ setLocalPositions.local(i) = notSet;
+ }
+
+ for (size_t i = 0; i < block->size(); ++i) {
+ NodeIndex nodeIndex = block->at(i);
+ Node& node = m_graph[nodeIndex];
+ ASSERT(nodesInThisBlock.get(nodeIndex));
+ VALIDATE((nodeIndex), node.op() != Phi);
+ for (unsigned j = 0; j < m_graph.numChildren(node); ++j) {
+ Edge edge = m_graph.child(node, j);
+ if (!edge)
+ continue;
+ VALIDATE((nodeIndex, edge), nodesInThisBlock.get(edge.index()));
+ }
+
+ if (!node.shouldGenerate())
+ continue;
+ switch (node.op()) {
+ case GetLocal:
+ if (node.variableAccessData()->isCaptured())
+ break;
+ VALIDATE((nodeIndex, blockIndex), getLocalPositions.operand(node.local()) == notSet);
+ getLocalPositions.operand(node.local()) = i;
+ break;
+ case SetLocal:
+ if (node.variableAccessData()->isCaptured())
+ break;
+ // Only record the first SetLocal. There may be multiple SetLocals
+ // because of flushing.
+ if (setLocalPositions.operand(node.local()) != notSet)
+ break;
+ setLocalPositions.operand(node.local()) = i;
+ break;
+ default:
+ break;
+ }
+ }
+
+ for (size_t i = 0; i < block->variablesAtHead.numberOfArguments(); ++i) {
+ checkOperand(
+ blockIndex, getLocalPositions, setLocalPositions, argumentToOperand(i));
+ }
+ for (size_t i = 0; i < block->variablesAtHead.numberOfLocals(); ++i) {
+ checkOperand(
+ blockIndex, getLocalPositions, setLocalPositions, i);
+ }
+ }
+ }
+
+private:
+ Graph& m_graph;
+ GraphDumpMode m_graphDumpMode;
+
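+ // If a block both reads and writes an uncaptured operand, the (first)
+ // GetLocal must come before the (first) SetLocal.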
+ void checkOperand(
+ BlockIndex blockIndex, Operands<size_t>& getLocalPositions,
+ Operands<size_t>& setLocalPositions, int operand)
+ {
+ if (getLocalPositions.operand(operand) == notSet)
+ return;
+ if (setLocalPositions.operand(operand) == notSet)
+ return;
+
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+
+ VALIDATE(
+ (block->at(getLocalPositions.operand(operand)),
+ block->at(setLocalPositions.operand(operand)),
+ blockIndex),
+ getLocalPositions.operand(operand) < setLocalPositions.operand(operand));
+ }
+
+ void reportValidationContext(NodeIndex nodeIndex)
+ {
+ dataLog("@%u", nodeIndex);
+ }
+
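+ // BlockTag disambiguates the overloads below, since a bare unsigned
+ // could be either a node index or a block index.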
+ enum BlockTag { Block };
+ void reportValidationContext(BlockTag, BlockIndex blockIndex)
+ {
+ dataLog("Block #%u", blockIndex);
+ }
+
+ void reportValidationContext(NodeIndex nodeIndex, Edge edge)
+ {
+ dataLog("@%u -> %s@%u", nodeIndex, useKindToString(edge.useKind()), edge.index());
+ }
+
+ void reportValidationContext(
+ VirtualRegister local, BlockIndex sourceBlockIndex, BlockTag, BlockIndex destinationBlockIndex)
+ {
+ dataLog("r%d in Block #%u -> #%u", local, sourceBlockIndex, destinationBlockIndex);
+ }
+
+ void reportValidationContext(
+ VirtualRegister local, BlockIndex sourceBlockIndex, NodeIndex prevNodeIndex)
+ {
+ dataLog("@%u for r%d in Block #%u", prevNodeIndex, local, sourceBlockIndex);
+ }
+
+ void reportValidationContext(
+ NodeIndex nodeIndex, BlockIndex blockIndex)
+ {
+ dataLog("@%u in Block #%u", nodeIndex, blockIndex);
+ }
+
+ void reportValidationContext(
+ NodeIndex nodeIndex, NodeIndex nodeIndex2, BlockIndex blockIndex)
+ {
+ dataLog("@%u and @%u in Block #%u", nodeIndex, nodeIndex2, blockIndex);
+ }
+
+ void reportValidationContext(
+ NodeIndex nodeIndex, BlockIndex blockIndex, NodeIndex expectedNodeIndex, Edge incomingEdge)
+ {
+ dataLog("@%u in Block #%u, searching for @%u from @%u", nodeIndex, blockIndex, expectedNodeIndex, incomingEdge.index());
+ }
+
+ void dumpData(unsigned value)
+ {
+ dataLog("%u", value);
+ }
+
+ void dumpGraphIfAppropriate()
+ {
+ if (m_graphDumpMode == DontDumpGraph)
+ return;
+ dataLog("Graph at time of failure:\n");
+ m_graph.dump();
+ }
+};
+
+void validate(Graph& graph, GraphDumpMode graphDumpMode)
+{
+ Validate validationObject(graph, graphDumpMode);
+ validationObject.validate();
+}
+
+#endif // DFG_ENABLE(VALIDATION)
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
diff --git a/Source/JavaScriptCore/dfg/DFGValidate.h b/Source/JavaScriptCore/dfg/DFGValidate.h
new file mode 100644
index 000000000..353c3b696
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGValidate.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGValidate_h
+#define DFGValidate_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGCommon.h"
+#include "DFGGraph.h"
+
+namespace JSC { namespace DFG {
+
+enum GraphDumpMode { DontDumpGraph, DumpGraph };
+
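+// In validation-enabled builds, validate() checks the graph's invariants;
+// on failure it reports the offending context and, with DumpGraph, dumps
+// the whole graph. Otherwise it compiles to a no-op.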
+#if DFG_ENABLE(VALIDATION)
+void validate(Graph&, GraphDumpMode = DumpGraph);
+#else
+inline void validate(Graph&, GraphDumpMode = DumpGraph) { }
+#endif
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGValidate_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGVariableAccessData.h b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
index 1d99ed516..3dfd94d01 100644
--- a/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
+++ b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
@@ -47,16 +47,20 @@ public:
, m_argumentAwarePrediction(PredictNone)
, m_flags(0)
, m_doubleFormatState(EmptyDoubleFormatState)
+ , m_isCaptured(false)
+ , m_isArgumentsAlias(false)
{
clearVotes();
}
- VariableAccessData(VirtualRegister local)
+ VariableAccessData(VirtualRegister local, bool isCaptured)
: m_local(local)
, m_prediction(PredictNone)
, m_argumentAwarePrediction(PredictNone)
, m_flags(0)
, m_doubleFormatState(EmptyDoubleFormatState)
+ , m_isCaptured(isCaptured)
+ , m_isArgumentsAlias(false)
{
clearVotes();
}
@@ -72,6 +76,34 @@ public:
return static_cast<int>(local());
}
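+ // Merging only ever raises the captured bit. The bool return reports
+ // whether anything changed, so callers can iterate to a fixpoint.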
+ bool mergeIsCaptured(bool isCaptured)
+ {
+ bool newIsCaptured = m_isCaptured | isCaptured;
+ if (newIsCaptured == m_isCaptured)
+ return false;
+ m_isCaptured = newIsCaptured;
+ return true;
+ }
+
+ bool isCaptured()
+ {
+ return m_isCaptured;
+ }
+
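+ // The same one-way merge, for the arguments-alias bit.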
+ bool mergeIsArgumentsAlias(bool isArgumentsAlias)
+ {
+ bool newIsArgumentsAlias = m_isArgumentsAlias | isArgumentsAlias;
+ if (newIsArgumentsAlias == m_isArgumentsAlias)
+ return false;
+ m_isArgumentsAlias = newIsArgumentsAlias;
+ return true;
+ }
+
+ bool isArgumentsAlias()
+ {
+ return m_isArgumentsAlias;
+ }
+
bool predict(PredictedType prediction)
{
VariableAccessData* self = find();
@@ -220,6 +252,9 @@ private:
float m_votes[2];
DoubleFormatState m_doubleFormatState;
+
+ bool m_isCaptured;
+ bool m_isArgumentsAlias;
};
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp
index 11ac69524..2d7ce33c9 100644
--- a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp
@@ -40,7 +40,7 @@ public:
{
}
- void run()
+ bool run()
{
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("Preserved vars: ");
@@ -54,6 +54,10 @@ public:
#endif
for (size_t blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
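+ // Blocks can now be null or unreachable (e.g. after CFG simplification);
+ // skip them.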
+ if (!block)
+ continue;
+ if (!block->isReachable)
+ continue;
for (size_t indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
NodeIndex nodeIndex = block->at(indexInBlock);
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
@@ -66,22 +70,21 @@ public:
if (!node.shouldGenerate() || node.op() == Phi || node.op() == Flush)
continue;
-
- // GetLocal nodes are effectively phi nodes in the graph, referencing
- // results from prior blocks.
- if (node.op() != GetLocal) {
- // First, call use on all of the current node's children, then
- // allocate a VirtualRegister for this node. We do so in this
- // order so that if a child is on its last use, and a
- // VirtualRegister is freed, then it may be reused for node.
- if (node.flags() & NodeHasVarArgs) {
- for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
- scoreBoard.use(m_graph.m_varArgChildren[childIdx]);
- } else {
- scoreBoard.use(node.child1());
- scoreBoard.use(node.child2());
- scoreBoard.use(node.child3());
- }
+
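+ // A GetLocal's child is the variable's Phi, which has no result
+ // register, so the useIfHasResult() calls below will skip it.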
+ if (node.op() == GetLocal)
+ ASSERT(!m_graph[node.child1()].hasResult());
+
+ // First, call use on all of the current node's children, then
+ // allocate a VirtualRegister for this node. We do so in this
+ // order so that if a child is at its last use, and its
+ // VirtualRegister is thereby freed, it may be reused for this node.
+ if (node.flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
+ scoreBoard.useIfHasResult(m_graph.m_varArgChildren[childIdx]);
+ } else {
+ scoreBoard.useIfHasResult(node.child1());
+ scoreBoard.useIfHasResult(node.child2());
+ scoreBoard.useIfHasResult(node.child3());
}
if (!node.hasResult())
@@ -122,12 +125,14 @@ public:
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("Num callee registers: %u\n", calleeRegisters);
#endif
+
+ return true;
}
};
-void performVirtualRegisterAllocation(Graph& graph)
+bool performVirtualRegisterAllocation(Graph& graph)
{
- runPhase<VirtualRegisterAllocationPhase>(graph);
+ return runPhase<VirtualRegisterAllocationPhase>(graph);
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h
index abfa6ae64..5878ed13f 100644
--- a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h
+++ b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h
@@ -42,7 +42,7 @@ class Graph;
// for look-up tables for the linear scan register allocator that the backend
// uses.
-void performVirtualRegisterAllocation(Graph&);
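+// Returns a bool now, like the other phases' run() methods; this phase
+// always reports true.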
+bool performVirtualRegisterAllocation(Graph&);
} } // namespace JSC::DFG