path: root/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
author     Simon Hausmann <simon.hausmann@nokia.com>    2012-02-24 16:36:50 +0100
committer  Simon Hausmann <simon.hausmann@nokia.com>    2012-02-24 16:36:50 +0100
commit     ad0d549d4cc13433f77c1ac8f0ab379c83d93f28 (patch)
tree       b34b0daceb7c8e7fdde4b4ec43650ab7caadb0a9 /Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
parent     03e12282df9aa1e1fb05a8b90f1cfc2e08764cec (diff)
download   qtwebkit-ad0d549d4cc13433f77c1ac8f0ab379c83d93f28.tar.gz
Imported WebKit commit bb52bf3c0119e8a128cd93afe5572413a8617de9 (http://svn.webkit.org/repository/webkit/trunk@108790)
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp  198
1 file changed, 117 insertions, 81 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 2a5d249b3..0e575db4e 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -45,10 +45,10 @@ namespace JSC { namespace DFG {
// This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser {
public:
- ByteCodeParser(JSGlobalData* globalData, CodeBlock* codeBlock, CodeBlock* profiledBlock, Graph& graph)
- : m_globalData(globalData)
- , m_codeBlock(codeBlock)
- , m_profiledBlock(profiledBlock)
+ ByteCodeParser(Graph& graph)
+ : m_globalData(&graph.m_globalData)
+ , m_codeBlock(graph.m_codeBlock)
+ , m_profiledBlock(graph.m_profiledBlock)
, m_graph(graph)
, m_currentBlock(0)
, m_currentIndex(0)
@@ -57,10 +57,10 @@ public:
, m_constantNull(UINT_MAX)
, m_constantNaN(UINT_MAX)
, m_constant1(UINT_MAX)
- , m_constants(codeBlock->numberOfConstantRegisters())
- , m_numArguments(codeBlock->numParameters())
- , m_numLocals(codeBlock->m_numCalleeRegisters)
- , m_preservedVars(codeBlock->m_numVars)
+ , m_constants(m_codeBlock->numberOfConstantRegisters())
+ , m_numArguments(m_codeBlock->numParameters())
+ , m_numLocals(m_codeBlock->m_numCalleeRegisters)
+ , m_preservedVars(m_codeBlock->m_numVars)
, m_parameterSlots(0)
, m_numPassedVarArgs(0)
, m_globalResolveNumber(0)
@@ -69,7 +69,7 @@ public:
{
ASSERT(m_profiledBlock);
- for (int i = 0; i < codeBlock->m_numVars; ++i)
+ for (int i = 0; i < m_codeBlock->m_numVars; ++i)
m_preservedVars.set(i);
}
@@ -108,6 +108,8 @@ private:
};
template<PhiStackType stackType>
void processPhiStack();
+
+ void fixVariableAccessPredictions();
// Add spill locations to nodes.
void allocateVirtualRegisters();
@@ -155,6 +157,22 @@ private:
{
setDirect(m_inlineStackTop->remapOperand(operand), value);
}
+
+ NodeIndex injectLazyOperandPrediction(NodeIndex nodeIndex)
+ {
+ Node& node = m_graph[nodeIndex];
+ ASSERT(node.op == GetLocal);
+ ASSERT(node.codeOrigin.bytecodeIndex == m_currentIndex);
+ PredictedType prediction =
+ m_inlineStackTop->m_lazyOperands.prediction(
+ LazyOperandValueProfileKey(m_currentIndex, node.local()));
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("Lazy operand [@%u, bc#%u, r%d] prediction: %s\n",
+ nodeIndex, m_currentIndex, node.local(), predictionToString(prediction));
+#endif
+ node.variableAccessData()->predict(prediction);
+ return nodeIndex;
+ }
// Used in implementing get/set, above, where the operand is a local variable.
NodeIndex getLocal(unsigned operand)
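
The hunk above threads a lazily collected, per-(bytecode offset, operand) prediction into every GetLocal the parser emits. Below is a minimal standalone sketch of that lookup-and-merge idea, with stand-in types throughout (a plain bitmask for PredictedType, Variable for VariableAccessData, and a std::map keyed on the (bytecode offset, operand) pair in place of LazyOperandValueProfileKey); it illustrates the pattern, not the WebKit implementation.

    #include <cstdio>
    #include <map>
    #include <utility>

    // Stand-in for PredictedType: a bitmask of speculated types.
    using Prediction = unsigned;
    constexpr Prediction PredictNone  = 0;
    constexpr Prediction PredictInt32 = 1u << 0;

    // Stand-in for VariableAccessData: predictions merge monotonically.
    struct Variable {
        Prediction prediction = PredictNone;
        void predict(Prediction p) { prediction |= p; }
    };

    // Stand-in for the lazy-operand profile: keyed on (bytecode offset, operand).
    using LazyKey = std::pair<unsigned, int>;

    int main()
    {
        std::map<LazyKey, Prediction> lazyProfiles = {
            { { 12u, -3 }, PredictInt32 },   // profile gathered for operand r-3 at bc#12
        };

        Variable local;
        unsigned bytecodeIndex = 12;
        int operand = -3;

        // Analogue of injectLazyOperandPrediction: look up the profile for this
        // (bytecode offset, operand) pair and fold it into the variable.
        auto it = lazyProfiles.find({ bytecodeIndex, operand });
        Prediction lazy = (it == lazyProfiles.end()) ? PredictNone : it->second;
        local.predict(lazy);

        std::printf("prediction bits: %u\n", local.prediction);
        return 0;
    }
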
@@ -170,12 +188,13 @@ private:
Node& flushChild = m_graph[nodePtr->child1()];
if (flushChild.op == Phi) {
VariableAccessData* variableAccessData = flushChild.variableAccessData();
- nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1().index());
+ nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1().index()));
m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
return nodeIndex;
}
nodePtr = &flushChild;
}
+ ASSERT(nodePtr->op != Flush);
if (nodePtr->op == GetLocal)
return nodeIndex;
ASSERT(nodePtr->op == SetLocal);
@@ -190,7 +209,7 @@ private:
NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
- nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
+ nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);
@@ -219,17 +238,20 @@ private:
Node& flushChild = m_graph[nodePtr->child1()];
if (flushChild.op == Phi) {
VariableAccessData* variableAccessData = flushChild.variableAccessData();
- nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1().index());
+ nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1().index()));
m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
return nodeIndex;
}
nodePtr = &flushChild;
}
+
+ ASSERT(nodePtr->op != Flush);
+
if (nodePtr->op == SetArgument) {
// We're getting an argument in the first basic block; link
// the GetLocal to the SetArgument.
ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
- nodeIndex = addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex);
+ nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
return nodeIndex;
}
@@ -245,7 +267,7 @@ private:
NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
- nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
+ nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);
@@ -282,13 +304,10 @@ private:
if (nodeIndex != NoNode) {
Node& node = m_graph[nodeIndex];
- if (node.op == Flush || node.op == SetArgument) {
- // If a local has already been flushed, or if it's an argument in the
- // first basic block, then there is really no need to flush it. In fact
- // emitting a Flush instruction could just confuse things, since the
- // getArgument() code assumes that we never see a Flush of a SetArgument.
- return;
- }
+ if (node.op == Flush)
+ nodeIndex = node.child1().index();
+
+ ASSERT(m_graph[nodeIndex].op != Flush);
addToGraph(Flush, OpInfo(node.variableAccessData()), nodeIndex);
return;
@@ -585,11 +604,9 @@ private:
{
UNUSED_PARAM(nodeIndex);
- ValueProfile* profile = m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(bytecodeIndex);
- ASSERT(profile);
- PredictedType prediction = profile->computeUpdatedPrediction();
+ PredictedType prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(bytecodeIndex);
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
+ dataLog("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
#endif
return prediction;
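
The change above collapses the two-step lookup (valueProfileForBytecodeOffset, then computeUpdatedPrediction) into a single call on the profiled block. The sketch below reconstructs what such a wrapper plausibly folds together, using only the shape visible in the removed lines; the types are stand-ins, and the null check is an assumption added so the sketch stays total.

    #include <cstdio>

    // Stand-ins only; in the tree these live on ValueProfile / CodeBlock.
    struct ValueProfile {
        unsigned bits = 0;
        unsigned computeUpdatedPrediction() { return bits; }
    };

    struct ProfiledBlockSketch {
        ValueProfile* profileForOffset[8] = {};

        ValueProfile* valueProfileForBytecodeOffset(unsigned bc)
        {
            return bc < 8 ? profileForOffset[bc] : nullptr;
        }

        // Sketch of the single-call helper the parser now uses.
        unsigned valueProfilePredictionForBytecodeOffset(unsigned bc)
        {
            ValueProfile* profile = valueProfileForBytecodeOffset(bc);
            if (!profile)
                return 0; // i.e. PredictNone (assumed fallback)
            return profile->computeUpdatedPrediction();
        }
    };

    int main()
    {
        ValueProfile profile;
        profile.bits = 5;
        ProfiledBlockSketch block;
        block.profileForOffset[3] = &profile;
        std::printf("prediction at bc#3: %u\n",
                    block.valueProfilePredictionForBytecodeOffset(3));
        return 0;
    }
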
@@ -626,7 +643,7 @@ private:
return nodeIndex;
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Making %s @%u safe at bc#%u because slow-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
+ dataLog("Making %s @%u safe at bc#%u because slow-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif
switch (m_graph[nodeIndex].op) {
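
The printf-to-dataLog substitution repeated throughout this diff routes diagnostic output through WTF's logging facility rather than stdout; the dumpChildren(WTF::dataFile()) changes further down show the destination. Below is a hedged sketch of a helper of that shape, a printf-style forwarder to a data file; dataFile() here is a local stand-in (stderr), and the real signature is not taken from the tree.

    #include <cstdarg>
    #include <cstdio>

    // Stand-in for WTF::dataFile(): wherever diagnostics should go.
    static std::FILE* dataFile() { return stderr; }

    // Sketch of a dataLog-shaped helper: printf-style formatting, routed to
    // the data file instead of stdout.
    static void dataLogSketch(const char* format, ...)
    {
        va_list args;
        va_start(args, format);
        std::vfprintf(dataFile(), format, args);
        va_end(args);
    }

    int main()
    {
        dataLogSketch("Making %s @%u safe at bc#%u\n", "ArithMul", 42u, 7u);
        return 0;
    }
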
@@ -642,13 +659,13 @@ private:
if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
|| m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
+ dataLog("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
#endif
m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);
} else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
|| m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Making ArithMul @%u take faster slow case.\n", nodeIndex);
+ dataLog("Making ArithMul @%u take faster slow case.\n", nodeIndex);
#endif
m_graph[nodeIndex].mergeArithNodeFlags(NodeMayNegZero);
}
@@ -678,7 +695,7 @@ private:
return nodeIndex;
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
+ dataLog("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif
// FIXME: It might be possible to make this more granular. The DFG certainly can
@@ -850,6 +867,11 @@ private:
VirtualRegister m_returnValue;
+ // Predictions about variable types collected from the profiled code block,
+ // which are based on OSR exit profiles that past DFG compilations of this
+ // code block had gathered.
+ LazyOperandValueProfileParser m_lazyOperands;
+
// Did we see any returns? We need to handle the (uncommon but necessary)
// case where a procedure that does not return was inlined.
bool m_didReturn;
@@ -912,13 +934,13 @@ void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentIn
enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Slow case count for call at @%zu bc#%u: %u/%u; exit profile: %d.\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_profiledBlock->executionEntryCount(), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+ dataLog("Slow case count for call at @%zu bc#%u: %u/%u; exit profile: %d.\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_profiledBlock->executionEntryCount(), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
#endif
CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
m_inlineStackTop->m_profiledBlock, m_currentIndex);
- if (m_graph.isFunctionConstant(m_codeBlock, callTarget))
+ if (m_graph.isFunctionConstant(callTarget))
callType = ConstantFunction;
else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
&& !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
@@ -946,7 +968,7 @@ void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentIn
Intrinsic intrinsic;
bool certainAboutExpectedFunction;
if (callType == ConstantFunction) {
- expectedFunction = m_graph.valueOfFunctionConstant(m_codeBlock, callTarget);
+ expectedFunction = m_graph.valueOfFunctionConstant(callTarget);
intrinsic = expectedFunction->executable()->intrinsicFor(kind);
certainAboutExpectedFunction = true;
} else {
@@ -1022,6 +1044,9 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
// If we get here then it looks like we should definitely inline this code. Proceed
// with parsing the code to get bytecode, so that we can then parse the bytecode.
+ // Note that if LLInt is enabled, the bytecode will always be available. Also note
+ // that if LLInt is enabled, we may inline a code block that has never been JITted
+ // before!
CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
if (!codeBlock)
return false;
@@ -1029,7 +1054,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
ASSERT(canInlineFunctionFor(codeBlock, kind));
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Inlining executable %p.\n", executable);
+ dataLog("Inlining executable %p.\n", executable);
#endif
// Now we know without a doubt that we are committed to inlining. So begin the process
@@ -1110,7 +1135,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
// caller. It doesn't need to be linked to, but it needs outgoing links.
if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
+ dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
#endif
// For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
// for release builds because this block will never serve as a potential target
@@ -1122,7 +1147,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
m_currentBlock = m_graph.m_blocks.last().get();
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
+ dataLog("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
#endif
return true;
}
@@ -1149,7 +1174,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
// Need to create a new basic block for the continuation at the caller.
OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_graph.size(), m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+ dataLog("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
m_currentBlock = block.get();
ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
@@ -1161,7 +1186,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
// At this point we return and continue to generate code for the caller, but
// in the new basic block.
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Done inlining executable %p, continuing code generation in new block.\n", executable);
+ dataLog("Done inlining executable %p, continuing code generation in new block.\n", executable);
#endif
return true;
}
@@ -1343,7 +1368,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
addToGraph(Jump, OpInfo(m_currentIndex));
else {
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
+ dataLog("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
#endif
}
return shouldContinueParsing;
@@ -1722,7 +1747,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
m_inlineStackTop->m_profiledBlock, m_currentIndex);
if (methodCallStatus.isSet()
- && !getByIdStatus.isSet()
+ && !getByIdStatus.wasSeenInJIT()
&& !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
// It's monomorphic as far as we can tell, since the method_check was linked
// but the slow path (i.e. the normal get_by_id) never fired.
@@ -1767,7 +1792,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Slow case count for GetById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+ dataLog("Slow case count for GetById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
#endif
if (getByIdStatus.isSimpleDirect()
@@ -1791,7 +1816,9 @@ bool ByteCodeParser::parseBlock(unsigned limit)
NEXT_OPCODE(op_get_by_id);
}
- case op_put_by_id: {
+ case op_put_by_id:
+ case op_put_by_id_transition_direct:
+ case op_put_by_id_transition_normal: {
NodeIndex value = get(currentInstruction[3].u.operand);
NodeIndex base = get(currentInstruction[1].u.operand);
unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
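
The new case labels above let the specialized transition opcodes reuse the generic op_put_by_id parsing body, relying on a shared operand layout. A compact sketch of that fallthrough shape follows, with hypothetical opcode values and a hypothetical parsePutById helper standing in for the inline body.

    #include <cstdio>

    // Stand-in opcode IDs; the real ones come from the bytecode definitions.
    enum OpcodeID {
        op_put_by_id,
        op_put_by_id_transition_direct,
        op_put_by_id_transition_normal,
        op_ret
    };

    // Hypothetical helper standing in for the shared parsing body.
    static void parsePutById(OpcodeID op) { std::printf("parsed put_by_id variant %d\n", op); }

    static void parseOpcode(OpcodeID op)
    {
        switch (op) {
        // All three variants share one operand layout, so they share one body.
        case op_put_by_id:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
            parsePutById(op);
            break;
        default:
            break;
        }
    }

    int main()
    {
        parseOpcode(op_put_by_id_transition_normal);
        return 0;
    }
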
@@ -1807,7 +1834,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
bool hasExitSite = m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache);
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Slow case count for PutById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+ dataLog("Slow case count for PutById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
#endif
if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
@@ -2186,12 +2213,12 @@ void ByteCodeParser::processPhiStack()
VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
+ dataLog(" Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
#endif
for (size_t i = 0; i < predecessors.size(); ++i) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Dealing with predecessor block %u.\n", predecessors[i]);
+ dataLog(" Dealing with predecessor block %u.\n", predecessors[i]);
#endif
BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();
@@ -2201,7 +2228,7 @@ void ByteCodeParser::processPhiStack()
NodeIndex valueInPredecessor = var;
if (valueInPredecessor == NoNode) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Did not find node, adding phi.\n");
+ dataLog(" Did not find node, adding phi.\n");
#endif
valueInPredecessor = addToGraph(Phi, OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo))));
@@ -2213,7 +2240,7 @@ void ByteCodeParser::processPhiStack()
phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
} else if (m_graph[valueInPredecessor].op == GetLocal) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Found GetLocal @%u.\n", valueInPredecessor);
+ dataLog(" Found GetLocal @%u.\n", valueInPredecessor);
#endif
// We want to ensure that the VariableAccessDatas are identical between the
@@ -2225,7 +2252,7 @@ void ByteCodeParser::processPhiStack()
valueInPredecessor = m_graph[valueInPredecessor].child1().index();
} else {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Found @%u.\n", valueInPredecessor);
+ dataLog(" Found @%u.\n", valueInPredecessor);
#endif
}
ASSERT(m_graph[valueInPredecessor].op == SetLocal || m_graph[valueInPredecessor].op == Phi || m_graph[valueInPredecessor].op == Flush || (m_graph[valueInPredecessor].op == SetArgument && stackType == ArgumentPhiStack));
@@ -2236,48 +2263,48 @@ void ByteCodeParser::processPhiStack()
Node* phiNode = &m_graph[entry.m_phi];
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
+ dataLog(" Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
#endif
if (phiNode->refCount()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Reffing @%u.\n", valueInPredecessor);
+ dataLog(" Reffing @%u.\n", valueInPredecessor);
#endif
m_graph.ref(valueInPredecessor);
}
if (!phiNode->child1()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
+ dataLog(" Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
phiNode->children.setChild1(NodeUse(valueInPredecessor));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Children of @%u: ", entry.m_phi);
- phiNode->dumpChildren(stdout);
- printf(".\n");
+ dataLog(" Children of @%u: ", entry.m_phi);
+ phiNode->dumpChildren(WTF::dataFile());
+ dataLog(".\n");
#endif
continue;
}
if (!phiNode->child2()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
+ dataLog(" Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
phiNode->children.setChild2(NodeUse(valueInPredecessor));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Children of @%u: ", entry.m_phi);
- phiNode->dumpChildren(stdout);
- printf(".\n");
+ dataLog(" Children of @%u: ", entry.m_phi);
+ phiNode->dumpChildren(WTF::dataFile());
+ dataLog(".\n");
#endif
continue;
}
if (!phiNode->child3()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
+ dataLog(" Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
phiNode->children.setChild3(NodeUse(valueInPredecessor));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Children of @%u: ", entry.m_phi);
- phiNode->dumpChildren(stdout);
- printf(".\n");
+ dataLog(" Children of @%u: ", entry.m_phi);
+ phiNode->dumpChildren(WTF::dataFile());
+ dataLog(".\n");
#endif
continue;
}
@@ -2285,7 +2312,7 @@ void ByteCodeParser::processPhiStack()
NodeIndex newPhi = addToGraph(Phi, OpInfo(dataForPhi));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
+ dataLog(" Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
#endif
phiNode = &m_graph[entry.m_phi]; // reload after vector resize
@@ -2296,22 +2323,30 @@ void ByteCodeParser::processPhiStack()
newPhiNode.children = phiNode->children;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Children of @%u: ", newPhi);
- newPhiNode.dumpChildren(stdout);
- printf(".\n");
+ dataLog(" Children of @%u: ", newPhi);
+ newPhiNode.dumpChildren(WTF::dataFile());
+ dataLog(".\n");
#endif
phiNode->children.initialize(newPhi, valueInPredecessor, NoNode);
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Children of @%u: ", entry.m_phi);
- phiNode->dumpChildren(stdout);
- printf(".\n");
+ dataLog(" Children of @%u: ", entry.m_phi);
+ phiNode->dumpChildren(WTF::dataFile());
+ dataLog(".\n");
#endif
}
}
}
+void ByteCodeParser::fixVariableAccessPredictions()
+{
+ for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
+ VariableAccessData* data = &m_graph.m_variableAccessData[i];
+ data->find()->predict(data->nonUnifiedPrediction());
+ }
+}
+
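
fixVariableAccessPredictions, added above, walks every VariableAccessData and folds its locally gathered prediction into its union-find representative (find()), so variables that were unified during parsing end up agreeing on one prediction. The standalone sketch below models that pass with a toy path-compressing union-find and bitmask predictions; all names are stand-ins.

    #include <cstdio>
    #include <vector>

    using Prediction = unsigned;

    struct VariableData {
        VariableData* parent = this;   // union-find parent pointer
        Prediction unified = 0;        // prediction held by the representative
        Prediction local = 0;          // prediction gathered on this node alone

        VariableData* find()
        {
            // Path-compressing find: chase parents to the representative.
            if (parent != this)
                parent = parent->find();
            return parent;
        }
        void predict(Prediction p) { unified |= p; }
    };

    int main()
    {
        std::vector<VariableData> vars(3);
        vars[1].parent = &vars[0];     // vars[0] and vars[1] were unified earlier
        vars[0].local = 1u << 0;
        vars[1].local = 1u << 1;

        // Analogue of fixVariableAccessPredictions: push every node's local
        // prediction up to its representative.
        for (auto& data : vars)
            data.find()->predict(data.local);

        std::printf("representative prediction: %u\n", vars[0].unified); // prints 3
        return 0;
    }
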
void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
{
ASSERT(block->end != NoNode);
@@ -2324,7 +2359,7 @@ void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTa
case Jump:
node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
+ dataLog("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
#endif
break;
@@ -2332,13 +2367,13 @@ void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTa
node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
node.setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.notTakenBytecodeOffsetDuringParsing()));
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
+ dataLog("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
#endif
break;
default:
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Marking basic block %p as linked.\n", block);
+ dataLog("Marking basic block %p as linked.\n", block);
#endif
break;
}
@@ -2414,6 +2449,7 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParse
, m_exitProfile(profiledBlock->exitProfile())
, m_callsiteBlockHead(callsiteBlockHead)
, m_returnValue(returnValueVR)
+ , m_lazyOperands(profiledBlock->lazyOperandValueProfiles())
, m_didReturn(false)
, m_didEarlyReturn(false)
, m_caller(byteCodeParser->m_inlineStackTop)
@@ -2495,7 +2531,7 @@ void ByteCodeParser::parseCodeBlock()
// The maximum bytecode offset to go into the current basicblock is either the next jump target, or the end of the instructions.
unsigned limit = jumpTargetIndex < codeBlock->numberOfJumpTargets() ? codeBlock->jumpTarget(jumpTargetIndex) : codeBlock->instructions().size();
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Parsing bytecode with limit %p bc#%u at inline depth %u.\n", m_inlineStackTop->executable(), limit, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+ dataLog("Parsing bytecode with limit %p bc#%u at inline depth %u.\n", m_inlineStackTop->executable(), limit, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
ASSERT(m_currentIndex < limit);
@@ -2517,13 +2553,13 @@ void ByteCodeParser::parseCodeBlock()
// Change its bytecode begin and continue.
m_currentBlock = m_graph.m_blocks.last().get();
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
+ dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
#endif
m_currentBlock->bytecodeBegin = m_currentIndex;
} else {
OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_graph.size(), m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+ dataLog("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
m_currentBlock = block.get();
ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
@@ -2571,17 +2607,19 @@ bool ByteCodeParser::parse()
linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
determineReachability();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Processing local variable phis.\n");
+ dataLog("Processing local variable phis.\n");
#endif
m_currentProfilingIndex = m_currentIndex;
processPhiStack<LocalPhiStack>();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Processing argument phis.\n");
+ dataLog("Processing argument phis.\n");
#endif
processPhiStack<ArgumentPhiStack>();
+ fixVariableAccessPredictions();
+
m_graph.m_preservedVars = m_preservedVars;
m_graph.m_localVars = m_numLocals;
m_graph.m_parameterSlots = m_parameterSlots;
@@ -2589,15 +2627,13 @@ bool ByteCodeParser::parse()
return true;
}
-bool parse(Graph& graph, JSGlobalData* globalData, CodeBlock* codeBlock)
+bool parse(Graph& graph)
{
#if DFG_DEBUG_LOCAL_DISBALE
UNUSED_PARAM(graph);
- UNUSED_PARAM(globalData);
- UNUSED_PARAM(codeBlock);
return false;
#else
- return ByteCodeParser(globalData, codeBlock, codeBlock->alternative(), graph).parse();
+ return ByteCodeParser(graph).parse();
#endif
}
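
With this final hunk the parser is constructed from the Graph alone, since the graph now carries the globalData, codeBlock, and profiledBlock that callers used to pass separately (see the constructor hunk at the top of the diff). The sketch below illustrates the resulting call shape with stand-in types; it mirrors the new signature rather than the tree's actual classes.

    #include <cstdio>

    // Stand-ins: the graph aggregates everything the parser used to receive piecemeal.
    struct GlobalData {};
    struct CodeBlock {};

    struct Graph {
        GlobalData globalData;
        CodeBlock* codeBlock = nullptr;
        CodeBlock* profiledBlock = nullptr;
    };

    class ByteCodeParser {
    public:
        explicit ByteCodeParser(Graph& graph)
            : m_globalData(&graph.globalData)
            , m_codeBlock(graph.codeBlock)
            , m_profiledBlock(graph.profiledBlock)
        {
        }
        bool parse() { return m_globalData && m_codeBlock && m_profiledBlock; }

    private:
        GlobalData* m_globalData;
        CodeBlock* m_codeBlock;
        CodeBlock* m_profiledBlock;
    };

    // Entry point mirroring the new signature: callers hand over just the graph.
    static bool parse(Graph& graph) { return ByteCodeParser(graph).parse(); }

    int main()
    {
        CodeBlock block;
        Graph graph;
        graph.codeBlock = &block;
        graph.profiledBlock = &block;
        std::printf("parse returned %d\n", parse(graph));
        return 0;
    }
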