diff options
author | Simon Hausmann <simon.hausmann@nokia.com> | 2012-03-12 14:11:15 +0100 |
---|---|---|
committer | Simon Hausmann <simon.hausmann@nokia.com> | 2012-03-12 14:11:15 +0100 |
commit | dd91e772430dc294e3bf478c119ef8d43c0a3358 (patch) | |
tree | 6f33ce4d5872a5691e0291eb45bf6ab373a5f567 /Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp | |
parent | ad0d549d4cc13433f77c1ac8f0ab379c83d93f28 (diff) | |
download | qtwebkit-dd91e772430dc294e3bf478c119ef8d43c0a3358.tar.gz |
Imported WebKit commit 3db4eb1820ac8fb03065d7ea73a4d9db1e8fea1a (http://svn.webkit.org/repository/webkit/trunk@110422)
This includes build fixes for the latest qtbase/qtdeclarative as well as the final QML2 API.
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp')
-rw-r--r-- | Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp | 230 |
1 file changed, 181 insertions(+), 49 deletions(-)
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp index 0e575db4e..3a3678d12 100644 --- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp +++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp @@ -66,6 +66,7 @@ public: , m_globalResolveNumber(0) , m_inlineStackTop(0) , m_haveBuiltOperandMaps(false) + , m_emptyJSValueIndex(UINT_MAX) { ASSERT(m_profiledBlock); @@ -185,16 +186,32 @@ private: // Two possibilities: either the block wants the local to be live // but has not loaded its value, or it has loaded its value, in // which case we're done. - Node& flushChild = m_graph[nodePtr->child1()]; + nodeIndex = nodePtr->child1().index(); + Node& flushChild = m_graph[nodeIndex]; if (flushChild.op == Phi) { VariableAccessData* variableAccessData = flushChild.variableAccessData(); - nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1().index())); + nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex)); m_currentBlock->variablesAtTail.local(operand) = nodeIndex; return nodeIndex; } nodePtr = &flushChild; } + + ASSERT(&m_graph[nodeIndex] == nodePtr); ASSERT(nodePtr->op != Flush); + + if (m_graph.localIsCaptured(operand)) { + // We wish to use the same variable access data as the previous access, + // but for all other purposes we want to issue a load since for all we + // know, at this stage of compilation, the local has been clobbered. + + // Make sure we link to the Phi node, not to the GetLocal. 
+ if (nodePtr->op == GetLocal) + nodeIndex = nodePtr->child1().index(); + + return injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex)); + } + if (nodePtr->op == GetLocal) return nodeIndex; ASSERT(nodePtr->op == SetLocal); @@ -218,7 +235,11 @@ private: } void setLocal(unsigned operand, NodeIndex value) { - m_currentBlock->variablesAtTail.local(operand) = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value); + VariableAccessData* variableAccessData = newVariableAccessData(operand); + NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value); + m_currentBlock->variablesAtTail.local(operand) = nodeIndex; + if (m_graph.localIsCaptured(operand)) + addToGraph(Flush, OpInfo(variableAccessData), nodeIndex); } // Used in implementing get/set, above, where the operand is an argument. @@ -226,7 +247,7 @@ private: { unsigned argument = operandToArgument(operand); ASSERT(argument < m_numArguments); - + NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument); if (nodeIndex != NoNode) { @@ -235,16 +256,18 @@ private: // Two possibilities: either the block wants the local to be live // but has not loaded its value, or it has loaded its value, in // which case we're done. 
- Node& flushChild = m_graph[nodePtr->child1()]; + nodeIndex = nodePtr->child1().index(); + Node& flushChild = m_graph[nodeIndex]; if (flushChild.op == Phi) { VariableAccessData* variableAccessData = flushChild.variableAccessData(); - nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1().index())); + nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex)); m_currentBlock->variablesAtTail.local(operand) = nodeIndex; return nodeIndex; } nodePtr = &flushChild; } + ASSERT(&m_graph[nodeIndex] == nodePtr); ASSERT(nodePtr->op != Flush); if (nodePtr->op == SetArgument) { @@ -256,6 +279,12 @@ private: return nodeIndex; } + if (m_graph.argumentIsCaptured(argument)) { + if (nodePtr->op == GetLocal) + nodeIndex = nodePtr->child1().index(); + return injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex)); + } + if (nodePtr->op == GetLocal) return nodeIndex; @@ -278,11 +307,15 @@ private: { unsigned argument = operandToArgument(operand); ASSERT(argument < m_numArguments); - - m_currentBlock->variablesAtTail.argument(argument) = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value); + + VariableAccessData* variableAccessData = newVariableAccessData(operand); + NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value); + m_currentBlock->variablesAtTail.argument(argument) = nodeIndex; + if (m_graph.argumentIsCaptured(argument)) + addToGraph(Flush, OpInfo(variableAccessData), nodeIndex); } - void flush(int operand) + void flushArgument(int operand) { // FIXME: This should check if the same operand had already been flushed to // some other local variable. @@ -308,7 +341,10 @@ private: nodeIndex = node.child1().index(); ASSERT(m_graph[nodeIndex].op != Flush); - + + // Emit a Flush regardless of whether we already flushed it. 
+ // This gives us guidance to see that the variable also needs to be flushed + // for arguments, even if it already had to be flushed for other reasons. addToGraph(Flush, OpInfo(node.variableAccessData()), nodeIndex); return; } @@ -533,8 +569,10 @@ private: { NodeIndex resultIndex = (NodeIndex)m_graph.size(); m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3)); + ASSERT(op != Phi); + m_currentBlock->append(resultIndex); - if (op & NodeMustGenerate) + if (defaultFlags(op) & NodeMustGenerate) m_graph.ref(resultIndex); return resultIndex; } @@ -542,8 +580,12 @@ private: { NodeIndex resultIndex = (NodeIndex)m_graph.size(); m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3)); + if (op == Phi) + m_currentBlock->phis.append(resultIndex); + else + m_currentBlock->append(resultIndex); - if (op & NodeMustGenerate) + if (defaultFlags(op) & NodeMustGenerate) m_graph.ref(resultIndex); return resultIndex; } @@ -551,8 +593,10 @@ private: { NodeIndex resultIndex = (NodeIndex)m_graph.size(); m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3)); + ASSERT(op != Phi); + m_currentBlock->append(resultIndex); - if (op & NodeMustGenerate) + if (defaultFlags(op) & NodeMustGenerate) m_graph.ref(resultIndex); return resultIndex; } @@ -561,13 +605,25 @@ private: { NodeIndex resultIndex = (NodeIndex)m_graph.size(); m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs)); + ASSERT(op != Phi); + m_currentBlock->append(resultIndex); m_numPassedVarArgs = 0; - if (op & NodeMustGenerate) + if (defaultFlags(op) & NodeMustGenerate) m_graph.ref(resultIndex); return resultIndex; } + + NodeIndex insertPhiNode(OpInfo info, BasicBlock* block) + { + NodeIndex resultIndex = (NodeIndex)m_graph.size(); + m_graph.append(Node(Phi, currentCodeOrigin(), info)); + block->phis.append(resultIndex); + + return resultIndex; + } + void 
addVarArgChild(NodeIndex child) { m_graph.m_varArgChildren.append(NodeUse(child)); @@ -643,13 +699,14 @@ private: return nodeIndex; #if DFG_ENABLE(DEBUG_VERBOSE) - dataLog("Making %s @%u safe at bc#%u because slow-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)); + dataLog("Making %s @%u safe at bc#%u because slow-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(static_cast<NodeType>(m_graph[nodeIndex].op)), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)); #endif switch (m_graph[nodeIndex].op) { case UInt32ToNumber: case ArithAdd: case ArithSub: + case ArithNegate: case ValueAdd: case ArithMod: // for ArithMode "MayOverflow" means we tried to divide by zero, or we saw double. 
m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow); @@ -695,7 +752,7 @@ private: return nodeIndex; #if DFG_ENABLE(DEBUG_VERBOSE) - dataLog("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)); + dataLog("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(static_cast<NodeType>(m_graph[nodeIndex].op)), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)); #endif // FIXME: It might be possible to make this more granular. The DFG certainly can @@ -912,6 +969,9 @@ private: IdentifierMap m_identifierMap; // Mapping between values and constant numbers. JSValueMap m_jsValueMap; + // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible + // work-around for the fact that JSValueMap can't handle "empty" values. + unsigned m_emptyJSValueIndex; // Cache of code blocks that we've generated bytecode for. ByteCodeCache<canInlineFunctionFor> m_codeBlockCache; @@ -1066,7 +1126,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c // FIXME: Don't flush constants! 
for (int i = 1; i < argumentCountIncludingThis; ++i) - flush(registerOffset + argumentToOperand(i)); + flushArgument(registerOffset + argumentToOperand(i)); int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize; @@ -1129,7 +1189,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c // the caller to continue in whatever basic block we're in right now. if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) { BasicBlock* lastBlock = m_graph.m_blocks.last().get(); - ASSERT(lastBlock->begin == lastBlock->end || !m_graph.last().isTerminal()); + ASSERT(lastBlock->isEmpty() || !m_graph.last().isTerminal()); // If we created new blocks then the last block needs linking, but in the // caller. It doesn't need to be linked to, but it needs outgoing links. @@ -1161,7 +1221,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c continue; BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get(); ASSERT(!block->isLinked); - Node& node = m_graph[block->end - 1]; + Node& node = m_graph[block->last()]; ASSERT(node.op == Jump); ASSERT(node.takenBlockIndex() == NoBlock); node.setTakenBlockIndex(m_graph.m_blocks.size()); @@ -1172,7 +1232,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c } // Need to create a new basic block for the continuation at the caller. 
- OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_graph.size(), m_numArguments, m_numLocals)); + OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_numArguments, m_numLocals)); #if DFG_ENABLE(DEBUG_VERBOSE) dataLog("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame)); #endif @@ -1207,7 +1267,7 @@ bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType o } if (argumentCountIncludingThis == 3) { // Math.min(x, y) - set(resultOperand, addToGraph(op, OpInfo(NodeUseBottom), get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2)))); + set(resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2)))); return true; } @@ -1235,7 +1295,7 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins if (!MacroAssembler::supportsFloatingPointAbs()) return false; - NodeIndex nodeIndex = addToGraph(ArithAbs, OpInfo(NodeUseBottom), get(registerOffset + argumentToOperand(1))); + NodeIndex nodeIndex = addToGraph(ArithAbs, get(registerOffset + argumentToOperand(1))); if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow); set(resultOperand, nodeIndex); @@ -1364,7 +1424,7 @@ bool ByteCodeParser::parseBlock(unsigned limit) // logic relies on every bytecode resulting in one or more nodes, which would // be true anyway except for op_loop_hint, which emits a Phantom to force this // to be true. 
- if (m_currentBlock->begin != m_graph.size()) + if (!m_currentBlock->isEmpty()) addToGraph(Jump, OpInfo(m_currentIndex)); else { #if DFG_ENABLE(DEBUG_VERBOSE) @@ -1502,11 +1562,11 @@ bool ByteCodeParser::parseBlock(unsigned limit) if (valueOfInt32Constant(op2) & 0x1f) result = addToGraph(BitURShift, op1, op2); else - result = makeSafe(addToGraph(UInt32ToNumber, OpInfo(NodeUseBottom), op1)); + result = makeSafe(addToGraph(UInt32ToNumber, op1)); } else { // Cannot optimize at this stage; shift & potentially rebox as a double. result = addToGraph(BitURShift, op1, op2); - result = makeSafe(addToGraph(UInt32ToNumber, OpInfo(NodeUseBottom), result)); + result = makeSafe(addToGraph(UInt32ToNumber, result)); } set(currentInstruction[1].u.operand, result); NEXT_OPCODE(op_urshift); @@ -1517,7 +1577,7 @@ bool ByteCodeParser::parseBlock(unsigned limit) case op_pre_inc: { unsigned srcDst = currentInstruction[1].u.operand; NodeIndex op = get(srcDst); - set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one()))); + set(srcDst, makeSafe(addToGraph(ArithAdd, op, one()))); NEXT_OPCODE(op_pre_inc); } @@ -1527,14 +1587,14 @@ bool ByteCodeParser::parseBlock(unsigned limit) ASSERT(result != srcDst); // Required for assumptions we make during OSR. 
NodeIndex op = get(srcDst); set(result, op); - set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one()))); + set(srcDst, makeSafe(addToGraph(ArithAdd, op, one()))); NEXT_OPCODE(op_post_inc); } case op_pre_dec: { unsigned srcDst = currentInstruction[1].u.operand; NodeIndex op = get(srcDst); - set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one()))); + set(srcDst, makeSafe(addToGraph(ArithSub, op, one()))); NEXT_OPCODE(op_pre_dec); } @@ -1543,7 +1603,7 @@ bool ByteCodeParser::parseBlock(unsigned limit) unsigned srcDst = currentInstruction[2].u.operand; NodeIndex op = get(srcDst); set(result, op); - set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one()))); + set(srcDst, makeSafe(addToGraph(ArithSub, op, one()))); NEXT_OPCODE(op_post_dec); } @@ -1553,38 +1613,44 @@ bool ByteCodeParser::parseBlock(unsigned limit) NodeIndex op1 = get(currentInstruction[2].u.operand); NodeIndex op2 = get(currentInstruction[3].u.operand); if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult()) - set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op1, op2))); + set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, op1, op2))); else - set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, OpInfo(NodeUseBottom), op1, op2))); + set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, op1, op2))); NEXT_OPCODE(op_add); } case op_sub: { NodeIndex op1 = get(currentInstruction[2].u.operand); NodeIndex op2 = get(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op1, op2))); + set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, op1, op2))); NEXT_OPCODE(op_sub); } + case op_negate: { + NodeIndex op1 = get(currentInstruction[2].u.operand); + set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithNegate, op1))); + NEXT_OPCODE(op_negate); + } + case op_mul: { // 
Multiply requires that the inputs are not truncated, unfortunately. NodeIndex op1 = get(currentInstruction[2].u.operand); NodeIndex op2 = get(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, OpInfo(NodeUseBottom), op1, op2))); + set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, op1, op2))); NEXT_OPCODE(op_mul); } case op_mod: { NodeIndex op1 = get(currentInstruction[2].u.operand); NodeIndex op2 = get(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, OpInfo(NodeUseBottom), op1, op2))); + set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, op1, op2))); NEXT_OPCODE(op_mod); } case op_div: { NodeIndex op1 = get(currentInstruction[2].u.operand); NodeIndex op2 = get(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, OpInfo(NodeUseBottom), op1, op2))); + set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, op1, op2))); NEXT_OPCODE(op_div); } @@ -2187,6 +2253,42 @@ bool ByteCodeParser::parseBlock(unsigned limit) NEXT_OPCODE(op_loop_hint); } + + case op_init_lazy_reg: { + set(currentInstruction[1].u.operand, getJSConstantForValue(JSValue())); + NEXT_OPCODE(op_init_lazy_reg); + } + + case op_create_activation: { + set(currentInstruction[1].u.operand, addToGraph(CreateActivation, get(currentInstruction[1].u.operand))); + NEXT_OPCODE(op_create_activation); + } + + case op_tear_off_activation: { + // This currently ignores arguments because we don't support them yet. 
+ addToGraph(TearOffActivation, get(currentInstruction[1].u.operand)); + NEXT_OPCODE(op_tear_off_activation); + } + + case op_new_func: { + if (!currentInstruction[3].u.operand) { + set(currentInstruction[1].u.operand, + addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand))); + } else { + set(currentInstruction[1].u.operand, + addToGraph( + NewFunction, + OpInfo(currentInstruction[2].u.operand), + get(currentInstruction[1].u.operand))); + } + NEXT_OPCODE(op_new_func); + } + + case op_new_func_exp: { + set(currentInstruction[1].u.operand, + addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand))); + NEXT_OPCODE(op_new_func_exp); + } default: // Parse failed! This should not happen because the capabilities checker @@ -2231,7 +2333,7 @@ void ByteCodeParser::processPhiStack() dataLog(" Did not find node, adding phi.\n"); #endif - valueInPredecessor = addToGraph(Phi, OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo)))); + valueInPredecessor = insertPhiNode(OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? 
argumentToOperand(varNo) : static_cast<int>(varNo))), predecessorBlock); var = valueInPredecessor; if (stackType == ArgumentPhiStack) predecessorBlock->variablesAtHead.setArgumentFirstTime(varNo, valueInPredecessor); @@ -2255,7 +2357,11 @@ void ByteCodeParser::processPhiStack() dataLog(" Found @%u.\n", valueInPredecessor); #endif } - ASSERT(m_graph[valueInPredecessor].op == SetLocal || m_graph[valueInPredecessor].op == Phi || m_graph[valueInPredecessor].op == Flush || (m_graph[valueInPredecessor].op == SetArgument && stackType == ArgumentPhiStack)); + ASSERT(m_graph[valueInPredecessor].op == SetLocal + || m_graph[valueInPredecessor].op == Phi + || m_graph[valueInPredecessor].op == Flush + || (m_graph[valueInPredecessor].op == SetArgument + && stackType == ArgumentPhiStack)); VariableAccessData* dataForPredecessor = m_graph[valueInPredecessor].variableAccessData(); @@ -2309,7 +2415,7 @@ void ByteCodeParser::processPhiStack() continue; } - NodeIndex newPhi = addToGraph(Phi, OpInfo(dataForPhi)); + NodeIndex newPhi = insertPhiNode(OpInfo(dataForPhi), entry.m_block); #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) dataLog(" Splitting @%u, created @%u.\n", entry.m_phi, newPhi); @@ -2349,10 +2455,9 @@ void ByteCodeParser::fixVariableAccessPredictions() void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets) { - ASSERT(block->end != NoNode); ASSERT(!block->isLinked); - ASSERT(block->end > block->begin); - Node& node = m_graph[block->end - 1]; + ASSERT(!block->isEmpty()); + Node& node = m_graph[block->last()]; ASSERT(node.isTerminal()); switch (node.op) { @@ -2416,7 +2521,7 @@ void ByteCodeParser::determineReachability() BasicBlock* block = m_graph.m_blocks[index].get(); ASSERT(block->isLinked); - Node& node = m_graph[block->end - 1]; + Node& node = m_graph[block->last()]; ASSERT(node.isTerminal()); if (node.isJump()) @@ -2435,8 +2540,13 @@ void ByteCodeParser::buildOperandMapsIfNecessary() for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); 
++i) m_identifierMap.add(m_codeBlock->identifier(i).impl(), i); - for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) - m_jsValueMap.add(JSValue::encode(m_codeBlock->getConstant(i + FirstConstantRegisterIndex)), i + FirstConstantRegisterIndex); + for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) { + JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex); + if (!value) + m_emptyJSValueIndex = i + FirstConstantRegisterIndex; + else + m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex); + } m_haveBuiltOperandMaps = true; } @@ -2486,6 +2596,15 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParse } for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) { JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex); + if (!value) { + if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) { + byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex; + byteCodeParser->m_codeBlock->addConstant(JSValue()); + byteCodeParser->m_constants.append(ConstantRecord()); + } + m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex; + continue; + } pair<JSValueMap::iterator, bool> result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex); if (result.second) { byteCodeParser->m_codeBlock->addConstant(value); @@ -2527,6 +2646,16 @@ void ByteCodeParser::parseCodeBlock() { CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock; +#if DFG_ENABLE(DEBUG_VERBOSE) + dataLog("Parsing code block %p. 
codeType = %s, numCapturedVars = %u, needsFullScopeChain = %s, needsActivation = %s, isStrictMode = %s\n", + codeBlock, + codeTypeToString(codeBlock->codeType()), + codeBlock->m_numCapturedVars, + codeBlock->needsFullScopeChain()?"true":"false", + codeBlock->ownerExecutable()->needsActivation()?"true":"false", + codeBlock->ownerExecutable()->isStrictMode()?"true":"false"); +#endif + for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) { // The maximum bytecode offset to go into the current basicblock is either the next jump target, or the end of the instructions. unsigned limit = jumpTargetIndex < codeBlock->numberOfJumpTargets() ? codeBlock->jumpTarget(jumpTargetIndex) : codeBlock->instructions().size(); @@ -2539,7 +2668,7 @@ void ByteCodeParser::parseCodeBlock() do { if (!m_currentBlock) { // Check if we can use the last block. - if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->begin == m_graph.m_blocks.last()->end) { + if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->isEmpty()) { // This must be a block belonging to us. ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1); // Either the block is linkable or it isn't. 
If it's linkable then it's the last @@ -2557,7 +2686,7 @@ void ByteCodeParser::parseCodeBlock() #endif m_currentBlock->bytecodeBegin = m_currentIndex; } else { - OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_graph.size(), m_numArguments, m_numLocals)); + OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals)); #if DFG_ENABLE(DEBUG_VERBOSE) dataLog("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame)); #endif @@ -2580,10 +2709,8 @@ void ByteCodeParser::parseCodeBlock() // are at the end of an inline function, or we realized that we // should stop parsing because there was a return in the first // basic block. - ASSERT(m_currentBlock->begin == m_graph.size() || m_graph.last().isTerminal() || (m_currentIndex == codeBlock->instructions().size() && m_inlineStackTop->m_inlineCallFrame) || !shouldContinueParsing); + ASSERT(m_currentBlock->isEmpty() || m_graph.last().isTerminal() || (m_currentIndex == codeBlock->instructions().size() && m_inlineStackTop->m_inlineCallFrame) || !shouldContinueParsing); - m_currentBlock->end = m_graph.size(); - if (!shouldContinueParsing) return; @@ -2600,6 +2727,11 @@ bool ByteCodeParser::parse() // Set during construction. ASSERT(!m_currentIndex); +#if DFG_ENABLE(ALL_VARIABLES_CAPTURED) + // We should be pretending that the code has an activation. + ASSERT(m_graph.needsActivation()); +#endif + InlineStackEntry inlineStackEntry(this, m_codeBlock, m_profiledBlock, NoBlock, InvalidVirtualRegister, 0, InvalidVirtualRegister, InvalidVirtualRegister, CodeForCall); parseCodeBlock(); |