Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGGraph.h')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGraph.h | 1014
1 file changed, 558 insertions, 456 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
index 3e4e4b5bc..1a4e2f7dd 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,21 +26,24 @@
#ifndef DFGGraph_h
#define DFGGraph_h
-#include <wtf/Platform.h>
-
#if ENABLE(DFG_JIT)
+#include "AssemblyHelpers.h"
+#include "BytecodeLivenessAnalysisInlines.h"
#include "CodeBlock.h"
#include "DFGArgumentPosition.h"
-#include "DFGAssemblyHelpers.h"
#include "DFGBasicBlock.h"
-#include "DFGDominators.h"
+#include "DFGFrozenValue.h"
#include "DFGLongLivedState.h"
#include "DFGNode.h"
#include "DFGNodeAllocator.h"
-#include "DFGVariadicFunction.h"
+#include "DFGPlan.h"
+#include "DFGPropertyTypeKey.h"
+#include "DFGScannable.h"
+#include "FullBytecodeLiveness.h"
#include "JSStack.h"
#include "MethodOfGettingAValueProfile.h"
+#include <unordered_map>
#include <wtf/BitVector.h>
#include <wtf/HashMap.h>
#include <wtf/Vector.h>
@@ -53,43 +56,73 @@ class ExecState;
namespace DFG {
-struct StorageAccessData {
- size_t offset;
- unsigned identifierNumber;
-};
+class CFG;
+class Dominators;
+class NaturalLoops;
+class PrePostNumbering;
-struct ResolveGlobalData {
- unsigned identifierNumber;
- ResolveOperations* resolveOperations;
- PutToBaseOperation* putToBaseOperation;
- unsigned resolvePropertyIndex;
-};
+#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do { \
+ Node* _node = (node); \
+ if (_node->flags() & NodeHasVarArgs) { \
+ for (unsigned _childIdx = _node->firstChild(); \
+ _childIdx < _node->firstChild() + _node->numChildren(); \
+ _childIdx++) { \
+ if (!!(graph).m_varArgChildren[_childIdx]) \
+ thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
+ } \
+ } else { \
+ if (!_node->child1()) { \
+ ASSERT( \
+ !_node->child2() \
+ && !_node->child3()); \
+ break; \
+ } \
+ thingToDo(_node, _node->child1()); \
+ \
+ if (!_node->child2()) { \
+ ASSERT(!_node->child3()); \
+ break; \
+ } \
+ thingToDo(_node, _node->child2()); \
+ \
+ if (!_node->child3()) \
+ break; \
+ thingToDo(_node, _node->child3()); \
+ } \
+ } while (false)
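+
+// Illustrative usage (a hypothetical sketch, not part of this patch): the
+// macro invokes thingToDo(node, edge) once per live child, whether the node
+// stores its children inline (child1..child3) or out-of-line in
+// m_varArgChildren. Graph::doToChildren() below wraps it for lambdas, e.g.:
+//
+//     unsigned childCount = 0;
+//     graph.doToChildren(node, [&] (Edge) { childCount++; });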
-struct ResolveOperationData {
- unsigned identifierNumber;
- ResolveOperations* resolveOperations;
- PutToBaseOperation* putToBaseOperation;
-};
+#define DFG_ASSERT(graph, node, assertion) do { \
+ if (!!(assertion)) \
+ break; \
+ (graph).handleAssertionFailure( \
+ (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
+ } while (false)
+
+#define DFG_CRASH(graph, node, reason) do { \
+ (graph).handleAssertionFailure( \
+ (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, (reason)); \
+ } while (false)
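+
+// Illustrative usage (a hypothetical sketch, not part of this patch): unlike
+// a bare ASSERT(), these macros funnel into Graph::handleAssertionFailure(),
+// which can dump the offending node and graph state before crashing:
+//
+//     DFG_ASSERT(m_graph, node, node->child1());
+//     DFG_CRASH(m_graph, node, "Unexpected array mode");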
-struct PutToBaseOperationData {
- PutToBaseOperation* putToBaseOperation;
+struct InlineVariableData {
+ InlineCallFrame* inlineCallFrame;
+ unsigned argumentPositionStart;
+ VariableAccessData* calleeVariable;
};
enum AddSpeculationMode {
- DontSpeculateInteger,
- SpeculateIntegerAndTruncateConstants,
- SpeculateInteger
+ DontSpeculateInt32,
+ SpeculateInt32AndTruncateConstants,
+ SpeculateInt32
};
-
//
// === Graph ===
//
// The order may be significant for nodes with side-effects (property accesses, value conversions).
// Nodes that are 'dead' remain in the vector with refCount 0.
-class Graph {
+class Graph : public virtual Scannable {
public:
- Graph(VM&, CodeBlock*, unsigned osrEntryBytecodeIndex, const Operands<JSValue>& mustHandleValues);
+ Graph(VM&, Plan&, LongLivedState&);
~Graph();
void changeChild(Edge& edge, Node* newNode)
@@ -116,16 +149,6 @@ public:
changeEdge(edge, newEdge);
}
- void clearAndDerefChild(Node* node, unsigned index)
- {
- if (!node->children.child(index))
- return;
- node->children.setChild(index, Edge());
- }
- void clearAndDerefChild1(Node* node) { clearAndDerefChild(node, 0); }
- void clearAndDerefChild2(Node* node) { clearAndDerefChild(node, 1); }
- void clearAndDerefChild3(Node* node) { clearAndDerefChild(node, 2); }
-
void performSubstitution(Node* node)
{
if (node->flags() & NodeHasVarArgs) {
@@ -145,7 +168,7 @@ public:
return;
// Check if there is any replacement.
- Node* replacement = child->replacement;
+ Node* replacement = child->replacement();
if (!replacement)
return;
@@ -153,200 +176,160 @@ public:
// There is definitely a replacement. Assert that the replacement does not
// have a replacement.
- ASSERT(!child->replacement);
+ ASSERT(!child->replacement());
}
-#define DFG_DEFINE_ADD_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \
- templatePre typeParams templatePost Node* addNode(SpeculatedType type valueParamsComma valueParams) \
- { \
- Node* node = new (m_allocator) Node(valueArgs); \
- node->predict(type); \
- return node; \
+ template<typename... Params>
+ Node* addNode(SpeculatedType type, Params... params)
+ {
+ Node* node = new (m_allocator) Node(params...);
+ node->predict(type);
+ return node;
}
- DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_ADD_NODE)
-#undef DFG_DEFINE_ADD_NODE
void dethread();
- void convertToConstant(Node* node, unsigned constantNumber)
- {
- if (node->op() == GetLocal)
- dethread();
- else
- ASSERT(!node->hasVariableAccessData());
- node->convertToConstant(constantNumber);
- }
+ FrozenValue* freeze(JSValue); // We use weak freezing by default.
+ FrozenValue* freezeStrong(JSValue); // Shorthand for freeze(value)->strengthenTo(StrongValue).
+
+ void convertToConstant(Node* node, FrozenValue* value);
+ void convertToConstant(Node* node, JSValue value);
+ void convertToStrongConstant(Node* node, JSValue value);
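+
+    // Illustrative usage (a hypothetical sketch, not part of this patch): a
+    // pass that has proven a node's value can freeze that value and fold the
+    // node to a constant in one step:
+    //
+    //     m_graph.convertToConstant(node, m_graph.freeze(jsNumber(42)));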
+
+ StructureRegistrationResult registerStructure(Structure* structure);
+ void assertIsRegistered(Structure* structure);
- void convertToConstant(Node* node, JSValue value)
- {
- convertToConstant(node, m_codeBlock->addOrFindConstant(value));
- }
-
// CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
- void dump(PrintStream& = WTF::dataFile());
+ void dump(PrintStream& = WTF::dataFile(), DumpContext* = 0);
+
+ bool terminalsAreValid();
+
enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis };
- void dumpBlockHeader(PrintStream&, const char* prefix, BlockIndex, PhiNodeDumpMode);
+ void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*);
void dump(PrintStream&, Edge);
- void dump(PrintStream&, const char* prefix, Node*);
+ void dump(PrintStream&, const char* prefix, Node*, DumpContext* = 0);
static int amountOfNodeWhiteSpace(Node*);
static void printNodeWhiteSpace(PrintStream&, Node*);
// Dump the code origin of the given node as a diff from the code origin of the
// preceding node. Returns true if anything was printed.
- bool dumpCodeOrigin(PrintStream&, const char* prefix, Node* previousNode, Node* currentNode);
+ bool dumpCodeOrigin(PrintStream&, const char* prefix, Node*& previousNode, Node* currentNode, DumpContext*);
- BlockIndex blockIndexForBytecodeOffset(Vector<BlockIndex>& blocks, unsigned bytecodeBegin);
-
- SpeculatedType getJSConstantSpeculation(Node* node)
- {
- return speculationFromValue(node->valueOfJSConstant(m_codeBlock));
- }
-
- AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInteger, bool rightShouldSpeculateInteger)
+ AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass)
{
ASSERT(add->op() == ValueAdd || add->op() == ArithAdd || add->op() == ArithSub);
+ RareCaseProfilingSource source = add->sourceFor(pass);
+
Node* left = add->child1().node();
Node* right = add->child2().node();
if (left->hasConstant())
- return addImmediateShouldSpeculateInteger(add, rightShouldSpeculateInteger, left);
+ return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, right, left, source);
if (right->hasConstant())
- return addImmediateShouldSpeculateInteger(add, leftShouldSpeculateInteger, right);
+ return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, left, right, source);
- return (leftShouldSpeculateInteger && rightShouldSpeculateInteger && add->canSpeculateInteger()) ? SpeculateInteger : DontSpeculateInteger;
+ return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32;
}
- AddSpeculationMode valueAddSpeculationMode(Node* add)
+ AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass)
{
- return addSpeculationMode(add, add->child1()->shouldSpeculateIntegerExpectingDefined(), add->child2()->shouldSpeculateIntegerExpectingDefined());
+ return addSpeculationMode(
+ add,
+ add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
+ add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
+ pass);
}
- AddSpeculationMode arithAddSpeculationMode(Node* add)
+ AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass)
{
- return addSpeculationMode(add, add->child1()->shouldSpeculateIntegerForArithmetic(), add->child2()->shouldSpeculateIntegerForArithmetic());
+ return addSpeculationMode(
+ add,
+ add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
+ add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
+ pass);
}
- AddSpeculationMode addSpeculationMode(Node* add)
+ AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass)
{
if (add->op() == ValueAdd)
- return valueAddSpeculationMode(add);
+ return valueAddSpeculationMode(add, pass);
- return arithAddSpeculationMode(add);
+ return arithAddSpeculationMode(add, pass);
}
- bool addShouldSpeculateInteger(Node* add)
+ bool addShouldSpeculateInt32(Node* add, PredictionPass pass)
{
- return addSpeculationMode(add) != DontSpeculateInteger;
+ return addSpeculationMode(add, pass) != DontSpeculateInt32;
}
- bool mulShouldSpeculateInteger(Node* mul)
+ bool addShouldSpeculateMachineInt(Node* add)
{
- ASSERT(mul->op() == ArithMul);
-
- Node* left = mul->child1().node();
- Node* right = mul->child2().node();
+ if (!enableInt52())
+ return false;
- return Node::shouldSpeculateIntegerForArithmetic(left, right) && mul->canSpeculateInteger();
+ Node* left = add->child1().node();
+ Node* right = add->child2().node();
+
+ bool speculation = Node::shouldSpeculateMachineInt(left, right);
+ return speculation && !hasExitSite(add, Int52Overflow);
}
- bool negateShouldSpeculateInteger(Node* negate)
+ bool binaryArithShouldSpeculateInt32(Node* node, PredictionPass pass)
{
- ASSERT(negate->op() == ArithNegate);
- return negate->child1()->shouldSpeculateIntegerForArithmetic() && negate->canSpeculateInteger();
+ Node* left = node->child1().node();
+ Node* right = node->child2().node();
+
+ return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
+ && node->canSpeculateInt32(node->sourceFor(pass));
}
- // Helper methods to check nodes for constants.
- bool isConstant(Node* node)
- {
- return node->hasConstant();
- }
- bool isJSConstant(Node* node)
- {
- return node->hasConstant();
- }
- bool isInt32Constant(Node* node)
- {
- return node->isInt32Constant(m_codeBlock);
- }
- bool isDoubleConstant(Node* node)
- {
- return node->isDoubleConstant(m_codeBlock);
- }
- bool isNumberConstant(Node* node)
+ bool binaryArithShouldSpeculateMachineInt(Node* node, PredictionPass pass)
{
- return node->isNumberConstant(m_codeBlock);
- }
- bool isBooleanConstant(Node* node)
- {
- return node->isBooleanConstant(m_codeBlock);
- }
- bool isCellConstant(Node* node)
- {
- if (!isJSConstant(node))
+ if (!enableInt52())
return false;
- JSValue value = valueOfJSConstant(node);
- return value.isCell() && !!value;
+
+ Node* left = node->child1().node();
+ Node* right = node->child2().node();
+
+ return Node::shouldSpeculateMachineInt(left, right)
+ && node->canSpeculateInt52(pass)
+ && !hasExitSite(node, Int52Overflow);
}
- bool isFunctionConstant(Node* node)
+
+ bool unaryArithShouldSpeculateInt32(Node* node, PredictionPass pass)
{
- if (!isJSConstant(node))
- return false;
- if (!getJSFunction(valueOfJSConstant(node)))
- return false;
- return true;
+ return node->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
+ && node->canSpeculateInt32(pass);
}
- bool isInternalFunctionConstant(Node* node)
+
+ bool unaryArithShouldSpeculateMachineInt(Node* node, PredictionPass pass)
{
- if (!isJSConstant(node))
- return false;
- JSValue value = valueOfJSConstant(node);
- if (!value.isCell() || !value)
- return false;
- JSCell* cell = value.asCell();
- if (!cell->inherits(&InternalFunction::s_info))
+ if (!enableInt52())
return false;
- return true;
- }
- // Helper methods get constant values from nodes.
- JSValue valueOfJSConstant(Node* node)
- {
- return node->valueOfJSConstant(m_codeBlock);
- }
- int32_t valueOfInt32Constant(Node* node)
- {
- return valueOfJSConstant(node).asInt32();
- }
- double valueOfNumberConstant(Node* node)
- {
- return valueOfJSConstant(node).asNumber();
+ return node->child1()->shouldSpeculateMachineInt()
+ && node->canSpeculateInt52(pass)
+ && !hasExitSite(node, Int52Overflow);
}
- bool valueOfBooleanConstant(Node* node)
- {
- return valueOfJSConstant(node).asBoolean();
- }
- JSFunction* valueOfFunctionConstant(Node* node)
+
+ bool canOptimizeStringObjectAccess(const CodeOrigin&);
+
+ bool roundShouldSpeculateInt32(Node* arithRound, PredictionPass pass)
{
- JSCell* function = getJSFunction(valueOfJSConstant(node));
- ASSERT(function);
- return jsCast<JSFunction*>(function);
+ ASSERT(arithRound->op() == ArithRound || arithRound->op() == ArithFloor || arithRound->op() == ArithCeil);
+ return arithRound->canSpeculateInt32(pass) && !hasExitSite(arithRound->origin.semantic, Overflow) && !hasExitSite(arithRound->origin.semantic, NegativeZero);
}
-
+
static const char *opName(NodeType);
StructureSet* addStructureSet(const StructureSet& structureSet)
{
- ASSERT(structureSet.size());
+ for (Structure* structure : structureSet)
+ registerStructure(structure);
m_structureSet.append(structureSet);
return &m_structureSet.last();
}
- StructureTransitionData* addStructureTransitionData(const StructureTransitionData& structureTransitionData)
- {
- m_structureTransitionData.append(structureTransitionData);
- return &m_structureTransitionData.last();
- }
-
JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
{
return m_codeBlock->globalObjectFor(codeOrigin);
@@ -355,210 +338,97 @@ public:
JSObject* globalThisObjectFor(CodeOrigin codeOrigin)
{
JSGlobalObject* object = globalObjectFor(codeOrigin);
- return object->methodTable()->toThisObject(object, 0);
+ return jsCast<JSObject*>(object->methodTable()->toThis(object, object->globalExec(), NotStrictMode));
}
- ExecutableBase* executableFor(InlineCallFrame* inlineCallFrame)
+ ScriptExecutable* executableFor(InlineCallFrame* inlineCallFrame)
{
if (!inlineCallFrame)
- return m_codeBlock->ownerExecutable();
+ return m_codeBlock->ownerScriptExecutable();
- return inlineCallFrame->executable.get();
+ return inlineCallFrame->baselineCodeBlock->ownerScriptExecutable();
}
- ExecutableBase* executableFor(const CodeOrigin& codeOrigin)
+ ScriptExecutable* executableFor(const CodeOrigin& codeOrigin)
{
return executableFor(codeOrigin.inlineCallFrame);
}
- CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
- {
- return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
- }
-
- bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
+ CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
{
- return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(exitKind));
+ if (!inlineCallFrame)
+ return m_profiledBlock;
+ return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
}
- bool hasExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
+ CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
{
- return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(codeOrigin.bytecodeIndex, exitKind));
+ return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
}
- int argumentsRegisterFor(const CodeOrigin& codeOrigin)
+ bool isStrictModeFor(CodeOrigin codeOrigin)
{
if (!codeOrigin.inlineCallFrame)
- return m_codeBlock->argumentsRegister();
-
- return baselineCodeBlockForInlineCallFrame(
- codeOrigin.inlineCallFrame)->argumentsRegister() +
- codeOrigin.inlineCallFrame->stackOffset;
+ return m_codeBlock->isStrictMode();
+ return codeOrigin.inlineCallFrame->isStrictMode();
}
- int uncheckedArgumentsRegisterFor(const CodeOrigin& codeOrigin)
+ ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
{
- if (!codeOrigin.inlineCallFrame)
- return m_codeBlock->uncheckedArgumentsRegister();
-
- CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(
- codeOrigin.inlineCallFrame);
- if (!codeBlock->usesArguments())
- return InvalidVirtualRegister;
-
- return codeBlock->argumentsRegister() +
- codeOrigin.inlineCallFrame->stackOffset;
+ return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
}
- int uncheckedActivationRegisterFor(const CodeOrigin&)
+ bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
{
- // This will ignore CodeOrigin because we don't inline code that uses activations.
- // Hence for inlined call frames it will return the outermost code block's
- // activation register. This method is only used to compare the result to a local
- // to see if we're mucking with the activation register. Hence if we return the
- // "wrong" activation register for the frame then it will compare false, which is
- // what we wanted.
- return m_codeBlock->uncheckedActivationRegister();
+ return globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid();
}
- ValueProfile* valueProfileFor(Node* node)
+ bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
{
- if (!node)
- return 0;
-
- CodeBlock* profiledBlock = baselineCodeBlockFor(node->codeOrigin);
-
- if (node->hasLocal()) {
- if (!operandIsArgument(node->local()))
- return 0;
- int argument = operandToArgument(node->local());
- if (node->variableAccessData() != m_arguments[argument]->variableAccessData())
- return 0;
- return profiledBlock->valueProfileForArgument(argument);
- }
-
- if (node->hasHeapPrediction())
- return profiledBlock->valueProfileForBytecodeOffset(node->codeOrigin.bytecodeIndexForValueProfile());
-
- return 0;
+ return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(exitKind));
}
- MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node* node)
+ bool hasExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
{
- if (!node)
- return MethodOfGettingAValueProfile();
-
- CodeBlock* profiledBlock = baselineCodeBlockFor(node->codeOrigin);
-
- if (node->op() == GetLocal) {
- return MethodOfGettingAValueProfile::fromLazyOperand(
- profiledBlock,
- LazyOperandValueProfileKey(
- node->codeOrigin.bytecodeIndex, node->local()));
- }
-
- return MethodOfGettingAValueProfile(valueProfileFor(node));
+ return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(codeOrigin.bytecodeIndex, exitKind));
}
- bool needsActivation() const
+ bool hasExitSite(Node* node, ExitKind exitKind)
{
- return m_codeBlock->needsFullScopeChain() && m_codeBlock->codeType() != GlobalCode;
+ return hasExitSite(node->origin.semantic, exitKind);
}
- bool usesArguments() const
- {
- return m_codeBlock->usesArguments();
- }
+ ValueProfile* valueProfileFor(Node*);
+ MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node*);
- unsigned numSuccessors(BasicBlock* block)
- {
- return block->last()->numSuccessors();
- }
- BlockIndex successor(BasicBlock* block, unsigned index)
- {
- return block->last()->successor(index);
- }
- BlockIndex successorForCondition(BasicBlock* block, bool condition)
+ BlockIndex numBlocks() const { return m_blocks.size(); }
+ BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); }
+ BasicBlock* lastBlock() const { return block(numBlocks() - 1); }
+
+ void appendBlock(PassRefPtr<BasicBlock> basicBlock)
{
- return block->last()->successorForCondition(condition);
+ basicBlock->index = m_blocks.size();
+ m_blocks.append(basicBlock);
}
- bool isPredictedNumerical(Node* node)
+ void killBlock(BlockIndex blockIndex)
{
- return isNumerical(node->child1().useKind()) && isNumerical(node->child2().useKind());
+ m_blocks[blockIndex] = nullptr;
}
- // Note that a 'true' return does not actually mean that the ByVal access clobbers nothing.
- // It really means that it will not clobber the entire world. It's still up to you to
- // carefully consider things like:
- // - PutByVal definitely changes the array it stores to, and may even change its length.
- // - PutByOffset definitely changes the object it stores to.
- // - and so on.
- bool byValIsPure(Node* node)
+ void killBlock(BasicBlock* basicBlock)
{
- switch (node->arrayMode().type()) {
- case Array::Generic:
- return false;
- case Array::Int32:
- case Array::Double:
- case Array::Contiguous:
- case Array::ArrayStorage:
- return !node->arrayMode().isOutOfBounds();
- case Array::SlowPutArrayStorage:
- return !node->arrayMode().mayStoreToHole();
- case Array::String:
- return node->op() == GetByVal;
-#if USE(JSVALUE32_64)
- case Array::Arguments:
- if (node->op() == GetByVal)
- return true;
- return false;
-#endif // USE(JSVALUE32_64)
- default:
- return true;
- }
+ killBlock(basicBlock->index);
}
- bool clobbersWorld(Node* node)
- {
- if (node->flags() & NodeClobbersWorld)
- return true;
- if (!(node->flags() & NodeMightClobber))
- return false;
- switch (node->op()) {
- case ValueAdd:
- case CompareLess:
- case CompareLessEq:
- case CompareGreater:
- case CompareGreaterEq:
- case CompareEq:
- return !isPredictedNumerical(node);
- case GetByVal:
- case PutByVal:
- case PutByValAlias:
- return !byValIsPure(node);
- case ToString:
- switch (node->child1().useKind()) {
- case StringObjectUse:
- case StringOrStringObjectUse:
- return false;
- case CellUse:
- case UntypedUse:
- return true;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return true;
- }
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return true; // If by some oddity we hit this case in release build it's safer to have CSE assume the worst.
- }
- }
+ void killBlockAndItsContents(BasicBlock*);
+
+ void killUnreachableBlocks();
void determineReachability();
void resetReachability();
- void resetExitStates();
+ void computeRefCounts();
unsigned varArgNumChildren(Node* node)
{
@@ -586,7 +456,7 @@ public:
return node->children.child(index);
}
- void voteNode(Node* node, unsigned ballot)
+ void voteNode(Node* node, unsigned ballot, float weight = 1)
{
switch (node->op()) {
case ValueToInt32:
@@ -598,35 +468,35 @@ public:
}
if (node->op() == GetLocal)
- node->variableAccessData()->vote(ballot);
+ node->variableAccessData()->vote(ballot, weight);
}
- void voteNode(Edge edge, unsigned ballot)
+ void voteNode(Edge edge, unsigned ballot, float weight = 1)
{
- voteNode(edge.node(), ballot);
+ voteNode(edge.node(), ballot, weight);
}
- void voteChildren(Node* node, unsigned ballot)
+ void voteChildren(Node* node, unsigned ballot, float weight = 1)
{
if (node->flags() & NodeHasVarArgs) {
for (unsigned childIdx = node->firstChild();
childIdx < node->firstChild() + node->numChildren();
childIdx++) {
if (!!m_varArgChildren[childIdx])
- voteNode(m_varArgChildren[childIdx], ballot);
+ voteNode(m_varArgChildren[childIdx], ballot, weight);
}
return;
}
if (!node->child1())
return;
- voteNode(node->child1(), ballot);
+ voteNode(node->child1(), ballot, weight);
if (!node->child2())
return;
- voteNode(node->child2(), ballot);
+ voteNode(node->child2(), ballot, weight);
if (!node->child3())
return;
- voteNode(node->child3(), ballot);
+ voteNode(node->child3(), ballot, weight);
}
template<typename T> // T = Node* or Edge
@@ -657,175 +527,407 @@ public:
// any GetLocals in the basic block.
// FIXME: it may be appropriate, in the future, to generalize this to handle GetLocals
// introduced anywhere in the basic block.
- void substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal)
- {
- if (variableAccessData->isCaptured()) {
- // Let CSE worry about this one.
- return;
+ void substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal);
+
+ void invalidateCFG();
+
+ void clearFlagsOnAllNodes(NodeFlags);
+
+ void clearReplacements();
+ void clearEpochs();
+ void initializeNodeOwners();
+
+ BlockList blocksInPreOrder();
+ BlockList blocksInPostOrder();
+
+ class NaturalBlockIterable {
+ public:
+ NaturalBlockIterable()
+ : m_graph(nullptr)
+ {
}
- for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) {
- Node* node = block[indexInBlock];
- bool shouldContinue = true;
- switch (node->op()) {
- case SetLocal: {
- if (node->local() == variableAccessData->local())
- shouldContinue = false;
- break;
+
+ NaturalBlockIterable(Graph& graph)
+ : m_graph(&graph)
+ {
+ }
+
+ class iterator {
+ public:
+ iterator()
+ : m_graph(nullptr)
+ , m_index(0)
+ {
+ }
+
+ iterator(Graph& graph, BlockIndex index)
+ : m_graph(&graph)
+ , m_index(findNext(index))
+ {
+ }
+
+ BasicBlock *operator*()
+ {
+ return m_graph->block(m_index);
+ }
+
+ iterator& operator++()
+ {
+ m_index = findNext(m_index + 1);
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ return m_index == other.m_index;
}
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ BlockIndex findNext(BlockIndex index)
+ {
+ while (index < m_graph->numBlocks() && !m_graph->block(index))
+ index++;
+ return index;
+ }
+
+ Graph* m_graph;
+ BlockIndex m_index;
+ };
+
+ iterator begin()
+ {
+ return iterator(*m_graph, 0);
+ }
+
+ iterator end()
+ {
+ return iterator(*m_graph, m_graph->numBlocks());
+ }
+
+ private:
+ Graph* m_graph;
+ };
+
+ NaturalBlockIterable blocksInNaturalOrder()
+ {
+ return NaturalBlockIterable(*this);
+ }
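+
+    // Illustrative usage (a hypothetical sketch, not part of this patch):
+    // killBlock() leaves null slots behind in m_blocks, so iterating by raw
+    // index needs a null check; this iterable skips dead slots instead:
+    //
+    //     for (BasicBlock* block : blocksInNaturalOrder())
+    //         dataLog("Block #", block->index, "\n");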
+
+ template<typename ChildFunctor>
+ void doToChildrenWithNode(Node* node, const ChildFunctor& functor)
+ {
+ DFG_NODE_DO_TO_CHILDREN(*this, node, functor);
+ }
+
+ template<typename ChildFunctor>
+ void doToChildren(Node* node, const ChildFunctor& functor)
+ {
+ doToChildrenWithNode(
+ node,
+ [&functor] (Node*, Edge& edge) {
+ functor(edge);
+ });
+ }
+
+ bool uses(Node* node, Node* child)
+ {
+ bool result = false;
+ doToChildren(node, [&] (Edge edge) { result |= edge == child; });
+ return result;
+ }
+
+ Profiler::Compilation* compilation() { return m_plan.compilation.get(); }
+
+ DesiredIdentifiers& identifiers() { return m_plan.identifiers; }
+ DesiredWatchpoints& watchpoints() { return m_plan.watchpoints; }
+
+ // Returns false if the key is already invalid or unwatchable. If this is a Presence condition,
+ // this also makes it cheap to query if the condition holds. Also makes sure that the GC knows
+ // what's going on.
+ bool watchCondition(const ObjectPropertyCondition&);
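+
+    // Illustrative usage (a hypothetical sketch, not part of this patch): a
+    // folding pass can try to watch a condition, and emit a dynamic check
+    // only when watching fails:
+    //
+    //     if (m_graph.watchCondition(condition)) {
+    //         // Fold; if the condition is later invalidated, the watchpoint
+    //         // jettisons the compiled code.
+    //     }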
+
+ // Checks if it's known that loading from the given object at the given offset is fine. This is
+ // computed by tracking which conditions we track with watchCondition().
+ bool isSafeToLoad(JSObject* base, PropertyOffset);
+
+ void registerInferredType(const InferredType::Descriptor& type)
+ {
+ if (type.structure())
+ registerStructure(type.structure());
+ }
+
+ // Tells us what inferred type we are able to prove the property to have now and in the future.
+ InferredType::Descriptor inferredTypeFor(const PropertyTypeKey&);
+ InferredType::Descriptor inferredTypeForProperty(Structure* structure, UniquedStringImpl* uid)
+ {
+ return inferredTypeFor(PropertyTypeKey(structure, uid));
+ }
+
+ AbstractValue inferredValueForProperty(
+ const StructureSet& base, UniquedStringImpl* uid, StructureClobberState = StructuresAreWatched);
+
+ // This uses either constant property inference or property type inference to derive a good abstract
+ // value for some property accessed with the given abstract value base.
+ AbstractValue inferredValueForProperty(
+ const AbstractValue& base, UniquedStringImpl* uid, PropertyOffset, StructureClobberState);
+
+ FullBytecodeLiveness& livenessFor(CodeBlock*);
+ FullBytecodeLiveness& livenessFor(InlineCallFrame*);
+
+ // Quickly query if a single local is live at the given point. This is faster than calling
+ // forAllLiveInBytecode() if you will only query one local. But, if you want to know all of the
+ // locals live, then calling this for each local is much slower than forAllLiveInBytecode().
+ bool isLiveInBytecode(VirtualRegister, CodeOrigin);
+
+ // Quickly get all of the non-argument locals live at the given point. This doesn't give you
+ // any arguments because those are all presumed live. You can call forAllLiveInBytecode() to
+ // also get the arguments. This is much faster than calling isLiveInBytecode() for each local.
+ template<typename Functor>
+ void forAllLocalsLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
+ {
+ // Support for not redundantly reporting arguments. Necessary because in case of a varargs
+ // call, only the callee knows that arguments are live while in the case of a non-varargs
+ // call, both callee and caller will see the variables live.
+ VirtualRegister exclusionStart;
+ VirtualRegister exclusionEnd;
+
+ CodeOrigin* codeOriginPtr = &codeOrigin;
+
+ for (;;) {
+ InlineCallFrame* inlineCallFrame = codeOriginPtr->inlineCallFrame;
+ VirtualRegister stackOffset(inlineCallFrame ? inlineCallFrame->stackOffset : 0);
+
+ if (inlineCallFrame) {
+ if (inlineCallFrame->isClosureCall)
+ functor(stackOffset + JSStack::Callee);
+ if (inlineCallFrame->isVarargs())
+ functor(stackOffset + JSStack::ArgumentCount);
+ }
+
+ CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame);
+ FullBytecodeLiveness& fullLiveness = livenessFor(codeBlock);
+ const FastBitVector& liveness = fullLiveness.getLiveness(codeOriginPtr->bytecodeIndex);
+ for (unsigned relativeLocal = codeBlock->m_numCalleeLocals; relativeLocal--;) {
+ VirtualRegister reg = stackOffset + virtualRegisterForLocal(relativeLocal);
- case GetLocal: {
- if (node->variableAccessData() != variableAccessData)
+ // Don't report if our callee already reported.
+ if (reg >= exclusionStart && reg < exclusionEnd)
continue;
- substitute(block, indexInBlock, node, newGetLocal);
- Node* oldTailNode = block.variablesAtTail.operand(variableAccessData->local());
- if (oldTailNode == node)
- block.variablesAtTail.operand(variableAccessData->local()) = newGetLocal;
- shouldContinue = false;
- break;
- }
- default:
- break;
+ if (liveness.get(relativeLocal))
+ functor(reg);
}
- if (!shouldContinue)
+
+ if (!inlineCallFrame)
+ break;
+
+ // Arguments are always live. This would be redundant if it wasn't for our
+ // op_call_varargs inlining. See the comment above.
+ exclusionStart = stackOffset + CallFrame::argumentOffsetIncludingThis(0);
+ exclusionEnd = stackOffset + CallFrame::argumentOffsetIncludingThis(inlineCallFrame->arguments.size());
+
+ // We will always have a "this" argument and exclusionStart should be a smaller stack
+ // offset than exclusionEnd.
+ ASSERT(exclusionStart < exclusionEnd);
+
+ for (VirtualRegister reg = exclusionStart; reg < exclusionEnd; reg += 1)
+ functor(reg);
+
+ codeOriginPtr = inlineCallFrame->getCallerSkippingTailCalls();
+
+ // The first inline call frame could be an inline tail call
+ if (!codeOriginPtr)
break;
}
}
+ // Get a BitVector of all of the non-argument locals live right now. This is mostly useful if
+ // you want to compare two sets of live locals from two different CodeOrigins.
+ BitVector localsLiveInBytecode(CodeOrigin);
+
+ // Tells you all of the arguments and locals live at the given CodeOrigin. This is a small
+ // extension to forAllLocalsLiveInBytecode(), since all arguments are always presumed live.
+ template<typename Functor>
+ void forAllLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
+ {
+ forAllLocalsLiveInBytecode(codeOrigin, functor);
+
+ // Report all arguments as being live.
+ for (unsigned argument = block(0)->variablesAtHead.numberOfArguments(); argument--;)
+ functor(virtualRegisterForArgument(argument));
+ }
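+
+    // Illustrative usage (a hypothetical sketch, not part of this patch):
+    // collect every live register, arguments included, at a given origin:
+    //
+    //     Vector<VirtualRegister> live;
+    //     forAllLiveInBytecode(
+    //         codeOrigin, [&] (VirtualRegister reg) { live.append(reg); });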
+
+ BytecodeKills& killsFor(CodeBlock*);
+ BytecodeKills& killsFor(InlineCallFrame*);
+
+ unsigned frameRegisterCount();
+ unsigned stackPointerOffset();
+ unsigned requiredRegisterCountForExit();
+ unsigned requiredRegisterCountForExecutionAndExit();
+
+ JSValue tryGetConstantProperty(JSValue base, const StructureSet&, PropertyOffset);
+ JSValue tryGetConstantProperty(JSValue base, Structure*, PropertyOffset);
+ JSValue tryGetConstantProperty(JSValue base, const StructureAbstractValue&, PropertyOffset);
+ JSValue tryGetConstantProperty(const AbstractValue&, PropertyOffset);
+
+ JSValue tryGetConstantClosureVar(JSValue base, ScopeOffset);
+ JSValue tryGetConstantClosureVar(const AbstractValue&, ScopeOffset);
+ JSValue tryGetConstantClosureVar(Node*, ScopeOffset);
+
+ JSArrayBufferView* tryGetFoldableView(JSValue);
+ JSArrayBufferView* tryGetFoldableView(JSValue, ArrayMode arrayMode);
+
+ void registerFrozenValues();
+
+ virtual void visitChildren(SlotVisitor&) override;
+
+ NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
+ std::nullptr_t, const char* file, int line, const char* function,
+ const char* assertion);
+ NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
+ Node*, const char* file, int line, const char* function,
+ const char* assertion);
+ NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
+ BasicBlock*, const char* file, int line, const char* function,
+ const char* assertion);
+
+ bool hasDebuggerEnabled() const { return m_hasDebuggerEnabled; }
+
+ void ensureDominators();
+ void ensurePrePostNumbering();
+ void ensureNaturalLoops();
+
+ // This function only makes sense to call after bytecode parsing
+ // because it queries the m_hasExceptionHandlers boolean whose value
+ // is only fully determined after bytecode parsing.
+ bool willCatchExceptionInMachineFrame(CodeOrigin, CodeOrigin& opCatchOriginOut, HandlerInfo*& catchHandlerOut);
+
VM& m_vm;
+ Plan& m_plan;
CodeBlock* m_codeBlock;
- RefPtr<Profiler::Compilation> m_compilation;
CodeBlock* m_profiledBlock;
NodeAllocator& m_allocator;
- Vector< OwnPtr<BasicBlock> , 8> m_blocks;
+ Vector< RefPtr<BasicBlock> , 8> m_blocks;
Vector<Edge, 16> m_varArgChildren;
- Vector<StorageAccessData> m_storageAccessData;
- Vector<ResolveGlobalData> m_resolveGlobalData;
- Vector<ResolveOperationData> m_resolveOperationsData;
- Vector<PutToBaseOperationData> m_putToBaseOperationData;
+
+ HashMap<EncodedJSValue, FrozenValue*, EncodedJSValueHash, EncodedJSValueHashTraits> m_frozenValueMap;
+ Bag<FrozenValue> m_frozenValues;
+
+ Vector<uint32_t> m_uint32ValuesInUse;
+
+ Bag<StorageAccessData> m_storageAccessData;
+
+ // In CPS, this is all of the SetArgument nodes for the arguments in the machine code block
+ // that survived DCE. All of them except maybe "this" will survive DCE, because of the Flush
+ // nodes.
+ //
+ // In SSA, this is all of the GetStack nodes for the arguments in the machine code block that
+ // may have some speculation in the prologue and survived DCE. Note that to get the speculation
+ // for an argument in SSA, you must use m_argumentFormats, since we still have to speculate
+ // even if the argument got killed. For example:
+ //
+ // function foo(x) {
+ // var tmp = x + 1;
+ // }
+ //
+ // Assume that x is always int during profiling. The ArithAdd for "x + 1" will be dead and will
+ // have a proven check for the edge to "x". So, we will not insert a Check node and we will
+ // kill the GetStack for "x". But we must do the int check in the prologue, because that's the
+ // thing we used to allow DCE of ArithAdd. Otherwise the add could be impure:
+ //
+ // var o = {
+ // valueOf: function() { do side effects; }
+ // };
+ // foo(o);
+ //
+ // If we DCE the ArithAdd and we remove the int check on x, then this won't do the side
+ // effects.
Vector<Node*, 8> m_arguments;
+
+ // In CPS, this is meaningless. In SSA, this is the argument speculation that we've locked in.
+ Vector<FlushFormat> m_argumentFormats;
+
SegmentedVector<VariableAccessData, 16> m_variableAccessData;
SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
SegmentedVector<StructureSet, 16> m_structureSet;
- SegmentedVector<StructureTransitionData, 8> m_structureTransitionData;
+ Bag<Transition> m_transitions;
SegmentedVector<NewArrayBufferData, 4> m_newArrayBufferData;
- bool m_hasArguments;
- HashSet<ExecutableBase*> m_executablesWhoseArgumentsEscaped;
- BitVector m_preservedVars;
- Dominators m_dominators;
+ Bag<BranchData> m_branchData;
+ Bag<SwitchData> m_switchData;
+ Bag<MultiGetByOffsetData> m_multiGetByOffsetData;
+ Bag<MultiPutByOffsetData> m_multiPutByOffsetData;
+ Bag<ObjectMaterializationData> m_objectMaterializationData;
+ Bag<CallVarargsData> m_callVarargsData;
+ Bag<LoadVarargsData> m_loadVarargsData;
+ Bag<StackAccessData> m_stackAccessData;
+ Vector<InlineVariableData, 4> m_inlineVariableData;
+ HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness;
+ HashMap<CodeBlock*, std::unique_ptr<BytecodeKills>> m_bytecodeKills;
+ HashSet<std::pair<JSObject*, PropertyOffset>> m_safeToLoad;
+ HashMap<PropertyTypeKey, InferredType::Descriptor> m_inferredTypes;
+ std::unique_ptr<Dominators> m_dominators;
+ std::unique_ptr<PrePostNumbering> m_prePostNumbering;
+ std::unique_ptr<NaturalLoops> m_naturalLoops;
+ std::unique_ptr<CFG> m_cfg;
unsigned m_localVars;
+ unsigned m_nextMachineLocal;
unsigned m_parameterSlots;
- unsigned m_osrEntryBytecodeIndex;
- Operands<JSValue> m_mustHandleValues;
+
+#if USE(JSVALUE32_64)
+ std::unordered_map<int64_t, double*> m_doubleConstantsMap;
+ std::unique_ptr<Bag<double>> m_doubleConstants;
+#endif
OptimizationFixpointState m_fixpointState;
+ StructureRegistrationState m_structureRegistrationState;
GraphForm m_form;
UnificationState m_unificationState;
+ PlanStage m_planStage { PlanStage::Initial };
RefCountState m_refCountState;
+ bool m_hasDebuggerEnabled;
+ bool m_hasExceptionHandlers { false };
private:
+
+ bool isStringPrototypeMethodSane(JSObject* stringPrototype, Structure* stringPrototypeStructure, UniquedStringImpl*);
+
+ void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor);
- void handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex blockIndex, BlockIndex successorIndex);
-
- AddSpeculationMode addImmediateShouldSpeculateInteger(Node* add, bool variableShouldSpeculateInteger, Node* immediate)
+ AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* operand, Node* immediate, RareCaseProfilingSource source)
{
ASSERT(immediate->hasConstant());
- JSValue immediateValue = immediate->valueOfJSConstant(m_codeBlock);
- if (!immediateValue.isNumber())
- return DontSpeculateInteger;
+ JSValue immediateValue = immediate->asJSValue();
+ if (!immediateValue.isNumber() && !immediateValue.isBoolean())
+ return DontSpeculateInt32;
- if (!variableShouldSpeculateInteger)
- return DontSpeculateInteger;
+ if (!variableShouldSpeculateInt32)
+ return DontSpeculateInt32;
+
+ // Integer constants can be typed Double if they are written like a double in the source code (e.g. 42.0).
+ // In that case, we stay conservative unless the other operand was explicitly typed as integer.
+ NodeFlags operandResultType = operand->result();
+ if (operandResultType != NodeResultInt32 && immediateValue.isDouble())
+ return DontSpeculateInt32;
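+
+        // Illustrative example (not in the original comment): in "x + 42.0"
+        // the immediate is Double-typed even though 42.0 is exactly
+        // representable as int32, so we speculate Int32 only if the other
+        // operand was itself proven Int32.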
- if (immediateValue.isInt32())
- return add->canSpeculateInteger() ? SpeculateInteger : DontSpeculateInteger;
+ if (immediateValue.isBoolean() || jsNumber(immediateValue.asNumber()).isInt32())
+ return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32;
double doubleImmediate = immediateValue.asDouble();
const double twoToThe48 = 281474976710656.0;
if (doubleImmediate < -twoToThe48 || doubleImmediate > twoToThe48)
- return DontSpeculateInteger;
-
- return nodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateIntegerAndTruncateConstants : DontSpeculateInteger;
- }
-
- bool mulImmediateShouldSpeculateInteger(Node* mul, Node* variable, Node* immediate)
- {
- ASSERT(immediate->hasConstant());
-
- JSValue immediateValue = immediate->valueOfJSConstant(m_codeBlock);
- if (!immediateValue.isInt32())
- return false;
+ return DontSpeculateInt32;
- if (!variable->shouldSpeculateIntegerForArithmetic())
- return false;
-
- int32_t intImmediate = immediateValue.asInt32();
- // Doubles have a 53 bit mantissa so we expect a multiplication of 2^31 (the highest
- // magnitude possible int32 value) and any value less than 2^22 to not result in any
- // rounding in a double multiplication - hence it will be equivalent to an integer
- // multiplication, if we are doing int32 truncation afterwards (which is what
- // canSpeculateInteger() implies).
- const int32_t twoToThe22 = 1 << 22;
- if (intImmediate <= -twoToThe22 || intImmediate >= twoToThe22)
- return mul->canSpeculateInteger() && !nodeMayOverflow(mul->arithNodeFlags());
-
- return mul->canSpeculateInteger();
- }
-};
-
-class GetBytecodeBeginForBlock {
-public:
- GetBytecodeBeginForBlock(Graph& graph)
- : m_graph(graph)
- {
- }
-
- unsigned operator()(BlockIndex* blockIndex) const
- {
- return m_graph.m_blocks[*blockIndex]->bytecodeBegin;
+ return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32;
}
-
-private:
- Graph& m_graph;
};
-inline BlockIndex Graph::blockIndexForBytecodeOffset(Vector<BlockIndex>& linkingTargets, unsigned bytecodeBegin)
-{
- return *binarySearch<BlockIndex, unsigned>(linkingTargets, linkingTargets.size(), bytecodeBegin, GetBytecodeBeginForBlock(*this));
-}
-
-#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do { \
- Node* _node = (node); \
- if (_node->flags() & NodeHasVarArgs) { \
- for (unsigned _childIdx = _node->firstChild(); \
- _childIdx < _node->firstChild() + _node->numChildren(); \
- _childIdx++) { \
- if (!!(graph).m_varArgChildren[_childIdx]) \
- thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
- } \
- } else { \
- if (!_node->child1()) { \
- ASSERT( \
- !_node->child2() \
- && !_node->child3()); \
- break; \
- } \
- thingToDo(_node, _node->child1()); \
- \
- if (!_node->child2()) { \
- ASSERT(!_node->child3()); \
- break; \
- } \
- thingToDo(_node, _node->child2()); \
- \
- if (!_node->child3()) \
- break; \
- thingToDo(_node, _node->child3()); \
- } \
- } while (false)
-
} } // namespace JSC::DFG
#endif