author     Simon Hausmann <simon.hausmann@nokia.com>  2012-01-06 14:44:00 +0100
committer  Simon Hausmann <simon.hausmann@nokia.com>  2012-01-06 14:44:00 +0100
commit     40736c5763bf61337c8c14e16d8587db021a87d4 (patch)
tree       b17a9c00042ad89cb1308e2484491799aa14e9f8 /Source/JavaScriptCore/dfg
download   qtwebkit-40736c5763bf61337c8c14e16d8587db021a87d4.tar.gz
Imported WebKit commit 2ea9d364d0f6efa8fa64acf19f451504c59be0e4 (http://svn.webkit.org/repository/webkit/trunk@104285)
Diffstat (limited to 'Source/JavaScriptCore/dfg')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAbstractState.cpp  1056
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAbstractState.h  196
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAbstractValue.h  488
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp  148
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h  321
-rw-r--r--  Source/JavaScriptCore/dfg/DFGBasicBlock.h  111
-rw-r--r--  Source/JavaScriptCore/dfg/DFGByteCodeCache.h  194
-rw-r--r--  Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp  2650
-rw-r--r--  Source/JavaScriptCore/dfg/DFGByteCodeParser.h  47
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCapabilities.cpp  75
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCapabilities.h  262
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCommon.h  100
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.cpp  43
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.h  159
-rw-r--r--  Source/JavaScriptCore/dfg/DFGDriver.cpp  88
-rw-r--r--  Source/JavaScriptCore/dfg/DFGDriver.h  51
-rw-r--r--  Source/JavaScriptCore/dfg/DFGFPRInfo.h  171
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGPRInfo.h  465
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGenerationInfo.h  327
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGraph.cpp  363
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGraph.h  297
-rw-r--r--  Source/JavaScriptCore/dfg/DFGJITCompiler.cpp  286
-rw-r--r--  Source/JavaScriptCore/dfg/DFGJITCompiler.h  334
-rw-r--r--  Source/JavaScriptCore/dfg/DFGNode.h  1076
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSREntry.cpp  175
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSREntry.h  61
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExit.cpp  77
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExit.h  164
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp  85
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h  85
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp  662
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp  633
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOperands.h  165
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOperations.cpp  838
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOperations.h  199
-rw-r--r--  Source/JavaScriptCore/dfg/DFGPropagator.cpp  1793
-rw-r--r--  Source/JavaScriptCore/dfg/DFGPropagator.h  46
-rw-r--r--  Source/JavaScriptCore/dfg/DFGRegisterBank.h  383
-rw-r--r--  Source/JavaScriptCore/dfg/DFGRepatch.cpp  637
-rw-r--r--  Source/JavaScriptCore/dfg/DFGRepatch.h  63
-rw-r--r--  Source/JavaScriptCore/dfg/DFGScoreBoard.h  164
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp  2484
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h  2758
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp  3561
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp  3537
-rw-r--r--  Source/JavaScriptCore/dfg/DFGStructureSet.h  166
-rw-r--r--  Source/JavaScriptCore/dfg/DFGThunks.cpp  78
-rw-r--r--  Source/JavaScriptCore/dfg/DFGThunks.h  47
-rw-r--r--  Source/JavaScriptCore/dfg/DFGVariableAccessData.h  143
49 files changed, 28312 insertions, 0 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
new file mode 100644
index 000000000..40ad857cf
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
@@ -0,0 +1,1056 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGAbstractState.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+#include "DFGBasicBlock.h"
+
+namespace JSC { namespace DFG {
+
+#define CFA_PROFILING 0
+
+#if CFA_PROFILING
+#define PROFILE(flag) SamplingFlags::ScopedFlag scopedFlag(flag)
+#else
+#define PROFILE(flag) do { } while (false)
+#endif
+
+// Profiling flags
+#define FLAG_FOR_BLOCK_INITIALIZATION 17
+#define FLAG_FOR_BLOCK_END 18
+#define FLAG_FOR_EXECUTION 19
+#define FLAG_FOR_MERGE_TO_SUCCESSORS 20
+#define FLAG_FOR_STRUCTURE_CLOBBERING 21
+
+AbstractState::AbstractState(CodeBlock* codeBlock, Graph& graph)
+ : m_codeBlock(codeBlock)
+ , m_graph(graph)
+ , m_variables(codeBlock->m_numParameters, graph.m_localVars)
+ , m_block(0)
+{
+ size_t maxBlockSize = 0;
+ for (size_t i = 0; i < graph.m_blocks.size(); ++i) {
+ BasicBlock* block = graph.m_blocks[i].get();
+ if (block->end - block->begin > maxBlockSize)
+ maxBlockSize = block->end - block->begin;
+ }
+ m_nodes.resize(maxBlockSize);
+}
+
+AbstractState::~AbstractState() { }
+
+void AbstractState::beginBasicBlock(BasicBlock* basicBlock)
+{
+ PROFILE(FLAG_FOR_BLOCK_INITIALIZATION);
+
+ ASSERT(!m_block);
+
+ ASSERT(basicBlock->variablesAtHead.numberOfLocals() == basicBlock->valuesAtHead.numberOfLocals());
+ ASSERT(basicBlock->variablesAtTail.numberOfLocals() == basicBlock->valuesAtTail.numberOfLocals());
+ ASSERT(basicBlock->variablesAtHead.numberOfLocals() == basicBlock->variablesAtTail.numberOfLocals());
+
+ for (size_t i = 0; i < basicBlock->end - basicBlock->begin; ++i)
+ m_nodes[i].clear();
+ m_variables = basicBlock->valuesAtHead;
+ m_haveStructures = false;
+ for (size_t i = 0; i < m_variables.numberOfArguments(); ++i) {
+ if (m_variables.argument(i).m_structure.isNeitherClearNorTop()) {
+ m_haveStructures = true;
+ break;
+ }
+ }
+ for (size_t i = 0; i < m_variables.numberOfLocals(); ++i) {
+ if (m_variables.local(i).m_structure.isNeitherClearNorTop()) {
+ m_haveStructures = true;
+ break;
+ }
+ }
+
+ basicBlock->cfaShouldRevisit = false;
+ basicBlock->cfaHasVisited = true;
+ m_block = basicBlock;
+ m_isValid = true;
+}
+
+void AbstractState::initialize(Graph& graph)
+{
+ PROFILE(FLAG_FOR_BLOCK_INITIALIZATION);
+ BasicBlock* root = graph.m_blocks[0].get();
+ root->cfaShouldRevisit = true;
+ for (size_t i = 0; i < root->valuesAtHead.numberOfArguments(); ++i) {
+ PredictedType prediction = graph[root->variablesAtHead.argument(i)].variableAccessData()->prediction();
+ if (isInt32Prediction(prediction))
+ root->valuesAtHead.argument(i).set(PredictInt32);
+ else if (isArrayPrediction(prediction))
+ root->valuesAtHead.argument(i).set(PredictArray);
+ else if (isByteArrayPrediction(prediction))
+ root->valuesAtHead.argument(i).set(PredictByteArray);
+ else if (isBooleanPrediction(prediction))
+ root->valuesAtHead.argument(i).set(PredictBoolean);
+ else if (isInt8ArrayPrediction(prediction))
+ root->valuesAtHead.argument(i).set(PredictInt8Array);
+ else if (isInt16ArrayPrediction(prediction))
+ root->valuesAtHead.argument(i).set(PredictInt16Array);
+ else if (isInt32ArrayPrediction(prediction))
+ root->valuesAtHead.argument(i).set(PredictInt32Array);
+ else if (isUint8ArrayPrediction(prediction))
+ root->valuesAtHead.argument(i).set(PredictUint8Array);
+ else if (isUint16ArrayPrediction(prediction))
+ root->valuesAtHead.argument(i).set(PredictUint16Array);
+ else if (isUint32ArrayPrediction(prediction))
+ root->valuesAtHead.argument(i).set(PredictUint32Array);
+ else if (isFloat32ArrayPrediction(prediction))
+ root->valuesAtHead.argument(i).set(PredictFloat32Array);
+ else if (isFloat64ArrayPrediction(prediction))
+ root->valuesAtHead.argument(i).set(PredictFloat64Array);
+ else
+ root->valuesAtHead.argument(i).makeTop();
+ }
+}
+
+bool AbstractState::endBasicBlock(MergeMode mergeMode)
+{
+ PROFILE(FLAG_FOR_BLOCK_END);
+ ASSERT(m_block);
+
+ BasicBlock* block = m_block; // Save the block for successor merging.
+
+ if (!m_isValid) {
+ reset();
+ return false;
+ }
+
+ bool changed = false;
+
+ if (mergeMode != DontMerge || !ASSERT_DISABLED) {
+ for (size_t argument = 0; argument < block->variablesAtTail.numberOfArguments(); ++argument) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Merging state for argument %lu.\n", argument);
+#endif
+ changed |= mergeStateAtTail(block->valuesAtTail.argument(argument), m_variables.argument(argument), block->variablesAtTail.argument(argument));
+ }
+
+ for (size_t local = 0; local < block->variablesAtTail.numberOfLocals(); ++local) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Merging state for local %lu.\n", local);
+#endif
+ changed |= mergeStateAtTail(block->valuesAtTail.local(local), m_variables.local(local), block->variablesAtTail.local(local));
+ }
+ }
+
+ ASSERT(mergeMode != DontMerge || !changed);
+
+ reset();
+
+ if (mergeMode != MergeToSuccessors)
+ return changed;
+
+ return mergeToSuccessors(m_graph, block);
+}
+
+void AbstractState::reset()
+{
+ m_block = 0;
+ m_isValid = false;
+}
+
+bool AbstractState::execute(NodeIndex nodeIndex)
+{
+ PROFILE(FLAG_FOR_EXECUTION);
+ ASSERT(m_block);
+ ASSERT(m_isValid);
+
+ Node& node = m_graph[nodeIndex];
+
+ if (!node.shouldGenerate())
+ return true;
+
+ switch (node.op) {
+ case JSConstant:
+ case WeakJSConstant: {
+ JSValue value = m_graph.valueOfJSConstant(m_codeBlock, nodeIndex);
+ if (value.isCell())
+ m_haveStructures = true;
+ forNode(nodeIndex).set(value);
+ break;
+ }
+
+ case GetLocal: {
+ forNode(nodeIndex) = m_variables.operand(node.local());
+ break;
+ }
+
+ case SetLocal: {
+ if (node.variableAccessData()->shouldUseDoubleFormat()) {
+ forNode(node.child1()).filter(PredictNumber);
+ m_variables.operand(node.local()).set(PredictDouble);
+ break;
+ }
+
+ PredictedType predictedType = node.variableAccessData()->prediction();
+ if (isInt32Prediction(predictedType))
+ forNode(node.child1()).filter(PredictInt32);
+ else if (isArrayPrediction(predictedType))
+ forNode(node.child1()).filter(PredictArray);
+ else if (isByteArrayPrediction(predictedType))
+ forNode(node.child1()).filter(PredictByteArray);
+ else if (isBooleanPrediction(predictedType))
+ forNode(node.child1()).filter(PredictBoolean);
+
+ m_variables.operand(node.local()) = forNode(node.child1());
+ break;
+ }
+
+ case SetArgument:
+ // Assert that the state of arguments has been set.
+ ASSERT(!m_block->valuesAtHead.operand(node.local()).isClear());
+ break;
+
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ case BitRShift:
+ case BitLShift:
+ case BitURShift:
+ forNode(node.child1()).filter(PredictInt32);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+
+ case UInt32ToNumber:
+ if (!node.canSpeculateInteger())
+ forNode(nodeIndex).set(PredictDouble);
+ else
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+
+ case ValueToInt32:
+ if (!m_graph[node.child1()].shouldNotSpeculateInteger()) {
+ if (m_graph[node.child1()].shouldSpeculateDouble())
+ forNode(node.child1()).filter(PredictNumber);
+ else
+ forNode(node.child1()).filter(PredictInt32);
+ }
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+
+ case ValueToNumber:
+ if (m_graph[node.child1()].shouldNotSpeculateInteger()) {
+ forNode(node.child1()).filter(PredictNumber);
+ forNode(nodeIndex).set(PredictDouble);
+ break;
+ }
+
+ forNode(node.child1()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+
+ case ValueToDouble:
+ forNode(node.child1()).filter(PredictNumber);
+ forNode(nodeIndex).set(PredictDouble);
+ break;
+
+ case ValueAdd:
+ case ArithAdd: {
+ if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]) && node.canSpeculateInteger()) {
+ forNode(node.child1()).filter(PredictInt32);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
+ if (Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()])) {
+ forNode(node.child1()).filter(PredictNumber);
+ forNode(node.child2()).filter(PredictNumber);
+ forNode(nodeIndex).set(PredictDouble);
+ break;
+ }
+ ASSERT(node.op == ValueAdd);
+ clobberStructures(nodeIndex);
+ forNode(nodeIndex).set(PredictString | PredictInt32 | PredictNumber);
+ break;
+ }
+
+ case ArithSub:
+ case ArithMul:
+ case ArithDiv:
+ case ArithMin:
+ case ArithMax: {
+ if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]) && node.canSpeculateInteger()) {
+ forNode(node.child1()).filter(PredictInt32);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
+ forNode(node.child1()).filter(PredictNumber);
+ forNode(node.child2()).filter(PredictNumber);
+ forNode(nodeIndex).set(PredictDouble);
+ break;
+ }
+
+ case ArithMod: {
+ if (m_graph[node.child1()].shouldNotSpeculateInteger() || m_graph[node.child2()].shouldNotSpeculateInteger() || !node.canSpeculateInteger()) {
+ forNode(node.child1()).filter(PredictNumber);
+ forNode(node.child2()).filter(PredictNumber);
+ forNode(nodeIndex).set(PredictDouble);
+ break;
+ }
+ forNode(node.child1()).filter(PredictInt32);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
+
+ case ArithAbs:
+ if (m_graph[node.child1()].shouldSpeculateInteger() && node.canSpeculateInteger()) {
+ forNode(node.child1()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
+ forNode(node.child1()).filter(PredictNumber);
+ forNode(nodeIndex).set(PredictDouble);
+ break;
+
+ case ArithSqrt:
+ forNode(node.child1()).filter(PredictNumber);
+ forNode(nodeIndex).set(PredictDouble);
+ break;
+
+ case LogicalNot: {
+ Node& child = m_graph[node.child1()];
+ if (isBooleanPrediction(child.prediction()) || !child.prediction())
+ forNode(node.child1()).filter(PredictBoolean);
+ else if (child.shouldSpeculateFinalObjectOrOther())
+ forNode(node.child1()).filter(PredictFinalObject | PredictOther);
+ else if (child.shouldSpeculateArrayOrOther())
+ forNode(node.child1()).filter(PredictArray | PredictOther);
+ else if (child.shouldSpeculateInteger())
+ forNode(node.child1()).filter(PredictInt32);
+ else if (child.shouldSpeculateNumber())
+ forNode(node.child1()).filter(PredictNumber);
+ else
+ clobberStructures(nodeIndex);
+ forNode(nodeIndex).set(PredictBoolean);
+ break;
+ }
+
+ case CompareLess:
+ case CompareLessEq:
+ case CompareGreater:
+ case CompareGreaterEq:
+ case CompareEq: {
+ Node& left = m_graph[node.child1()];
+ Node& right = m_graph[node.child2()];
+ PredictedType filter;
+ if (Node::shouldSpeculateInteger(left, right))
+ filter = PredictInt32;
+ else if (Node::shouldSpeculateNumber(left, right))
+ filter = PredictNumber;
+ else if (node.op == CompareEq && Node::shouldSpeculateFinalObject(left, right))
+ filter = PredictFinalObject;
+ else if (node.op == CompareEq && Node::shouldSpeculateArray(left, right))
+ filter = PredictArray;
+ else {
+ filter = PredictTop;
+ clobberStructures(nodeIndex);
+ }
+ forNode(node.child1()).filter(filter);
+ forNode(node.child2()).filter(filter);
+ forNode(nodeIndex).set(PredictBoolean);
+ break;
+ }
+
+ case CompareStrictEq:
+ forNode(nodeIndex).set(PredictBoolean);
+ break;
+
+ case StringCharCodeAt:
+ forNode(node.child1()).filter(PredictString);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+
+ case StringCharAt:
+ forNode(node.child1()).filter(PredictString);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictString);
+ break;
+
+ case GetByVal: {
+ if (!node.prediction() || !m_graph[node.child1()].prediction() || !m_graph[node.child2()].prediction()) {
+ m_isValid = false;
+ break;
+ }
+ if (!isActionableArrayPrediction(m_graph[node.child1()].prediction()) || !m_graph[node.child2()].shouldSpeculateInteger()) {
+ clobberStructures(nodeIndex);
+ forNode(nodeIndex).makeTop();
+ break;
+ }
+ if (m_graph[node.child1()].prediction() == PredictString) {
+ forNode(node.child1()).filter(PredictString);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictString);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateByteArray()) {
+ forNode(node.child1()).filter(PredictByteArray);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
+
+ if (m_graph[node.child1()].shouldSpeculateInt8Array()) {
+ forNode(node.child1()).filter(PredictInt8Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateInt16Array()) {
+ forNode(node.child1()).filter(PredictInt16Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateInt32Array()) {
+ forNode(node.child1()).filter(PredictInt32Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateUint8Array()) {
+ forNode(node.child1()).filter(PredictUint8Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateUint16Array()) {
+ forNode(node.child1()).filter(PredictUint16Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateUint32Array()) {
+ forNode(node.child1()).filter(PredictUint32Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictDouble);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateFloat32Array()) {
+ forNode(node.child1()).filter(PredictFloat32Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictDouble);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateFloat64Array()) {
+ forNode(node.child1()).filter(PredictFloat64Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictDouble);
+ break;
+ }
+ ASSERT(m_graph[node.child1()].shouldSpeculateArray());
+ forNode(node.child1()).filter(PredictArray);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(nodeIndex).makeTop();
+ break;
+ }
+
+ case PutByVal:
+ case PutByValAlias: {
+ if (!m_graph[node.child1()].prediction() || !m_graph[node.child2()].prediction()) {
+ m_isValid = false;
+ break;
+ }
+ if (!m_graph[node.child2()].shouldSpeculateInteger() || !isActionableMutableArrayPrediction(m_graph[node.child1()].prediction())) {
+ ASSERT(node.op == PutByVal);
+ clobberStructures(nodeIndex);
+ forNode(nodeIndex).makeTop();
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateByteArray()) {
+ forNode(node.child1()).filter(PredictByteArray);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(node.child3()).filter(PredictNumber);
+ break;
+ }
+
+ if (m_graph[node.child1()].shouldSpeculateInt8Array()) {
+ forNode(node.child1()).filter(PredictInt8Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(node.child3()).filter(PredictNumber);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateInt16Array()) {
+ forNode(node.child1()).filter(PredictInt16Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(node.child3()).filter(PredictNumber);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateInt32Array()) {
+ forNode(node.child1()).filter(PredictInt32Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(node.child3()).filter(PredictNumber);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateUint8Array()) {
+ forNode(node.child1()).filter(PredictUint8Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(node.child3()).filter(PredictNumber);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateUint16Array()) {
+ forNode(node.child1()).filter(PredictUint16Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(node.child3()).filter(PredictNumber);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateUint32Array()) {
+ forNode(node.child1()).filter(PredictUint32Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(node.child3()).filter(PredictNumber);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateFloat32Array()) {
+ forNode(node.child1()).filter(PredictFloat32Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(node.child3()).filter(PredictNumber);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateFloat64Array()) {
+ forNode(node.child1()).filter(PredictFloat64Array);
+ forNode(node.child2()).filter(PredictInt32);
+ forNode(node.child3()).filter(PredictNumber);
+ break;
+ }
+ ASSERT(m_graph[node.child1()].shouldSpeculateArray());
+ forNode(node.child1()).filter(PredictArray);
+ forNode(node.child2()).filter(PredictInt32);
+ break;
+ }
+
+ case ArrayPush:
+ forNode(node.child1()).filter(PredictArray);
+ forNode(nodeIndex).set(PredictNumber);
+ break;
+
+ case ArrayPop:
+ forNode(node.child1()).filter(PredictArray);
+ forNode(nodeIndex).makeTop();
+ break;
+
+ case Jump:
+ break;
+
+ case Branch: {
+ // There is probably profit to be found in doing sparse conditional constant
+ // propagation, and to take it one step further, where a variable's value
+ // is specialized on each direction of a branch. For now, we don't do this.
+ Node& child = m_graph[node.child1()];
+ if (isBooleanPrediction(child.prediction()) || !child.prediction())
+ forNode(node.child1()).filter(PredictBoolean);
+ else if (child.shouldSpeculateFinalObjectOrOther())
+ forNode(node.child1()).filter(PredictFinalObject | PredictOther);
+ else if (child.shouldSpeculateArrayOrOther())
+ forNode(node.child1()).filter(PredictArray | PredictOther);
+ else if (child.shouldSpeculateInteger())
+ forNode(node.child1()).filter(PredictInt32);
+ else if (child.shouldSpeculateNumber())
+ forNode(node.child1()).filter(PredictNumber);
+ break;
+ }
+
+ case Return:
+ case Throw:
+ case ThrowReferenceError:
+ m_isValid = false;
+ break;
+
+ case ToPrimitive: {
+ Node& child = m_graph[node.child1()];
+ if (child.shouldSpeculateInteger()) {
+ forNode(node.child1()).filter(PredictInt32);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ }
+
+ AbstractValue& source = forNode(node.child1());
+ AbstractValue& destination = forNode(nodeIndex);
+
+ PredictedType type = source.m_type;
+ if (type & ~(PredictNumber | PredictString | PredictBoolean)) {
+ type &= (PredictNumber | PredictString | PredictBoolean);
+ type |= PredictString;
+ }
+ destination.set(type);
+ break;
+ }
+
+ case StrCat:
+ forNode(nodeIndex).set(PredictString);
+ break;
+
+ case NewArray:
+ case NewArrayBuffer:
+ forNode(nodeIndex).set(m_codeBlock->globalObject()->arrayStructure());
+ m_haveStructures = true;
+ break;
+
+ case NewRegexp:
+ forNode(nodeIndex).set(m_codeBlock->globalObject()->regExpStructure());
+ m_haveStructures = true;
+ break;
+
+ case ConvertThis: {
+ Node& child = m_graph[node.child1()];
+ AbstractValue& source = forNode(node.child1());
+ AbstractValue& destination = forNode(nodeIndex);
+
+ if (isObjectPrediction(source.m_type)) {
+ // This is the simple case. We already know that the source is an
+ // object, so there's nothing to do. I don't think this case will
+ // be hit, but then again, you never know.
+ destination = source;
+ break;
+ }
+
+ if (isOtherPrediction(child.prediction())) {
+ source.filter(PredictOther);
+ destination.set(PredictObjectOther);
+ break;
+ }
+
+ if (isObjectPrediction(child.prediction())) {
+ source.filter(PredictObjectMask);
+ destination = source;
+ break;
+ }
+
+ destination = source;
+ destination.merge(PredictObjectOther);
+ break;
+ }
+
+ case CreateThis: {
+ Node& child = m_graph[node.child1()];
+ AbstractValue& source = forNode(node.child1());
+ AbstractValue& destination = forNode(nodeIndex);
+
+ if (child.shouldSpeculateFinalObject())
+ source.filter(PredictFinalObject);
+
+ destination.set(PredictFinalObject);
+ break;
+ }
+
+ case NewObject:
+ forNode(nodeIndex).set(m_codeBlock->globalObject()->emptyObjectStructure());
+ m_haveStructures = true;
+ break;
+
+ case GetCallee:
+ forNode(nodeIndex).set(PredictFunction);
+ break;
+
+ case GetScopeChain:
+ forNode(nodeIndex).set(PredictCellOther);
+ break;
+
+ case GetScopedVar:
+ forNode(nodeIndex).makeTop();
+ break;
+
+ case PutScopedVar:
+ clobberStructures(nodeIndex);
+ break;
+
+ case GetById:
+ if (!node.prediction()) {
+ m_isValid = false;
+ break;
+ }
+ if (isCellPrediction(m_graph[node.child1()].prediction()))
+ forNode(node.child1()).filter(PredictCell);
+ clobberStructures(nodeIndex);
+ forNode(nodeIndex).makeTop();
+ break;
+
+ case GetArrayLength:
+ forNode(node.child1()).filter(PredictArray);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+
+ case GetStringLength:
+ forNode(node.child1()).filter(PredictString);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+
+ case GetByteArrayLength:
+ forNode(node.child1()).filter(PredictByteArray);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ case GetInt8ArrayLength:
+ forNode(node.child1()).filter(PredictInt8Array);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ case GetInt16ArrayLength:
+ forNode(node.child1()).filter(PredictInt16Array);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ case GetInt32ArrayLength:
+ forNode(node.child1()).filter(PredictInt32Array);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ case GetUint8ArrayLength:
+ forNode(node.child1()).filter(PredictUint8Array);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ case GetUint16ArrayLength:
+ forNode(node.child1()).filter(PredictUint16Array);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ case GetUint32ArrayLength:
+ forNode(node.child1()).filter(PredictUint32Array);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ case GetFloat32ArrayLength:
+ forNode(node.child1()).filter(PredictFloat32Array);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+ case GetFloat64ArrayLength:
+ forNode(node.child1()).filter(PredictFloat64Array);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+
+ case CheckStructure:
+ // FIXME: We should be able to propagate the structure sets of constants (i.e. prototypes).
+ forNode(node.child1()).filter(node.structureSet());
+ m_haveStructures = true;
+ break;
+
+ case PutStructure:
+ clobberStructures(nodeIndex);
+ forNode(node.child1()).set(node.structureTransitionData().newStructure);
+ m_haveStructures = true;
+ break;
+ case GetPropertyStorage:
+ forNode(node.child1()).filter(PredictCell);
+ forNode(nodeIndex).clear(); // The result is not a JS value.
+ break;
+ case GetIndexedPropertyStorage: {
+ PredictedType basePrediction = m_graph[node.child2()].prediction();
+ if (!(basePrediction & PredictInt32) && basePrediction) {
+ forNode(nodeIndex).clear();
+ break;
+ }
+ if (m_graph[node.child1()].prediction() == PredictString) {
+ forNode(node.child1()).filter(PredictString);
+ forNode(nodeIndex).clear();
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateByteArray()) {
+ forNode(node.child1()).filter(PredictByteArray);
+ forNode(nodeIndex).clear();
+ break;
+ }
+
+ if (m_graph[node.child1()].shouldSpeculateInt8Array()) {
+ forNode(node.child1()).filter(PredictInt8Array);
+ forNode(nodeIndex).clear();
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateInt16Array()) {
+ forNode(node.child1()).filter(PredictInt16Array);
+ forNode(nodeIndex).clear();
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateInt32Array()) {
+ forNode(node.child1()).filter(PredictInt32Array);
+ forNode(nodeIndex).clear();
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateUint8Array()) {
+ forNode(node.child1()).filter(PredictUint8Array);
+ forNode(nodeIndex).clear();
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateUint16Array()) {
+ forNode(node.child1()).filter(PredictUint16Array);
+ forNode(nodeIndex).set(PredictOther);
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateUint32Array()) {
+ forNode(node.child1()).filter(PredictUint32Array);
+ forNode(nodeIndex).clear();
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateFloat32Array()) {
+ forNode(node.child1()).filter(PredictFloat32Array);
+ forNode(nodeIndex).clear();
+ break;
+ }
+ if (m_graph[node.child1()].shouldSpeculateFloat64Array()) {
+ forNode(node.child1()).filter(PredictFloat64Array);
+ forNode(nodeIndex).clear();
+ break;
+ }
+ forNode(node.child1()).filter(PredictArray);
+ forNode(nodeIndex).clear();
+ break;
+ }
+ case GetByOffset:
+ forNode(node.child1()).filter(PredictCell);
+ forNode(nodeIndex).makeTop();
+ break;
+
+ case PutByOffset:
+ forNode(node.child1()).filter(PredictCell);
+ break;
+
+ case CheckFunction:
+ forNode(node.child1()).filter(PredictFunction);
+ // FIXME: Should be able to propagate the fact that we know what the function is.
+ break;
+
+ case PutById:
+ case PutByIdDirect:
+ forNode(node.child1()).filter(PredictCell);
+ clobberStructures(nodeIndex);
+ break;
+
+ case GetGlobalVar:
+ forNode(nodeIndex).makeTop();
+ break;
+
+ case PutGlobalVar:
+ break;
+
+ case CheckHasInstance:
+ forNode(node.child1()).filter(PredictCell);
+ // Sadly, we don't propagate the fact that we've done CheckHasInstance
+ break;
+
+ case InstanceOf:
+ // Again, sadly, we don't propagate the fact that we've done InstanceOf
+ if (!(m_graph[node.child1()].prediction() & ~PredictCell) && !(forNode(node.child1()).m_type & ~PredictCell))
+ forNode(node.child1()).filter(PredictCell);
+ forNode(node.child3()).filter(PredictCell);
+ forNode(nodeIndex).set(PredictBoolean);
+ break;
+
+ case Phi:
+ case Flush:
+ break;
+
+ case Breakpoint:
+ break;
+
+ case Call:
+ case Construct:
+ case Resolve:
+ case ResolveBase:
+ case ResolveBaseStrictPut:
+ case ResolveGlobal:
+ clobberStructures(nodeIndex);
+ forNode(nodeIndex).makeTop();
+ break;
+
+ case ForceOSRExit:
+ m_isValid = false;
+ break;
+
+ case Phantom:
+ case InlineStart:
+ case Nop:
+ break;
+ }
+
+ return m_isValid;
+}
+
+inline void AbstractState::clobberStructures(NodeIndex nodeIndex)
+{
+ PROFILE(FLAG_FOR_STRUCTURE_CLOBBERING);
+ if (!m_haveStructures)
+ return;
+ for (size_t i = nodeIndex - m_block->begin + 1; i-- > 0;)
+ m_nodes[i].clobberStructures();
+ for (size_t i = 0; i < m_variables.numberOfArguments(); ++i)
+ m_variables.argument(i).clobberStructures();
+ for (size_t i = 0; i < m_variables.numberOfLocals(); ++i)
+ m_variables.local(i).clobberStructures();
+ m_haveStructures = false;
+}
+
+inline bool AbstractState::mergeStateAtTail(AbstractValue& destination, AbstractValue& inVariable, NodeIndex nodeIndex)
+{
+ if (nodeIndex == NoNode)
+ return false;
+
+ AbstractValue* source;
+
+ Node& node = m_graph[nodeIndex];
+ if (!node.refCount())
+ return false;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" It's live, node @%u.\n", nodeIndex);
+#endif
+
+ switch (node.op) {
+ case Phi:
+ case SetArgument:
+ case Flush:
+ // The block transfers the value from head to tail.
+ source = &inVariable;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Transferring from head to tail.\n");
+#endif
+ break;
+
+ case GetLocal:
+ // The block refines the value with additional speculations.
+ source = &forNode(nodeIndex);
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Refining.\n");
+#endif
+ break;
+
+ case SetLocal:
+ // The block sets the variable, and potentially refines it, both
+ // before and after setting it.
+ source = &forNode(node.child1());
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Setting.\n");
+#endif
+ break;
+
+ default:
+ ASSERT_NOT_REACHED();
+ source = 0;
+ break;
+ }
+
+ if (destination == *source) {
+ // Abstract execution did not change the output value of the variable, for this
+ // basic block, on this iteration.
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Not changed!\n");
+#endif
+ return false;
+ }
+
+ // Abstract execution reached a new conclusion about the speculations reached about
+ // this variable after execution of this basic block. Update the state, and return
+ // true to indicate that the fixpoint must go on!
+ destination = *source;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Changed!\n");
+#endif
+ return true;
+}
+
+inline bool AbstractState::merge(BasicBlock* from, BasicBlock* to)
+{
+ ASSERT(from->variablesAtTail.numberOfArguments() == to->variablesAtHead.numberOfArguments());
+ ASSERT(from->variablesAtTail.numberOfLocals() == to->variablesAtHead.numberOfLocals());
+
+ bool changed = false;
+
+ for (size_t argument = 0; argument < from->variablesAtTail.numberOfArguments(); ++argument)
+ changed |= mergeVariableBetweenBlocks(to->valuesAtHead.argument(argument), from->valuesAtTail.argument(argument), to->variablesAtHead.argument(argument), from->variablesAtTail.argument(argument));
+
+ for (size_t local = 0; local < from->variablesAtTail.numberOfLocals(); ++local)
+ changed |= mergeVariableBetweenBlocks(to->valuesAtHead.local(local), from->valuesAtTail.local(local), to->variablesAtHead.local(local), from->variablesAtTail.local(local));
+
+ if (!to->cfaHasVisited)
+ changed = true;
+
+ to->cfaShouldRevisit |= changed;
+
+ return changed;
+}
+
+inline bool AbstractState::mergeToSuccessors(Graph& graph, BasicBlock* basicBlock)
+{
+ PROFILE(FLAG_FOR_MERGE_TO_SUCCESSORS);
+
+ Node& terminal = graph[basicBlock->end - 1];
+
+ ASSERT(terminal.isTerminal());
+
+ switch (terminal.op) {
+ case Jump:
+ return merge(basicBlock, graph.m_blocks[terminal.takenBlockIndex()].get());
+
+ case Branch:
+ return merge(basicBlock, graph.m_blocks[terminal.takenBlockIndex()].get())
+ | merge(basicBlock, graph.m_blocks[terminal.notTakenBlockIndex()].get());
+
+ case Return:
+ case Throw:
+ case ThrowReferenceError:
+ return false;
+
+ default:
+ ASSERT_NOT_REACHED();
+ return false;
+ }
+}
+
+inline bool AbstractState::mergeVariableBetweenBlocks(AbstractValue& destination, AbstractValue& source, NodeIndex destinationNodeIndex, NodeIndex sourceNodeIndex)
+{
+ if (destinationNodeIndex == NoNode)
+ return false;
+
+ ASSERT_UNUSED(sourceNodeIndex, sourceNodeIndex != NoNode);
+
+ // FIXME: We could do some sparse conditional propagation here!
+
+ return destination.merge(source);
+}
+
+#ifndef NDEBUG
+void AbstractState::dump(FILE* out)
+{
+ bool first = true;
+ for (size_t i = 0; i < m_nodes.size(); ++i) {
+ if (m_nodes[i].isClear())
+ continue;
+ if (first)
+ first = false;
+ else
+ fprintf(out, " ");
+ fprintf(out, "@%lu:", static_cast<unsigned long>(i + m_block->begin));
+ m_nodes[i].dump(out);
+ }
+}
+#endif
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.h b/Source/JavaScriptCore/dfg/DFGAbstractState.h
new file mode 100644
index 000000000..337a4d0b4
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGAbstractState_h
+#define DFGAbstractState_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGAbstractValue.h"
+#include "DFGGraph.h"
+#include "DFGNode.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class CodeBlock;
+
+namespace DFG {
+
+struct BasicBlock;
+
+// This implements the notion of an abstract state for flow-sensitive intraprocedural
+// control flow analysis (CFA), with a focus on the elimination of redundant type checks.
+// It also implements most of the mechanisms of abstract interpretation that such an
+// analysis would use. This class should be used in two idioms:
+//
+// 1) Performing the CFA. In this case, AbstractState should be run over all basic
+// blocks repeatedly until convergence is reached. Convergence is defined by
+// endBasicBlock(AbstractState::MergeToSuccessors) returning false for all blocks.
+//
+// 2) Rematerializing the results of a previously executed CFA. In this case,
+// AbstractState should be run over whatever basic block you're interested in up
+// to the point of the node at which you'd like to interrogate the known type
+// of all other nodes. At this point it's safe to discard the AbstractState entirely,
+// call reset(), or to run it to the end of the basic block and call
+// endBasicBlock(AbstractState::DontMerge). The latter option is safest because
+// it performs some useful integrity checks.
+//
+// After the CFA is run, the inter-block state is saved at the heads and tails of all
+// basic blocks. This allows the intra-block state to be rematerialized by just
+// executing the CFA for that block. If you need to know inter-block state only, then
+// you only need to examine the BasicBlock::m_valuesAtHead or m_valuesAtTail fields.
+//
+// Running this analysis involves the following, modulo the inter-block state
+// merging and convergence fixpoint:
+//
+// AbstractState state(codeBlock, graph);
+// state.beginBasicBlock(basicBlock);
+// bool endReached = true;
+// for (NodeIndex idx = basicBlock.begin; idx < basicBlock.end; ++idx) {
+// if (!state.execute(idx))
+// break;
+// }
+// bool result = state.endBasicBlock(<either Merge or DontMerge>);
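+//
+// An illustrative sketch of the convergence fixpoint alluded to above: blocks
+// are revisited until no merge reports a change. This is only a sketch, reusing
+// the "state" and "graph" from the snippet above.
+//
+// AbstractState::initialize(graph);
+// bool changed;
+// do {
+//     changed = false;
+//     for (size_t i = 0; i < graph.m_blocks.size(); ++i) {
+//         BasicBlock* block = graph.m_blocks[i].get();
+//         if (!block->cfaShouldRevisit)
+//             continue;
+//         state.beginBasicBlock(block);
+//         for (NodeIndex idx = block->begin; idx < block->end; ++idx) {
+//             if (!state.execute(idx))
+//                 break;
+//         }
+//         changed |= state.endBasicBlock(AbstractState::MergeToSuccessors);
+//     }
+// } while (changed);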
+
+class AbstractState {
+public:
+ enum MergeMode {
+ // Don't merge the state in AbstractState with basic blocks.
+ DontMerge,
+
+ // Merge the state in AbstractState with the tail of the basic
+ // block being analyzed.
+ MergeToTail,
+
+ // Merge the state in AbstractState with the tail of the basic
+ // block, and with the heads of successor blocks.
+ MergeToSuccessors
+ };
+
+ AbstractState(CodeBlock*, Graph&);
+
+ ~AbstractState();
+
+ AbstractValue& forNode(NodeIndex nodeIndex)
+ {
+ return m_nodes[nodeIndex - m_block->begin];
+ }
+
+ // Call this before beginning CFA to initialize the abstract values of
+ // arguments, and to indicate which blocks should be listed for CFA
+ // execution.
+ static void initialize(Graph&);
+
+ // Start abstractly executing the given basic block. Initializes the
+ // notion of abstract state to what we believe it to be at the head
+ // of the basic block, according to the basic block's data structures.
+ // This method also sets cfaShouldRevisit to false.
+ void beginBasicBlock(BasicBlock*);
+
+ // Finish abstractly executing a basic block. If MergeToTail or
+ // MergeToSuccessors is passed, then this merges everything we have
+ // learned about how the state changes during this block's execution into
+ // the block's data structures. There are three return modes, depending
+ // on the value of mergeMode:
+ //
+ // DontMerge:
+ // Always returns false.
+ //
+ // MergeToTail:
+ // Returns true if the state of the block at the tail was changed.
+ // This means that you must call mergeToSuccessors(), and if that
+ // returns true, then you must revisit (at least) the successor
+ // blocks. False will always be returned if the block is terminal
+ // (i.e. ends in Throw or Return, or has a ForceOSRExit inside it).
+ //
+ // MergeToSuccessors:
+ //    Returns true if the state at the heads of this block's successors was
+ //    changed (or if a successor has not yet been visited).
+ // A true return means that you must revisit (at least) the successor
+ // blocks. This also sets cfaShouldRevisit to true for basic blocks
+ // that must be visited next.
+ bool endBasicBlock(MergeMode);
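+
+ // An illustrative sketch of how MergeToTail composes with mergeToSuccessors()
+ // per the contract above; "graph" and "block" are assumed to be whatever the
+ // caller has in scope.
+ //
+ //     bool tailChanged = state.endBasicBlock(AbstractState::MergeToTail);
+ //     if (tailChanged && AbstractState::mergeToSuccessors(graph, block)) {
+ //         // At least one successor's head state changed; those blocks have
+ //         // cfaShouldRevisit set and must be re-analyzed.
+ //     }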
+
+ // Reset the AbstractState. This throws away any results, and at this point
+ // you can safely call beginBasicBlock() on any basic block.
+ void reset();
+
+ // Abstractly executes the given node. The new abstract state is stored into an
+ // abstract register file stored in *this. Loads of local variables (that span
+ // basic blocks) interrogate the basic block's notion of the state at the head.
+ // Stores to local variables are handled in endBasicBlock(). This returns true
+ // if execution should continue past this node. Notably, it will return true
+ // for block terminals, so long as those terminals are not Return or variants
+ // of Throw.
+ bool execute(NodeIndex);
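+
+ // An illustrative sketch of idiom (2) from the class comment: rematerialize
+ // the intra-block state up to a node of interest. "interestingIndex" is a
+ // hypothetical NodeIndex inside "block".
+ //
+ //     state.beginBasicBlock(block);
+ //     for (NodeIndex idx = block->begin; idx < interestingIndex; ++idx)
+ //         state.execute(idx);
+ //     // forNode() now reports what is known about already-executed nodes,
+ //     // e.g. the operands of the node of interest.
+ //     AbstractValue& input = state.forNode(graph[interestingIndex].child1());
+ //     state.reset();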
+
+ // Is the execution state still valid? This will be false if execute() has
+ // returned false previously.
+ bool isValid() const { return m_isValid; }
+
+ // Merge the abstract state stored at the first block's tail into the second
+ // block's head. Returns true if the second block's state changed. If so,
+ // that block must be abstractly interpreted again. This also sets
+ // to->cfaShouldRevisit to true, if it returns true, or if to has not been
+ // visited yet.
+ static bool merge(BasicBlock* from, BasicBlock* to);
+
+ // Merge the abstract state stored at the block's tail into all of its
+ // successors. Returns true if any of the successors' states changed. Note
+ // that this is automatically called in endBasicBlock() if MergeMode is
+ // MergeToSuccessors.
+ static bool mergeToSuccessors(Graph&, BasicBlock*);
+
+#ifndef NDEBUG
+ void dump(FILE* out);
+#endif
+
+private:
+ void clobberStructures(NodeIndex);
+
+ bool mergeStateAtTail(AbstractValue& destination, AbstractValue& inVariable, NodeIndex);
+
+ static bool mergeVariableBetweenBlocks(AbstractValue& destination, AbstractValue& source, NodeIndex destinationNodeIndex, NodeIndex sourceNodeIndex);
+
+ CodeBlock* m_codeBlock;
+ Graph& m_graph;
+
+ Vector<AbstractValue, 32> m_nodes;
+ Operands<AbstractValue> m_variables;
+ BasicBlock* m_block;
+ bool m_haveStructures;
+
+ bool m_isValid;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGAbstractState_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractValue.h b/Source/JavaScriptCore/dfg/DFGAbstractValue.h
new file mode 100644
index 000000000..ee43b6c4a
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGAbstractValue.h
@@ -0,0 +1,488 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGAbstractValue_h
+#define DFGAbstractValue_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGStructureSet.h"
+#include "JSCell.h"
+#include "PredictedType.h"
+
+namespace JSC { namespace DFG {
+
+class StructureAbstractValue {
+public:
+ StructureAbstractValue()
+ : m_structure(0)
+ {
+ }
+
+ StructureAbstractValue(Structure* structure)
+ : m_structure(structure)
+ {
+ }
+
+ StructureAbstractValue(const StructureSet& set)
+ {
+ switch (set.size()) {
+ case 0:
+ m_structure = 0;
+ break;
+
+ case 1:
+ m_structure = set[0];
+ break;
+
+ default:
+ m_structure = topValue();
+ break;
+ }
+ }
+
+ void clear()
+ {
+ m_structure = 0;
+ }
+
+ void makeTop()
+ {
+ m_structure = topValue();
+ }
+
+ static StructureAbstractValue top()
+ {
+ StructureAbstractValue value;
+ value.makeTop();
+ return value;
+ }
+
+ void add(Structure* structure)
+ {
+ ASSERT(!contains(structure) && !isTop());
+ if (m_structure)
+ makeTop();
+ else
+ m_structure = structure;
+ }
+
+ bool addAll(const StructureSet& other)
+ {
+ if (isTop() || !other.size())
+ return false;
+ if (other.size() > 1) {
+ makeTop();
+ return true;
+ }
+ if (!m_structure) {
+ m_structure = other[0];
+ return true;
+ }
+ if (m_structure == other[0])
+ return false;
+ makeTop();
+ return true;
+ }
+
+ bool addAll(const StructureAbstractValue& other)
+ {
+ if (!other.m_structure)
+ return false;
+ if (isTop())
+ return false;
+ if (other.isTop()) {
+ makeTop();
+ return true;
+ }
+ if (m_structure) {
+ if (m_structure == other.m_structure)
+ return false;
+ makeTop();
+ return true;
+ }
+ m_structure = other.m_structure;
+ return true;
+ }
+
+ bool contains(Structure* structure) const
+ {
+ if (isTop())
+ return true;
+ if (m_structure == structure)
+ return true;
+ return false;
+ }
+
+ bool isSubsetOf(const StructureSet& other) const
+ {
+ if (isTop())
+ return false;
+ if (!m_structure)
+ return true;
+ return other.contains(m_structure);
+ }
+
+ bool doesNotContainAnyOtherThan(Structure* structure) const
+ {
+ if (isTop())
+ return false;
+ if (!m_structure)
+ return true;
+ return m_structure == structure;
+ }
+
+ bool isSupersetOf(const StructureSet& other) const
+ {
+ if (isTop())
+ return true;
+ if (!other.size())
+ return true;
+ if (other.size() > 1)
+ return false;
+ return m_structure == other[0];
+ }
+
+ bool isSubsetOf(const StructureAbstractValue& other) const
+ {
+ if (other.isTop())
+ return true;
+ if (isTop())
+ return false;
+ if (m_structure) {
+ if (other.m_structure)
+ return m_structure == other.m_structure;
+ return false;
+ }
+ return true;
+ }
+
+ bool isSupersetOf(const StructureAbstractValue& other) const
+ {
+ return other.isSubsetOf(*this);
+ }
+
+ void filter(const StructureSet& other)
+ {
+ if (!m_structure)
+ return;
+
+ if (isTop()) {
+ switch (other.size()) {
+ case 0:
+ m_structure = 0;
+ return;
+
+ case 1:
+ m_structure = other[0];
+ return;
+
+ default:
+ return;
+ }
+ }
+
+ if (other.contains(m_structure))
+ return;
+
+ m_structure = 0;
+ }
+
+ void filter(const StructureAbstractValue& other)
+ {
+ if (isTop()) {
+ m_structure = other.m_structure;
+ return;
+ }
+ if (m_structure == other.m_structure)
+ return;
+ if (other.isTop())
+ return;
+ m_structure = 0;
+ }
+
+ void filter(PredictedType other)
+ {
+ if (!(other & PredictCell)) {
+ clear();
+ return;
+ }
+
+ if (isClearOrTop())
+ return;
+
+ if (!(predictionFromStructure(m_structure) & other))
+ m_structure = 0;
+ }
+
+ bool isClear() const
+ {
+ return !m_structure;
+ }
+
+ bool isTop() const { return m_structure == topValue(); }
+
+ bool isClearOrTop() const { return m_structure <= topValue(); }
+ bool isNeitherClearNorTop() const { return !isClearOrTop(); }
+
+ size_t size() const
+ {
+ ASSERT(!isTop());
+ return !!m_structure;
+ }
+
+ Structure* at(size_t i) const
+ {
+ ASSERT(!isTop());
+ ASSERT(m_structure);
+ ASSERT_UNUSED(i, !i);
+ return m_structure;
+ }
+
+ Structure* operator[](size_t i) const
+ {
+ return at(i);
+ }
+
+ Structure* last() const
+ {
+ return at(0);
+ }
+
+ PredictedType predictionFromStructures() const
+ {
+ if (isTop())
+ return PredictCell;
+ if (isClear())
+ return PredictNone;
+ return predictionFromStructure(m_structure);
+ }
+
+ bool operator==(const StructureAbstractValue& other) const
+ {
+ return m_structure == other.m_structure;
+ }
+
+#ifndef NDEBUG
+ void dump(FILE* out) const
+ {
+ if (isTop()) {
+ fprintf(out, "TOP");
+ return;
+ }
+
+ fprintf(out, "[");
+ if (m_structure)
+ fprintf(out, "%p", m_structure);
+ fprintf(out, "]");
+ }
+#endif
+
+private:
+ static Structure* topValue() { return reinterpret_cast<Structure*>(1); }
+
+ // This can only remember one structure at a time.
+ Structure* m_structure;
+};
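+
+// An illustrative sketch of the lattice behaviour of the single-structure
+// abstraction above, which has three kinds of states: clear, exactly one
+// structure, and TOP. "structureA" and "structureB" are hypothetical, distinct
+// Structure pointers.
+//
+//     StructureAbstractValue value;                     // clear
+//     value.add(structureA);                            // exactly [structureA]
+//     value.addAll(StructureAbstractValue(structureB)); // two won't fit -> TOP
+//     value.filter(StructureAbstractValue(structureA)); // TOP narrowed to [structureA]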
+
+struct AbstractValue {
+ AbstractValue()
+ : m_type(PredictNone)
+ {
+ }
+
+ void clear()
+ {
+ m_type = PredictNone;
+ m_structure.clear();
+ checkConsistency();
+ }
+
+ bool isClear()
+ {
+ return m_type == PredictNone && m_structure.isClear();
+ }
+
+ void makeTop()
+ {
+ m_type = PredictTop;
+ m_structure.makeTop();
+ checkConsistency();
+ }
+
+ void clobberStructures()
+ {
+ if (m_type & PredictCell)
+ m_structure.makeTop();
+ else
+ ASSERT(m_structure.isClear());
+ checkConsistency();
+ }
+
+ bool isTop() const
+ {
+ return m_type == PredictTop && m_structure.isTop();
+ }
+
+ static AbstractValue top()
+ {
+ AbstractValue result;
+ result.makeTop();
+ return result;
+ }
+
+ void set(JSValue value)
+ {
+ m_structure.clear();
+ if (value.isCell())
+ m_structure.add(value.asCell()->structure());
+
+ m_type = predictionFromValue(value);
+
+ checkConsistency();
+ }
+
+ void set(Structure* structure)
+ {
+ m_structure.clear();
+ m_structure.add(structure);
+
+ m_type = predictionFromStructure(structure);
+
+ checkConsistency();
+ }
+
+ void set(PredictedType type)
+ {
+ if (type & PredictCell)
+ m_structure.makeTop();
+ else
+ m_structure.clear();
+ m_type = type;
+ checkConsistency();
+ }
+
+ bool operator==(const AbstractValue& other) const
+ {
+ return m_type == other.m_type && m_structure == other.m_structure;
+ }
+
+ bool merge(const AbstractValue& other)
+ {
+ bool result = mergePrediction(m_type, other.m_type) | m_structure.addAll(other.m_structure);
+ checkConsistency();
+ return result;
+ }
+
+ void merge(PredictedType type)
+ {
+ mergePrediction(m_type, type);
+
+ if (type & PredictCell)
+ m_structure.makeTop();
+
+ checkConsistency();
+ }
+
+ void filter(const StructureSet& other)
+ {
+ m_type &= other.predictionFromStructures();
+ m_structure.filter(other);
+
+ // It's possible that prior to the above two statements we had (Foo, TOP), where
+ // Foo is a PredictedType that is disjoint with the passed StructureSet. In that
+ // case, we will now have (None, [someStructure]). In general, we need to make
+ // sure that new information gleaned from the PredictedType needs to be fed back
+ // into the information gleaned from the StructureSet.
+ m_structure.filter(m_type);
+ checkConsistency();
+ }
+
+ void filter(PredictedType type)
+ {
+ if (type == PredictTop)
+ return;
+ m_type &= type;
+
+ // It's possible that prior to this filter() call we had, say, (Final, TOP), and
+ // the passed type is Array. At this point we'll have (None, TOP). The best way
+ // to ensure that the structure filtering does the right thing is to filter on
+ // the new type (None) rather than the one passed (Array).
+ m_structure.filter(m_type);
+ checkConsistency();
+ }
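+
+ // An illustrative sketch of the scenario described above: filtering by a
+ // disjoint type clears the structure abstraction too.
+ //
+ //     AbstractValue value;
+ //     value.set(PredictFinalObject); // (FinalObject, TOP)
+ //     value.filter(PredictArray);    // type drops to None, so the structure
+ //                                    // re-filter clears TOP as well
+ //     ASSERT(value.m_type == PredictNone && value.m_structure.isClear());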
+
+ bool validate(JSValue value) const
+ {
+ if (isTop())
+ return true;
+
+ if (mergePredictions(m_type, predictionFromValue(value)) != m_type)
+ return false;
+
+ if (m_structure.isTop())
+ return true;
+
+ if (value.isCell()) {
+ ASSERT(m_type & PredictCell);
+ return m_structure.contains(value.asCell()->structure());
+ }
+
+ return true;
+ }
+
+ void checkConsistency() const
+ {
+ if (!(m_type & PredictCell))
+ ASSERT(m_structure.isClear());
+
+ // Note that it's possible for a prediction like (Final, []). This really means that
+ // the value is bottom and that any code that uses the value is unreachable. But
+ // we don't want to get pedantic about this as it would only increase the computational
+ // complexity of the code.
+ }
+
+#ifndef NDEBUG
+ void dump(FILE* out) const
+ {
+ fprintf(out, "(%s, ", predictionToString(m_type));
+ m_structure.dump(out);
+ fprintf(out, ")");
+ }
+#endif
+
+ StructureAbstractValue m_structure;
+ PredictedType m_type;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGAbstractValue_h
+
+
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
new file mode 100644
index 000000000..969101e87
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGAssemblyHelpers.h"
+
+#if ENABLE(DFG_JIT)
+
+namespace JSC { namespace DFG {
+
+const double AssemblyHelpers::twoToThe32 = (double)0x100000000ull;
+
+Vector<BytecodeAndMachineOffset>& AssemblyHelpers::decodedCodeMapFor(CodeBlock* codeBlock)
+{
+ ASSERT(codeBlock == codeBlock->baselineVersion());
+ ASSERT(codeBlock->getJITType() == JITCode::BaselineJIT);
+ ASSERT(codeBlock->jitCodeMap());
+
+ std::pair<HashMap<CodeBlock*, Vector<BytecodeAndMachineOffset> >::iterator, bool> result = m_decodedCodeMaps.add(codeBlock, Vector<BytecodeAndMachineOffset>());
+
+ if (result.second)
+ codeBlock->jitCodeMap()->decode(result.first->second);
+
+ return result.first->second;
+}
+
+#if ENABLE(SAMPLING_FLAGS)
+void AssemblyHelpers::setSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
+}
+
+void AssemblyHelpers::clearSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
+}
+#endif
+
+#if DFG_ENABLE(JIT_ASSERT)
+#if USE(JSVALUE64)
+void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
+{
+#if CPU(X86_64)
+ Jump checkInt32 = branchPtr(BelowOrEqual, gpr, TrustedImmPtr(reinterpret_cast<void*>(static_cast<uintptr_t>(0xFFFFFFFFu))));
+ breakpoint();
+ checkInt32.link(this);
+#else
+ UNUSED_PARAM(gpr);
+#endif
+}
+
+void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
+{
+ Jump checkJSInt32 = branchPtr(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
+ breakpoint();
+ checkJSInt32.link(this);
+}
+
+void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
+{
+ Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
+ breakpoint();
+ checkJSNumber.link(this);
+}
+
+void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
+{
+ Jump checkJSInt32 = branchPtr(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
+ Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
+ checkJSInt32.link(this);
+ breakpoint();
+ checkJSNumber.link(this);
+}
+
+void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
+{
+ Jump checkCell = branchTestPtr(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister);
+ breakpoint();
+ checkCell.link(this);
+}
+#elif USE(JSVALUE32_64)
+void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
+{
+ UNUSED_PARAM(gpr);
+}
+
+void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
+{
+ Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
+ breakpoint();
+ checkJSInt32.link(this);
+}
+
+void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
+{
+ Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
+ Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
+ breakpoint();
+ checkJSInt32.link(this);
+ checkJSDouble.link(this);
+}
+
+void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
+{
+ Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
+ breakpoint();
+ checkJSDouble.link(this);
+}
+
+void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
+{
+ Jump checkCell = branch32(Equal, gpr, TrustedImm32(JSValue::CellTag));
+ breakpoint();
+ checkCell.link(this);
+}
+#endif // USE(JSVALUE32_64)
+#endif // DFG_ENABLE(JIT_ASSERT)
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
new file mode 100644
index 000000000..a9dec5062
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGAssemblyHelpers_h
+#define DFGAssemblyHelpers_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+#include "DFGFPRInfo.h"
+#include "DFGGPRInfo.h"
+#include "DFGNode.h"
+#include "JSGlobalData.h"
+#include "MacroAssembler.h"
+
+namespace JSC { namespace DFG {
+
+#ifndef NDEBUG
+typedef void (*V_DFGDebugOperation_EP)(ExecState*, void*);
+#endif
+
+class AssemblyHelpers : public MacroAssembler {
+public:
+ AssemblyHelpers(JSGlobalData* globalData, CodeBlock* codeBlock)
+ : m_globalData(globalData)
+ , m_codeBlock(codeBlock)
+ , m_baselineCodeBlock(codeBlock->baselineVersion())
+ {
+ ASSERT(m_codeBlock);
+ ASSERT(m_baselineCodeBlock);
+ ASSERT(!m_baselineCodeBlock->alternative());
+ ASSERT(m_baselineCodeBlock->getJITType() == JITCode::BaselineJIT);
+ }
+
+ CodeBlock* codeBlock() { return m_codeBlock; }
+ JSGlobalData* globalData() { return m_globalData; }
+ AssemblerType_T& assembler() { return m_assembler; }
+
+#if CPU(X86_64) || CPU(X86)
+ void preserveReturnAddressAfterCall(GPRReg reg)
+ {
+ pop(reg);
+ }
+
+ void restoreReturnAddressBeforeReturn(GPRReg reg)
+ {
+ push(reg);
+ }
+
+ void restoreReturnAddressBeforeReturn(Address address)
+ {
+ push(address);
+ }
+#endif // CPU(X86_64) || CPU(X86)
+
+#if CPU(ARM)
+ ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
+ {
+ move(linkRegister, reg);
+ }
+
+ ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
+ {
+ move(reg, linkRegister);
+ }
+
+ ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
+ {
+ loadPtr(address, linkRegister);
+ }
+#endif
+
+ void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, GPRReg to)
+ {
+ loadPtr(Address(GPRInfo::callFrameRegister, entry * sizeof(Register)), to);
+ }
+ void emitPutToCallFrameHeader(GPRReg from, RegisterFile::CallFrameHeaderEntry entry)
+ {
+ storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
+ }
+
+ void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
+ {
+ storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
+ }
+
+ Jump branchIfNotCell(GPRReg reg)
+ {
+#if USE(JSVALUE64)
+ return branchTestPtr(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
+#else
+ return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
+#endif
+ }
+
+ static Address addressForGlobalVar(GPRReg global, int32_t varNumber)
+ {
+ return Address(global, varNumber * sizeof(Register));
+ }
+
+ static Address tagForGlobalVar(GPRReg global, int32_t varNumber)
+ {
+ return Address(global, varNumber * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ }
+
+ static Address payloadForGlobalVar(GPRReg global, int32_t varNumber)
+ {
+ return Address(global, varNumber * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+
+ static Address addressFor(VirtualRegister virtualRegister)
+ {
+ return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register));
+ }
+
+ static Address tagFor(VirtualRegister virtualRegister)
+ {
+ return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ }
+
+ static Address payloadFor(VirtualRegister virtualRegister)
+ {
+ return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+
+ Jump branchIfNotObject(GPRReg structureReg)
+ {
+ return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+ }
+
+#ifndef NDEBUG
+ // Add a debug call. This call has no effect on JIT code execution state.
+ void debugCall(V_DFGDebugOperation_EP function, void* argument)
+ {
+ EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_globalData->scratchBufferForSize(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters)));
+
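+ // Save every GPR and FPR into the scratch buffer so the call below cannot disturb register state.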
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
+ storePtr(GPRInfo::toRegister(i), buffer + i);
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
+ storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
+ }
+#if CPU(X86_64) || CPU(ARM_THUMB2)
+ move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+#elif CPU(X86)
+ poke(GPRInfo::callFrameRegister, 0);
+ poke(TrustedImmPtr(argument), 1);
+#else
+#error "DFG JIT not supported on this platform."
+#endif
+ move(TrustedImmPtr(reinterpret_cast<void*>(function)), GPRInfo::regT0);
+ call(GPRInfo::regT0);
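+ // Restore FPRs and GPRs from the scratch buffer.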
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
+ loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
+ }
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
+ loadPtr(buffer + i, GPRInfo::toRegister(i));
+ }
+#endif
+
+ // These methods JIT-generate dynamic, debug-only checks, akin to ASSERTs.
+#if DFG_ENABLE(JIT_ASSERT)
+ void jitAssertIsInt32(GPRReg);
+ void jitAssertIsJSInt32(GPRReg);
+ void jitAssertIsJSNumber(GPRReg);
+ void jitAssertIsJSDouble(GPRReg);
+ void jitAssertIsCell(GPRReg);
+#else
+ void jitAssertIsInt32(GPRReg) { }
+ void jitAssertIsJSInt32(GPRReg) { }
+ void jitAssertIsJSNumber(GPRReg) { }
+ void jitAssertIsJSDouble(GPRReg) { }
+ void jitAssertIsCell(GPRReg) { }
+#endif
+
+ // These methods convert between raw doubles and doubles boxed as JSValues.
+#if USE(JSVALUE64)
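+ // On 64-bit, a double is boxed by offsetting its bit pattern (via the tagTypeNumber register) so
+ // that it cannot be confused with a cell pointer or an immediate; unboxing reverses the offset.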
+ GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
+ {
+ moveDoubleToPtr(fpr, gpr);
+ subPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ jitAssertIsJSDouble(gpr);
+ return gpr;
+ }
+ FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
+ {
+ jitAssertIsJSDouble(gpr);
+ addPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ movePtrToDouble(gpr, fpr);
+ return fpr;
+ }
+#endif
+
+#if USE(JSVALUE32_64) && CPU(X86)
+ void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
+ {
+ movePackedToInt32(fpr, payloadGPR);
+ rshiftPacked(TrustedImm32(32), fpr);
+ movePackedToInt32(fpr, tagGPR);
+ }
+ void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
+ {
+ jitAssertIsJSDouble(tagGPR);
+ moveInt32ToPacked(payloadGPR, fpr);
+ moveInt32ToPacked(tagGPR, scratchFPR);
+ lshiftPacked(TrustedImm32(32), scratchFPR);
+ orPacked(scratchFPR, fpr);
+ }
+#endif
+
+#if USE(JSVALUE32_64) && CPU(ARM)
+ void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
+ {
+ m_assembler.vmov(payloadGPR, tagGPR, fpr);
+ }
+ void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
+ {
+ jitAssertIsJSDouble(tagGPR);
+ UNUSED_PARAM(scratchFPR);
+ m_assembler.vmov(fpr, payloadGPR, tagGPR);
+ }
+#endif
+
+#if ENABLE(SAMPLING_COUNTERS)
+ static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
+ {
+ jit.add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
+ }
+ void emitCount(AbstractSamplingCounter& counter, int32_t increment = 1)
+ {
+ add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
+ }
+#endif
+
+#if ENABLE(SAMPLING_FLAGS)
+ void setSamplingFlag(int32_t);
+ void clearSamplingFlag(int32_t flag);
+#endif
+
+ JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
+ {
+ return codeBlock()->globalObjectFor(codeOrigin);
+ }
+
+ JSObject* globalThisObjectFor(CodeOrigin codeOrigin)
+ {
+ JSGlobalObject* object = globalObjectFor(codeOrigin);
+ return object->methodTable()->toThisObject(object, 0);
+ }
+
+ bool strictModeFor(CodeOrigin codeOrigin)
+ {
+ if (!codeOrigin.inlineCallFrame)
+ return codeBlock()->isStrictMode();
+ return codeOrigin.inlineCallFrame->callee->jsExecutable()->isStrictMode();
+ }
+
+ static CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
+ {
+ if (codeOrigin.inlineCallFrame) {
+ ExecutableBase* executable = codeOrigin.inlineCallFrame->executable.get();
+ ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
+ return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(codeOrigin.inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
+ }
+ return baselineCodeBlock;
+ }
+
+ CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
+ {
+ return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock());
+ }
+
+ CodeBlock* baselineCodeBlock()
+ {
+ return m_baselineCodeBlock;
+ }
+
+ Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);
+
+ static const double twoToThe32;
+
+protected:
+ JSGlobalData* m_globalData;
+ CodeBlock* m_codeBlock;
+ CodeBlock* m_baselineCodeBlock;
+
+ HashMap<CodeBlock*, Vector<BytecodeAndMachineOffset> > m_decodedCodeMaps;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGAssemblyHelpers_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGBasicBlock.h b/Source/JavaScriptCore/dfg/DFGBasicBlock.h
new file mode 100644
index 000000000..9d464bdc2
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGBasicBlock.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGBasicBlock_h
+#define DFGBasicBlock_h
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGAbstractValue.h"
+#include "DFGNode.h"
+#include "DFGOperands.h"
+#include <wtf/OwnPtr.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace DFG {
+
+typedef Vector<BlockIndex, 2> PredecessorList;
+
+struct BasicBlock {
+ BasicBlock(unsigned bytecodeBegin, NodeIndex begin, unsigned numArguments, unsigned numLocals)
+ : bytecodeBegin(bytecodeBegin)
+ , begin(begin)
+ , end(NoNode)
+ , isOSRTarget(false)
+ , cfaHasVisited(false)
+ , cfaShouldRevisit(false)
+#if !ASSERT_DISABLED
+ , isLinked(false)
+#endif
+ , isReachable(false)
+ , variablesAtHead(numArguments, numLocals)
+ , variablesAtTail(numArguments, numLocals)
+ , valuesAtHead(numArguments, numLocals)
+ , valuesAtTail(numArguments, numLocals)
+ {
+ }
+
+ void ensureLocals(unsigned newNumLocals)
+ {
+ variablesAtHead.ensureLocals(newNumLocals);
+ variablesAtTail.ensureLocals(newNumLocals);
+ valuesAtHead.ensureLocals(newNumLocals);
+ valuesAtTail.ensureLocals(newNumLocals);
+ }
+
+ // This value is used internally for block linking and OSR entry. It is mostly meaningless
+ // for other purposes due to inlining.
+ unsigned bytecodeBegin;
+
+ NodeIndex begin;
+ NodeIndex end;
+ bool isOSRTarget;
+ bool cfaHasVisited;
+ bool cfaShouldRevisit;
+#if !ASSERT_DISABLED
+ bool isLinked;
+#endif
+ bool isReachable;
+
+ PredecessorList m_predecessors;
+
+ Operands<NodeIndex, NodeIndexTraits> variablesAtHead;
+ Operands<NodeIndex, NodeIndexTraits> variablesAtTail;
+
+ Operands<AbstractValue> valuesAtHead;
+ Operands<AbstractValue> valuesAtTail;
+};
+
+struct UnlinkedBlock {
+ BlockIndex m_blockIndex;
+ bool m_needsNormalLinking;
+ bool m_needsEarlyReturnLinking;
+
+ UnlinkedBlock() { }
+
+ explicit UnlinkedBlock(BlockIndex blockIndex)
+ : m_blockIndex(blockIndex)
+ , m_needsNormalLinking(true)
+ , m_needsEarlyReturnLinking(false)
+ {
+ }
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGBasicBlock_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeCache.h b/Source/JavaScriptCore/dfg/DFGByteCodeCache.h
new file mode 100644
index 000000000..fd3b5147f
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeCache.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGByteCodeCache_h
+#define DFGByteCodeCache_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+#include "Executable.h"
+#include "JSFunction.h"
+#include <wtf/HashMap.h>
+
+namespace JSC { namespace DFG {
+
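+// Key type for the bytecode cache below: a function executable together with its
+// specialization kind (call vs. construct).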
+class CodeBlockKey {
+public:
+ CodeBlockKey()
+ : m_executable(0)
+ , m_kind(CodeForCall) // CodeForCall = empty value
+ {
+ }
+
+ CodeBlockKey(WTF::HashTableDeletedValueType)
+ : m_executable(0)
+ , m_kind(CodeForConstruct) // CodeForConstruct = deleted value
+ {
+ }
+
+ CodeBlockKey(FunctionExecutable* executable, CodeSpecializationKind kind)
+ : m_executable(executable)
+ , m_kind(kind)
+ {
+ }
+
+ bool operator==(const CodeBlockKey& other) const
+ {
+ return m_executable == other.m_executable
+ && m_kind == other.m_kind;
+ }
+
+ unsigned hash() const
+ {
+ return WTF::PtrHash<FunctionExecutable*>::hash(m_executable) ^ static_cast<unsigned>(m_kind);
+ }
+
+ FunctionExecutable* executable() const { return m_executable; }
+ CodeSpecializationKind kind() const { return m_kind; }
+
+ bool isHashTableDeletedValue() const
+ {
+ return !m_executable && m_kind == CodeForConstruct;
+ }
+
+private:
+ FunctionExecutable* m_executable;
+ CodeSpecializationKind m_kind;
+};
+
+struct CodeBlockKeyHash {
+ static unsigned hash(const CodeBlockKey& key) { return key.hash(); }
+ static bool equal(const CodeBlockKey& a, const CodeBlockKey& b) { return a == b; }
+
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::DFG
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::DFG::CodeBlockKey> {
+ typedef JSC::DFG::CodeBlockKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::DFG::CodeBlockKey> : SimpleClassHashTraits<JSC::DFG::CodeBlockKey> { };
+
+} // namespace WTF
+
+namespace JSC { namespace DFG {
+
+struct ByteCodeCacheValue {
+ FunctionCodeBlock* codeBlock;
+ bool owned;
+ bool oldValueOfShouldDiscardBytecode;
+
+ // All uses of this struct initialize everything manually. But gcc isn't
+ // smart enough to see that, so this constructor is just here to make the
+ // compiler happy.
+ ByteCodeCacheValue()
+ : codeBlock(0)
+ , owned(false)
+ , oldValueOfShouldDiscardBytecode(false)
+ {
+ }
+};
+
+template<bool (*filterFunction)(CodeBlock*, CodeSpecializationKind)>
+class ByteCodeCache {
+public:
+ typedef HashMap<CodeBlockKey, ByteCodeCacheValue> Map;
+
+ ByteCodeCache() { }
+
+ ~ByteCodeCache()
+ {
+ Map::iterator begin = m_map.begin();
+ Map::iterator end = m_map.end();
+ for (Map::iterator iter = begin; iter != end; ++iter) {
+ if (!iter->second.codeBlock)
+ continue;
+ if (iter->second.owned) {
+ delete iter->second.codeBlock;
+ continue;
+ }
+ iter->second.codeBlock->m_shouldDiscardBytecode = iter->second.oldValueOfShouldDiscardBytecode;
+ }
+ }
+
+ CodeBlock* get(const CodeBlockKey& key, ScopeChainNode* scope)
+ {
+ Map::iterator iter = m_map.find(key);
+ if (iter != m_map.end())
+ return iter->second.codeBlock;
+
+ ByteCodeCacheValue value;
+
+ // First see if there is already a parsed code block that still has some
+ // bytecode in it.
+ value.codeBlock = key.executable()->codeBlockWithBytecodeFor(key.kind());
+ if (value.codeBlock) {
+ value.owned = false;
+ value.oldValueOfShouldDiscardBytecode = value.codeBlock->m_shouldDiscardBytecode;
+ } else {
+ // Nope, so try to parse one.
+ JSObject* exception;
+ value.owned = true;
+ value.codeBlock = key.executable()->produceCodeBlockFor(scope, OptimizingCompilation, key.kind(), exception).leakPtr();
+ }
+
+ // Check if there is any reason to reject this from our cache. If so, then
+ // poison it.
+ if (!!value.codeBlock && !filterFunction(value.codeBlock, key.kind())) {
+ if (value.owned)
+ delete value.codeBlock;
+ value.codeBlock = 0;
+ }
+
+ // If we're about to return a code block, make sure that we're not going
+ // to be discarding its bytecode if a GC were to happen during DFG
+ // compilation. That's unlikely, but it's good to thoroughly enjoy this
+ // kind of paranoia.
+ if (!!value.codeBlock)
+ value.codeBlock->m_shouldDiscardBytecode = false;
+
+ m_map.add(key, value);
+
+ return value.codeBlock;
+ }
+
+private:
+ Map m_map;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGByteCodeCache_h
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
new file mode 100644
index 000000000..3b1f8c860
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -0,0 +1,2650 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGByteCodeParser.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGByteCodeCache.h"
+#include "DFGCapabilities.h"
+#include "CodeBlock.h"
+#include <wtf/HashMap.h>
+#include <wtf/MathExtras.h>
+
+namespace JSC { namespace DFG {
+
+// === ByteCodeParser ===
+//
+// This class is used to compile the dataflow graph from a CodeBlock.
+class ByteCodeParser {
+public:
+ ByteCodeParser(JSGlobalData* globalData, CodeBlock* codeBlock, CodeBlock* profiledBlock, Graph& graph)
+ : m_globalData(globalData)
+ , m_codeBlock(codeBlock)
+ , m_profiledBlock(profiledBlock)
+ , m_graph(graph)
+ , m_currentBlock(0)
+ , m_currentIndex(0)
+ , m_constantUndefined(UINT_MAX)
+ , m_constantNull(UINT_MAX)
+ , m_constantNaN(UINT_MAX)
+ , m_constant1(UINT_MAX)
+ , m_constants(codeBlock->numberOfConstantRegisters())
+ , m_numArguments(codeBlock->m_numParameters)
+ , m_numLocals(codeBlock->m_numCalleeRegisters)
+ , m_preservedVars(codeBlock->m_numVars)
+ , m_parameterSlots(0)
+ , m_numPassedVarArgs(0)
+ , m_globalResolveNumber(0)
+ , m_inlineStackTop(0)
+ , m_haveBuiltOperandMaps(false)
+ {
+ ASSERT(m_profiledBlock);
+
+ for (int i = 0; i < codeBlock->m_numVars; ++i)
+ m_preservedVars.set(i);
+ }
+
+ // Parse a full CodeBlock of bytecode.
+ bool parse();
+
+private:
+ // Just parse from m_currentIndex to the end of the current CodeBlock.
+ void parseCodeBlock();
+
+ // Helper for min and max.
+ bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
+
+ // Handle calls. This resolves issues surrounding inlining and intrinsics.
+ void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
+ void emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind);
+ // Handle inlining. Return true if it succeeded, false if we need to plant a call.
+ bool handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction*, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
+ // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
+ bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction);
+ // Prepare to parse a block.
+ void prepareToParseBlock();
+ // Parse a single basic block of bytecode instructions.
+ bool parseBlock(unsigned limit);
+ // Find reachable code and set up predecessor links in the graph's BasicBlocks.
+ void determineReachability();
+ // Enqueue a block onto the worklist, if necessary.
+ void handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex, BlockIndex successor);
+ // Link block successors.
+ void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
+ void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
+ // Link GetLocal & SetLocal nodes, to ensure live values are generated.
+ enum PhiStackType {
+ LocalPhiStack,
+ ArgumentPhiStack
+ };
+ template<PhiStackType stackType>
+ void processPhiStack();
+ // Add spill locations to nodes.
+ void allocateVirtualRegisters();
+
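+ // Create a new VariableAccessData for the given operand; it is appended to the graph's
+ // list and a pointer to it is returned.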
+ VariableAccessData* newVariableAccessData(int operand)
+ {
+ ASSERT(operand < FirstConstantRegisterIndex);
+
+ m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand)));
+ return &m_graph.m_variableAccessData.last();
+ }
+
+ // Get/Set the operands/result of a bytecode instruction.
+ NodeIndex getDirect(int operand)
+ {
+ // Is this a constant?
+ if (operand >= FirstConstantRegisterIndex) {
+ unsigned constant = operand - FirstConstantRegisterIndex;
+ ASSERT(constant < m_constants.size());
+ return getJSConstant(constant);
+ }
+
+ // Is this an argument?
+ if (operandIsArgument(operand))
+ return getArgument(operand);
+
+ // Must be a local.
+ return getLocal((unsigned)operand);
+ }
+ NodeIndex get(int operand)
+ {
+ return getDirect(m_inlineStackTop->remapOperand(operand));
+ }
+ void setDirect(int operand, NodeIndex value)
+ {
+ // Is this an argument?
+ if (operandIsArgument(operand)) {
+ setArgument(operand, value);
+ return;
+ }
+
+ // Must be a local.
+ setLocal((unsigned)operand, value);
+ }
+ void set(int operand, NodeIndex value)
+ {
+ setDirect(m_inlineStackTop->remapOperand(operand), value);
+ }
+
+ // Used in implementing get/set, above, where the operand is a local variable.
+ NodeIndex getLocal(unsigned operand)
+ {
+ NodeIndex nodeIndex = m_currentBlock->variablesAtTail.local(operand);
+
+ if (nodeIndex != NoNode) {
+ Node* nodePtr = &m_graph[nodeIndex];
+ if (nodePtr->op == Flush) {
+ // Two possibilities: either the block wants the local to be live
+ // but has not loaded its value, or it has loaded its value, in
+ // which case we're done.
+ Node& flushChild = m_graph[nodePtr->child1()];
+ if (flushChild.op == Phi) {
+ VariableAccessData* variableAccessData = flushChild.variableAccessData();
+ nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1());
+ m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
+ return nodeIndex;
+ }
+ nodePtr = &flushChild;
+ }
+ if (nodePtr->op == GetLocal)
+ return nodeIndex;
+ ASSERT(nodePtr->op == SetLocal);
+ return nodePtr->child1();
+ }
+
+ // Check for reads of temporaries from prior blocks, and
+ // expand m_preservedVars to cover these.
+ m_preservedVars.set(operand);
+
+ VariableAccessData* variableAccessData = newVariableAccessData(operand);
+
+ NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
+ m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
+ nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
+ m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
+
+ m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);
+
+ return nodeIndex;
+ }
+ void setLocal(unsigned operand, NodeIndex value)
+ {
+ m_currentBlock->variablesAtTail.local(operand) = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value);
+ }
+
+ // Used in implementing get/set, above, where the operand is an argument.
+ NodeIndex getArgument(unsigned operand)
+ {
+ unsigned argument = operandToArgument(operand);
+ ASSERT(argument < m_numArguments);
+
+ NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument);
+
+ if (nodeIndex != NoNode) {
+ Node* nodePtr = &m_graph[nodeIndex];
+ if (nodePtr->op == Flush) {
+ // Two possibilities: either the block wants the local to be live
+ // but has not loaded its value, or it has loaded its value, in
+ // which case we're done.
+ Node& flushChild = m_graph[nodePtr->child1()];
+ if (flushChild.op == Phi) {
+ VariableAccessData* variableAccessData = flushChild.variableAccessData();
+ nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1());
+ m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
+ return nodeIndex;
+ }
+ nodePtr = &flushChild;
+ }
+ if (nodePtr->op == SetArgument) {
+ // We're getting an argument in the first basic block; link
+ // the GetLocal to the SetArgument.
+ ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
+ nodeIndex = addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex);
+ m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
+ return nodeIndex;
+ }
+
+ if (nodePtr->op == GetLocal)
+ return nodeIndex;
+
+ ASSERT(nodePtr->op == SetLocal);
+ return nodePtr->child1();
+ }
+
+ VariableAccessData* variableAccessData = newVariableAccessData(operand);
+
+ NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
+ m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
+ nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
+ m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
+
+ m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);
+
+ return nodeIndex;
+ }
+ void setArgument(int operand, NodeIndex value)
+ {
+ unsigned argument = operandToArgument(operand);
+ ASSERT(argument < m_numArguments);
+
+ m_currentBlock->variablesAtTail.argument(argument) = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value);
+ }
+
+ void flush(int operand)
+ {
+ // FIXME: This should check if the same operand had already been flushed to
+ // some other local variable.
+
+ operand = m_inlineStackTop->remapOperand(operand);
+
+ ASSERT(operand < FirstConstantRegisterIndex);
+
+ NodeIndex nodeIndex;
+ int index;
+ if (operandIsArgument(operand)) {
+ index = operandToArgument(operand);
+ nodeIndex = m_currentBlock->variablesAtTail.argument(index);
+ } else {
+ index = operand;
+ nodeIndex = m_currentBlock->variablesAtTail.local(index);
+ m_preservedVars.set(operand);
+ }
+
+ if (nodeIndex != NoNode) {
+ Node& node = m_graph[nodeIndex];
+ if (node.op == Flush || node.op == SetArgument) {
+ // If a local has already been flushed, or if it's an argument in the
+ // first basic block, then there is really no need to flush it. In fact
+ // emitting a Flush instruction could just confuse things, since the
+ // getArgument() code assumes that we never see a Flush of a SetArgument.
+ return;
+ }
+
+ addToGraph(Flush, OpInfo(node.variableAccessData()), nodeIndex);
+ return;
+ }
+
+ VariableAccessData* variableAccessData = newVariableAccessData(operand);
+ NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
+ nodeIndex = addToGraph(Flush, OpInfo(variableAccessData), phi);
+ if (operandIsArgument(operand)) {
+ m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
+ m_currentBlock->variablesAtTail.argument(index) = nodeIndex;
+ m_currentBlock->variablesAtHead.setArgumentFirstTime(index, nodeIndex);
+ } else {
+ m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
+ m_currentBlock->variablesAtTail.local(index) = nodeIndex;
+ m_currentBlock->variablesAtHead.setLocalFirstTime(index, nodeIndex);
+ }
+ }
+
+ // Get an operand, and perform a ToInt32/ToNumber conversion on it.
+ NodeIndex getToInt32(int operand)
+ {
+ return toInt32(get(operand));
+ }
+ NodeIndex getToNumber(int operand)
+ {
+ return toNumber(get(operand));
+ }
+
+ // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
+ NodeIndex toInt32(NodeIndex index)
+ {
+ Node& node = m_graph[index];
+
+ if (node.hasInt32Result())
+ return index;
+
+ if (node.op == UInt32ToNumber)
+ return node.child1();
+
+ // Check for numeric constants boxed as JSValues.
+ if (node.op == JSConstant) {
+ JSValue v = valueOfJSConstant(index);
+ if (v.isInt32())
+ return getJSConstant(node.constantNumber());
+ // FIXME: We could convert the double ToInteger at this point.
+ }
+
+ return addToGraph(ValueToInt32, index);
+ }
+
+ // Perform an ES5 ToNumber operation - returns a node of type NodeResultDouble.
+ NodeIndex toNumber(NodeIndex index)
+ {
+ Node& node = m_graph[index];
+
+ if (node.hasNumberResult())
+ return index;
+
+ if (node.op == JSConstant) {
+ JSValue v = valueOfJSConstant(index);
+ if (v.isNumber())
+ return getJSConstant(node.constantNumber());
+ }
+
+ return addToGraph(ValueToNumber, OpInfo(NodeUseBottom), index);
+ }
+
+ NodeIndex getJSConstant(unsigned constant)
+ {
+ NodeIndex index = m_constants[constant].asJSValue;
+ if (index != NoNode)
+ return index;
+
+ NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant));
+ m_constants[constant].asJSValue = resultIndex;
+ return resultIndex;
+ }
+
+ // Helper functions to get/set the this value.
+ NodeIndex getThis()
+ {
+ return get(m_inlineStackTop->m_codeBlock->thisRegister());
+ }
+ void setThis(NodeIndex value)
+ {
+ set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
+ }
+
+ // Convenience methods for checking nodes for constants.
+ bool isJSConstant(NodeIndex index)
+ {
+ return m_graph[index].op == JSConstant;
+ }
+ bool isInt32Constant(NodeIndex nodeIndex)
+ {
+ return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
+ }
+ bool isSmallInt32Constant(NodeIndex nodeIndex)
+ {
+ if (!isJSConstant(nodeIndex))
+ return false;
+ JSValue value = valueOfJSConstant(nodeIndex);
+ if (!value.isInt32())
+ return false;
+ int32_t intValue = value.asInt32();
+ return intValue >= -5 && intValue <= 5;
+ }
+ // Convenience methods for getting constant values.
+ JSValue valueOfJSConstant(NodeIndex index)
+ {
+ ASSERT(isJSConstant(index));
+ return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber());
+ }
+ int32_t valueOfInt32Constant(NodeIndex nodeIndex)
+ {
+ ASSERT(isInt32Constant(nodeIndex));
+ return valueOfJSConstant(nodeIndex).asInt32();
+ }
+
+ // This method returns a JSConstant with the value 'undefined'.
+ NodeIndex constantUndefined()
+ {
+ // Has m_constantUndefined been set up yet?
+ if (m_constantUndefined == UINT_MAX) {
+ // Search the constant pool for undefined; if we find it, we can just reuse it!
+ unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
+ for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
+ JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
+ if (testMe.isUndefined())
+ return getJSConstant(m_constantUndefined);
+ }
+
+ // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
+ ASSERT(m_constants.size() == numberOfConstants);
+ m_codeBlock->addConstant(jsUndefined());
+ m_constants.append(ConstantRecord());
+ ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
+ }
+
+ // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
+ ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
+ return getJSConstant(m_constantUndefined);
+ }
+
+ // This method returns a JSConstant with the value 'null'.
+ NodeIndex constantNull()
+ {
+ // Has m_constantNull been set up yet?
+ if (m_constantNull == UINT_MAX) {
+ // Search the constant pool for null; if we find it, we can just reuse it!
+ unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
+ for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
+ JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
+ if (testMe.isNull())
+ return getJSConstant(m_constantNull);
+ }
+
+ // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
+ ASSERT(m_constants.size() == numberOfConstants);
+ m_codeBlock->addConstant(jsNull());
+ m_constants.append(ConstantRecord());
+ ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
+ }
+
+ // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
+ ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
+ return getJSConstant(m_constantNull);
+ }
+
+ // This method returns a JSConstant with the integer value 1.
+ NodeIndex one()
+ {
+ // Has m_constant1 been set up yet?
+ if (m_constant1 == UINT_MAX) {
+ // Search the constant pool for the value 1; if we find it, we can just reuse it!
+ unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
+ for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
+ JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
+ if (testMe.isInt32() && testMe.asInt32() == 1)
+ return getJSConstant(m_constant1);
+ }
+
+ // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
+ ASSERT(m_constants.size() == numberOfConstants);
+ m_codeBlock->addConstant(jsNumber(1));
+ m_constants.append(ConstantRecord());
+ ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
+ }
+
+ // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
+ ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
+ ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
+ return getJSConstant(m_constant1);
+ }
+
+ // This method returns a JSConstant with the value NaN.
+ NodeIndex constantNaN()
+ {
+ JSValue nan = jsNaN();
+
+ // Has m_constantNaN been set up yet?
+ if (m_constantNaN == UINT_MAX) {
+ // Search the constant pool for the value NaN; if we find it, we can just reuse it!
+ unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
+ for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
+ JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
+ if (JSValue::encode(testMe) == JSValue::encode(nan))
+ return getJSConstant(m_constantNaN);
+ }
+
+ // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
+ ASSERT(m_constants.size() == numberOfConstants);
+ m_codeBlock->addConstant(nan);
+ m_constants.append(ConstantRecord());
+ ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
+ }
+
+ // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
+ ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
+ ASSERT(isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
+ return getJSConstant(m_constantNaN);
+ }
+
+ NodeIndex cellConstant(JSCell* cell)
+ {
+ pair<HashMap<JSCell*, NodeIndex>::iterator, bool> iter = m_cellConstantNodes.add(cell, NoNode);
+ if (iter.second)
+ iter.first->second = addToGraph(WeakJSConstant, OpInfo(cell));
+
+ return iter.first->second;
+ }
+
+ CodeOrigin currentCodeOrigin()
+ {
+ return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame);
+ }
+
+ // These methods create a node and add it to the graph. If nodes of this type are
+ // 'mustGenerate' then the node will implicitly be ref'ed to ensure generation.
+ NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
+ {
+ NodeIndex resultIndex = (NodeIndex)m_graph.size();
+ m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3));
+
+ if (op & NodeMustGenerate)
+ m_graph.ref(resultIndex);
+ return resultIndex;
+ }
+ NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
+ {
+ NodeIndex resultIndex = (NodeIndex)m_graph.size();
+ m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3));
+
+ if (op & NodeMustGenerate)
+ m_graph.ref(resultIndex);
+ return resultIndex;
+ }
+ NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
+ {
+ NodeIndex resultIndex = (NodeIndex)m_graph.size();
+ m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3));
+
+ if (op & NodeMustGenerate)
+ m_graph.ref(resultIndex);
+ return resultIndex;
+ }
+
+ NodeIndex addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
+ {
+ NodeIndex resultIndex = (NodeIndex)m_graph.size();
+ m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs));
+
+ m_numPassedVarArgs = 0;
+
+ if (op & NodeMustGenerate)
+ m_graph.ref(resultIndex);
+ return resultIndex;
+ }
+ void addVarArgChild(NodeIndex child)
+ {
+ m_graph.m_varArgChildren.append(child);
+ m_numPassedVarArgs++;
+ }
+
+ NodeIndex addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
+ {
+ Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
+
+ PredictedType prediction = PredictNone;
+ if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
+ prediction = getPrediction(m_graph.size(), m_currentIndex + OPCODE_LENGTH(op_call));
+
+ addVarArgChild(get(currentInstruction[1].u.operand));
+ int argCount = currentInstruction[2].u.operand;
+ if (RegisterFile::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
+ m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;
+
+ int registerOffset = currentInstruction[3].u.operand;
+ int dummyThisArgument = op == Call ? 0 : 1;
+ for (int i = 0 + dummyThisArgument; i < argCount; ++i)
+ addVarArgChild(get(registerOffset + argumentToOperand(i)));
+
+ NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
+ if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
+ set(putInstruction[1].u.operand, call);
+ return call;
+ }
+
+ PredictedType getPredictionWithoutOSRExit(NodeIndex nodeIndex, unsigned bytecodeIndex)
+ {
+ UNUSED_PARAM(nodeIndex);
+
+ ValueProfile* profile = m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(bytecodeIndex);
+ ASSERT(profile);
+ PredictedType prediction = profile->computeUpdatedPrediction();
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
+#endif
+
+ return prediction;
+ }
+
+ PredictedType getPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
+ {
+ PredictedType prediction = getPredictionWithoutOSRExit(nodeIndex, bytecodeIndex);
+
+ if (prediction == PredictNone) {
+ // We have no information about what values this node generates. Give up
+ // on executing this code, since we're likely to do more damage than good.
+ addToGraph(ForceOSRExit);
+ }
+
+ return prediction;
+ }
+
+ PredictedType getPredictionWithoutOSRExit()
+ {
+ return getPredictionWithoutOSRExit(m_graph.size(), m_currentIndex);
+ }
+
+ PredictedType getPrediction()
+ {
+ return getPrediction(m_graph.size(), m_currentIndex);
+ }
+
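+ // Consult the baseline profiling counters and OSR exit history for this bytecode; if the fast
+ // integer path has failed before, flag the node as possibly overflowing or producing negative zero.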
+ NodeIndex makeSafe(NodeIndex nodeIndex)
+ {
+ if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+ return nodeIndex;
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Making %s @%u safe at bc#%u because slow-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
+#endif
+
+ switch (m_graph[nodeIndex].op) {
+ case UInt32ToNumber:
+ case ArithAdd:
+ case ArithSub:
+ case ValueAdd:
+ case ArithMod: // For ArithMod, "MayOverflow" means we tried to divide by zero, or we saw a double.
+ m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow);
+ break;
+
+ case ArithMul:
+ if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
+#endif
+ m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);
+ } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Making ArithMul @%u take faster slow case.\n", nodeIndex);
+#endif
+ m_graph[nodeIndex].mergeArithNodeFlags(NodeMayNegZero);
+ }
+ break;
+
+ default:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+
+ return nodeIndex;
+ }
+
+ NodeIndex makeDivSafe(NodeIndex nodeIndex)
+ {
+ ASSERT(m_graph[nodeIndex].op == ArithDiv);
+
+ // The main slow case counter for op_div in the old JIT counts only when
+ // the operands are not numbers. We don't care about that since we already
+ // have speculations in place that take care of that separately. We only
+ // care about when the outcome of the division is not an integer, which
+ // is what the special fast case counter tells us.
+
+ if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSpecialFastCase(m_currentIndex)
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+ return nodeIndex;
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
+#endif
+
+ // FIXME: It might be possible to make this more granular. The DFG certainly can
+ // distinguish between negative zero and overflow in its exit profiles.
+ m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);
+
+ return nodeIndex;
+ }
+
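+ // Verify that each structure in a recorded prototype chain is still the structure of the
+ // corresponding prototype; direct transitions skip the check.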
+ bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
+ {
+ if (direct)
+ return true;
+
+ if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
+ return false;
+
+ for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
+ if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
+ return false;
+ }
+
+ return true;
+ }
+
+ void buildOperandMapsIfNecessary();
+
+ JSGlobalData* m_globalData;
+ CodeBlock* m_codeBlock;
+ CodeBlock* m_profiledBlock;
+ Graph& m_graph;
+
+ // The current block being generated.
+ BasicBlock* m_currentBlock;
+ // The bytecode index of the current instruction being generated.
+ unsigned m_currentIndex;
+
+ // We use these values during code generation, and to avoid the need for
+ // special handling we make sure they are available as constants in the
+ // CodeBlock's constant pool. These variables are initialized to
+ // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
+ // constant pool, as necessary.
+ unsigned m_constantUndefined;
+ unsigned m_constantNull;
+ unsigned m_constantNaN;
+ unsigned m_constant1;
+ HashMap<JSCell*, unsigned> m_cellConstants;
+ HashMap<JSCell*, NodeIndex> m_cellConstantNodes;
+
+ // A constant in the constant pool may be represented by more than one
+ // node in the graph, depending on the context in which it is being used.
+ struct ConstantRecord {
+ ConstantRecord()
+ : asInt32(NoNode)
+ , asNumeric(NoNode)
+ , asJSValue(NoNode)
+ {
+ }
+
+ NodeIndex asInt32;
+ NodeIndex asNumeric;
+ NodeIndex asJSValue;
+ };
+
+ // Track the index of the node whose result is the current value for every
+ // register value in the bytecode - argument, local, and temporary.
+ Vector<ConstantRecord, 16> m_constants;
+
+ // The number of arguments passed to the function.
+ unsigned m_numArguments;
+ // The number of locals (vars + temporaries) used in the function.
+ unsigned m_numLocals;
+ // The set of registers we need to preserve across BasicBlock boundaries;
+ // typically equal to the set of vars, but we expand this to cover all
+ // temporaries that persist across blocks (due to ?:, &&, ||, etc.).
+ BitVector m_preservedVars;
+ // The number of slots (in units of sizeof(Register)) that we need to
+ // preallocate for calls emanating from this frame. This includes the
+ // size of the CallFrame only if this is not a leaf function (i.e.
+ // this is 0 if and only if this function is a leaf).
+ unsigned m_parameterSlots;
+ // The number of var args passed to the next var arg node.
+ unsigned m_numPassedVarArgs;
+ // The index in the global resolve info.
+ unsigned m_globalResolveNumber;
+
+ struct PhiStackEntry {
+ PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo)
+ : m_block(block)
+ , m_phi(phi)
+ , m_varNo(varNo)
+ {
+ }
+
+ BasicBlock* m_block;
+ NodeIndex m_phi;
+ unsigned m_varNo;
+ };
+ Vector<PhiStackEntry, 16> m_argumentPhiStack;
+ Vector<PhiStackEntry, 16> m_localPhiStack;
+
+ struct InlineStackEntry {
+ ByteCodeParser* m_byteCodeParser;
+
+ CodeBlock* m_codeBlock;
+ CodeBlock* m_profiledBlock;
+ InlineCallFrame* m_inlineCallFrame;
+ VirtualRegister m_calleeVR; // absolute virtual register, not relative to call frame
+
+ ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
+
+ QueryableExitProfile m_exitProfile;
+
+ // Remapping of identifier and constant numbers from the code block being
+ // inlined (inline callee) to the code block that we're inlining into
+ // (the machine code block, which is the transitive, though not necessarily
+ // direct, caller).
+ Vector<unsigned> m_identifierRemap;
+ Vector<unsigned> m_constantRemap;
+
+ // Blocks introduced by this code block, which need successor linking.
+ // May include up to one basic block that includes the continuation after
+ // the callsite in the caller. These must be appended in the order that they
+ // are created, but their bytecodeBegin values need not be in order as they
+ // are ignored.
+ Vector<UnlinkedBlock> m_unlinkedBlocks;
+
+ // Potential block linking targets. Must be sorted by bytecodeBegin, and
+ // cannot have two blocks that have the same bytecodeBegin. For this very
+ // reason, this is not equivalent to
+ Vector<BlockIndex> m_blockLinkingTargets;
+
+ // If the callsite's basic block was split into two, then this will be
+ // the head of the callsite block. It needs its successors linked to the
+ // m_unlinkedBlocks, but not the other way around: there's no way for
+ // any blocks in m_unlinkedBlocks to jump back into this block.
+ BlockIndex m_callsiteBlockHead;
+
+ // Does the callsite block head need linking? This is typically true
+ // but will be false for the machine code block's inline stack entry
+ // (since that one is not inlined) and for cases where an inline callee
+ // did the linking for us.
+ bool m_callsiteBlockHeadNeedsLinking;
+
+ VirtualRegister m_returnValue;
+
+ // Did we see any returns? We need to handle the (uncommon but necessary)
+ // case where a procedure that does not return was inlined.
+ bool m_didReturn;
+
+ // Did we have any early returns?
+ bool m_didEarlyReturn;
+
+ InlineStackEntry* m_caller;
+
+ InlineStackEntry(ByteCodeParser*, CodeBlock*, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind);
+
+ ~InlineStackEntry()
+ {
+ m_byteCodeParser->m_inlineStackTop = m_caller;
+ }
+
+ int remapOperand(int operand) const
+ {
+ if (!m_inlineCallFrame)
+ return operand;
+
+ if (operand >= FirstConstantRegisterIndex) {
+ int result = m_constantRemap[operand - FirstConstantRegisterIndex];
+ ASSERT(result >= FirstConstantRegisterIndex);
+ return result;
+ }
+
+ return operand + m_inlineCallFrame->stackOffset;
+ }
+ };
+
+ InlineStackEntry* m_inlineStackTop;
+
+ // Have we built operand maps? We initialize them lazily, and only when doing
+ // inlining.
+ bool m_haveBuiltOperandMaps;
+ // Mapping between identifier names and numbers.
+ IdentifierMap m_identifierMap;
+ // Mapping between values and constant numbers.
+ JSValueMap m_jsValueMap;
+
+ // Cache of code blocks that we've generated bytecode for.
+ ByteCodeCache<canInlineFunctionFor> m_codeBlockCache;
+};
+
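+// Dispatch-loop helpers: advance the bytecode index past the current opcode, then either keep
+// parsing the block or end it.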
+#define NEXT_OPCODE(name) \
+ m_currentIndex += OPCODE_LENGTH(name); \
+ continue
+
+#define LAST_OPCODE(name) \
+ m_currentIndex += OPCODE_LENGTH(name); \
+ return shouldContinueParsing
+
+void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
+{
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
+
+ NodeIndex callTarget = get(currentInstruction[1].u.operand);
+ enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Slow case count for call at @%lu bc#%u: %u/%u; exit profile: %d.\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_profiledBlock->executionEntryCount(), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+#endif
+
+ if (m_graph.isFunctionConstant(m_codeBlock, callTarget))
+ callType = ConstantFunction;
+ else if (!!m_inlineStackTop->m_profiledBlock->getCallLinkInfo(m_currentIndex).lastSeenCallee
+ && !m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
+ callType = LinkedFunction;
+ else
+ callType = UnknownFunction;
+ if (callType != UnknownFunction) {
+ int argumentCountIncludingThis = currentInstruction[2].u.operand;
+ int registerOffset = currentInstruction[3].u.operand;
+
+ // Do we have a result?
+ bool usesResult = false;
+ int resultOperand = 0; // make compiler happy
+ unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
+ Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
+ PredictedType prediction = PredictNone;
+ if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
+ resultOperand = putInstruction[1].u.operand;
+ usesResult = true;
+ prediction = getPrediction(m_graph.size(), nextOffset);
+ nextOffset += OPCODE_LENGTH(op_call_put_result);
+ }
+ JSFunction* expectedFunction;
+ Intrinsic intrinsic;
+ bool certainAboutExpectedFunction;
+ if (callType == ConstantFunction) {
+ expectedFunction = m_graph.valueOfFunctionConstant(m_codeBlock, callTarget);
+ intrinsic = expectedFunction->executable()->intrinsicFor(kind);
+ certainAboutExpectedFunction = true;
+ } else {
+ ASSERT(callType == LinkedFunction);
+ expectedFunction = m_inlineStackTop->m_profiledBlock->getCallLinkInfo(m_currentIndex).lastSeenCallee.get();
+ intrinsic = expectedFunction->executable()->intrinsicFor(kind);
+ certainAboutExpectedFunction = false;
+ }
+
+ if (intrinsic != NoIntrinsic) {
+ if (!certainAboutExpectedFunction)
+ emitFunctionCheck(expectedFunction, callTarget, registerOffset, kind);
+
+ if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
+ if (!certainAboutExpectedFunction) {
+ // Need to keep the call target alive for OSR. We could easily optimize this out if we wanted
+ // to, since at this point we know that the call target is a constant. It's just that OSR isn't
+ // smart enough to figure that out, since it doesn't understand CheckFunction.
+ addToGraph(Phantom, callTarget);
+ }
+
+ return;
+ }
+ } else if (handleInlining(usesResult, currentInstruction[1].u.operand, callTarget, resultOperand, certainAboutExpectedFunction, expectedFunction, registerOffset, argumentCountIncludingThis, nextOffset, kind))
+ return;
+ }
+
+ addCall(interpreter, currentInstruction, op);
+}
+
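+// Plant a CheckFunction node on the call target so that speculative execution exits
+// if the callee is not the function we expected. For calls (as opposed to constructs)
+// the 'this' argument is passed as an extra child of the check.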
+void ByteCodeParser::emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind kind)
+{
+ NodeIndex thisArgument;
+ if (kind == CodeForCall)
+ thisArgument = get(registerOffset + argumentToOperand(0));
+ else
+ thisArgument = NoNode;
+ addToGraph(CheckFunction, OpInfo(expectedFunction), callTarget, thisArgument);
+}
+
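+// Attempt to inline a call. Returns false if the callee is unsuitable (host function,
+// arity mismatch, recursion, depth or size limits, or no bytecode available); otherwise
+// this flushes the arguments, pushes an InlineStackEntry, parses the callee's bytecode
+// in place, links any blocks the callee created, and leaves the parser positioned to
+// continue the caller either in the current block or in a fresh continuation block.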
+bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction* expectedFunction, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
+{
+ // First, the really simple checks: do we have an actual JS function?
+ if (!expectedFunction)
+ return false;
+ if (expectedFunction->isHostFunction())
+ return false;
+
+ FunctionExecutable* executable = expectedFunction->jsExecutable();
+
+ // Does the number of arguments we're passing match the arity of the target? We could
+ // inline arity check failures, but for simplicity we currently don't.
+ if (static_cast<int>(executable->parameterCount()) + 1 != argumentCountIncludingThis)
+ return false;
+
+    // Have we exceeded the inline stack depth, or are we trying to inline a recursive call?
+    // If either of these is detected, then don't inline.
+ unsigned depth = 0;
+ for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
+ ++depth;
+ if (depth >= Options::maximumInliningDepth)
+ return false; // Depth exceeded.
+
+ if (entry->executable() == executable)
+ return false; // Recursion detected.
+ }
+
+ // Does the code block's size match the heuristics/requirements for being
+ // an inline candidate?
+ CodeBlock* profiledBlock = executable->profiledCodeBlockFor(kind);
+ if (!mightInlineFunctionFor(profiledBlock, kind))
+ return false;
+
+    // If we get here then it looks like we should definitely inline this code. Proceed
+    // with generating (or fetching cached) bytecode for the callee, so that we can then
+    // parse that bytecode.
+ CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
+ if (!codeBlock)
+ return false;
+
+ ASSERT(canInlineFunctionFor(codeBlock, kind));
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Inlining executable %p.\n", executable);
+#endif
+
+ // Now we know without a doubt that we are committed to inlining. So begin the process
+ // by checking the callee (if necessary) and making sure that arguments and the callee
+ // are flushed.
+ if (!certainAboutExpectedFunction)
+ emitFunctionCheck(expectedFunction, callTargetNodeIndex, registerOffset, kind);
+
+ // FIXME: Don't flush constants!
+
+ for (int i = 1; i < argumentCountIncludingThis; ++i)
+ flush(registerOffset + argumentToOperand(i));
+
+ int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;
+
+ // Make sure that the area used by the call frame is reserved.
+ for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart + 1;)
+ m_preservedVars.set(m_inlineStackTop->remapOperand(arg));
+
+ // Make sure that we have enough locals.
+ unsigned newNumLocals = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
+ if (newNumLocals > m_numLocals) {
+ m_numLocals = newNumLocals;
+ for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
+ m_graph.m_blocks[i]->ensureLocals(newNumLocals);
+ }
+
+ InlineStackEntry inlineStackEntry(this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1, (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction, (VirtualRegister)m_inlineStackTop->remapOperand(usesResult ? resultOperand : InvalidVirtualRegister), (VirtualRegister)inlineCallFrameStart, kind);
+
+ // This is where the actual inlining really happens.
+ unsigned oldIndex = m_currentIndex;
+ m_currentIndex = 0;
+
+ addToGraph(InlineStart);
+
+ parseCodeBlock();
+
+ m_currentIndex = oldIndex;
+
+ // If the inlined code created some new basic blocks, then we have linking to do.
+ if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {
+
+ ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
+ if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
+ linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
+ else
+ ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);
+
+ // It's possible that the callsite block head is not owned by the caller.
+ if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
+ // It's definitely owned by the caller, because the caller created new blocks.
+ // Assert that this all adds up.
+ ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
+ ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
+ inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
+ } else {
+ // It's definitely not owned by the caller. Tell the caller that he does not
+ // need to link his callsite block head, because we did it for him.
+ ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
+ ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
+ inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
+ }
+
+ linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
+ } else
+ ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
+
+ // If there was a return, but no early returns, then we're done. We allow parsing of
+ // the caller to continue in whatever basic block we're in right now.
+ if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
+ BasicBlock* lastBlock = m_graph.m_blocks.last().get();
+ ASSERT(lastBlock->begin == lastBlock->end || !m_graph.last().isTerminal());
+
+ // If we created new blocks then the last block needs linking, but in the
+ // caller. It doesn't need to be linked to, but it needs outgoing links.
+ if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
+#endif
+ // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
+ // for release builds because this block will never serve as a potential target
+ // in the linker's binary search.
+ lastBlock->bytecodeBegin = m_currentIndex;
+ m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
+ }
+
+ m_currentBlock = m_graph.m_blocks.last().get();
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
+#endif
+ return true;
+ }
+
+    // If we get to this point then all blocks must end in some sort of terminal.
+ ASSERT(m_graph.last().isTerminal());
+
+ // Link the early returns to the basic block we're about to create.
+ for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
+ if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
+ continue;
+ BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
+ ASSERT(!block->isLinked);
+ Node& node = m_graph[block->end - 1];
+ ASSERT(node.op == Jump);
+ ASSERT(node.takenBlockIndex() == NoBlock);
+ node.setTakenBlockIndex(m_graph.m_blocks.size());
+ inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
+#if !ASSERT_DISABLED
+ block->isLinked = true;
+#endif
+ }
+
+ // Need to create a new basic block for the continuation at the caller.
+ OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_graph.size(), m_numArguments, m_numLocals));
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Creating inline epilogue basic block %p, #%lu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+#endif
+ m_currentBlock = block.get();
+ ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
+ m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
+ m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
+ m_graph.m_blocks.append(block.release());
+ prepareToParseBlock();
+
+ // At this point we return and continue to generate code for the caller, but
+ // in the new basic block.
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Done inlining executable %p, continuing code generation in new block.\n", executable);
+#endif
+ return true;
+}
+
+bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
+{
+ if (!usesResult)
+ return true;
+
+ if (argumentCountIncludingThis == 1) { // Math.min()
+ set(resultOperand, constantNaN());
+ return true;
+ }
+
+ if (argumentCountIncludingThis == 2) { // Math.min(x)
+ set(resultOperand, getToNumber(registerOffset + argumentToOperand(1)));
+ return true;
+ }
+
+ if (argumentCountIncludingThis == 3) { // Math.min(x, y)
+ set(resultOperand, addToGraph(op, OpInfo(NodeUseBottom), getToNumber(registerOffset + argumentToOperand(1)), getToNumber(registerOffset + argumentToOperand(2))));
+ return true;
+ }
+
+ // Don't handle >=3 arguments for now.
+ return false;
+}
+
+// FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
+// they need to perform the ToNumber conversion, which can have side-effects.
+bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction)
+{
+ switch (intrinsic) {
+ case AbsIntrinsic: {
+ if (!usesResult) {
+ // There is no such thing as executing abs for effect, so this
+ // is dead code.
+ return true;
+ }
+
+ if (argumentCountIncludingThis == 1) { // Math.abs()
+ set(resultOperand, constantNaN());
+ return true;
+ }
+
+ if (!MacroAssembler::supportsFloatingPointAbs())
+ return false;
+
+ NodeIndex nodeIndex = addToGraph(ArithAbs, OpInfo(NodeUseBottom), getToNumber(registerOffset + argumentToOperand(1)));
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+ m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow);
+ set(resultOperand, nodeIndex);
+ return true;
+ }
+
+ case MinIntrinsic:
+ return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
+
+ case MaxIntrinsic:
+ return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
+
+ case SqrtIntrinsic: {
+ if (!usesResult)
+ return true;
+
+ if (argumentCountIncludingThis == 1) { // Math.sqrt()
+ set(resultOperand, constantNaN());
+ return true;
+ }
+
+ if (!MacroAssembler::supportsFloatingPointSqrt())
+ return false;
+
+ set(resultOperand, addToGraph(ArithSqrt, getToNumber(registerOffset + argumentToOperand(1))));
+ return true;
+ }
+
+ case ArrayPushIntrinsic: {
+ if (argumentCountIncludingThis != 2)
+ return false;
+
+ NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
+ if (usesResult)
+ set(resultOperand, arrayPush);
+
+ return true;
+ }
+
+ case ArrayPopIntrinsic: {
+ if (argumentCountIncludingThis != 1)
+ return false;
+
+ NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
+ if (usesResult)
+ set(resultOperand, arrayPop);
+ return true;
+ }
+
+ case CharCodeAtIntrinsic: {
+ if (argumentCountIncludingThis != 2)
+ return false;
+
+ int thisOperand = registerOffset + argumentToOperand(0);
+ if (!(m_graph[get(thisOperand)].prediction() & PredictString))
+ return false;
+
+ int indexOperand = registerOffset + argumentToOperand(1);
+ NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
+ NodeIndex charCode = addToGraph(StringCharCodeAt, get(thisOperand), getToInt32(indexOperand), storage);
+
+ if (usesResult)
+ set(resultOperand, charCode);
+ return true;
+ }
+
+ case CharAtIntrinsic: {
+ if (argumentCountIncludingThis != 2)
+ return false;
+
+ int thisOperand = registerOffset + argumentToOperand(0);
+ if (!(m_graph[get(thisOperand)].prediction() & PredictString))
+ return false;
+
+ int indexOperand = registerOffset + argumentToOperand(1);
+ NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
+ NodeIndex charCode = addToGraph(StringCharAt, get(thisOperand), getToInt32(indexOperand), storage);
+
+ if (usesResult)
+ set(resultOperand, charCode);
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
+
+void ByteCodeParser::prepareToParseBlock()
+{
+ for (unsigned i = 0; i < m_constants.size(); ++i)
+ m_constants[i] = ConstantRecord();
+ m_cellConstantNodes.clear();
+}
+
+bool ByteCodeParser::parseBlock(unsigned limit)
+{
+ bool shouldContinueParsing = true;
+
+ Interpreter* interpreter = m_globalData->interpreter;
+ Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
+ unsigned blockBegin = m_currentIndex;
+
+ // If we are the first basic block, introduce markers for arguments. This allows
+ // us to track if a use of an argument may use the actual argument passed, as
+ // opposed to using a value we set explicitly.
+ if (m_currentBlock == m_graph.m_blocks[0].get() && !m_inlineStackTop->m_inlineCallFrame) {
+ m_graph.m_arguments.resize(m_numArguments);
+ for (unsigned argument = 0; argument < m_numArguments; ++argument) {
+ NodeIndex setArgument = addToGraph(SetArgument, OpInfo(newVariableAccessData(argumentToOperand(argument))));
+ m_graph.m_arguments[argument] = setArgument;
+ m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, setArgument);
+ m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
+ }
+ }
+
+ while (true) {
+ // Don't extend over jump destinations.
+ if (m_currentIndex == limit) {
+            // Ordinarily we want to plant a jump. But refuse to do this if the block is
+            // empty. This is a special case for inlining, which might otherwise create
+            // empty blocks. When parseBlock() returns with an empty block, that block gets
+            // repurposed instead of a new one being created. Note that this logic relies on
+            // every bytecode resulting in one or more nodes; that holds for every opcode
+            // except op_loop_hint, which emits a Phantom precisely to keep it true.
+ if (m_currentBlock->begin != m_graph.size())
+ addToGraph(Jump, OpInfo(m_currentIndex));
+ else {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
+#endif
+ }
+ return shouldContinueParsing;
+ }
+
+ // Switch on the current bytecode opcode.
+ Instruction* currentInstruction = instructionsBegin + m_currentIndex;
+ OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
+ switch (opcodeID) {
+
+ // === Function entry opcodes ===
+
+ case op_enter:
+ // Initialize all locals to undefined.
+ for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
+ set(i, constantUndefined());
+ NEXT_OPCODE(op_enter);
+
+ case op_convert_this: {
+ NodeIndex op1 = getThis();
+ if (m_graph[op1].op == ConvertThis)
+ setThis(op1);
+ else
+ setThis(addToGraph(ConvertThis, op1));
+ NEXT_OPCODE(op_convert_this);
+ }
+
+ case op_create_this: {
+ NodeIndex op1 = get(currentInstruction[2].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(CreateThis, op1));
+ NEXT_OPCODE(op_create_this);
+ }
+
+ case op_new_object: {
+ set(currentInstruction[1].u.operand, addToGraph(NewObject));
+ NEXT_OPCODE(op_new_object);
+ }
+
+ case op_new_array: {
+ int startOperand = currentInstruction[2].u.operand;
+ int numOperands = currentInstruction[3].u.operand;
+ for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
+ addVarArgChild(get(operandIdx));
+ set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(0), OpInfo(0)));
+ NEXT_OPCODE(op_new_array);
+ }
+
+ case op_new_array_buffer: {
+ int startConstant = currentInstruction[2].u.operand;
+ int numConstants = currentInstruction[3].u.operand;
+ set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(startConstant), OpInfo(numConstants)));
+ NEXT_OPCODE(op_new_array_buffer);
+ }
+
+ case op_new_regexp: {
+ set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
+ NEXT_OPCODE(op_new_regexp);
+ }
+
+ case op_get_callee: {
+ if (m_inlineStackTop->m_inlineCallFrame)
+ set(currentInstruction[1].u.operand, getDirect(m_inlineStackTop->m_calleeVR));
+ else
+ set(currentInstruction[1].u.operand, addToGraph(GetCallee));
+ NEXT_OPCODE(op_get_callee);
+ }
+
+ // === Bitwise operations ===
+
+ case op_bitand: {
+ NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
+ NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
+ NEXT_OPCODE(op_bitand);
+ }
+
+ case op_bitor: {
+ NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
+ NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
+ NEXT_OPCODE(op_bitor);
+ }
+
+ case op_bitxor: {
+ NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
+ NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
+ NEXT_OPCODE(op_bitxor);
+ }
+
+ case op_rshift: {
+ NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
+ NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
+ NodeIndex result;
+ // Optimize out shifts by zero.
+ if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
+ result = op1;
+ else
+ result = addToGraph(BitRShift, op1, op2);
+ set(currentInstruction[1].u.operand, result);
+ NEXT_OPCODE(op_rshift);
+ }
+
+ case op_lshift: {
+ NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
+ NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
+ NodeIndex result;
+ // Optimize out shifts by zero.
+ if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
+ result = op1;
+ else
+ result = addToGraph(BitLShift, op1, op2);
+ set(currentInstruction[1].u.operand, result);
+ NEXT_OPCODE(op_lshift);
+ }
+
+ case op_urshift: {
+ NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
+ NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
+ NodeIndex result;
+ // The result of a zero-extending right shift is treated as an unsigned value.
+ // This means that if the top bit is set, the result is not in the int32 range,
+ // and as such must be stored as a double. If the shift amount is a constant,
+ // we may be able to optimize.
+ if (isInt32Constant(op2)) {
+ // If we know we are shifting by a non-zero amount, then since the operation
+ // zero fills we know the top bit of the result must be zero, and as such the
+ // result must be within the int32 range. Conversely, if this is a shift by
+ // zero, then the result may be changed by the conversion to unsigned, but it
+ // is not necessary to perform the shift!
+ if (valueOfInt32Constant(op2) & 0x1f)
+ result = addToGraph(BitURShift, op1, op2);
+ else
+ result = makeSafe(addToGraph(UInt32ToNumber, OpInfo(NodeUseBottom), op1));
+ } else {
+ // Cannot optimize at this stage; shift & potentially rebox as a double.
+ result = addToGraph(BitURShift, op1, op2);
+ result = makeSafe(addToGraph(UInt32ToNumber, OpInfo(NodeUseBottom), result));
+ }
+ set(currentInstruction[1].u.operand, result);
+ NEXT_OPCODE(op_urshift);
+ }
+
+ // === Increment/Decrement opcodes ===
+
+ case op_pre_inc: {
+ unsigned srcDst = currentInstruction[1].u.operand;
+ NodeIndex op = getToNumber(srcDst);
+ set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one())));
+ NEXT_OPCODE(op_pre_inc);
+ }
+
+ case op_post_inc: {
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+ ASSERT(result != srcDst); // Required for assumptions we make during OSR.
+ NodeIndex op = getToNumber(srcDst);
+ set(result, op);
+ set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one())));
+ NEXT_OPCODE(op_post_inc);
+ }
+
+ case op_pre_dec: {
+ unsigned srcDst = currentInstruction[1].u.operand;
+ NodeIndex op = getToNumber(srcDst);
+ set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one())));
+ NEXT_OPCODE(op_pre_dec);
+ }
+
+ case op_post_dec: {
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+ NodeIndex op = getToNumber(srcDst);
+ set(result, op);
+ set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one())));
+ NEXT_OPCODE(op_post_dec);
+ }
+
+ // === Arithmetic operations ===
+
+ case op_add: {
+ NodeIndex op1 = get(currentInstruction[2].u.operand);
+ NodeIndex op2 = get(currentInstruction[3].u.operand);
+ if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult())
+ set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), toNumber(op1), toNumber(op2))));
+ else
+ set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, OpInfo(NodeUseBottom), op1, op2)));
+ NEXT_OPCODE(op_add);
+ }
+
+ case op_sub: {
+ NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
+ NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op1, op2)));
+ NEXT_OPCODE(op_sub);
+ }
+
+ case op_mul: {
+ // Multiply requires that the inputs are not truncated, unfortunately.
+ NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
+ NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, OpInfo(NodeUseBottom), op1, op2)));
+ NEXT_OPCODE(op_mul);
+ }
+
+ case op_mod: {
+ NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
+ NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, OpInfo(NodeUseBottom), op1, op2)));
+ NEXT_OPCODE(op_mod);
+ }
+
+ case op_div: {
+ NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
+ NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, OpInfo(NodeUseBottom), op1, op2)));
+ NEXT_OPCODE(op_div);
+ }
+
+ // === Misc operations ===
+
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ case op_debug:
+ addToGraph(Breakpoint);
+ NEXT_OPCODE(op_debug);
+#endif
+ case op_mov: {
+ NodeIndex op = get(currentInstruction[2].u.operand);
+ set(currentInstruction[1].u.operand, op);
+ NEXT_OPCODE(op_mov);
+ }
+
+ case op_check_has_instance:
+ addToGraph(CheckHasInstance, get(currentInstruction[1].u.operand));
+ NEXT_OPCODE(op_check_has_instance);
+
+ case op_instanceof: {
+ NodeIndex value = get(currentInstruction[2].u.operand);
+ NodeIndex baseValue = get(currentInstruction[3].u.operand);
+ NodeIndex prototype = get(currentInstruction[4].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, baseValue, prototype));
+ NEXT_OPCODE(op_instanceof);
+ }
+
+ case op_not: {
+ NodeIndex value = get(currentInstruction[2].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
+ NEXT_OPCODE(op_not);
+ }
+
+ case op_to_primitive: {
+ NodeIndex value = get(currentInstruction[2].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
+ NEXT_OPCODE(op_to_primitive);
+ }
+
+ case op_strcat: {
+ int startOperand = currentInstruction[2].u.operand;
+ int numOperands = currentInstruction[3].u.operand;
+ for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
+ addVarArgChild(get(operandIdx));
+ set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, StrCat, OpInfo(0), OpInfo(0)));
+ NEXT_OPCODE(op_strcat);
+ }
+
+ case op_less: {
+ NodeIndex op1 = get(currentInstruction[2].u.operand);
+ NodeIndex op2 = get(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
+ NEXT_OPCODE(op_less);
+ }
+
+ case op_lesseq: {
+ NodeIndex op1 = get(currentInstruction[2].u.operand);
+ NodeIndex op2 = get(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
+ NEXT_OPCODE(op_lesseq);
+ }
+
+ case op_greater: {
+ NodeIndex op1 = get(currentInstruction[2].u.operand);
+ NodeIndex op2 = get(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
+ NEXT_OPCODE(op_greater);
+ }
+
+ case op_greatereq: {
+ NodeIndex op1 = get(currentInstruction[2].u.operand);
+ NodeIndex op2 = get(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
+ NEXT_OPCODE(op_greatereq);
+ }
+
+ case op_eq: {
+ NodeIndex op1 = get(currentInstruction[2].u.operand);
+ NodeIndex op2 = get(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
+ NEXT_OPCODE(op_eq);
+ }
+
+ case op_eq_null: {
+ NodeIndex value = get(currentInstruction[2].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull()));
+ NEXT_OPCODE(op_eq_null);
+ }
+
+ case op_stricteq: {
+ NodeIndex op1 = get(currentInstruction[2].u.operand);
+ NodeIndex op2 = get(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
+ NEXT_OPCODE(op_stricteq);
+ }
+
+ case op_neq: {
+ NodeIndex op1 = get(currentInstruction[2].u.operand);
+ NodeIndex op2 = get(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
+ NEXT_OPCODE(op_neq);
+ }
+
+ case op_neq_null: {
+ NodeIndex value = get(currentInstruction[2].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull())));
+ NEXT_OPCODE(op_neq_null);
+ }
+
+ case op_nstricteq: {
+ NodeIndex op1 = get(currentInstruction[2].u.operand);
+ NodeIndex op2 = get(currentInstruction[3].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2)));
+ NEXT_OPCODE(op_nstricteq);
+ }
+
+ // === Property access operations ===
+
+ case op_get_by_val: {
+ PredictedType prediction = getPrediction();
+
+ NodeIndex base = get(currentInstruction[2].u.operand);
+ NodeIndex property = get(currentInstruction[3].u.operand);
+ NodeIndex propertyStorage = addToGraph(GetIndexedPropertyStorage, base, property);
+ NodeIndex getByVal = addToGraph(GetByVal, OpInfo(0), OpInfo(prediction), base, property, propertyStorage);
+ set(currentInstruction[1].u.operand, getByVal);
+
+ NEXT_OPCODE(op_get_by_val);
+ }
+
+ case op_put_by_val: {
+ NodeIndex base = get(currentInstruction[1].u.operand);
+ NodeIndex property = get(currentInstruction[2].u.operand);
+ NodeIndex value = get(currentInstruction[3].u.operand);
+
+ addToGraph(PutByVal, base, property, value);
+
+ NEXT_OPCODE(op_put_by_val);
+ }
+
+ case op_method_check: {
+ Instruction* getInstruction = currentInstruction + OPCODE_LENGTH(op_method_check);
+
+ PredictedType prediction = getPrediction();
+
+ ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id);
+
+ NodeIndex base = get(getInstruction[2].u.operand);
+ unsigned identifier = m_inlineStackTop->m_identifierRemap[getInstruction[3].u.operand];
+
+            // Check if the method_check was monomorphic. If so, emit CheckStructure nodes
+            // and plant the cached function as a constant, which is a lot more efficient
+            // than a generic GetById.
+ StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
+ MethodCallLinkInfo& methodCall = m_inlineStackTop->m_profiledBlock->getMethodCallLinkInfo(m_currentIndex);
+
+ if (methodCall.seen
+ && !!methodCall.cachedStructure
+ && !stubInfo.seen
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
+ // It's monomorphic as far as we can tell, since the method_check was linked
+ // but the slow path (i.e. the normal get_by_id) never fired.
+
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCall.cachedStructure.get())), base);
+ if (methodCall.cachedPrototype.get() != m_inlineStackTop->m_profiledBlock->globalObject()->methodCallDummy())
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCall.cachedPrototypeStructure.get())), cellConstant(methodCall.cachedPrototype.get()));
+
+ set(getInstruction[1].u.operand, cellConstant(methodCall.cachedFunction.get()));
+ } else
+ set(getInstruction[1].u.operand, addToGraph(GetById, OpInfo(identifier), OpInfo(prediction), base));
+
+ m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
+ continue;
+ }
+ case op_get_scoped_var: {
+ PredictedType prediction = getPrediction();
+ int dst = currentInstruction[1].u.operand;
+ int slot = currentInstruction[2].u.operand;
+ int depth = currentInstruction[3].u.operand;
+ NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
+ NodeIndex getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeChain);
+ set(dst, getScopedVar);
+ NEXT_OPCODE(op_get_scoped_var);
+ }
+ case op_put_scoped_var: {
+ int slot = currentInstruction[1].u.operand;
+ int depth = currentInstruction[2].u.operand;
+ int source = currentInstruction[3].u.operand;
+ NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
+ addToGraph(PutScopedVar, OpInfo(slot), getScopeChain, get(source));
+ NEXT_OPCODE(op_put_scoped_var);
+ }
+ case op_get_by_id: {
+ PredictedType prediction = getPredictionWithoutOSRExit();
+
+ NodeIndex base = get(currentInstruction[2].u.operand);
+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+
+ Identifier identifier = m_codeBlock->identifier(identifierNumber);
+ StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Slow case count for GetById @%lu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+#endif
+
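+        // Try to derive a single property-storage offset that every structure observed
+        // by the baseline JIT agrees on. If we succeed, we can emit CheckStructure plus
+        // a direct GetByOffset; otherwise we fall back to a generic GetById below.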
+ size_t offset = notFound;
+ StructureSet structureSet;
+ if (stubInfo.seen
+ && !m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
+ switch (stubInfo.accessType) {
+ case access_get_by_id_self: {
+ Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
+ offset = structure->get(*m_globalData, identifier);
+
+ if (offset != notFound)
+ structureSet.add(structure);
+
+ if (offset != notFound)
+ ASSERT(structureSet.size());
+ break;
+ }
+
+ case access_get_by_id_self_list: {
+ PolymorphicAccessStructureList* list = stubInfo.u.getByIdProtoList.structureList;
+ unsigned size = stubInfo.u.getByIdProtoList.listSize;
+ for (unsigned i = 0; i < size; ++i) {
+ if (!list->list[i].isDirect) {
+ offset = notFound;
+ break;
+ }
+
+ Structure* structure = list->list[i].base.get();
+ if (structureSet.contains(structure))
+ continue;
+
+ size_t myOffset = structure->get(*m_globalData, identifier);
+
+ if (myOffset == notFound) {
+ offset = notFound;
+ break;
+ }
+
+ if (!i)
+ offset = myOffset;
+ else if (offset != myOffset) {
+ offset = notFound;
+ break;
+ }
+
+ structureSet.add(structure);
+ }
+
+ if (offset != notFound)
+ ASSERT(structureSet.size());
+ break;
+ }
+
+ default:
+ ASSERT(offset == notFound);
+ break;
+ }
+ }
+
+ if (offset != notFound) {
+ ASSERT(structureSet.size());
+
+ // The implementation of GetByOffset does not know to terminate speculative
+ // execution if it doesn't have a prediction, so we do it manually.
+ if (prediction == PredictNone)
+ addToGraph(ForceOSRExit);
+
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), base);
+ set(currentInstruction[1].u.operand, addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), addToGraph(GetPropertyStorage, base)));
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = offset;
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
+ } else
+ set(currentInstruction[1].u.operand, addToGraph(GetById, OpInfo(identifierNumber), OpInfo(prediction), base));
+
+ NEXT_OPCODE(op_get_by_id);
+ }
+
+ case op_put_by_id: {
+ NodeIndex value = get(currentInstruction[3].u.operand);
+ NodeIndex base = get(currentInstruction[1].u.operand);
+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ bool direct = currentInstruction[8].u.operand;
+
+ StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
+ if (!stubInfo.seen)
+ addToGraph(ForceOSRExit);
+
+ bool alreadyGenerated = false;
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Slow case count for PutById @%lu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+#endif
+
+ if (stubInfo.seen
+ && !m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
+ switch (stubInfo.accessType) {
+ case access_put_by_id_replace: {
+ Structure* structure = stubInfo.u.putByIdReplace.baseObjectStructure.get();
+ Identifier identifier = m_codeBlock->identifier(identifierNumber);
+ size_t offset = structure->get(*m_globalData, identifier);
+
+ if (offset != notFound) {
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), base);
+ addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = offset;
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
+
+ alreadyGenerated = true;
+ }
+ break;
+ }
+
+ case access_put_by_id_transition_normal:
+ case access_put_by_id_transition_direct: {
+ Structure* previousStructure = stubInfo.u.putByIdTransition.previousStructure.get();
+ Structure* newStructure = stubInfo.u.putByIdTransition.structure.get();
+
+ if (previousStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity())
+ break;
+
+ StructureChain* structureChain = stubInfo.u.putByIdTransition.chain.get();
+
+ Identifier identifier = m_codeBlock->identifier(identifierNumber);
+ size_t offset = newStructure->get(*m_globalData, identifier);
+
+ if (offset != notFound && structureChainIsStillValid(direct, previousStructure, structureChain)) {
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(previousStructure)), base);
+ if (!direct) {
+ if (!previousStructure->storedPrototype().isNull())
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(previousStructure->storedPrototype().asCell()->structure())), cellConstant(previousStructure->storedPrototype().asCell()));
+
+ for (WriteBarrier<Structure>* it = structureChain->head(); *it; ++it) {
+ JSValue prototype = (*it)->storedPrototype();
+ if (prototype.isNull())
+ continue;
+ ASSERT(prototype.isCell());
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(prototype.asCell()->structure())), cellConstant(prototype.asCell()));
+ }
+ }
+ addToGraph(PutStructure, OpInfo(m_graph.addStructureTransitionData(StructureTransitionData(previousStructure, newStructure))), base);
+
+ addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = offset;
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
+
+ alreadyGenerated = true;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ if (!alreadyGenerated) {
+ if (direct)
+ addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
+ else
+ addToGraph(PutById, OpInfo(identifierNumber), base, value);
+ }
+
+ NEXT_OPCODE(op_put_by_id);
+ }
+
+ case op_get_global_var: {
+ PredictedType prediction = getPrediction();
+
+ NodeIndex getGlobalVar = addToGraph(GetGlobalVar, OpInfo(currentInstruction[2].u.operand));
+ set(currentInstruction[1].u.operand, getGlobalVar);
+ m_graph.predictGlobalVar(currentInstruction[2].u.operand, prediction);
+ NEXT_OPCODE(op_get_global_var);
+ }
+
+ case op_put_global_var: {
+ NodeIndex value = get(currentInstruction[2].u.operand);
+ addToGraph(PutGlobalVar, OpInfo(currentInstruction[1].u.operand), value);
+ NEXT_OPCODE(op_put_global_var);
+ }
+
+ // === Block terminators. ===
+
+ case op_jmp: {
+ unsigned relativeOffset = currentInstruction[1].u.operand;
+ addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+ LAST_OPCODE(op_jmp);
+ }
+
+ case op_loop: {
+ unsigned relativeOffset = currentInstruction[1].u.operand;
+ addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+ LAST_OPCODE(op_loop);
+ }
+
+ case op_jtrue: {
+ unsigned relativeOffset = currentInstruction[2].u.operand;
+ NodeIndex condition = get(currentInstruction[1].u.operand);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
+ LAST_OPCODE(op_jtrue);
+ }
+
+ case op_jfalse: {
+ unsigned relativeOffset = currentInstruction[2].u.operand;
+ NodeIndex condition = get(currentInstruction[1].u.operand);
+ addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
+ LAST_OPCODE(op_jfalse);
+ }
+
+ case op_loop_if_true: {
+ unsigned relativeOffset = currentInstruction[2].u.operand;
+ NodeIndex condition = get(currentInstruction[1].u.operand);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition);
+ LAST_OPCODE(op_loop_if_true);
+ }
+
+ case op_loop_if_false: {
+ unsigned relativeOffset = currentInstruction[2].u.operand;
+ NodeIndex condition = get(currentInstruction[1].u.operand);
+ addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition);
+ LAST_OPCODE(op_loop_if_false);
+ }
+
+ case op_jeq_null: {
+ unsigned relativeOffset = currentInstruction[2].u.operand;
+ NodeIndex value = get(currentInstruction[1].u.operand);
+ NodeIndex condition = addToGraph(CompareEq, value, constantNull());
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
+ LAST_OPCODE(op_jeq_null);
+ }
+
+ case op_jneq_null: {
+ unsigned relativeOffset = currentInstruction[2].u.operand;
+ NodeIndex value = get(currentInstruction[1].u.operand);
+ NodeIndex condition = addToGraph(CompareEq, value, constantNull());
+ addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
+ LAST_OPCODE(op_jneq_null);
+ }
+
+ case op_jless: {
+ unsigned relativeOffset = currentInstruction[3].u.operand;
+ NodeIndex op1 = get(currentInstruction[1].u.operand);
+ NodeIndex op2 = get(currentInstruction[2].u.operand);
+ NodeIndex condition = addToGraph(CompareLess, op1, op2);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
+ LAST_OPCODE(op_jless);
+ }
+
+ case op_jlesseq: {
+ unsigned relativeOffset = currentInstruction[3].u.operand;
+ NodeIndex op1 = get(currentInstruction[1].u.operand);
+ NodeIndex op2 = get(currentInstruction[2].u.operand);
+ NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
+ LAST_OPCODE(op_jlesseq);
+ }
+
+ case op_jgreater: {
+ unsigned relativeOffset = currentInstruction[3].u.operand;
+ NodeIndex op1 = get(currentInstruction[1].u.operand);
+ NodeIndex op2 = get(currentInstruction[2].u.operand);
+ NodeIndex condition = addToGraph(CompareGreater, op1, op2);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
+ LAST_OPCODE(op_jgreater);
+ }
+
+ case op_jgreatereq: {
+ unsigned relativeOffset = currentInstruction[3].u.operand;
+ NodeIndex op1 = get(currentInstruction[1].u.operand);
+ NodeIndex op2 = get(currentInstruction[2].u.operand);
+ NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
+ LAST_OPCODE(op_jgreatereq);
+ }
+
+ case op_jnless: {
+ unsigned relativeOffset = currentInstruction[3].u.operand;
+ NodeIndex op1 = get(currentInstruction[1].u.operand);
+ NodeIndex op2 = get(currentInstruction[2].u.operand);
+ NodeIndex condition = addToGraph(CompareLess, op1, op2);
+ addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
+ LAST_OPCODE(op_jnless);
+ }
+
+ case op_jnlesseq: {
+ unsigned relativeOffset = currentInstruction[3].u.operand;
+ NodeIndex op1 = get(currentInstruction[1].u.operand);
+ NodeIndex op2 = get(currentInstruction[2].u.operand);
+ NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
+ addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
+ LAST_OPCODE(op_jnlesseq);
+ }
+
+ case op_jngreater: {
+ unsigned relativeOffset = currentInstruction[3].u.operand;
+ NodeIndex op1 = get(currentInstruction[1].u.operand);
+ NodeIndex op2 = get(currentInstruction[2].u.operand);
+ NodeIndex condition = addToGraph(CompareGreater, op1, op2);
+ addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
+ LAST_OPCODE(op_jngreater);
+ }
+
+ case op_jngreatereq: {
+ unsigned relativeOffset = currentInstruction[3].u.operand;
+ NodeIndex op1 = get(currentInstruction[1].u.operand);
+ NodeIndex op2 = get(currentInstruction[2].u.operand);
+ NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
+ addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
+ LAST_OPCODE(op_jngreatereq);
+ }
+
+ case op_loop_if_less: {
+ unsigned relativeOffset = currentInstruction[3].u.operand;
+ NodeIndex op1 = get(currentInstruction[1].u.operand);
+ NodeIndex op2 = get(currentInstruction[2].u.operand);
+ NodeIndex condition = addToGraph(CompareLess, op1, op2);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition);
+ LAST_OPCODE(op_loop_if_less);
+ }
+
+ case op_loop_if_lesseq: {
+ unsigned relativeOffset = currentInstruction[3].u.operand;
+ NodeIndex op1 = get(currentInstruction[1].u.operand);
+ NodeIndex op2 = get(currentInstruction[2].u.operand);
+ NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition);
+ LAST_OPCODE(op_loop_if_lesseq);
+ }
+
+ case op_loop_if_greater: {
+ unsigned relativeOffset = currentInstruction[3].u.operand;
+ NodeIndex op1 = get(currentInstruction[1].u.operand);
+ NodeIndex op2 = get(currentInstruction[2].u.operand);
+ NodeIndex condition = addToGraph(CompareGreater, op1, op2);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greater)), condition);
+ LAST_OPCODE(op_loop_if_greater);
+ }
+
+ case op_loop_if_greatereq: {
+ unsigned relativeOffset = currentInstruction[3].u.operand;
+ NodeIndex op1 = get(currentInstruction[1].u.operand);
+ NodeIndex op2 = get(currentInstruction[2].u.operand);
+ NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
+ addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greatereq)), condition);
+ LAST_OPCODE(op_loop_if_greatereq);
+ }
+
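+        // op_ret: for a return from inlined code, store the result into the caller's
+        // return-value register (if any) and record whether this is an early return
+        // that needs linking to the post-inline continuation block; otherwise emit a
+        // normal Return node.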
+ case op_ret:
+ if (m_inlineStackTop->m_inlineCallFrame) {
+ if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
+ setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
+ m_inlineStackTop->m_didReturn = true;
+ if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
+ // If we're returning from the first block, then we're done parsing.
+ ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
+ shouldContinueParsing = false;
+ LAST_OPCODE(op_ret);
+ } else {
+ // If inlining created blocks, and we're doing a return, then we need some
+ // special linking.
+ ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
+ m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
+ }
+ if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
+ ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
+ addToGraph(Jump, OpInfo(NoBlock));
+ m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
+ m_inlineStackTop->m_didEarlyReturn = true;
+ }
+ LAST_OPCODE(op_ret);
+ }
+ addToGraph(Return, get(currentInstruction[1].u.operand));
+ LAST_OPCODE(op_ret);
+
+ case op_end:
+ ASSERT(!m_inlineStackTop->m_inlineCallFrame);
+ addToGraph(Return, get(currentInstruction[1].u.operand));
+ LAST_OPCODE(op_end);
+
+ case op_throw:
+ addToGraph(Throw, get(currentInstruction[1].u.operand));
+ LAST_OPCODE(op_throw);
+
+ case op_throw_reference_error:
+ addToGraph(ThrowReferenceError);
+ LAST_OPCODE(op_throw_reference_error);
+
+ case op_call:
+ handleCall(interpreter, currentInstruction, Call, CodeForCall);
+ NEXT_OPCODE(op_call);
+
+ case op_construct:
+ handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
+ NEXT_OPCODE(op_construct);
+
+ case op_call_put_result:
+ NEXT_OPCODE(op_call_put_result);
+
+ case op_resolve: {
+ PredictedType prediction = getPrediction();
+
+ unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+
+ NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier), OpInfo(prediction));
+ set(currentInstruction[1].u.operand, resolve);
+
+ NEXT_OPCODE(op_resolve);
+ }
+
+ case op_resolve_base: {
+ PredictedType prediction = getPrediction();
+
+ unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+
+ NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier), OpInfo(prediction));
+ set(currentInstruction[1].u.operand, resolve);
+
+ NEXT_OPCODE(op_resolve_base);
+ }
+
+ case op_resolve_global: {
+ PredictedType prediction = getPrediction();
+
+ NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
+ m_graph.m_resolveGlobalData.append(ResolveGlobalData());
+ ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
+ data.identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ data.resolveInfoIndex = m_globalResolveNumber++;
+ set(currentInstruction[1].u.operand, resolve);
+
+ NEXT_OPCODE(op_resolve_global);
+ }
+
+ case op_loop_hint: {
+ // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
+ // OSR can only happen at basic block boundaries. Assert that these two statements
+ // are compatible.
+ ASSERT_UNUSED(blockBegin, m_currentIndex == blockBegin);
+
+ // We never do OSR into an inlined code block. That could not happen, since OSR
+ // looks up the code block that is the replacement for the baseline JIT code
+ // block. Hence, machine code block = true code block = not inline code block.
+ if (!m_inlineStackTop->m_caller)
+ m_currentBlock->isOSRTarget = true;
+
+ // Emit a phantom node to ensure that there is a placeholder node for this bytecode
+ // op.
+ addToGraph(Phantom);
+
+ NEXT_OPCODE(op_loop_hint);
+ }
+
+ default:
+ // Parse failed! This should not happen because the capabilities checker
+ // should have caught it.
+ ASSERT_NOT_REACHED();
+ return false;
+ }
+
+ ASSERT(canCompileOpcode(opcodeID));
+ }
+}
+
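+// Drain the argument or local phi work-list. For each pending Phi we visit every
+// predecessor block, find (or create) the node that produces the variable at that
+// predecessor's tail, unify its VariableAccessData with the Phi's, and wire it in as
+// a child; a Phi that already has three children is split by introducing a fresh Phi
+// to hold the earlier children.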
+template<ByteCodeParser::PhiStackType stackType>
+void ByteCodeParser::processPhiStack()
+{
+ Vector<PhiStackEntry, 16>& phiStack = (stackType == ArgumentPhiStack) ? m_argumentPhiStack : m_localPhiStack;
+
+ while (!phiStack.isEmpty()) {
+ PhiStackEntry entry = phiStack.last();
+ phiStack.removeLast();
+
+ PredecessorList& predecessors = entry.m_block->m_predecessors;
+ unsigned varNo = entry.m_varNo;
+ VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
+#endif
+
+ for (size_t i = 0; i < predecessors.size(); ++i) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Dealing with predecessor block %u.\n", predecessors[i]);
+#endif
+
+ BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();
+
+ NodeIndex& var = (stackType == ArgumentPhiStack) ? predecessorBlock->variablesAtTail.argument(varNo) : predecessorBlock->variablesAtTail.local(varNo);
+
+ NodeIndex valueInPredecessor = var;
+ if (valueInPredecessor == NoNode) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Did not find node, adding phi.\n");
+#endif
+
+ valueInPredecessor = addToGraph(Phi, OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo))));
+ var = valueInPredecessor;
+ if (stackType == ArgumentPhiStack)
+ predecessorBlock->variablesAtHead.setArgumentFirstTime(varNo, valueInPredecessor);
+ else
+ predecessorBlock->variablesAtHead.setLocalFirstTime(varNo, valueInPredecessor);
+ phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
+ } else if (m_graph[valueInPredecessor].op == GetLocal) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Found GetLocal @%u.\n", valueInPredecessor);
+#endif
+
+ // We want to ensure that the VariableAccessDatas are identical between the
+ // GetLocal and its block-local Phi. Strictly speaking we only need the two
+ // to be unified. But for efficiency, we want the code that creates GetLocals
+ // and Phis to try to reuse VariableAccessDatas as much as possible.
+ ASSERT(m_graph[valueInPredecessor].variableAccessData() == m_graph[m_graph[valueInPredecessor].child1()].variableAccessData());
+
+ valueInPredecessor = m_graph[valueInPredecessor].child1();
+ } else {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Found @%u.\n", valueInPredecessor);
+#endif
+ }
+ ASSERT(m_graph[valueInPredecessor].op == SetLocal || m_graph[valueInPredecessor].op == Phi || m_graph[valueInPredecessor].op == Flush || (m_graph[valueInPredecessor].op == SetArgument && stackType == ArgumentPhiStack));
+
+ VariableAccessData* dataForPredecessor = m_graph[valueInPredecessor].variableAccessData();
+
+ dataForPredecessor->unify(dataForPhi);
+
+ Node* phiNode = &m_graph[entry.m_phi];
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
+#endif
+ if (phiNode->refCount()) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Reffing @%u.\n", valueInPredecessor);
+#endif
+ m_graph.ref(valueInPredecessor);
+ }
+
+ if (phiNode->child1() == NoNode) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
+#endif
+ phiNode->children.fixed.child1 = valueInPredecessor;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Children of @%u: ", entry.m_phi);
+ phiNode->dumpChildren(stdout);
+ printf(".\n");
+#endif
+ continue;
+ }
+ if (phiNode->child2() == NoNode) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
+#endif
+ phiNode->children.fixed.child2 = valueInPredecessor;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Children of @%u: ", entry.m_phi);
+ phiNode->dumpChildren(stdout);
+ printf(".\n");
+#endif
+ continue;
+ }
+ if (phiNode->child3() == NoNode) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
+#endif
+ phiNode->children.fixed.child3 = valueInPredecessor;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Children of @%u: ", entry.m_phi);
+ phiNode->dumpChildren(stdout);
+ printf(".\n");
+#endif
+ continue;
+ }
+
+ NodeIndex newPhi = addToGraph(Phi, OpInfo(dataForPhi));
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
+#endif
+
+ phiNode = &m_graph[entry.m_phi]; // reload after vector resize
+ Node& newPhiNode = m_graph[newPhi];
+ if (phiNode->refCount())
+ m_graph.ref(newPhi);
+
+ newPhiNode.children.fixed.child1 = phiNode->child1();
+ newPhiNode.children.fixed.child2 = phiNode->child2();
+ newPhiNode.children.fixed.child3 = phiNode->child3();
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Children of @%u: ", newPhi);
+ newPhiNode.dumpChildren(stdout);
+ printf(".\n");
+#endif
+
+ phiNode->children.fixed.child1 = newPhi;
+ phiNode->children.fixed.child2 = valueInPredecessor;
+ phiNode->children.fixed.child3 = NoNode;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Children of @%u: ", entry.m_phi);
+ phiNode->dumpChildren(stdout);
+ printf(".\n");
+#endif
+ }
+ }
+}
+
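+// Resolve the branch targets of a block's terminal node from the bytecode offsets
+// recorded during parsing into actual basic block indices.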
+void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
+{
+ ASSERT(block->end != NoNode);
+ ASSERT(!block->isLinked);
+ ASSERT(block->end > block->begin);
+ Node& node = m_graph[block->end - 1];
+ ASSERT(node.isTerminal());
+
+ switch (node.op) {
+ case Jump:
+ node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
+#endif
+ break;
+
+ case Branch:
+ node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
+ node.setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.notTakenBytecodeOffsetDuringParsing()));
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
+#endif
+ break;
+
+ default:
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Marking basic block %p as linked.\n", block);
+#endif
+ break;
+ }
+
+#if !ASSERT_DISABLED
+ block->isLinked = true;
+#endif
+}
+
+void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets)
+{
+ for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
+ if (unlinkedBlocks[i].m_needsNormalLinking) {
+ linkBlock(m_graph.m_blocks[unlinkedBlocks[i].m_blockIndex].get(), possibleTargets);
+ unlinkedBlocks[i].m_needsNormalLinking = false;
+ }
+ }
+}
+
+void ByteCodeParser::handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex blockIndex, BlockIndex successorIndex)
+{
+ BasicBlock* successor = m_graph.m_blocks[successorIndex].get();
+ if (!successor->isReachable) {
+ successor->isReachable = true;
+ worklist.append(successorIndex);
+ }
+
+ successor->m_predecessors.append(blockIndex);
+}
+
+void ByteCodeParser::determineReachability()
+{
+ Vector<BlockIndex, 16> worklist;
+ worklist.append(0);
+ m_graph.m_blocks[0]->isReachable = true;
+ while (!worklist.isEmpty()) {
+ BlockIndex index = worklist.last();
+ worklist.removeLast();
+
+ BasicBlock* block = m_graph.m_blocks[index].get();
+ ASSERT(block->isLinked);
+
+ Node& node = m_graph[block->end - 1];
+ ASSERT(node.isTerminal());
+
+ if (node.isJump())
+ handleSuccessor(worklist, index, node.takenBlockIndex());
+ else if (node.isBranch()) {
+ handleSuccessor(worklist, index, node.takenBlockIndex());
+ handleSuccessor(worklist, index, node.notTakenBlockIndex());
+ }
+ }
+}
+
+void ByteCodeParser::buildOperandMapsIfNecessary()
+{
+ if (m_haveBuiltOperandMaps)
+ return;
+
+ for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
+ m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
+ for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i)
+ m_jsValueMap.add(JSValue::encode(m_codeBlock->getConstant(i + FirstConstantRegisterIndex)), i + FirstConstantRegisterIndex);
+
+ m_haveBuiltOperandMaps = true;
+}
+
+ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParser, CodeBlock* codeBlock, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind kind)
+ : m_byteCodeParser(byteCodeParser)
+ , m_codeBlock(codeBlock)
+ , m_profiledBlock(profiledBlock)
+ , m_calleeVR(calleeVR)
+ , m_exitProfile(profiledBlock->exitProfile())
+ , m_callsiteBlockHead(callsiteBlockHead)
+ , m_returnValue(returnValueVR)
+ , m_didReturn(false)
+ , m_didEarlyReturn(false)
+ , m_caller(byteCodeParser->m_inlineStackTop)
+{
+ if (m_caller) {
+ // Inline case.
+ ASSERT(codeBlock != byteCodeParser->m_codeBlock);
+ ASSERT(callee);
+ ASSERT(calleeVR != InvalidVirtualRegister);
+ ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
+ ASSERT(callsiteBlockHead != NoBlock);
+
+ InlineCallFrame inlineCallFrame;
+ inlineCallFrame.executable.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
+ inlineCallFrame.stackOffset = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize;
+ inlineCallFrame.callee.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
+ inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
+        inlineCallFrame.arguments.resize(codeBlock->m_numParameters); // Set the number of arguments, including 'this', but don't configure the value recoveries yet.
+ inlineCallFrame.isCall = isCall(kind);
+ byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
+ m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();
+
+ byteCodeParser->buildOperandMapsIfNecessary();
+
+ m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
+ m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
+
+ for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
+ StringImpl* rep = codeBlock->identifier(i).impl();
+ pair<IdentifierMap::iterator, bool> result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
+ if (result.second)
+ byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_globalData, rep));
+ m_identifierRemap[i] = result.first->second;
+ }
+ for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
+ JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
+ pair<JSValueMap::iterator, bool> result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
+ if (result.second) {
+ byteCodeParser->m_codeBlock->addConstant(value);
+ byteCodeParser->m_constants.append(ConstantRecord());
+ }
+ m_constantRemap[i] = result.first->second;
+ }
+
+ m_callsiteBlockHeadNeedsLinking = true;
+ } else {
+ // Machine code block case.
+ ASSERT(codeBlock == byteCodeParser->m_codeBlock);
+ ASSERT(!callee);
+ ASSERT(calleeVR == InvalidVirtualRegister);
+ ASSERT(returnValueVR == InvalidVirtualRegister);
+ ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
+ ASSERT(callsiteBlockHead == NoBlock);
+
+ m_inlineCallFrame = 0;
+
+ m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
+ m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
+
+ for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
+ m_identifierRemap[i] = i;
+ for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
+ m_constantRemap[i] = i + FirstConstantRegisterIndex;
+
+ m_callsiteBlockHeadNeedsLinking = false;
+ }
+
+ for (size_t i = 0; i < m_constantRemap.size(); ++i)
+ ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));
+
+ byteCodeParser->m_inlineStackTop = this;
+}
+
+void ByteCodeParser::parseCodeBlock()
+{
+ CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
+
+ for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) {
+        // The maximum bytecode offset to go into the current basic block is either the next jump target, or the end of the instructions.
+ unsigned limit = jumpTargetIndex < codeBlock->numberOfJumpTargets() ? codeBlock->jumpTarget(jumpTargetIndex) : codeBlock->instructions().size();
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Parsing bytecode with limit %p bc#%u at inline depth %u.\n", m_inlineStackTop->executable(), limit, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+#endif
+ ASSERT(m_currentIndex < limit);
+
+ // Loop until we reach the current limit (i.e. next jump target).
+ do {
+ if (!m_currentBlock) {
+ // Check if we can use the last block.
+ if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->begin == m_graph.m_blocks.last()->end) {
+ // This must be a block belonging to us.
+ ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
+ // Either the block is linkable or it isn't. If it's linkable then it's the last
+ // block in the blockLinkingTargets list. If it's not then the last block will
+                // have a lower bytecode index than the one we're about to give to this block.
+ if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last() != m_currentIndex) {
+ // Make the block linkable.
+ ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last() < m_currentIndex);
+ m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size() - 1);
+ }
+ // Change its bytecode begin and continue.
+ m_currentBlock = m_graph.m_blocks.last().get();
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
+#endif
+ m_currentBlock->bytecodeBegin = m_currentIndex;
+ } else {
+ OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_graph.size(), m_numArguments, m_numLocals));
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Creating basic block %p, #%lu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+#endif
+ m_currentBlock = block.get();
+ ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
+ m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
+ m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size());
+ m_graph.m_blocks.append(block.release());
+ prepareToParseBlock();
+ }
+ }
+
+ bool shouldContinueParsing = parseBlock(limit);
+
+ // We should not have gone beyond the limit.
+ ASSERT(m_currentIndex <= limit);
+
+ // We should have planted a terminal, or we just gave up because
+ // we realized that the jump target information is imprecise, or we
+ // are at the end of an inline function, or we realized that we
+ // should stop parsing because there was a return in the first
+ // basic block.
+ ASSERT(m_currentBlock->begin == m_graph.size() || m_graph.last().isTerminal() || (m_currentIndex == codeBlock->instructions().size() && m_inlineStackTop->m_inlineCallFrame) || !shouldContinueParsing);
+
+ m_currentBlock->end = m_graph.size();
+
+ if (!shouldContinueParsing)
+ return;
+
+ m_currentBlock = 0;
+ } while (m_currentIndex < limit);
+ }
+
+ // Should have reached the end of the instructions.
+ ASSERT(m_currentIndex == codeBlock->instructions().size());
+}
+
+bool ByteCodeParser::parse()
+{
+ // Set during construction.
+ ASSERT(!m_currentIndex);
+
+ InlineStackEntry inlineStackEntry(this, m_codeBlock, m_profiledBlock, NoBlock, InvalidVirtualRegister, 0, InvalidVirtualRegister, InvalidVirtualRegister, CodeForCall);
+
+ parseCodeBlock();
+
+ linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
+ determineReachability();
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("Processing local variable phis.\n");
+#endif
+ processPhiStack<LocalPhiStack>();
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("Processing argument phis.\n");
+#endif
+ processPhiStack<ArgumentPhiStack>();
+
+ m_graph.m_preservedVars = m_preservedVars;
+ m_graph.m_localVars = m_numLocals;
+ m_graph.m_parameterSlots = m_parameterSlots;
+
+ return true;
+}
+
+bool parse(Graph& graph, JSGlobalData* globalData, CodeBlock* codeBlock)
+{
+#if DFG_DEBUG_LOCAL_DISBALE
+ UNUSED_PARAM(graph);
+ UNUSED_PARAM(globalData);
+ UNUSED_PARAM(codeBlock);
+ return false;
+#else
+ return ByteCodeParser(globalData, codeBlock, codeBlock->alternative(), graph).parse();
+#endif
+}
+
+} } // namespace JSC::DFG
+
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.h b/Source/JavaScriptCore/dfg/DFGByteCodeParser.h
new file mode 100644
index 000000000..d4efe61db
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGByteCodeParser_h
+#define DFGByteCodeParser_h
+
+#if ENABLE(DFG_JIT)
+
+#include <dfg/DFGGraph.h>
+
+namespace JSC {
+
+class CodeBlock;
+class JSGlobalData;
+
+namespace DFG {
+
+// Populate the Graph with nodes parsed from the CodeBlock's bytecode.
+bool parse(Graph&, JSGlobalData*, CodeBlock*);
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp
new file mode 100644
index 000000000..a8dec067f
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGCapabilities.h"
+
+#include "CodeBlock.h"
+#include "Interpreter.h"
+
+namespace JSC { namespace DFG {
+
+#if ENABLE(DFG_JIT)
+
+template<bool (*canHandleOpcode)(OpcodeID)>
+bool canHandleOpcodes(CodeBlock* codeBlock)
+{
+ Interpreter* interpreter = codeBlock->globalData()->interpreter;
+ Instruction* instructionsBegin = codeBlock->instructions().begin();
+ unsigned instructionCount = codeBlock->instructions().size();
+
+ for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
+ switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) {
+#define DEFINE_OP(opcode, length) \
+ case opcode: \
+ if (!canHandleOpcode(opcode)) \
+ return false; \
+ bytecodeOffset += length; \
+ break;
+ FOR_EACH_OPCODE_ID(DEFINE_OP)
+#undef DEFINE_OP
+ default:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ return true;
+}
+
+bool canCompileOpcodes(CodeBlock* codeBlock)
+{
+ return canHandleOpcodes<canCompileOpcode>(codeBlock);
+}
+
+bool canInlineOpcodes(CodeBlock* codeBlock)
+{
+ return canHandleOpcodes<canInlineOpcode>(codeBlock);
+}
+
+#endif
+
+} } // namespace JSC::DFG
+
diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.h b/Source/JavaScriptCore/dfg/DFGCapabilities.h
new file mode 100644
index 000000000..2653c73b0
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGCapabilities_h
+#define DFGCapabilities_h
+
+#include "Intrinsic.h"
+#include "DFGNode.h"
+#include "Executable.h"
+#include "Options.h"
+#include "Interpreter.h"
+#include <wtf/Platform.h>
+
+namespace JSC { namespace DFG {
+
+#if ENABLE(DFG_JIT)
+// Fast check functions; if they return true it is still necessary to
+// check opcodes.
+inline bool mightCompileEval(CodeBlock* codeBlock)
+{
+ return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount;
+}
+inline bool mightCompileProgram(CodeBlock* codeBlock)
+{
+ return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount;
+}
+inline bool mightCompileFunctionForCall(CodeBlock* codeBlock)
+{
+ return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount;
+}
+inline bool mightCompileFunctionForConstruct(CodeBlock* codeBlock)
+{
+ return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount;
+}
+
+inline bool mightInlineFunctionForCall(CodeBlock* codeBlock)
+{
+ return codeBlock->instructionCount() <= Options::maximumFunctionForCallInlineCandidateInstructionCount;
+}
+inline bool mightInlineFunctionForConstruct(CodeBlock* codeBlock)
+{
+ return codeBlock->instructionCount() <= Options::maximumFunctionForConstructInlineCandidateInstructionCount;
+}
+
+// Opcode checking.
+inline bool canCompileOpcode(OpcodeID opcodeID)
+{
+ switch (opcodeID) {
+ case op_enter:
+ case op_convert_this:
+ case op_create_this:
+ case op_get_callee:
+ case op_bitand:
+ case op_bitor:
+ case op_bitxor:
+ case op_rshift:
+ case op_lshift:
+ case op_urshift:
+ case op_pre_inc:
+ case op_post_inc:
+ case op_pre_dec:
+ case op_post_dec:
+ case op_add:
+ case op_sub:
+ case op_mul:
+ case op_mod:
+ case op_div:
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ case op_debug:
+#endif
+ case op_mov:
+ case op_check_has_instance:
+ case op_instanceof:
+ case op_not:
+ case op_less:
+ case op_lesseq:
+ case op_greater:
+ case op_greatereq:
+ case op_eq:
+ case op_eq_null:
+ case op_stricteq:
+ case op_neq:
+ case op_neq_null:
+ case op_nstricteq:
+ case op_get_by_val:
+ case op_put_by_val:
+ case op_method_check:
+ case op_get_scoped_var:
+ case op_put_scoped_var:
+ case op_get_by_id:
+ case op_put_by_id:
+ case op_get_global_var:
+ case op_put_global_var:
+ case op_jmp:
+ case op_loop:
+ case op_jtrue:
+ case op_jfalse:
+ case op_loop_if_true:
+ case op_loop_if_false:
+ case op_jeq_null:
+ case op_jneq_null:
+ case op_jless:
+ case op_jlesseq:
+ case op_jgreater:
+ case op_jgreatereq:
+ case op_jnless:
+ case op_jnlesseq:
+ case op_jngreater:
+ case op_jngreatereq:
+ case op_loop_hint:
+ case op_loop_if_less:
+ case op_loop_if_lesseq:
+ case op_loop_if_greater:
+ case op_loop_if_greatereq:
+ case op_ret:
+ case op_end:
+ case op_call_put_result:
+ case op_resolve:
+ case op_resolve_base:
+ case op_resolve_global:
+ case op_new_object:
+ case op_new_array:
+ case op_new_array_buffer:
+ case op_strcat:
+ case op_to_primitive:
+ case op_throw:
+ case op_throw_reference_error:
+ case op_call:
+ case op_construct:
+ return true;
+
+ // Opcodes we support conditionally. Enabling these opcodes currently results in
+ // performance regressions. Each node that we disable under restrictions has a
+ // comment describing what we know about the regression so far.
+
+ // Regresses string-validate-input, probably because it uses comparisons (< and >)
+ // on strings, which currently will cause speculation failures in some cases.
+ case op_new_regexp:
+#if DFG_ENABLE(RESTRICTIONS)
+ return false;
+#else
+ return true;
+#endif
+
+ default:
+ return false;
+ }
+}
+
+inline bool canInlineOpcode(OpcodeID opcodeID)
+{
+ switch (opcodeID) {
+
+ // These opcodes would be easy to support with inlining, but we currently don't do it.
+ // The issue is that the scope chain will not be set correctly.
+ case op_get_scoped_var:
+ case op_put_scoped_var:
+ case op_resolve:
+ case op_resolve_base:
+ case op_resolve_global:
+
+ // Constant buffers aren't copied correctly. This is easy to fix, but for
+ // now we just disable inlining for functions that use them.
+ case op_new_array_buffer:
+
+ // Inlining doesn't correctly remap regular expression operands.
+ case op_new_regexp:
+ return false;
+
+ default:
+ return canCompileOpcode(opcodeID);
+ }
+}
+
+bool canCompileOpcodes(CodeBlock*);
+bool canInlineOpcodes(CodeBlock*);
+#else // ENABLE(DFG_JIT)
+inline bool mightCompileEval(CodeBlock*) { return false; }
+inline bool mightCompileProgram(CodeBlock*) { return false; }
+inline bool mightCompileFunctionForCall(CodeBlock*) { return false; }
+inline bool mightCompileFunctionForConstruct(CodeBlock*) { return false; }
+inline bool mightInlineFunctionForCall(CodeBlock*) { return false; }
+inline bool mightInlineFunctionForConstruct(CodeBlock*) { return false; }
+
+inline bool canCompileOpcode(OpcodeID) { return false; }
+inline bool canInlineOpcode(OpcodeID) { return false; }
+inline bool canCompileOpcodes(CodeBlock*) { return false; }
+inline bool canInlineOpcodes(CodeBlock*) { return false; }
+#endif // ENABLE(DFG_JIT)
+
+inline bool canCompileEval(CodeBlock* codeBlock)
+{
+ return mightCompileEval(codeBlock) && canCompileOpcodes(codeBlock);
+}
+
+inline bool canCompileProgram(CodeBlock* codeBlock)
+{
+ return mightCompileProgram(codeBlock) && canCompileOpcodes(codeBlock);
+}
+
+inline bool canCompileFunctionForCall(CodeBlock* codeBlock)
+{
+ return mightCompileFunctionForCall(codeBlock) && canCompileOpcodes(codeBlock);
+}
+
+inline bool canCompileFunctionForConstruct(CodeBlock* codeBlock)
+{
+ return mightCompileFunctionForConstruct(codeBlock) && canCompileOpcodes(codeBlock);
+}
+
+inline bool canInlineFunctionForCall(CodeBlock* codeBlock)
+{
+ return mightInlineFunctionForCall(codeBlock) && canInlineOpcodes(codeBlock);
+}
+
+inline bool canInlineFunctionForConstruct(CodeBlock* codeBlock)
+{
+ return mightInlineFunctionForConstruct(codeBlock) && canInlineOpcodes(codeBlock);
+}
+
+inline bool mightInlineFunctionFor(CodeBlock* codeBlock, CodeSpecializationKind kind)
+{
+ if (kind == CodeForCall)
+ return mightInlineFunctionForCall(codeBlock);
+ ASSERT(kind == CodeForConstruct);
+ return mightInlineFunctionForConstruct(codeBlock);
+}
+
+inline bool canInlineFunctionFor(CodeBlock* codeBlock, CodeSpecializationKind kind)
+{
+ if (kind == CodeForCall)
+ return canInlineFunctionForCall(codeBlock);
+ ASSERT(kind == CodeForConstruct);
+ return canInlineFunctionForConstruct(codeBlock);
+}
+
+} } // namespace JSC::DFG
+
+#endif // DFGCapabilities_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGCommon.h b/Source/JavaScriptCore/dfg/DFGCommon.h
new file mode 100644
index 000000000..469dbd33e
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGCommon.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGCommon_h
+#define DFGCommon_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeOrigin.h"
+#include "VirtualRegister.h"
+
+/* DFG_ENABLE() - turn on specific features in the DFG JIT */
+#define DFG_ENABLE(DFG_FEATURE) (defined DFG_ENABLE_##DFG_FEATURE && DFG_ENABLE_##DFG_FEATURE)
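+//
+// Illustrative usage sketch: the feature flags defined below are queried through
+// this macro rather than tested directly (this is the guard pattern used
+// throughout the patch; the printf body here is only an example):
+//
+//     #if DFG_ENABLE(DEBUG_VERBOSE)
+//         printf("About to compile this code block.\n");
+//     #endif
+//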
+
+// Emit various logging information for debugging, including dumping the dataflow graphs.
+#define DFG_ENABLE_DEBUG_VERBOSE 0
+// Emit dumps during propagation, in addition to just after.
+#define DFG_ENABLE_DEBUG_PROPAGATION_VERBOSE 0
+// Emit logging for OSR exit value recoveries at every node, not just nodes that
+// actually have speculation checks.
+#define DFG_ENABLE_VERBOSE_VALUE_RECOVERIES 0
+// Enable generation of dynamic checks into the instruction stream.
+#if !ASSERT_DISABLED
+#define DFG_ENABLE_JIT_ASSERT 1
+#else
+#define DFG_ENABLE_JIT_ASSERT 0
+#endif
+// Consistency-check the contents of compiler data structures.
+#define DFG_ENABLE_CONSISTENCY_CHECK 0
+// Emit a breakpoint into the head of every generated function, to aid debugging in GDB.
+#define DFG_ENABLE_JIT_BREAK_ON_EVERY_FUNCTION 0
+// Emit a breakpoint into the head of every generated basic block, to aid debugging in GDB.
+#define DFG_ENABLE_JIT_BREAK_ON_EVERY_BLOCK 0
+// Emit a breakpoint into the head of every generated node, to aid debugging in GDB.
+#define DFG_ENABLE_JIT_BREAK_ON_EVERY_NODE 0
+// Emit a pair of xorPtr()'s on regT0 with the node index to make it easy to spot node boundaries in disassembled code.
+#define DFG_ENABLE_XOR_DEBUG_AID 0
+// Emit a breakpoint into the speculation failure code.
+#define DFG_ENABLE_JIT_BREAK_ON_SPECULATION_FAILURE 0
+// Log every speculation failure.
+#define DFG_ENABLE_VERBOSE_SPECULATION_FAILURE 0
+// Disable the DFG JIT without having to touch Platform.h
+#define DFG_DEBUG_LOCAL_DISBALE 0
+// Enable OSR entry from baseline JIT.
+#define DFG_ENABLE_OSR_ENTRY ENABLE(DFG_JIT)
+// Generate stats on how successful we were in making use of the DFG JIT, and remaining on the hot path.
+#define DFG_ENABLE_SUCCESS_STATS 0
+// Used to enable conditionally supported opcodes that currently result in performance regressions.
+#define DFG_ENABLE_RESTRICTIONS 1
+
+namespace JSC { namespace DFG {
+
+// Type for a reference to another node in the graph.
+typedef uint32_t NodeIndex;
+static const NodeIndex NoNode = UINT_MAX;
+
+typedef uint32_t BlockIndex;
+static const BlockIndex NoBlock = UINT_MAX;
+
+struct NodeIndexTraits {
+ static NodeIndex defaultValue() { return NoNode; }
+ static void dump(NodeIndex value, FILE* out)
+ {
+ if (value == NoNode)
+ fprintf(out, "-");
+ else
+ fprintf(out, "@%u", value);
+ }
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGCommon_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.cpp b/Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.cpp
new file mode 100644
index 000000000..cfab2bd7b
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGCorrectableJumpPoint.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+
+namespace JSC { namespace DFG {
+
+CodeLocationJump CorrectableJumpPoint::codeLocationForRepatch(CodeBlock* codeBlock) const
+{
+ ASSERT(m_mode == CorrectedJump);
+ return CodeLocationJump(codeBlock->getJITCode().dataAddressAtOffset(m_codeOffset));
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.h b/Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.h
new file mode 100644
index 000000000..983f479c2
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGCorrectableJumpPoint_h
+#define DFGCorrectableJumpPoint_h
+
+#if ENABLE(DFG_JIT)
+
+#include "LinkBuffer.h"
+#include "MacroAssembler.h"
+
+namespace JSC { namespace DFG {
+
+// This is a type-safe union of MacroAssembler::Jump and CodeLocationJump.
+// Furthermore, it supports the notion of branching (possibly conditionally, but
+// also possibly jumping unconditionally) to an out-of-line patchable jump.
+// Thus it goes through three states:
+//
+// 1) Label of unpatchable branch or jump (i.e. MacroAssembler::Jump).
+// 2) Label of patchable jump (i.e. MacroAssembler::Jump).
+// 3) Corrected post-linking label of patchable jump (i.e. CodeLocationJump).
+//
+// The setting of state (1) corresponds to planting the in-line unpatchable
+// branch or jump. The state transition (1)->(2) corresponds to linking the
+// in-line branch or jump to the out-of-line patchable jump, and recording
+// the latter's label. The state transition (2)->(3) corresponds to recording
+// the out-of-line patchable jump's location after branch compaction has
+// completed.
+//
+// You can also go directly from the first state to the third state, if you
+// wish to use this class for in-line patchable jumps.
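+//
+// Illustrative lifecycle sketch (assumed usage based on the description above;
+// 'speculationCheck', 'patchableJump' and 'linkBuffer' are placeholder names,
+// not taken from a caller in this patch):
+//
+//     CorrectableJumpPoint jumpPoint(speculationCheck); // state (1): in-line branch recorded
+//     jumpPoint.switchToLateJump(patchableJump);        // state (2): linked to out-of-line patchable jump
+//     jumpPoint.correctLateJump(linkBuffer);            // state (3): post-link jump location recorded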
+
+class CorrectableJumpPoint {
+public:
+ CorrectableJumpPoint(MacroAssembler::Jump check)
+ : m_codeOffset(check.m_label.m_offset)
+#ifndef NDEBUG
+ , m_mode(InitialJump)
+#endif
+ {
+#if CPU(ARM_THUMB2)
+ m_type = check.m_type;
+ m_condition = check.m_condition;
+#endif
+ }
+
+ void switchToLateJump(MacroAssembler::Jump check)
+ {
+#ifndef NDEBUG
+ ASSERT(m_mode == InitialJump);
+ m_mode = LateJump;
+#endif
+ // Late jumps should only ever be real jumps.
+#if CPU(ARM_THUMB2)
+ ASSERT(check.m_type == ARMv7Assembler::JumpNoConditionFixedSize);
+ ASSERT(check.m_condition == ARMv7Assembler::ConditionInvalid);
+ m_type = ARMv7Assembler::JumpNoConditionFixedSize;
+ m_condition = ARMv7Assembler::ConditionInvalid;
+#endif
+ m_codeOffset = check.m_label.m_offset;
+ }
+
+ void correctInitialJump(LinkBuffer& linkBuffer)
+ {
+ ASSERT(m_mode == InitialJump);
+#if CPU(ARM_THUMB2)
+ ASSERT(m_type == ARMv7Assembler::JumpNoConditionFixedSize);
+ ASSERT(m_condition == ARMv7Assembler::ConditionInvalid);
+#endif
+ correctJump(linkBuffer);
+ }
+
+ void correctLateJump(LinkBuffer& linkBuffer)
+ {
+ ASSERT(m_mode == LateJump);
+ correctJump(linkBuffer);
+ }
+
+ MacroAssembler::Jump initialJump() const
+ {
+ ASSERT(m_mode == InitialJump);
+ return getJump();
+ }
+
+ MacroAssembler::Jump lateJump() const
+ {
+ ASSERT(m_mode == LateJump);
+ return getJump();
+ }
+
+ CodeLocationJump codeLocationForRepatch(CodeBlock*) const;
+
+private:
+ void correctJump(LinkBuffer& linkBuffer)
+ {
+#ifndef NDEBUG
+ m_mode = CorrectedJump;
+#endif
+ MacroAssembler::Label label;
+ label.m_label.m_offset = m_codeOffset;
+ m_codeOffset = linkBuffer.offsetOf(label);
+ }
+
+ MacroAssembler::Jump getJump() const
+ {
+ MacroAssembler::Jump jump;
+ jump.m_label.m_offset = m_codeOffset;
+#if CPU(ARM_THUMB2)
+ jump.m_type = m_type;
+ jump.m_condition = m_condition;
+#endif
+ return jump;
+ }
+
+ unsigned m_codeOffset;
+
+#if CPU(ARM_THUMB2)
+ ARMv7Assembler::JumpType m_type : 8;
+ ARMv7Assembler::Condition m_condition : 8;
+#endif
+
+#ifndef NDEBUG
+ enum Mode {
+ InitialJump,
+ LateJump,
+ CorrectedJump
+ };
+
+ Mode m_mode;
+#endif
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGCorrectableJumpPoint_h
diff --git a/Source/JavaScriptCore/dfg/DFGDriver.cpp b/Source/JavaScriptCore/dfg/DFGDriver.cpp
new file mode 100644
index 000000000..2143fbc19
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGDriver.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGDriver.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGByteCodeParser.h"
+#include "DFGJITCompiler.h"
+#include "DFGPropagator.h"
+
+namespace JSC { namespace DFG {
+
+enum CompileMode { CompileFunction, CompileOther };
+inline bool compile(CompileMode compileMode, ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr* jitCodeWithArityCheck)
+{
+ SamplingRegion samplingRegion("DFG Compilation (Driver)");
+
+ ASSERT(codeBlock);
+ ASSERT(codeBlock->alternative());
+ ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "DFG compiling code block %p(%p), number of instructions = %u.\n", codeBlock, codeBlock->alternative(), codeBlock->instructionCount());
+#endif
+
+ JSGlobalData* globalData = &exec->globalData();
+ Graph dfg;
+ if (!parse(dfg, globalData, codeBlock))
+ return false;
+
+ if (compileMode == CompileFunction)
+ dfg.predictArgumentTypes(codeBlock);
+
+ propagate(dfg, globalData, codeBlock);
+
+ JITCompiler dataFlowJIT(globalData, dfg, codeBlock);
+ if (compileMode == CompileFunction) {
+ ASSERT(jitCodeWithArityCheck);
+
+ dataFlowJIT.compileFunction(jitCode, *jitCodeWithArityCheck);
+ } else {
+ ASSERT(compileMode == CompileOther);
+ ASSERT(!jitCodeWithArityCheck);
+
+ dataFlowJIT.compile(jitCode);
+ }
+
+ return true;
+}
+
+bool tryCompile(ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode)
+{
+ return compile(CompileOther, exec, codeBlock, jitCode, 0);
+}
+
+bool tryCompileFunction(ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck)
+{
+ return compile(CompileFunction, exec, codeBlock, jitCode, &jitCodeWithArityCheck);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
diff --git a/Source/JavaScriptCore/dfg/DFGDriver.h b/Source/JavaScriptCore/dfg/DFGDriver.h
new file mode 100644
index 000000000..dad45f32e
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGDriver.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGDriver_h
+#define DFGDriver_h
+
+#include <wtf/Platform.h>
+
+namespace JSC {
+
+class ExecState;
+class CodeBlock;
+class JITCode;
+class MacroAssemblerCodePtr;
+
+namespace DFG {
+
+#if ENABLE(DFG_JIT)
+bool tryCompile(ExecState*, CodeBlock*, JITCode&);
+bool tryCompileFunction(ExecState*, CodeBlock*, JITCode&, MacroAssemblerCodePtr& jitCodeWithArityCheck);
+#else
+inline bool tryCompile(ExecState*, CodeBlock*, JITCode&) { return false; }
+inline bool tryCompileFunction(ExecState*, CodeBlock*, JITCode&, MacroAssemblerCodePtr&) { return false; }
+#endif
+
+} } // namespace JSC::DFG
+
+#endif
+
diff --git a/Source/JavaScriptCore/dfg/DFGFPRInfo.h b/Source/JavaScriptCore/dfg/DFGFPRInfo.h
new file mode 100644
index 000000000..dbd60a4e6
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGFPRInfo.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGFPRInfo_h
+#define DFGFPRInfo_h
+
+#if ENABLE(DFG_JIT)
+
+#include <assembler/MacroAssembler.h>
+#include <dfg/DFGRegisterBank.h>
+
+namespace JSC { namespace DFG {
+
+typedef MacroAssembler::FPRegisterID FPRReg;
+#define InvalidFPRReg ((FPRReg)-1)
+
+#if CPU(X86) || CPU(X86_64)
+
+class FPRInfo {
+public:
+ typedef FPRReg RegisterType;
+ static const unsigned numberOfRegisters = 6;
+
+ // Temporary registers.
+ static const FPRReg fpRegT0 = X86Registers::xmm0;
+ static const FPRReg fpRegT1 = X86Registers::xmm1;
+ static const FPRReg fpRegT2 = X86Registers::xmm2;
+ static const FPRReg fpRegT3 = X86Registers::xmm3;
+ static const FPRReg fpRegT4 = X86Registers::xmm4;
+ static const FPRReg fpRegT5 = X86Registers::xmm5;
+#if CPU(X86_64)
+    // Only X86_64 passes arguments in xmm registers.
+ static const FPRReg argumentFPR0 = X86Registers::xmm0; // fpRegT0
+ static const FPRReg argumentFPR1 = X86Registers::xmm1; // fpRegT1
+ static const FPRReg argumentFPR2 = X86Registers::xmm2; // fpRegT2
+ static const FPRReg argumentFPR3 = X86Registers::xmm3; // fpRegT3
+#endif
+ // On X86 the return will actually be on the x87 stack,
+ // so we'll copy to xmm0 for sanity!
+ static const FPRReg returnValueFPR = X86Registers::xmm0; // fpRegT0
+
+    // FPRReg mapping is direct, the machine register numbers can
+ // be used directly as indices into the FPR RegisterBank.
+ COMPILE_ASSERT(X86Registers::xmm0 == 0, xmm0_is_0);
+ COMPILE_ASSERT(X86Registers::xmm1 == 1, xmm1_is_1);
+ COMPILE_ASSERT(X86Registers::xmm2 == 2, xmm2_is_2);
+ COMPILE_ASSERT(X86Registers::xmm3 == 3, xmm3_is_3);
+ COMPILE_ASSERT(X86Registers::xmm4 == 4, xmm4_is_4);
+ COMPILE_ASSERT(X86Registers::xmm5 == 5, xmm5_is_5);
+ static FPRReg toRegister(unsigned index)
+ {
+ return (FPRReg)index;
+ }
+ static unsigned toIndex(FPRReg reg)
+ {
+ return (unsigned)reg;
+ }
+
+#ifndef NDEBUG
+ static const char* debugName(FPRReg reg)
+ {
+ ASSERT(reg != InvalidFPRReg);
+#if CPU(X86_64)
+ ASSERT(reg < 16);
+ static const char* nameForRegister[16] = {
+ "xmm0", "xmm1", "xmm2", "xmm3",
+ "xmm4", "xmm5", "xmm6", "xmm7",
+ "xmm8", "xmm9", "xmm10", "xmm11",
+ "xmm12", "xmm13", "xmm14", "xmm15"
+ };
+#elif CPU(X86)
+ ASSERT(reg < 8);
+ static const char* nameForRegister[8] = {
+ "xmm0", "xmm1", "xmm2", "xmm3",
+ "xmm4", "xmm5", "xmm6", "xmm7"
+ };
+#endif
+ return nameForRegister[reg];
+ }
+#endif
+};
+
+#endif
+
+#if CPU(ARM_THUMB2)
+
+class FPRInfo {
+public:
+ typedef FPRReg RegisterType;
+ static const unsigned numberOfRegisters = 6;
+
+ // Temporary registers.
+    // d7 is used by the MacroAssembler as fpTempRegister.
+ static const FPRReg fpRegT0 = ARMRegisters::d0;
+ static const FPRReg fpRegT1 = ARMRegisters::d1;
+ static const FPRReg fpRegT2 = ARMRegisters::d2;
+ static const FPRReg fpRegT3 = ARMRegisters::d3;
+ static const FPRReg fpRegT4 = ARMRegisters::d4;
+ static const FPRReg fpRegT5 = ARMRegisters::d5;
+ // ARMv7 doesn't pass arguments in fp registers. The return
+ // value is also actually in integer registers, for now
+ // we'll return in d0 for simplicity.
+ static const FPRReg returnValueFPR = ARMRegisters::d0; // fpRegT0
+
+    // FPRReg mapping is direct, the machine register numbers can
+ // be used directly as indices into the FPR RegisterBank.
+ COMPILE_ASSERT(ARMRegisters::d0 == 0, d0_is_0);
+ COMPILE_ASSERT(ARMRegisters::d1 == 1, d1_is_1);
+ COMPILE_ASSERT(ARMRegisters::d2 == 2, d2_is_2);
+ COMPILE_ASSERT(ARMRegisters::d3 == 3, d3_is_3);
+ COMPILE_ASSERT(ARMRegisters::d4 == 4, d4_is_4);
+ COMPILE_ASSERT(ARMRegisters::d5 == 5, d5_is_5);
+ static FPRReg toRegister(unsigned index)
+ {
+ return (FPRReg)index;
+ }
+ static unsigned toIndex(FPRReg reg)
+ {
+ return (unsigned)reg;
+ }
+
+#ifndef NDEBUG
+ static const char* debugName(FPRReg reg)
+ {
+ ASSERT(reg != InvalidFPRReg);
+ ASSERT(reg < 32);
+ static const char* nameForRegister[32] = {
+ "d0", "d1", "d2", "d3",
+ "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11",
+ "d12", "d13", "d14", "d15"
+ "d16", "d17", "d18", "d19"
+ "d20", "d21", "d22", "d23"
+ "d24", "d25", "d26", "d27"
+ "d28", "d29", "d30", "d31"
+ };
+ return nameForRegister[reg];
+ }
+#endif
+};
+
+#endif
+
+typedef RegisterBank<FPRInfo>::iterator fpr_iterator;
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGGPRInfo.h b/Source/JavaScriptCore/dfg/DFGGPRInfo.h
new file mode 100644
index 000000000..2f779d645
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGGPRInfo.h
@@ -0,0 +1,465 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGGPRInfo_h
+#define DFGGPRInfo_h
+
+#if ENABLE(DFG_JIT)
+
+#include <assembler/MacroAssembler.h>
+#include <dfg/DFGRegisterBank.h>
+
+namespace JSC { namespace DFG {
+
+typedef MacroAssembler::RegisterID GPRReg;
+#define InvalidGPRReg ((GPRReg)-1)
+
+#if USE(JSVALUE64)
+class JSValueRegs {
+public:
+ JSValueRegs()
+ : m_gpr(InvalidGPRReg)
+ {
+ }
+
+ explicit JSValueRegs(GPRReg gpr)
+ : m_gpr(gpr)
+ {
+ }
+
+ bool operator!() const { return m_gpr == InvalidGPRReg; }
+
+ GPRReg gpr() const { return m_gpr; }
+
+private:
+ GPRReg m_gpr;
+};
+
+class JSValueSource {
+public:
+ JSValueSource()
+ : m_offset(notAddress())
+ , m_base(InvalidGPRReg)
+ {
+ }
+
+ JSValueSource(JSValueRegs regs)
+ : m_offset(notAddress())
+ , m_base(regs.gpr())
+ {
+ }
+
+ explicit JSValueSource(GPRReg gpr)
+ : m_offset(notAddress())
+ , m_base(gpr)
+ {
+ }
+
+ JSValueSource(MacroAssembler::Address address)
+ : m_offset(address.offset)
+ , m_base(address.base)
+ {
+ ASSERT(m_offset != notAddress());
+ ASSERT(m_base != InvalidGPRReg);
+ }
+
+ static JSValueSource unboxedCell(GPRReg payloadGPR)
+ {
+ return JSValueSource(payloadGPR);
+ }
+
+ bool operator!() const { return m_base == InvalidGPRReg; }
+
+ bool isAddress() const { return m_offset != notAddress(); }
+
+ int32_t offset() const
+ {
+ ASSERT(isAddress());
+ return m_offset;
+ }
+
+ GPRReg base() const
+ {
+ ASSERT(isAddress());
+ return m_base;
+ }
+
+ GPRReg gpr() const
+ {
+ ASSERT(!isAddress());
+ return m_base;
+ }
+
+ MacroAssembler::Address asAddress() const { return MacroAssembler::Address(base(), offset()); }
+
+private:
+ static inline int32_t notAddress() { return 0x80000000; }
+
+ int32_t m_offset;
+ GPRReg m_base;
+};
+#endif
+
+#if USE(JSVALUE32_64)
+class JSValueRegs {
+public:
+ JSValueRegs()
+ : m_tagGPR(static_cast<int8_t>(InvalidGPRReg))
+ , m_payloadGPR(static_cast<int8_t>(InvalidGPRReg))
+ {
+ }
+
+ JSValueRegs(GPRReg tagGPR, GPRReg payloadGPR)
+ : m_tagGPR(tagGPR)
+ , m_payloadGPR(payloadGPR)
+ {
+ ASSERT((static_cast<GPRReg>(m_tagGPR) == InvalidGPRReg) == (static_cast<GPRReg>(payloadGPR) == InvalidGPRReg));
+ }
+
+ bool operator!() const { return static_cast<GPRReg>(m_tagGPR) == InvalidGPRReg; }
+
+ GPRReg tagGPR() const { return static_cast<GPRReg>(m_tagGPR); }
+ GPRReg payloadGPR() const { return static_cast<GPRReg>(m_payloadGPR); }
+
+private:
+ int8_t m_tagGPR;
+ int8_t m_payloadGPR;
+};
+
+class JSValueSource {
+public:
+ JSValueSource()
+ : m_offset(notAddress())
+ , m_baseOrTag(static_cast<int8_t>(InvalidGPRReg))
+ , m_payload(static_cast<int8_t>(InvalidGPRReg))
+ , m_tagType(0)
+ {
+ }
+
+ JSValueSource(JSValueRegs regs)
+ : m_offset(notAddress())
+ , m_baseOrTag(regs.tagGPR())
+ , m_payload(regs.payloadGPR())
+ , m_tagType(0)
+ {
+ }
+
+ JSValueSource(GPRReg tagGPR, GPRReg payloadGPR)
+ : m_offset(notAddress())
+ , m_baseOrTag(static_cast<int8_t>(tagGPR))
+ , m_payload(static_cast<int8_t>(payloadGPR))
+ , m_tagType(0)
+ {
+ }
+
+ JSValueSource(MacroAssembler::Address address)
+ : m_offset(address.offset)
+ , m_baseOrTag(static_cast<int8_t>(address.base))
+ , m_payload(static_cast<int8_t>(InvalidGPRReg))
+ , m_tagType(0)
+ {
+ ASSERT(m_offset != notAddress());
+ ASSERT(static_cast<GPRReg>(m_baseOrTag) != InvalidGPRReg);
+ }
+
+ static JSValueSource unboxedCell(GPRReg payloadGPR)
+ {
+ JSValueSource result;
+ result.m_offset = notAddress();
+ result.m_baseOrTag = static_cast<int8_t>(InvalidGPRReg);
+ result.m_payload = static_cast<int8_t>(payloadGPR);
+ result.m_tagType = static_cast<int8_t>(JSValue::CellTag);
+ return result;
+ }
+
+ bool operator!() const { return static_cast<GPRReg>(m_baseOrTag) == InvalidGPRReg && static_cast<GPRReg>(m_payload) == InvalidGPRReg; }
+
+ bool isAddress() const
+ {
+ ASSERT(!!*this);
+ return m_offset != notAddress();
+ }
+
+ int32_t offset() const
+ {
+ ASSERT(isAddress());
+ return m_offset;
+ }
+
+ GPRReg base() const
+ {
+ ASSERT(isAddress());
+ return static_cast<GPRReg>(m_baseOrTag);
+ }
+
+ GPRReg tagGPR() const
+ {
+ ASSERT(!isAddress() && m_baseOrTag != InvalidGPRReg);
+ return static_cast<GPRReg>(m_baseOrTag);
+ }
+
+ GPRReg payloadGPR() const
+ {
+ ASSERT(!isAddress());
+ return static_cast<GPRReg>(m_payload);
+ }
+
+ bool hasKnownTag() const
+ {
+ ASSERT(!!*this);
+ ASSERT(!isAddress());
+ return static_cast<GPRReg>(m_baseOrTag) == InvalidGPRReg;
+ }
+
+ uint32_t tag() const
+ {
+ return static_cast<int32_t>(m_tagType);
+ }
+
+ MacroAssembler::Address asAddress(unsigned additionalOffset = 0) const { return MacroAssembler::Address(base(), offset() + additionalOffset); }
+
+private:
+ static inline int32_t notAddress() { return 0x80000000; }
+
+ int32_t m_offset;
+ int8_t m_baseOrTag;
+ int8_t m_payload;
+ int8_t m_tagType; // Contains the low bits of the tag.
+};
+#endif
+
+#if CPU(X86)
+#define NUMBER_OF_ARGUMENT_REGISTERS 0
+
+class GPRInfo {
+public:
+ typedef GPRReg RegisterType;
+ static const unsigned numberOfRegisters = 5;
+
+ // Temporary registers.
+ static const GPRReg regT0 = X86Registers::eax;
+ static const GPRReg regT1 = X86Registers::edx;
+ static const GPRReg regT2 = X86Registers::ecx;
+ static const GPRReg regT3 = X86Registers::ebx;
+ static const GPRReg regT4 = X86Registers::esi;
+ // These registers match the baseline JIT.
+ static const GPRReg cachedResultRegister = regT0;
+ static const GPRReg cachedResultRegister2 = regT1;
+ static const GPRReg callFrameRegister = X86Registers::edi;
+ // These constants provide the names for the general purpose argument & return value registers.
+ static const GPRReg argumentGPR0 = X86Registers::ecx; // regT2
+ static const GPRReg argumentGPR1 = X86Registers::edx; // regT1
+ static const GPRReg returnValueGPR = X86Registers::eax; // regT0
+ static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1
+ static const GPRReg nonPreservedNonReturnGPR = X86Registers::ecx;
+
+ static GPRReg toRegister(unsigned index)
+ {
+ ASSERT(index < numberOfRegisters);
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4 };
+ return registerForIndex[index];
+ }
+
+ static unsigned toIndex(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(reg < 8);
+ static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 4, InvalidIndex };
+ unsigned result = indexForRegister[reg];
+ ASSERT(result != InvalidIndex);
+ return result;
+ }
+
+#ifndef NDEBUG
+ static const char* debugName(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(reg < 8);
+ static const char* nameForRegister[8] = {
+ "eax", "ecx", "edx", "ebx",
+ "esp", "ebp", "esi", "edi",
+ };
+ return nameForRegister[reg];
+ }
+#endif
+private:
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
+
+#endif
+
+#if CPU(X86_64)
+#define NUMBER_OF_ARGUMENT_REGISTERS 6
+
+class GPRInfo {
+public:
+ typedef GPRReg RegisterType;
+ static const unsigned numberOfRegisters = 9;
+
+ // These registers match the baseline JIT.
+ static const GPRReg cachedResultRegister = X86Registers::eax;
+ static const GPRReg timeoutCheckRegister = X86Registers::r12;
+ static const GPRReg callFrameRegister = X86Registers::r13;
+ static const GPRReg tagTypeNumberRegister = X86Registers::r14;
+ static const GPRReg tagMaskRegister = X86Registers::r15;
+ // Temporary registers.
+ static const GPRReg regT0 = X86Registers::eax;
+ static const GPRReg regT1 = X86Registers::edx;
+ static const GPRReg regT2 = X86Registers::ecx;
+ static const GPRReg regT3 = X86Registers::ebx;
+ static const GPRReg regT4 = X86Registers::edi;
+ static const GPRReg regT5 = X86Registers::esi;
+ static const GPRReg regT6 = X86Registers::r8;
+ static const GPRReg regT7 = X86Registers::r9;
+ static const GPRReg regT8 = X86Registers::r10;
+ // These constants provide the names for the general purpose argument & return value registers.
+ static const GPRReg argumentGPR0 = X86Registers::edi; // regT4
+ static const GPRReg argumentGPR1 = X86Registers::esi; // regT5
+ static const GPRReg argumentGPR2 = X86Registers::edx; // regT1
+ static const GPRReg argumentGPR3 = X86Registers::ecx; // regT2
+ static const GPRReg argumentGPR4 = X86Registers::r8; // regT6
+ static const GPRReg argumentGPR5 = X86Registers::r9; // regT7
+ static const GPRReg returnValueGPR = X86Registers::eax; // regT0
+ static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1
+ static const GPRReg nonPreservedNonReturnGPR = X86Registers::esi;
+
+ static GPRReg toRegister(unsigned index)
+ {
+ ASSERT(index < numberOfRegisters);
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regT8 };
+ return registerForIndex[index];
+ }
+
+ static unsigned toIndex(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(reg < 16);
+ static const unsigned indexForRegister[16] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 5, 4, 6, 7, 8, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+ unsigned result = indexForRegister[reg];
+ ASSERT(result != InvalidIndex);
+ return result;
+ }
+
+#ifndef NDEBUG
+ static const char* debugName(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(reg < 16);
+ static const char* nameForRegister[16] = {
+ "rax", "rcx", "rdx", "rbx",
+ "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15"
+ };
+ return nameForRegister[reg];
+ }
+#endif
+private:
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
+
+#endif
+
+#if CPU(ARM_THUMB2)
+#define NUMBER_OF_ARGUMENT_REGISTERS 4
+
+class GPRInfo {
+public:
+ typedef GPRReg RegisterType;
+ static const unsigned numberOfRegisters = 9;
+
+ // Temporary registers.
+ static const GPRReg regT0 = ARMRegisters::r0;
+ static const GPRReg regT1 = ARMRegisters::r1;
+ static const GPRReg regT2 = ARMRegisters::r2;
+ static const GPRReg regT3 = ARMRegisters::r4;
+ static const GPRReg regT4 = ARMRegisters::r7;
+ static const GPRReg regT5 = ARMRegisters::r8;
+ static const GPRReg regT6 = ARMRegisters::r9;
+ static const GPRReg regT7 = ARMRegisters::r10;
+ static const GPRReg regT8 = ARMRegisters::r11;
+ // These registers match the baseline JIT.
+ static const GPRReg cachedResultRegister = regT0;
+ static const GPRReg cachedResultRegister2 = regT1;
+ static const GPRReg callFrameRegister = ARMRegisters::r5;
+ // These constants provide the names for the general purpose argument & return value registers.
+ static const GPRReg argumentGPR0 = ARMRegisters::r0; // regT0
+ static const GPRReg argumentGPR1 = ARMRegisters::r1; // regT1
+ static const GPRReg argumentGPR2 = ARMRegisters::r2; // regT2
+    // FIXME: r3 is currently used by the MacroAssembler as a temporary. This could
+    // theoretically be a problem if it is used in code generation between the
+    // arguments being set up and the call being made. That said, any change
+    // introducing a problem here is likely to be immediately apparent!
+ static const GPRReg argumentGPR3 = ARMRegisters::r3; // FIXME!
+ static const GPRReg returnValueGPR = ARMRegisters::r0; // regT0
+ static const GPRReg returnValueGPR2 = ARMRegisters::r1; // regT1
+ static const GPRReg nonPreservedNonReturnGPR = ARMRegisters::r2;
+
+ static GPRReg toRegister(unsigned index)
+ {
+ ASSERT(index < numberOfRegisters);
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regT8 };
+ return registerForIndex[index];
+ }
+
+ static unsigned toIndex(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(reg < 16);
+ static const unsigned indexForRegister[16] = { 0, 1, 2, InvalidIndex, 3, InvalidIndex, InvalidIndex, 4, 5, 6, 7, 8, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+ unsigned result = indexForRegister[reg];
+ ASSERT(result != InvalidIndex);
+ return result;
+ }
+
+#ifndef NDEBUG
+ static const char* debugName(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(reg < 16);
+ static const char* nameForRegister[16] = {
+ "r0", "r1", "r2", "r3",
+ "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15"
+ };
+ return nameForRegister[reg];
+ }
+#endif
+private:
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
+
+#endif
+
+typedef RegisterBank<GPRInfo>::iterator gpr_iterator;
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h
new file mode 100644
index 000000000..6f0fe3143
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGGenerationInfo_h
+#define DFGGenerationInfo_h
+
+#if ENABLE(DFG_JIT)
+
+#include "DataFormat.h"
+#include <dfg/DFGJITCompiler.h>
+
+namespace JSC { namespace DFG {
+
+// === GenerationInfo ===
+//
+// This class is used to track the current status of live values during code generation.
+// It can provide information as to whether a value is in machine registers, and if so which,
+// whether a value has been spilled to the RegisterFile, and if so may be able to provide
+// details of the format in memory (all values are spilled in a boxed form, but we may be
+// able to track the type of box), and tracks how many outstanding uses of a value remain,
+// so that we know when the value is dead and the machine registers associated with it
+// may be released.
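+//
+// A rough lifecycle for an integer value might look like this (an illustrative sketch;
+// 'info', 'nodeIndex', 'useCount', 'gpr', 'gpr2' and 'spillFormat' are hypothetical names):
+//     info.initInteger(nodeIndex, useCount, gpr); // the value lives in a GPR
+//     info.spill(spillFormat);                    // register needed elsewhere: value stored to the RegisterFile
+//     info.fillInteger(gpr2);                     // later re-filled into a (possibly different) GPR
+//     if (info.use()) { /* that was the last use; associated registers may be released */ }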
+class GenerationInfo {
+public:
+ GenerationInfo()
+ : m_nodeIndex(NoNode)
+ , m_useCount(0)
+ , m_registerFormat(DataFormatNone)
+ , m_spillFormat(DataFormatNone)
+ , m_canFill(false)
+ {
+ }
+
+ void initConstant(NodeIndex nodeIndex, uint32_t useCount)
+ {
+ m_nodeIndex = nodeIndex;
+ m_useCount = useCount;
+ m_registerFormat = DataFormatNone;
+ m_spillFormat = DataFormatNone;
+ m_canFill = true;
+ ASSERT(m_useCount);
+ }
+ void initInteger(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr)
+ {
+ m_nodeIndex = nodeIndex;
+ m_useCount = useCount;
+ m_registerFormat = DataFormatInteger;
+ m_spillFormat = DataFormatNone;
+ m_canFill = false;
+ u.gpr = gpr;
+ ASSERT(m_useCount);
+ }
+#if USE(JSVALUE64)
+ void initJSValue(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr, DataFormat format = DataFormatJS)
+ {
+ ASSERT(format & DataFormatJS);
+
+ m_nodeIndex = nodeIndex;
+ m_useCount = useCount;
+ m_registerFormat = format;
+ m_spillFormat = DataFormatNone;
+ m_canFill = false;
+ u.gpr = gpr;
+ ASSERT(m_useCount);
+ }
+#elif USE(JSVALUE32_64)
+ void initJSValue(NodeIndex nodeIndex, uint32_t useCount, GPRReg tagGPR, GPRReg payloadGPR, DataFormat format = DataFormatJS)
+ {
+ ASSERT(format & DataFormatJS);
+
+ m_nodeIndex = nodeIndex;
+ m_useCount = useCount;
+ m_registerFormat = format;
+ m_spillFormat = DataFormatNone;
+ m_canFill = false;
+ u.v.tagGPR = tagGPR;
+ u.v.payloadGPR = payloadGPR;
+ ASSERT(m_useCount);
+ }
+#endif
+ void initCell(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr)
+ {
+ m_nodeIndex = nodeIndex;
+ m_useCount = useCount;
+ m_registerFormat = DataFormatCell;
+ m_spillFormat = DataFormatNone;
+ m_canFill = false;
+ u.gpr = gpr;
+ ASSERT(m_useCount);
+ }
+ void initBoolean(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr)
+ {
+ m_nodeIndex = nodeIndex;
+ m_useCount = useCount;
+ m_registerFormat = DataFormatBoolean;
+ m_spillFormat = DataFormatNone;
+ m_canFill = false;
+ u.gpr = gpr;
+ ASSERT(m_useCount);
+ }
+ void initDouble(NodeIndex nodeIndex, uint32_t useCount, FPRReg fpr)
+ {
+ ASSERT(fpr != InvalidFPRReg);
+ m_nodeIndex = nodeIndex;
+ m_useCount = useCount;
+ m_registerFormat = DataFormatDouble;
+ m_spillFormat = DataFormatNone;
+ m_canFill = false;
+ u.fpr = fpr;
+ ASSERT(m_useCount);
+ }
+ void initStorage(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr)
+ {
+ m_nodeIndex = nodeIndex;
+ m_useCount = useCount;
+ m_registerFormat = DataFormatStorage;
+ m_spillFormat = DataFormatNone;
+ m_canFill = false;
+ u.gpr = gpr;
+ ASSERT(m_useCount);
+ }
+
+ // Get the index of the node that produced this value.
+ NodeIndex nodeIndex() { return m_nodeIndex; }
+
+ // Mark the value as having been used (decrement the useCount).
+ // Returns true if this was the last use of the value, and any
+ // associated machine registers may be freed.
+ bool use()
+ {
+ ASSERT(m_useCount);
+ return !--m_useCount;
+ }
+
+ // Used to check the operands of operations to see if they are on
+ // their last use; in some cases it may be safe to reuse the same
+ // machine register for the result of the operation.
+ bool canReuse()
+ {
+ ASSERT(m_useCount);
+ return m_useCount == 1;
+ }
+
+ // Get the format of the value in machine registers (or 'none').
+ DataFormat registerFormat() { return m_registerFormat; }
+ // Get the format of the value as it is spilled in the RegisterFile (or 'none').
+ DataFormat spillFormat() { return m_spillFormat; }
+
+ bool isJSFormat(DataFormat expectedFormat)
+ {
+ return JSC::isJSFormat(registerFormat(), expectedFormat) || JSC::isJSFormat(spillFormat(), expectedFormat);
+ }
+
+ bool isJSInteger()
+ {
+ return isJSFormat(DataFormatJSInteger);
+ }
+
+ bool isJSDouble()
+ {
+ return isJSFormat(DataFormatJSDouble);
+ }
+
+ bool isJSCell()
+ {
+ return isJSFormat(DataFormatJSCell);
+ }
+
+ bool isJSBoolean()
+ {
+ return isJSFormat(DataFormatJSBoolean);
+ }
+
+ bool isUnknownJS()
+ {
+ return spillFormat() == DataFormatNone
+ ? registerFormat() == DataFormatJS || registerFormat() == DataFormatNone
+ : spillFormat() == DataFormatJS;
+ }
+
+    // Get the machine register currently holding the value.
+#if USE(JSVALUE64)
+ GPRReg gpr() { ASSERT(m_registerFormat && m_registerFormat != DataFormatDouble); return u.gpr; }
+ FPRReg fpr() { ASSERT(m_registerFormat == DataFormatDouble); return u.fpr; }
+ JSValueRegs jsValueRegs() { ASSERT(m_registerFormat & DataFormatJS); return JSValueRegs(u.gpr); }
+#elif USE(JSVALUE32_64)
+ GPRReg gpr() { ASSERT(!(m_registerFormat & DataFormatJS) && m_registerFormat != DataFormatDouble); return u.gpr; }
+ GPRReg tagGPR() { ASSERT(m_registerFormat & DataFormatJS); return u.v.tagGPR; }
+ GPRReg payloadGPR() { ASSERT(m_registerFormat & DataFormatJS); return u.v.payloadGPR; }
+ FPRReg fpr() { ASSERT(m_registerFormat == DataFormatDouble || m_registerFormat == DataFormatJSDouble); return u.fpr; }
+ JSValueRegs jsValueRegs() { ASSERT(m_registerFormat & DataFormatJS); return JSValueRegs(u.v.tagGPR, u.v.payloadGPR); }
+#endif
+
+ // Check whether a value needs spilling in order to free up any associated machine registers.
+ bool needsSpill()
+ {
+ // This should only be called on values that are currently in a register.
+ ASSERT(m_registerFormat != DataFormatNone);
+ // Constants do not need spilling, nor do values that have already been
+ // spilled to the RegisterFile.
+ return !m_canFill;
+ }
+
+ // Called when a VirtualRegister is being spilled to the RegisterFile for the first time.
+ void spill(DataFormat spillFormat)
+ {
+        // We shouldn't be spilling values that don't need spilling.
+ ASSERT(!m_canFill);
+ ASSERT(m_spillFormat == DataFormatNone);
+ // We should only be spilling values that are currently in machine registers.
+ ASSERT(m_registerFormat != DataFormatNone);
+
+ m_registerFormat = DataFormatNone;
+ m_spillFormat = spillFormat;
+ m_canFill = true;
+ }
+
+ // Called on values that don't need spilling (constants and values that have
+ // already been spilled), to mark them as no longer being in machine registers.
+ void setSpilled()
+ {
+ // Should only be called on values that don't need spilling, and are currently in registers.
+ ASSERT(m_canFill && m_registerFormat != DataFormatNone);
+ m_registerFormat = DataFormatNone;
+ }
+
+ void killSpilled()
+ {
+ m_spillFormat = DataFormatNone;
+ m_canFill = false;
+ }
+
+ // Record that this value is filled into machine registers,
+ // tracking which registers, and what format the value has.
+#if USE(JSVALUE64)
+ void fillJSValue(GPRReg gpr, DataFormat format = DataFormatJS)
+ {
+ ASSERT(format & DataFormatJS);
+ m_registerFormat = format;
+ u.gpr = gpr;
+ }
+#elif USE(JSVALUE32_64)
+ void fillJSValue(GPRReg tagGPR, GPRReg payloadGPR, DataFormat format = DataFormatJS)
+ {
+ ASSERT(format & DataFormatJS);
+ m_registerFormat = format;
+ u.v.tagGPR = tagGPR; // FIXME: for JSValues with known type (boolean, integer, cell etc.) no tagGPR is needed?
+ u.v.payloadGPR = payloadGPR;
+ }
+ void fillCell(GPRReg gpr)
+ {
+ m_registerFormat = DataFormatCell;
+ u.gpr = gpr;
+ }
+#endif
+ void fillInteger(GPRReg gpr)
+ {
+ m_registerFormat = DataFormatInteger;
+ u.gpr = gpr;
+ }
+ void fillBoolean(GPRReg gpr)
+ {
+ m_registerFormat = DataFormatBoolean;
+ u.gpr = gpr;
+ }
+ void fillDouble(FPRReg fpr)
+ {
+ ASSERT(fpr != InvalidFPRReg);
+ m_registerFormat = DataFormatDouble;
+ u.fpr = fpr;
+ }
+ void fillStorage(GPRReg gpr)
+ {
+ m_registerFormat = DataFormatStorage;
+ u.gpr = gpr;
+ }
+
+ bool alive()
+ {
+ return m_useCount;
+ }
+
+private:
+ // The index of the node whose result is stored in this virtual register.
+ NodeIndex m_nodeIndex;
+ uint32_t m_useCount;
+ DataFormat m_registerFormat;
+ DataFormat m_spillFormat;
+ bool m_canFill;
+ union {
+ GPRReg gpr;
+ FPRReg fpr;
+#if USE(JSVALUE32_64)
+ struct {
+ GPRReg tagGPR;
+ GPRReg payloadGPR;
+ } v;
+#endif
+ } u;
+};
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp
new file mode 100644
index 000000000..487b69206
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGGraph.h"
+
+#include "CodeBlock.h"
+
+#if ENABLE(DFG_JIT)
+
+namespace JSC { namespace DFG {
+
+#ifndef NDEBUG
+
+// Creates an array of stringized names.
+static const char* dfgOpNames[] = {
+#define STRINGIZE_DFG_OP_ENUM(opcode, flags) #opcode ,
+ FOR_EACH_DFG_OP(STRINGIZE_DFG_OP_ENUM)
+#undef STRINGIZE_DFG_OP_ENUM
+};
+
+const char *Graph::opName(NodeType op)
+{
+ return dfgOpNames[op & NodeIdMask];
+}
+
+const char* Graph::nameOfVariableAccessData(VariableAccessData* variableAccessData)
+{
+ // Variables are already numbered. For readability of IR dumps, this returns
+ // an alphabetic name for the variable access data, so that you don't have to
+ // reason about two numbers (variable number and live range number), but instead
+ // a number and a letter.
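+    //
+    // For example (an illustrative reading of the loop below): index 0 yields "A", index 1
+    // yields "B", and larger indices append further letters least-significant first, so
+    // index 26 comes out as "AB".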
+
+ unsigned index = std::numeric_limits<unsigned>::max();
+ for (unsigned i = 0; i < m_variableAccessData.size(); ++i) {
+ if (&m_variableAccessData[i] == variableAccessData) {
+ index = i;
+ break;
+ }
+ }
+
+ ASSERT(index != std::numeric_limits<unsigned>::max());
+
+ if (!index)
+ return "A";
+
+ static char buf[10];
+ BoundsCheckedPointer<char> ptr(buf, sizeof(buf));
+
+ while (index) {
+ *ptr++ = 'A' + (index % 26);
+ index /= 26;
+ }
+
+ *ptr++ = 0;
+
+ return buf;
+}
+
+static void printWhiteSpace(unsigned amount)
+{
+ while (amount-- > 0)
+ printf(" ");
+}
+
+void Graph::dumpCodeOrigin(NodeIndex nodeIndex)
+{
+ if (!nodeIndex)
+ return;
+
+ Node& currentNode = at(nodeIndex);
+ Node& previousNode = at(nodeIndex - 1);
+ if (previousNode.codeOrigin.inlineCallFrame == currentNode.codeOrigin.inlineCallFrame)
+ return;
+
+ Vector<CodeOrigin> previousInlineStack = previousNode.codeOrigin.inlineStack();
+ Vector<CodeOrigin> currentInlineStack = currentNode.codeOrigin.inlineStack();
+ unsigned commonSize = std::min(previousInlineStack.size(), currentInlineStack.size());
+ unsigned indexOfDivergence = commonSize;
+ for (unsigned i = 0; i < commonSize; ++i) {
+ if (previousInlineStack[i].inlineCallFrame != currentInlineStack[i].inlineCallFrame) {
+ indexOfDivergence = i;
+ break;
+ }
+ }
+
+ // Print the pops.
+ for (unsigned i = previousInlineStack.size(); i-- > indexOfDivergence;) {
+ printWhiteSpace(i * 2);
+ printf("<-- %p\n", previousInlineStack[i].inlineCallFrame->executable.get());
+ }
+
+ // Print the pushes.
+ for (unsigned i = indexOfDivergence; i < currentInlineStack.size(); ++i) {
+ printWhiteSpace(i * 2);
+ printf("--> %p\n", currentInlineStack[i].inlineCallFrame->executable.get());
+ }
+}
+
+void Graph::dump(NodeIndex nodeIndex, CodeBlock* codeBlock)
+{
+ Node& node = at(nodeIndex);
+ NodeType op = node.op;
+
+ unsigned refCount = node.refCount();
+ bool skipped = !refCount;
+ bool mustGenerate = node.mustGenerate();
+ if (mustGenerate) {
+ ASSERT(refCount);
+ --refCount;
+ }
+
+ dumpCodeOrigin(nodeIndex);
+ printWhiteSpace((node.codeOrigin.inlineDepth() - 1) * 2);
+
+ // Example/explanation of dataflow dump output
+ //
+ // 14: <!2:7> GetByVal(@3, @13)
+ // ^1 ^2 ^3 ^4 ^5
+ //
+ // (1) The nodeIndex of this operation.
+ // (2) The reference count. The number printed is the 'real' count,
+ // not including the 'mustGenerate' ref. If the node is
+    //        'mustGenerate' then the count is prefixed with '!'.
+ // (3) The virtual register slot assigned to this node.
+ // (4) The name of the operation.
+    //    (5) The arguments to the operation. They may be of the form:
+ // @# - a NodeIndex referencing a prior node in the graph.
+ // arg# - an argument number.
+ // $# - the index in the CodeBlock of a constant { for numeric constants the value is displayed | for integers, in both decimal and hex }.
+ // id# - the index in the CodeBlock of an identifier { if codeBlock is passed to dump(), the string representation is displayed }.
+ // var# - the index of a var on the global object, used by GetGlobalVar/PutGlobalVar operations.
+ printf("% 4d:%s<%c%u:", (int)nodeIndex, skipped ? " skipped " : " ", mustGenerate ? '!' : ' ', refCount);
+ if (node.hasResult() && !skipped && node.hasVirtualRegister())
+ printf("%u", node.virtualRegister());
+ else
+ printf("-");
+ printf(">\t%s(", opName(op));
+ bool hasPrinted = false;
+ if (op & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++) {
+ if (hasPrinted)
+ printf(", ");
+ else
+ hasPrinted = true;
+ printf("@%u", m_varArgChildren[childIdx]);
+ }
+ } else {
+ if (node.child1() != NoNode)
+ printf("@%u", node.child1());
+ if (node.child2() != NoNode)
+ printf(", @%u", node.child2());
+ if (node.child3() != NoNode)
+ printf(", @%u", node.child3());
+ hasPrinted = node.child1() != NoNode;
+ }
+
+ if (node.hasArithNodeFlags()) {
+ printf("%s%s", hasPrinted ? ", " : "", arithNodeFlagsAsString(node.rawArithNodeFlags()));
+ hasPrinted = true;
+ }
+ if (node.hasVarNumber()) {
+ printf("%svar%u", hasPrinted ? ", " : "", node.varNumber());
+ hasPrinted = true;
+ }
+ if (node.hasIdentifier()) {
+ if (codeBlock)
+ printf("%sid%u{%s}", hasPrinted ? ", " : "", node.identifierNumber(), codeBlock->identifier(node.identifierNumber()).ustring().utf8().data());
+ else
+ printf("%sid%u", hasPrinted ? ", " : "", node.identifierNumber());
+ hasPrinted = true;
+ }
+ if (node.hasStructureSet()) {
+ for (size_t i = 0; i < node.structureSet().size(); ++i) {
+ printf("%sstruct(%p)", hasPrinted ? ", " : "", node.structureSet()[i]);
+ hasPrinted = true;
+ }
+ }
+ if (node.hasStructureTransitionData()) {
+ printf("%sstruct(%p -> %p)", hasPrinted ? ", " : "", node.structureTransitionData().previousStructure, node.structureTransitionData().newStructure);
+ hasPrinted = true;
+ }
+ if (node.hasStorageAccessData()) {
+ StorageAccessData& storageAccessData = m_storageAccessData[node.storageAccessDataIndex()];
+ if (codeBlock)
+ printf("%sid%u{%s}", hasPrinted ? ", " : "", storageAccessData.identifierNumber, codeBlock->identifier(storageAccessData.identifierNumber).ustring().utf8().data());
+ else
+ printf("%sid%u", hasPrinted ? ", " : "", storageAccessData.identifierNumber);
+
+ printf(", %lu", static_cast<unsigned long>(storageAccessData.offset));
+ hasPrinted = true;
+ }
+ ASSERT(node.hasVariableAccessData() == node.hasLocal());
+ if (node.hasVariableAccessData()) {
+ VariableAccessData* variableAccessData = node.variableAccessData();
+ int operand = variableAccessData->operand();
+ if (operandIsArgument(operand))
+ printf("%sarg%u(%s)", hasPrinted ? ", " : "", operandToArgument(operand), nameOfVariableAccessData(variableAccessData));
+ else
+ printf("%sr%u(%s)", hasPrinted ? ", " : "", operand, nameOfVariableAccessData(variableAccessData));
+ hasPrinted = true;
+ }
+ if (node.hasConstantBuffer() && codeBlock) {
+ if (hasPrinted)
+ printf(", ");
+ printf("%u:[", node.startConstant());
+ for (unsigned i = 0; i < node.numConstants(); ++i) {
+ if (i)
+ printf(", ");
+ printf("%s", codeBlock->constantBuffer(node.startConstant())[i].description());
+ }
+ printf("]");
+ hasPrinted = true;
+ }
+ if (op == JSConstant) {
+ printf("%s$%u", hasPrinted ? ", " : "", node.constantNumber());
+ if (codeBlock) {
+ JSValue value = valueOfJSConstant(codeBlock, nodeIndex);
+ printf(" = %s", value.description());
+ }
+ hasPrinted = true;
+ }
+ if (op == WeakJSConstant) {
+ printf("%s%p", hasPrinted ? ", " : "", node.weakConstant());
+ hasPrinted = true;
+ }
+ if (node.isBranch() || node.isJump()) {
+ printf("%sT:#%u", hasPrinted ? ", " : "", node.takenBlockIndex());
+ hasPrinted = true;
+ }
+ if (node.isBranch()) {
+ printf("%sF:#%u", hasPrinted ? ", " : "", node.notTakenBlockIndex());
+ hasPrinted = true;
+ }
+ (void)hasPrinted;
+
+ printf(")");
+
+ if (!skipped) {
+ if (node.hasVariableAccessData())
+ printf(" predicting %s, double ratio %lf%s", predictionToString(node.variableAccessData()->prediction()), node.variableAccessData()->doubleVoteRatio(), node.variableAccessData()->shouldUseDoubleFormat() ? ", forcing double" : "");
+ else if (node.hasVarNumber())
+ printf(" predicting %s", predictionToString(getGlobalVarPrediction(node.varNumber())));
+ else if (node.hasHeapPrediction())
+ printf(" predicting %s", predictionToString(node.getHeapPrediction()));
+ }
+
+ printf("\n");
+}
+
+void Graph::dump(CodeBlock* codeBlock)
+{
+ for (size_t b = 0; b < m_blocks.size(); ++b) {
+ BasicBlock* block = m_blocks[b].get();
+ printf("Block #%u (bc#%u): %s%s\n", (int)b, block->bytecodeBegin, block->isReachable ? "" : " (skipped)", block->isOSRTarget ? " (OSR target)" : "");
+ printf(" vars before: ");
+ if (block->cfaHasVisited)
+ dumpOperands(block->valuesAtHead, stdout);
+ else
+ printf("<empty>");
+ printf("\n");
+ printf(" var links: ");
+ dumpOperands(block->variablesAtHead, stdout);
+ printf("\n");
+ for (size_t i = block->begin; i < block->end; ++i)
+ dump(i, codeBlock);
+ printf(" vars after: ");
+ if (block->cfaHasVisited)
+ dumpOperands(block->valuesAtTail, stdout);
+ else
+ printf("<empty>");
+ printf("\n");
+ }
+ printf("Phi Nodes:\n");
+ for (size_t i = m_blocks.last()->end; i < size(); ++i)
+ dump(i, codeBlock);
+}
+
+#endif
+
+// FIXME: Convert this to be iterative, not recursive.
+#define DO_TO_CHILDREN(node, thingToDo) do { \
+ Node& _node = (node); \
+ if (_node.op & NodeHasVarArgs) { \
+ for (unsigned _childIdx = _node.firstChild(); \
+ _childIdx < _node.firstChild() + _node.numChildren(); \
+ _childIdx++) \
+ thingToDo(m_varArgChildren[_childIdx]); \
+ } else { \
+ if (_node.child1() == NoNode) { \
+ ASSERT(_node.child2() == NoNode \
+ && _node.child3() == NoNode); \
+ break; \
+ } \
+ thingToDo(_node.child1()); \
+ \
+ if (_node.child2() == NoNode) { \
+ ASSERT(_node.child3() == NoNode); \
+ break; \
+ } \
+ thingToDo(_node.child2()); \
+ \
+ if (_node.child3() == NoNode) \
+ break; \
+ thingToDo(_node.child3()); \
+ } \
+ } while (false)
+
+void Graph::refChildren(NodeIndex op)
+{
+ DO_TO_CHILDREN(at(op), ref);
+}
+
+void Graph::derefChildren(NodeIndex op)
+{
+ DO_TO_CHILDREN(at(op), deref);
+}
+
+void Graph::predictArgumentTypes(CodeBlock* codeBlock)
+{
+ ASSERT(codeBlock);
+ ASSERT(codeBlock->alternative());
+
+ CodeBlock* profiledCodeBlock = codeBlock->alternative();
+ ASSERT(codeBlock->m_numParameters >= 1);
+ for (size_t arg = 0; arg < static_cast<size_t>(codeBlock->m_numParameters); ++arg) {
+ ValueProfile* profile = profiledCodeBlock->valueProfileForArgument(arg);
+ if (!profile)
+ continue;
+
+ at(m_arguments[arg]).variableAccessData()->predict(profile->computeUpdatedPrediction());
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Argument [%lu] prediction: %s\n", arg, predictionToString(at(m_arguments[arg]).variableAccessData()->prediction()));
+#endif
+ }
+}
+
+} } // namespace JSC::DFG
+
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
new file mode 100644
index 000000000..fb729063d
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGGraph_h
+#define DFGGraph_h
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+#include "DFGBasicBlock.h"
+#include "DFGNode.h"
+#include "PredictionTracker.h"
+#include "RegisterFile.h"
+#include <wtf/BitVector.h>
+#include <wtf/HashMap.h>
+#include <wtf/Vector.h>
+#include <wtf/StdLibExtras.h>
+
+namespace JSC {
+
+class CodeBlock;
+class ExecState;
+
+namespace DFG {
+
+struct StorageAccessData {
+ size_t offset;
+ unsigned identifierNumber;
+
+ // NOTE: the offset and identifierNumber do not by themselves
+ // uniquely identify a property. The identifierNumber and a
+ // Structure* do. If those two match, then the offset should
+ // be the same, as well. For any Node that has a StorageAccessData,
+ // it is possible to retrieve the Structure* by looking at the
+ // first child. It should be a CheckStructure, which has the
+ // Structure*.
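+    //
+    // For example (illustrative): two nodes carrying StorageAccessData may share an
+    // identifierNumber while being guarded by CheckStructure nodes for different
+    // Structures; in that case their offsets can legitimately differ.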
+};
+
+struct ResolveGlobalData {
+ unsigned identifierNumber;
+ unsigned resolveInfoIndex;
+};
+
+//
+// === Graph ===
+//
+// The dataflow graph is an ordered vector of nodes.
+// The order may be significant for nodes with side-effects (property accesses, value conversions).
+// Nodes that are 'dead' remain in the vector with refCount 0.
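+//
+// Illustrative use of the ref-counting interface (a sketch; 'graph' and 'n' are hypothetical
+// names, not part of this patch):
+//     graph.ref(n);    // refCount 0 -> 1: also recursively refs n's children
+//     graph.deref(n);  // refCount 1 -> 0: recursively derefs n's children again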
+class Graph : public Vector<Node, 64> {
+public:
+ // Mark a node as being referenced.
+ void ref(NodeIndex nodeIndex)
+ {
+ Node& node = at(nodeIndex);
+ // If the value (before incrementing) was at refCount zero then we need to ref its children.
+ if (node.ref())
+ refChildren(nodeIndex);
+ }
+
+ void deref(NodeIndex nodeIndex)
+ {
+ if (at(nodeIndex).deref())
+ derefChildren(nodeIndex);
+ }
+
+ void clearAndDerefChild1(Node& node)
+ {
+ if (node.children.fixed.child1 == NoNode)
+ return;
+ deref(node.children.fixed.child1);
+ node.children.fixed.child1 = NoNode;
+ }
+
+ void clearAndDerefChild2(Node& node)
+ {
+ if (node.children.fixed.child2 == NoNode)
+ return;
+ deref(node.children.fixed.child2);
+ node.children.fixed.child2 = NoNode;
+ }
+
+ void clearAndDerefChild3(Node& node)
+ {
+ if (node.children.fixed.child3 == NoNode)
+ return;
+ deref(node.children.fixed.child3);
+ node.children.fixed.child3 = NoNode;
+ }
+
+#ifndef NDEBUG
+ // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
+ void dump(CodeBlock* = 0);
+ void dump(NodeIndex, CodeBlock* = 0);
+
+ // Dump the code origin of the given node as a diff from the code origin of the
+ // preceding node.
+ void dumpCodeOrigin(NodeIndex);
+#endif
+
+ BlockIndex blockIndexForBytecodeOffset(Vector<BlockIndex>& blocks, unsigned bytecodeBegin);
+
+ bool predictGlobalVar(unsigned varNumber, PredictedType prediction)
+ {
+ return m_predictions.predictGlobalVar(varNumber, prediction);
+ }
+
+ PredictedType getGlobalVarPrediction(unsigned varNumber)
+ {
+ return m_predictions.getGlobalVarPrediction(varNumber);
+ }
+
+ PredictedType getJSConstantPrediction(Node& node, CodeBlock* codeBlock)
+ {
+ return predictionFromValue(node.valueOfJSConstant(codeBlock));
+ }
+
+ // Helper methods to check nodes for constants.
+ bool isConstant(NodeIndex nodeIndex)
+ {
+ return at(nodeIndex).hasConstant();
+ }
+ bool isJSConstant(NodeIndex nodeIndex)
+ {
+ return at(nodeIndex).hasConstant();
+ }
+ bool isInt32Constant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ {
+ return at(nodeIndex).isInt32Constant(codeBlock);
+ }
+ bool isDoubleConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ {
+ return at(nodeIndex).isDoubleConstant(codeBlock);
+ }
+ bool isNumberConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ {
+ return at(nodeIndex).isNumberConstant(codeBlock);
+ }
+ bool isBooleanConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ {
+ return at(nodeIndex).isBooleanConstant(codeBlock);
+ }
+ bool isFunctionConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ {
+ if (!isJSConstant(nodeIndex))
+ return false;
+ if (!getJSFunction(valueOfJSConstant(codeBlock, nodeIndex)))
+ return false;
+ return true;
+ }
+ // Helper methods get constant values from nodes.
+ JSValue valueOfJSConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ {
+ return at(nodeIndex).valueOfJSConstant(codeBlock);
+ }
+ int32_t valueOfInt32Constant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ {
+ return valueOfJSConstant(codeBlock, nodeIndex).asInt32();
+ }
+ double valueOfNumberConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ {
+ return valueOfJSConstant(codeBlock, nodeIndex).asNumber();
+ }
+ bool valueOfBooleanConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ {
+ return valueOfJSConstant(codeBlock, nodeIndex).asBoolean();
+ }
+ JSFunction* valueOfFunctionConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ {
+ JSCell* function = getJSFunction(valueOfJSConstant(codeBlock, nodeIndex));
+ ASSERT(function);
+ return asFunction(function);
+ }
+
+#ifndef NDEBUG
+ static const char *opName(NodeType);
+
+ // This is O(n), and should only be used for verbose dumps.
+ const char* nameOfVariableAccessData(VariableAccessData*);
+#endif
+
+ void predictArgumentTypes(CodeBlock*);
+
+ StructureSet* addStructureSet(const StructureSet& structureSet)
+ {
+ ASSERT(structureSet.size());
+ m_structureSet.append(structureSet);
+ return &m_structureSet.last();
+ }
+
+ StructureTransitionData* addStructureTransitionData(const StructureTransitionData& structureTransitionData)
+ {
+ m_structureTransitionData.append(structureTransitionData);
+ return &m_structureTransitionData.last();
+ }
+
+ ValueProfile* valueProfileFor(NodeIndex nodeIndex, CodeBlock* profiledBlock)
+ {
+ if (nodeIndex == NoNode)
+ return 0;
+
+ Node& node = at(nodeIndex);
+
+ switch (node.op) {
+ case GetLocal: {
+ if (!operandIsArgument(node.local()))
+ return 0;
+ int argument = operandToArgument(node.local());
+ if (node.variableAccessData() != at(m_arguments[argument]).variableAccessData())
+ return 0;
+ return profiledBlock->valueProfileForArgument(argument);
+ }
+
+        // Nodes derived from calls need special handling because the value profile is
+ // associated with the op_call_put_result instruction.
+ case Call:
+ case Construct:
+ case ArrayPop:
+ case ArrayPush: {
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
+ return profiledBlock->valueProfileForBytecodeOffset(node.codeOrigin.bytecodeIndex + OPCODE_LENGTH(op_call));
+ }
+
+ default:
+ if (node.hasHeapPrediction())
+ return profiledBlock->valueProfileForBytecodeOffset(node.codeOrigin.bytecodeIndex);
+ return 0;
+ }
+ }
+
+ Vector< OwnPtr<BasicBlock> , 8> m_blocks;
+ Vector<NodeIndex, 16> m_varArgChildren;
+ Vector<StorageAccessData> m_storageAccessData;
+ Vector<ResolveGlobalData> m_resolveGlobalData;
+ Vector<NodeIndex, 8> m_arguments;
+ SegmentedVector<VariableAccessData, 16> m_variableAccessData;
+ SegmentedVector<StructureSet, 16> m_structureSet;
+ SegmentedVector<StructureTransitionData, 8> m_structureTransitionData;
+ BitVector m_preservedVars;
+ unsigned m_localVars;
+ unsigned m_parameterSlots;
+private:
+
+ // When a node's refCount goes from 0 to 1, it must (logically) recursively ref all of its children, and vice versa.
+ void refChildren(NodeIndex);
+ void derefChildren(NodeIndex);
+
+ PredictionTracker m_predictions;
+};
+
+class GetBytecodeBeginForBlock {
+public:
+ GetBytecodeBeginForBlock(Graph& graph)
+ : m_graph(graph)
+ {
+ }
+
+ unsigned operator()(BlockIndex* blockIndex) const
+ {
+ return m_graph.m_blocks[*blockIndex]->bytecodeBegin;
+ }
+
+private:
+ Graph& m_graph;
+};
+
+inline BlockIndex Graph::blockIndexForBytecodeOffset(Vector<BlockIndex>& linkingTargets, unsigned bytecodeBegin)
+{
+ return *WTF::binarySearchWithFunctor<BlockIndex, unsigned>(linkingTargets.begin(), linkingTargets.size(), bytecodeBegin, WTF::KeyMustBePresentInArray, GetBytecodeBeginForBlock(*this));
+}
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
new file mode 100644
index 000000000..c50b84f7f
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGJITCompiler.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+#include "DFGOSRExitCompiler.h"
+#include "DFGOperations.h"
+#include "DFGRegisterBank.h"
+#include "DFGSpeculativeJIT.h"
+#include "DFGThunks.h"
+#include "JSGlobalData.h"
+#include "LinkBuffer.h"
+
+namespace JSC { namespace DFG {
+
+void JITCompiler::linkOSRExits()
+{
+ for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
+ OSRExit& exit = codeBlock()->osrExit(i);
+ exit.m_check.initialJump().link(this);
+ store32(Imm32(i), &globalData()->osrExitIndex);
+ beginUninterruptedSequence();
+ exit.m_check.switchToLateJump(jump());
+ endUninterruptedSequence();
+ }
+}
+
+void JITCompiler::compileEntry()
+{
+ // This code currently matches the old JIT. In the function header we need to
+ // pop the return address (since we do not allow any recursion on the machine
+ // stack), and perform a fast register file check.
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
+ // We'll need to convert the remaining cti_ style calls (specifically the register file
+ // check) which will be dependent on stack layout. (We'd need to account for this in
+ // both normal return code and when jumping to an exception handler).
+ preserveReturnAddressAfterCall(GPRInfo::regT2);
+ emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
+ emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
+}
+
+void JITCompiler::compileBody(SpeculativeJIT& speculative)
+{
+ // We generate the speculative code path, followed by OSR exit code to return
+ // to the old JIT code if speculations fail.
+
+#if DFG_ENABLE(JIT_BREAK_ON_EVERY_FUNCTION)
+ // Handy debug tool!
+ breakpoint();
+#endif
+
+ addPtr(Imm32(1), AbsoluteAddress(codeBlock()->addressOfSpeculativeSuccessCounter()));
+
+ bool compiledSpeculative = speculative.compile();
+ ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
+
+ linkOSRExits();
+
+    // Iterate over the m_exceptionChecks vector, linking any exception-check jumps that are set.
+ bool didLinkExceptionCheck = false;
+ for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
+ Jump& exceptionCheck = m_exceptionChecks[i].m_exceptionCheck;
+ if (exceptionCheck.isSet()) {
+ exceptionCheck.link(this);
+ didLinkExceptionCheck = true;
+ }
+ }
+
+ // If any exception checks were linked, generate code to lookup a handler.
+ if (didLinkExceptionCheck) {
+ // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
+ // the index into the CodeBlock's callReturnIndexVector corresponding to the
+ // call that threw the exception (this was set in nonPreservedNonReturnGPR, when
+ // the exception check was planted).
+ move(GPRInfo::nonPreservedNonReturnGPR, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+#if CPU(X86)
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
+#endif
+ m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
+ // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
+ // and the address of the handler in returnValueGPR2.
+ jump(GPRInfo::returnValueGPR2);
+ }
+}
+
+void JITCompiler::link(LinkBuffer& linkBuffer)
+{
+ // Link the code, populate data in CodeBlock data structures.
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "JIT code for %p start at [%p, %p). Size = %lu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
+#endif
+
+ // Link all calls out from the JIT code to their respective functions.
+ for (unsigned i = 0; i < m_calls.size(); ++i)
+ linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);
+
+ if (m_codeBlock->needsCallReturnIndices()) {
+ m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionChecks.size());
+ for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
+ unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
+ CodeOrigin codeOrigin = m_exceptionChecks[i].m_codeOrigin;
+ while (codeOrigin.inlineCallFrame)
+ codeOrigin = codeOrigin.inlineCallFrame->caller;
+ unsigned exceptionInfo = codeOrigin.bytecodeIndex;
+ m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
+ }
+ }
+
+ unsigned numCallsFromInlineCode = 0;
+ for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
+ if (m_exceptionChecks[i].m_codeOrigin.inlineCallFrame)
+ numCallsFromInlineCode++;
+ }
+
+ if (numCallsFromInlineCode) {
+ Vector<CodeOriginAtCallReturnOffset>& codeOrigins = m_codeBlock->codeOrigins();
+ codeOrigins.resize(numCallsFromInlineCode);
+
+ for (unsigned i = 0, j = 0; i < m_exceptionChecks.size(); ++i) {
+ CallExceptionRecord& record = m_exceptionChecks[i];
+ if (record.m_codeOrigin.inlineCallFrame) {
+ unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
+ codeOrigins[j].codeOrigin = record.m_codeOrigin;
+ codeOrigins[j].callReturnOffset = returnAddressOffset;
+ j++;
+ }
+ }
+ }
+
+ m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
+ for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
+ StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
+ CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_functionCall);
+ info.callReturnLocation = callReturnLocation;
+ info.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCheckImmToCall), callReturnLocation);
+ info.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToStructCheck));
+#if USE(JSVALUE64)
+ info.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToLoadOrStore));
+#else
+ info.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToTagLoadOrStore));
+ info.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToPayloadLoadOrStore));
+#endif
+ info.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToSlowCase));
+ info.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToDone));
+ info.baseGPR = m_propertyAccesses[i].m_baseGPR;
+#if USE(JSVALUE64)
+ info.valueGPR = m_propertyAccesses[i].m_valueGPR;
+#else
+ info.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
+ info.valueGPR = m_propertyAccesses[i].m_valueGPR;
+#endif
+ info.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
+ }
+
+ m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
+ for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
+ CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
+ info.callType = m_jsCalls[i].m_callType;
+ info.isDFG = true;
+ info.callReturnLocation = CodeLocationLabel(linkBuffer.locationOf(m_jsCalls[i].m_slowCall));
+ info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
+ info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
+ }
+
+ MacroAssemblerCodeRef osrExitThunk = globalData()->getCTIStub(osrExitGenerationThunkGenerator);
+ CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
+ for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
+ OSRExit& exit = codeBlock()->osrExit(i);
+ linkBuffer.link(exit.m_check.lateJump(), target);
+ exit.m_check.correctLateJump(linkBuffer);
+ }
+
+ codeBlock()->shrinkWeakReferencesToFit();
+ codeBlock()->shrinkWeakReferenceTransitionsToFit();
+}
+
+void JITCompiler::compile(JITCode& entry)
+{
+ compileEntry();
+ SpeculativeJIT speculative(*this);
+ compileBody(speculative);
+
+ LinkBuffer linkBuffer(*m_globalData, this);
+ link(linkBuffer);
+ speculative.linkOSREntries(linkBuffer);
+
+ entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
+}
+
+void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
+{
+ compileEntry();
+
+ // === Function header code generation ===
+ // This is the main entry point, without performing an arity check.
+ // If we needed to perform an arity check we will already have moved the return address,
+ // so enter after this.
+ Label fromArityCheck(this);
+ // Plant a check that sufficient space is available in the RegisterFile.
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
+ addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+ Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
+ // Return here after register file check.
+ Label fromRegisterFileCheck = label();
+
+
+ // === Function body code generation ===
+ SpeculativeJIT speculative(*this);
+ compileBody(speculative);
+
+ // === Function footer code generation ===
+ //
+ // Generate code to perform the slow register file check (if the fast one in
+ // the function header fails), and generate the entry point with arity check.
+ //
+    // Generate the register file check; if the fast check in the function header fails,
+ // we need to call out to a helper function to check whether more space is available.
+ // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
+ registerFileCheck.link(this);
+ move(stackPointerRegister, GPRInfo::argumentGPR0);
+ poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+ Call callRegisterFileCheck = call();
+ jump(fromRegisterFileCheck);
+
+ // The fast entry point into a function does not check the correct number of arguments
+ // have been passed to the call (we only use the fast entry point where we can statically
+ // determine the correct number of arguments have been passed, or have already checked).
+ // In cases where an arity check is necessary, we enter here.
+ // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
+ Label arityCheck = label();
+ compileEntry();
+
+ load32(AssemblyHelpers::payloadFor((VirtualRegister)RegisterFile::ArgumentCount), GPRInfo::regT1);
+ branch32(AboveOrEqual, GPRInfo::regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
+ move(stackPointerRegister, GPRInfo::argumentGPR0);
+ poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+ Call callArityCheck = call();
+ move(GPRInfo::regT0, GPRInfo::callFrameRegister);
+ jump(fromArityCheck);
+
+
+ // === Link ===
+ LinkBuffer linkBuffer(*m_globalData, this);
+ link(linkBuffer);
+ speculative.linkOSREntries(linkBuffer);
+
+    // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs.
+ linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
+ linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);
+
+ entryWithArityCheck = linkBuffer.locationOf(arityCheck);
+ entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
new file mode 100644
index 000000000..de0475a56
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGJITCompiler_h
+#define DFGJITCompiler_h
+
+#if ENABLE(DFG_JIT)
+
+#include <assembler/LinkBuffer.h>
+#include <assembler/MacroAssembler.h>
+#include <bytecode/CodeBlock.h>
+#include <dfg/DFGAssemblyHelpers.h>
+#include <dfg/DFGFPRInfo.h>
+#include <dfg/DFGGPRInfo.h>
+#include <dfg/DFGGraph.h>
+#include <dfg/DFGRegisterBank.h>
+#include <jit/JITCode.h>
+
+namespace JSC {
+
+class AbstractSamplingCounter;
+class CodeBlock;
+class JSGlobalData;
+
+namespace DFG {
+
+class JITCodeGenerator;
+class NodeToRegisterMap;
+class SpeculativeJIT;
+class SpeculationRecovery;
+
+struct EntryLocation;
+struct OSRExit;
+
+// === CallLinkRecord ===
+//
+// A record of a call out from JIT code that needs linking to a helper function.
+// Every CallLinkRecord contains a reference to the call instruction & the function
+// that it needs to be linked to.
+struct CallLinkRecord {
+ CallLinkRecord(MacroAssembler::Call call, FunctionPtr function)
+ : m_call(call)
+ , m_function(function)
+ {
+ }
+
+ MacroAssembler::Call m_call;
+ FunctionPtr m_function;
+};
+
+// === CallExceptionRecord ===
+//
+// A record of a call out from JIT code that might throw an exception.
+// Calls that might throw an exception also record the Jump taken on exception
+// (unset if not present) and code origin used to recover handler/source info.
+struct CallExceptionRecord {
+ CallExceptionRecord(MacroAssembler::Call call, CodeOrigin codeOrigin)
+ : m_call(call)
+ , m_codeOrigin(codeOrigin)
+ {
+ }
+
+ CallExceptionRecord(MacroAssembler::Call call, MacroAssembler::Jump exceptionCheck, CodeOrigin codeOrigin)
+ : m_call(call)
+ , m_exceptionCheck(exceptionCheck)
+ , m_codeOrigin(codeOrigin)
+ {
+ }
+
+ MacroAssembler::Call m_call;
+ MacroAssembler::Jump m_exceptionCheck;
+ CodeOrigin m_codeOrigin;
+};
+
+struct PropertyAccessRecord {
+#if USE(JSVALUE64)
+ PropertyAccessRecord(MacroAssembler::DataLabelPtr deltaCheckImmToCall, MacroAssembler::Call functionCall, MacroAssembler::Jump deltaCallToStructCheck, MacroAssembler::DataLabelCompact deltaCallToLoadOrStore, MacroAssembler::Label deltaCallToSlowCase, MacroAssembler::Label deltaCallToDone, int8_t baseGPR, int8_t valueGPR, int8_t scratchGPR)
+#elif USE(JSVALUE32_64)
+ PropertyAccessRecord(MacroAssembler::DataLabelPtr deltaCheckImmToCall, MacroAssembler::Call functionCall, MacroAssembler::Jump deltaCallToStructCheck, MacroAssembler::DataLabelCompact deltaCallToTagLoadOrStore, MacroAssembler::DataLabelCompact deltaCallToPayloadLoadOrStore, MacroAssembler::Label deltaCallToSlowCase, MacroAssembler::Label deltaCallToDone, int8_t baseGPR, int8_t valueTagGPR, int8_t valueGPR, int8_t scratchGPR)
+#endif
+ : m_deltaCheckImmToCall(deltaCheckImmToCall)
+ , m_functionCall(functionCall)
+ , m_deltaCallToStructCheck(deltaCallToStructCheck)
+#if USE(JSVALUE64)
+ , m_deltaCallToLoadOrStore(deltaCallToLoadOrStore)
+#elif USE(JSVALUE32_64)
+ , m_deltaCallToTagLoadOrStore(deltaCallToTagLoadOrStore)
+ , m_deltaCallToPayloadLoadOrStore(deltaCallToPayloadLoadOrStore)
+#endif
+ , m_deltaCallToSlowCase(deltaCallToSlowCase)
+ , m_deltaCallToDone(deltaCallToDone)
+ , m_baseGPR(baseGPR)
+#if USE(JSVALUE32_64)
+ , m_valueTagGPR(valueTagGPR)
+#endif
+ , m_valueGPR(valueGPR)
+ , m_scratchGPR(scratchGPR)
+ {
+ }
+
+ MacroAssembler::DataLabelPtr m_deltaCheckImmToCall;
+ MacroAssembler::Call m_functionCall;
+ MacroAssembler::Jump m_deltaCallToStructCheck;
+#if USE(JSVALUE64)
+ MacroAssembler::DataLabelCompact m_deltaCallToLoadOrStore;
+#elif USE(JSVALUE32_64)
+ MacroAssembler::DataLabelCompact m_deltaCallToTagLoadOrStore;
+ MacroAssembler::DataLabelCompact m_deltaCallToPayloadLoadOrStore;
+#endif
+ MacroAssembler::Label m_deltaCallToSlowCase;
+ MacroAssembler::Label m_deltaCallToDone;
+ int8_t m_baseGPR;
+#if USE(JSVALUE32_64)
+ int8_t m_valueTagGPR;
+#endif
+ int8_t m_valueGPR;
+ int8_t m_scratchGPR;
+};
+
+// === JITCompiler ===
+//
+// DFG::JITCompiler is responsible for generating JIT code from the dataflow graph.
+// It does so by delegating to the speculative & non-speculative JITs, which
+// generate to a MacroAssembler (which the JITCompiler owns through an inheritance
+// relationship). The JITCompiler holds references to information required during
+// compilation, and also records information used in linking (e.g. a list of all
+// calls to be linked).
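+//
+// Roughly, an illustrative sketch of the flow implemented by compile() / compileFunction() below:
+//     compileEntry();                             // prologue: stash ReturnPC and CodeBlock in the call frame
+//     SpeculativeJIT speculative(*this);
+//     compileBody(speculative);                   // speculative code path plus OSR exit stubs
+//     LinkBuffer linkBuffer(*m_globalData, this);
+//     link(linkBuffer);                           // link calls, stub info, OSR exits
+//     speculative.linkOSREntries(linkBuffer);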
+class JITCompiler : public AssemblyHelpers {
+public:
+ JITCompiler(JSGlobalData* globalData, Graph& dfg, CodeBlock* codeBlock)
+ : AssemblyHelpers(globalData, codeBlock)
+ , m_graph(dfg)
+ {
+ }
+
+ void compile(JITCode& entry);
+ void compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck);
+
+ // Accessors for properties.
+ Graph& graph() { return m_graph; }
+
+ // Notify the JIT of a call that does not require linking.
+ void notifyCall(Call functionCall, CodeOrigin codeOrigin)
+ {
+ m_exceptionChecks.append(CallExceptionRecord(functionCall, codeOrigin));
+ }
+
+ // Add a call out from JIT code, without an exception check.
+ Call appendCall(const FunctionPtr& function)
+ {
+ Call functionCall = call();
+ m_calls.append(CallLinkRecord(functionCall, function));
+ return functionCall;
+ }
+
+ // Add a call out from JIT code, with an exception check.
+ Call addExceptionCheck(Call functionCall, CodeOrigin codeOrigin)
+ {
+ move(TrustedImm32(m_exceptionChecks.size()), GPRInfo::nonPreservedNonReturnGPR);
+#if USE(JSVALUE64)
+ Jump exceptionCheck = branchTestPtr(NonZero, AbsoluteAddress(&globalData()->exception));
+#elif USE(JSVALUE32_64)
+ Jump exceptionCheck = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData()->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+#endif
+ m_exceptionChecks.append(CallExceptionRecord(functionCall, exceptionCheck, codeOrigin));
+ return functionCall;
+ }
+
+ // Add a call out from JIT code, with a fast exception check that tests if the return value is zero.
+ Call addFastExceptionCheck(Call functionCall, CodeOrigin codeOrigin)
+ {
+ move(TrustedImm32(m_exceptionChecks.size()), GPRInfo::nonPreservedNonReturnGPR);
+ Jump exceptionCheck = branchTestPtr(Zero, GPRInfo::returnValueGPR);
+ m_exceptionChecks.append(CallExceptionRecord(functionCall, exceptionCheck, codeOrigin));
+ return functionCall;
+ }
+
+ // Helper methods to check nodes for constants.
+ bool isConstant(NodeIndex nodeIndex) { return graph().isConstant(nodeIndex); }
+ bool isJSConstant(NodeIndex nodeIndex) { return graph().isJSConstant(nodeIndex); }
+ bool isInt32Constant(NodeIndex nodeIndex) { return graph().isInt32Constant(codeBlock(), nodeIndex); }
+ bool isDoubleConstant(NodeIndex nodeIndex) { return graph().isDoubleConstant(codeBlock(), nodeIndex); }
+ bool isNumberConstant(NodeIndex nodeIndex) { return graph().isNumberConstant(codeBlock(), nodeIndex); }
+ bool isBooleanConstant(NodeIndex nodeIndex) { return graph().isBooleanConstant(codeBlock(), nodeIndex); }
+ bool isFunctionConstant(NodeIndex nodeIndex) { return graph().isFunctionConstant(codeBlock(), nodeIndex); }
+ // Helper methods get constant values from nodes.
+ JSValue valueOfJSConstant(NodeIndex nodeIndex) { return graph().valueOfJSConstant(codeBlock(), nodeIndex); }
+ int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return graph().valueOfInt32Constant(codeBlock(), nodeIndex); }
+ double valueOfNumberConstant(NodeIndex nodeIndex) { return graph().valueOfNumberConstant(codeBlock(), nodeIndex); }
+ bool valueOfBooleanConstant(NodeIndex nodeIndex) { return graph().valueOfBooleanConstant(codeBlock(), nodeIndex); }
+ JSFunction* valueOfFunctionConstant(NodeIndex nodeIndex) { return graph().valueOfFunctionConstant(codeBlock(), nodeIndex); }
+
+ // Helper methods to get predictions
+ PredictedType getPrediction(Node& node) { return node.prediction(); }
+ PredictedType getPrediction(NodeIndex nodeIndex) { return getPrediction(graph()[nodeIndex]); }
+
+#if USE(JSVALUE32_64)
+ void* addressOfDoubleConstant(NodeIndex nodeIndex)
+ {
+ ASSERT(isNumberConstant(nodeIndex));
+ unsigned constantIndex = graph()[nodeIndex].constantNumber();
+ return &(codeBlock()->constantRegister(FirstConstantRegisterIndex + constantIndex));
+ }
+#endif
+
+ void addPropertyAccess(const PropertyAccessRecord& record)
+ {
+ m_propertyAccesses.append(record);
+ }
+
+ void addJSCall(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo::CallType callType, CodeOrigin codeOrigin)
+ {
+ m_jsCalls.append(JSCallRecord(fastCall, slowCall, targetToCheck, callType, codeOrigin));
+ }
+
+ void addWeakReference(JSCell* target)
+ {
+ m_codeBlock->appendWeakReference(target);
+ }
+
+ void addWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
+ {
+ m_codeBlock->appendWeakReferenceTransition(codeOrigin, from, to);
+ }
+
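+ // Compare against a JSCell pointer, recording the cell as a weak reference of the
+ // code block rather than keeping it alive.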
+ template<typename T>
+ Jump branchWeakPtr(RelationalCondition cond, T left, JSCell* weakPtr)
+ {
+ Jump result = branchPtr(cond, left, TrustedImmPtr(weakPtr));
+ addWeakReference(weakPtr);
+ return result;
+ }
+
+ void noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
+ {
+#if DFG_ENABLE(OSR_ENTRY)
+ OSREntryData* entry = codeBlock()->appendDFGOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));
+
+ entry->m_expectedValues = basicBlock.valuesAtHead;
+
+ // Fix the expected values: in our protocol, a dead variable will have an expected
+ // value of (None, []). But the old JIT may stash some values there. So we really
+ // need (Top, TOP).
+ for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
+ if (basicBlock.variablesAtHead.argument(argument) == NoNode)
+ entry->m_expectedValues.argument(argument).makeTop();
+ }
+ for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
+ NodeIndex nodeIndex = basicBlock.variablesAtHead.local(local);
+ if (nodeIndex == NoNode)
+ entry->m_expectedValues.local(local).makeTop();
+ else if (m_graph[nodeIndex].variableAccessData()->shouldUseDoubleFormat())
+ entry->m_localsForcedDouble.set(local);
+ }
+#else
+ UNUSED_PARAM(basicBlock);
+ UNUSED_PARAM(blockHead);
+ UNUSED_PARAM(linkBuffer);
+#endif
+ }
+
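+ // Returns the value profile in the baseline code block corresponding to the given
+ // node, or 0 if there is no node.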
+ ValueProfile* valueProfileFor(NodeIndex nodeIndex)
+ {
+ if (nodeIndex == NoNode)
+ return 0;
+
+ return m_graph.valueProfileFor(nodeIndex, baselineCodeBlockFor(m_graph[nodeIndex].codeOrigin));
+ }
+
+private:
+ // Internal implementation to compile.
+ void compileEntry();
+ void compileBody(SpeculativeJIT&);
+ void link(LinkBuffer&);
+
+ void exitSpeculativeWithOSR(const OSRExit&, SpeculationRecovery*);
+ void linkOSRExits();
+
+ // The dataflow graph currently being generated.
+ Graph& m_graph;
+
+ // Vector of calls out from JIT code that need to be linked, and the exception
+ // check records for the calls that have exception handlers.
+ Vector<CallLinkRecord> m_calls;
+ Vector<CallExceptionRecord> m_exceptionChecks;
+
+ struct JSCallRecord {
+ JSCallRecord(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo::CallType callType, CodeOrigin codeOrigin)
+ : m_fastCall(fastCall)
+ , m_slowCall(slowCall)
+ , m_targetToCheck(targetToCheck)
+ , m_callType(callType)
+ , m_codeOrigin(codeOrigin)
+ {
+ }
+
+ Call m_fastCall;
+ Call m_slowCall;
+ DataLabelPtr m_targetToCheck;
+ CallLinkInfo::CallType m_callType;
+ CodeOrigin m_codeOrigin;
+ };
+
+ Vector<PropertyAccessRecord, 4> m_propertyAccesses;
+ Vector<JSCallRecord, 4> m_jsCalls;
+};
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
+
diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h
new file mode 100644
index 000000000..cb5be691c
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGNode.h
@@ -0,0 +1,1076 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGNode_h
+#define DFGNode_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+#include "CodeOrigin.h"
+#include "DFGCommon.h"
+#include "DFGOperands.h"
+#include "DFGVariableAccessData.h"
+#include "JSValue.h"
+#include "PredictedType.h"
+#include "ValueProfile.h"
+#include <wtf/BoundsCheckedPointer.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace DFG {
+
+struct StructureTransitionData {
+ Structure* previousStructure;
+ Structure* newStructure;
+
+ StructureTransitionData() { }
+
+ StructureTransitionData(Structure* previousStructure, Structure* newStructure)
+ : previousStructure(previousStructure)
+ , newStructure(newStructure)
+ {
+ }
+};
+
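+// ArithNodeFlags is a bitfield recording how an arithmetic node's result is used
+// (NodeUsedAsNumber, NodeNeedsNegZero) and which behaviors the node itself may
+// exhibit (NodeMayOverflow, NodeMayNegZero).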
+typedef unsigned ArithNodeFlags;
+#define NodeUseBottom 0x00
+#define NodeUsedAsNumber 0x01
+#define NodeNeedsNegZero 0x02
+#define NodeUsedAsMask 0x03
+#define NodeMayOverflow 0x04
+#define NodeMayNegZero 0x08
+#define NodeBehaviorMask 0x0c
+
+static inline bool nodeUsedAsNumber(ArithNodeFlags flags)
+{
+ return !!(flags & NodeUsedAsNumber);
+}
+
+static inline bool nodeCanTruncateInteger(ArithNodeFlags flags)
+{
+ return !nodeUsedAsNumber(flags);
+}
+
+static inline bool nodeCanIgnoreNegativeZero(ArithNodeFlags flags)
+{
+ return !(flags & NodeNeedsNegZero);
+}
+
+static inline bool nodeMayOverflow(ArithNodeFlags flags)
+{
+ return !!(flags & NodeMayOverflow);
+}
+
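+// A node may speculate integer unless a behavior it might exhibit (overflow,
+// producing negative zero) actually matters to the way its result is used.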
+static inline bool nodeCanSpeculateInteger(ArithNodeFlags flags)
+{
+ if (flags & NodeMayOverflow)
+ return !nodeUsedAsNumber(flags);
+
+ if (flags & NodeMayNegZero)
+ return nodeCanIgnoreNegativeZero(flags);
+
+ return true;
+}
+
+#ifndef NDEBUG
+static inline const char* arithNodeFlagsAsString(ArithNodeFlags flags)
+{
+ if (!flags)
+ return "<empty>";
+
+ static const int size = 64;
+ static char description[size];
+ BoundsCheckedPointer<char> ptr(description, size);
+
+ bool hasPrinted = false;
+
+ if (flags & NodeUsedAsNumber) {
+ ptr.strcat("UsedAsNum");
+ hasPrinted = true;
+ }
+
+ if (flags & NodeNeedsNegZero) {
+ if (hasPrinted)
+ ptr.strcat("|");
+ ptr.strcat("NeedsNegZero");
+ hasPrinted = true;
+ }
+
+ if (flags & NodeMayOverflow) {
+ if (hasPrinted)
+ ptr.strcat("|");
+ ptr.strcat("MayOverflow");
+ hasPrinted = true;
+ }
+
+ if (flags & NodeMayNegZero) {
+ if (hasPrinted)
+ ptr.strcat("|");
+ ptr.strcat("MayNegZero");
+ hasPrinted = true;
+ }
+
+ *ptr++ = 0;
+
+ return description;
+}
+#endif
+
+// Entries in the NodeType enum (below) are composed of an id, a result type (possibly none)
+// and some additional informative flags (must generate, is constant, etc).
+#define NodeIdMask 0xFFF
+#define NodeResultMask 0xF000
+#define NodeMustGenerate 0x10000 // set on nodes that have side effects, and may not trivially be removed by DCE.
+#define NodeIsConstant 0x20000
+#define NodeIsJump 0x40000
+#define NodeIsBranch 0x80000
+#define NodeIsTerminal 0x100000
+#define NodeHasVarArgs 0x200000
+#define NodeClobbersWorld 0x400000
+#define NodeMightClobber 0x800000
+
+// These values record the result type of the node (as checked by NodeResultMask, above), 0 for no result.
+#define NodeResultJS 0x1000
+#define NodeResultNumber 0x2000
+#define NodeResultInt32 0x3000
+#define NodeResultBoolean 0x4000
+#define NodeResultStorage 0x5000
+
+// This macro defines a set of information about all known node types, used to populate NodeId, NodeType below.
+#define FOR_EACH_DFG_OP(macro) \
+ /* A constant in the CodeBlock's constant pool. */\
+ macro(JSConstant, NodeResultJS) \
+ \
+ /* A constant not in the CodeBlock's constant pool. Uses get patched to jumps that exit the */\
+ /* code block. */\
+ macro(WeakJSConstant, NodeResultJS) \
+ \
+ /* Nodes for handling functions (both as call and as construct). */\
+ macro(ConvertThis, NodeResultJS) \
+ macro(CreateThis, NodeResultJS) /* Note this is not MustGenerate since we're returning it anyway. */ \
+ macro(GetCallee, NodeResultJS) \
+ \
+ /* Nodes for local variable access. */\
+ macro(GetLocal, NodeResultJS) \
+ macro(SetLocal, 0) \
+ macro(Phantom, NodeMustGenerate) \
+ macro(Nop, 0) \
+ macro(Phi, 0) \
+ macro(Flush, NodeMustGenerate) \
+ \
+ /* Marker for arguments being set. */\
+ macro(SetArgument, 0) \
+ \
+ /* Hint that inlining begins here. No code is generated for this node. It's only */\
+ /* used for copying OSR data into inline frame data, to support reification of */\
+ /* call frames of inlined functions. */\
+ macro(InlineStart, 0) \
+ \
+ /* Nodes for bitwise operations. */\
+ macro(BitAnd, NodeResultInt32) \
+ macro(BitOr, NodeResultInt32) \
+ macro(BitXor, NodeResultInt32) \
+ macro(BitLShift, NodeResultInt32) \
+ macro(BitRShift, NodeResultInt32) \
+ macro(BitURShift, NodeResultInt32) \
+ /* Bitwise operators call ToInt32 on their operands. */\
+ macro(ValueToInt32, NodeResultInt32 | NodeMustGenerate) \
+ /* Used to box the result of URShift nodes (result has range 0..2^32-1). */\
+ macro(UInt32ToNumber, NodeResultNumber) \
+ \
+ /* Nodes for arithmetic operations. */\
+ macro(ArithAdd, NodeResultNumber) \
+ macro(ArithSub, NodeResultNumber) \
+ macro(ArithMul, NodeResultNumber) \
+ macro(ArithDiv, NodeResultNumber) \
+ macro(ArithMod, NodeResultNumber) \
+ macro(ArithAbs, NodeResultNumber) \
+ macro(ArithMin, NodeResultNumber) \
+ macro(ArithMax, NodeResultNumber) \
+ macro(ArithSqrt, NodeResultNumber) \
+ /* Arithmetic operators call ToNumber on their operands. */\
+ macro(ValueToNumber, NodeResultNumber | NodeMustGenerate) \
+ \
+ /* A variant of ValueToNumber, which hints that the parents will always use this as a double. */\
+ macro(ValueToDouble, NodeResultNumber | NodeMustGenerate) \
+ \
+ /* Add of values may either be arithmetic, or result in string concatenation. */\
+ macro(ValueAdd, NodeResultJS | NodeMustGenerate | NodeMightClobber) \
+ \
+ /* Property access. */\
+ /* PutByValAlias indicates a 'put' aliases a prior write to the same property. */\
+ /* Since a put to 'length' may invalidate optimizations here, */\
+ /* this must be the directly subsequent property put. */\
+ macro(GetByVal, NodeResultJS | NodeMustGenerate | NodeMightClobber) \
+ macro(PutByVal, NodeMustGenerate | NodeClobbersWorld) \
+ macro(PutByValAlias, NodeMustGenerate | NodeClobbersWorld) \
+ macro(GetById, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(PutById, NodeMustGenerate | NodeClobbersWorld) \
+ macro(PutByIdDirect, NodeMustGenerate | NodeClobbersWorld) \
+ macro(CheckStructure, NodeMustGenerate) \
+ macro(PutStructure, NodeMustGenerate | NodeClobbersWorld) \
+ macro(GetPropertyStorage, NodeResultStorage) \
+ macro(GetIndexedPropertyStorage, NodeMustGenerate | NodeResultStorage) \
+ macro(GetByOffset, NodeResultJS) \
+ macro(PutByOffset, NodeMustGenerate | NodeClobbersWorld) \
+ macro(GetArrayLength, NodeResultInt32) \
+ macro(GetStringLength, NodeResultInt32) \
+ macro(GetByteArrayLength, NodeResultInt32) \
+ macro(GetInt8ArrayLength, NodeResultInt32) \
+ macro(GetInt16ArrayLength, NodeResultInt32) \
+ macro(GetInt32ArrayLength, NodeResultInt32) \
+ macro(GetUint8ArrayLength, NodeResultInt32) \
+ macro(GetUint16ArrayLength, NodeResultInt32) \
+ macro(GetUint32ArrayLength, NodeResultInt32) \
+ macro(GetFloat32ArrayLength, NodeResultInt32) \
+ macro(GetFloat64ArrayLength, NodeResultInt32) \
+ macro(GetScopeChain, NodeResultJS) \
+ macro(GetScopedVar, NodeResultJS | NodeMustGenerate) \
+ macro(PutScopedVar, NodeMustGenerate | NodeClobbersWorld) \
+ macro(GetGlobalVar, NodeResultJS | NodeMustGenerate) \
+ macro(PutGlobalVar, NodeMustGenerate | NodeClobbersWorld) \
+ macro(CheckFunction, NodeMustGenerate) \
+ \
+ /* Optimizations for array mutation. */\
+ macro(ArrayPush, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(ArrayPop, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ \
+ /* Optimizations for string access */ \
+ macro(StringCharCodeAt, NodeResultInt32) \
+ macro(StringCharAt, NodeResultJS) \
+ \
+ /* Nodes for comparison operations. */\
+ macro(CompareLess, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
+ macro(CompareLessEq, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
+ macro(CompareGreater, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
+ macro(CompareGreaterEq, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
+ macro(CompareEq, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
+ macro(CompareStrictEq, NodeResultBoolean) \
+ \
+ /* Calls. */\
+ macro(Call, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
+ macro(Construct, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
+ \
+ /* Allocations. */\
+ macro(NewObject, NodeResultJS) \
+ macro(NewArray, NodeResultJS | NodeHasVarArgs) \
+ macro(NewArrayBuffer, NodeResultJS) \
+ macro(NewRegexp, NodeResultJS) \
+ \
+ /* Resolve nodes. */\
+ macro(Resolve, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(ResolveBase, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(ResolveBaseStrictPut, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(ResolveGlobal, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ \
+ /* Nodes for misc operations. */\
+ macro(Breakpoint, NodeMustGenerate | NodeClobbersWorld) \
+ macro(CheckHasInstance, NodeMustGenerate) \
+ macro(InstanceOf, NodeResultBoolean) \
+ macro(LogicalNot, NodeResultBoolean | NodeMightClobber) \
+ macro(ToPrimitive, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(StrCat, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
+ \
+ /* Block terminals. */\
+ macro(Jump, NodeMustGenerate | NodeIsTerminal | NodeIsJump) \
+ macro(Branch, NodeMustGenerate | NodeIsTerminal | NodeIsBranch) \
+ macro(Return, NodeMustGenerate | NodeIsTerminal) \
+ macro(Throw, NodeMustGenerate | NodeIsTerminal) \
+ macro(ThrowReferenceError, NodeMustGenerate | NodeIsTerminal) \
+ \
+ /* This is a pseudo-terminal. It means that execution should fall out of DFG at */\
+ /* this point, but execution does continue in the basic block - just in a */\
+ /* different compiler. */\
+ macro(ForceOSRExit, NodeMustGenerate)
+
+// This enum generates a monotonically increasing id for all Node types,
+// and is used by the subsequent enum to fill out the id (as accessed via the NodeIdMask).
+enum NodeId {
+#define DFG_OP_ENUM(opcode, flags) opcode##_id,
+ FOR_EACH_DFG_OP(DFG_OP_ENUM)
+#undef DFG_OP_ENUM
+ LastNodeId
+};
+
+// Entries in this enum describe all Node types.
+// The enum value contains a monotonically increasing id, a result type, and additional flags.
+enum NodeType {
+#define DFG_OP_ENUM(opcode, flags) opcode = opcode##_id | (flags),
+ FOR_EACH_DFG_OP(DFG_OP_ENUM)
+#undef DFG_OP_ENUM
+};
+
+// This type is used to pass an immediate argument to the Node constructor;
+// distinguishes an immediate value (typically an index into a CodeBlock data structure -
+// a constant index, argument, or identifier) from a NodeIndex.
+struct OpInfo {
+ explicit OpInfo(int32_t value) : m_value(static_cast<uintptr_t>(value)) { }
+ explicit OpInfo(uint32_t value) : m_value(static_cast<uintptr_t>(value)) { }
+#if OS(DARWIN) || USE(JSVALUE64)
+ explicit OpInfo(size_t value) : m_value(static_cast<uintptr_t>(value)) { }
+#endif
+ explicit OpInfo(void* value) : m_value(reinterpret_cast<uintptr_t>(value)) { }
+ uintptr_t m_value;
+};
+
+// === Node ===
+//
+// Node represents a single operation in the data flow graph.
+struct Node {
+ enum VarArgTag { VarArg };
+
+ // Construct a node with up to 3 children, no immediate value.
+ Node(NodeType op, CodeOrigin codeOrigin, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
+ : op(op)
+ , codeOrigin(codeOrigin)
+ , m_virtualRegister(InvalidVirtualRegister)
+ , m_refCount(0)
+ , m_prediction(PredictNone)
+ {
+ ASSERT(!(op & NodeHasVarArgs));
+ ASSERT(!hasArithNodeFlags());
+ children.fixed.child1 = child1;
+ children.fixed.child2 = child2;
+ children.fixed.child3 = child3;
+ }
+
+ // Construct a node with up to 3 children and an immediate value.
+ Node(NodeType op, CodeOrigin codeOrigin, OpInfo imm, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
+ : op(op)
+ , codeOrigin(codeOrigin)
+ , m_virtualRegister(InvalidVirtualRegister)
+ , m_refCount(0)
+ , m_opInfo(imm.m_value)
+ , m_prediction(PredictNone)
+ {
+ ASSERT(!(op & NodeHasVarArgs));
+ children.fixed.child1 = child1;
+ children.fixed.child2 = child2;
+ children.fixed.child3 = child3;
+ }
+
+ // Construct a node with up to 3 children and two immediate values.
+ Node(NodeType op, CodeOrigin codeOrigin, OpInfo imm1, OpInfo imm2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
+ : op(op)
+ , codeOrigin(codeOrigin)
+ , m_virtualRegister(InvalidVirtualRegister)
+ , m_refCount(0)
+ , m_opInfo(imm1.m_value)
+ , m_opInfo2(safeCast<unsigned>(imm2.m_value))
+ , m_prediction(PredictNone)
+ {
+ ASSERT(!(op & NodeHasVarArgs));
+ children.fixed.child1 = child1;
+ children.fixed.child2 = child2;
+ children.fixed.child3 = child3;
+ }
+
+ // Construct a node with a variable number of children and two immediate values.
+ Node(VarArgTag, NodeType op, CodeOrigin codeOrigin, OpInfo imm1, OpInfo imm2, unsigned firstChild, unsigned numChildren)
+ : op(op)
+ , codeOrigin(codeOrigin)
+ , m_virtualRegister(InvalidVirtualRegister)
+ , m_refCount(0)
+ , m_opInfo(imm1.m_value)
+ , m_opInfo2(safeCast<unsigned>(imm2.m_value))
+ , m_prediction(PredictNone)
+ {
+ ASSERT(op & NodeHasVarArgs);
+ children.variable.firstChild = firstChild;
+ children.variable.numChildren = numChildren;
+ }
+
+ bool mustGenerate()
+ {
+ return op & NodeMustGenerate;
+ }
+
+ bool isConstant()
+ {
+ return op == JSConstant;
+ }
+
+ bool isWeakConstant()
+ {
+ return op == WeakJSConstant;
+ }
+
+ bool hasConstant()
+ {
+ return isConstant() || isWeakConstant();
+ }
+
+ unsigned constantNumber()
+ {
+ ASSERT(isConstant());
+ return m_opInfo;
+ }
+
+ JSCell* weakConstant()
+ {
+ return bitwise_cast<JSCell*>(m_opInfo);
+ }
+
+ JSValue valueOfJSConstant(CodeBlock* codeBlock)
+ {
+ if (op == WeakJSConstant)
+ return JSValue(weakConstant());
+ return codeBlock->constantRegister(FirstConstantRegisterIndex + constantNumber()).get();
+ }
+
+ bool isInt32Constant(CodeBlock* codeBlock)
+ {
+ return isConstant() && valueOfJSConstant(codeBlock).isInt32();
+ }
+
+ bool isDoubleConstant(CodeBlock* codeBlock)
+ {
+ bool result = isConstant() && valueOfJSConstant(codeBlock).isDouble();
+ if (result)
+ ASSERT(!isInt32Constant(codeBlock));
+ return result;
+ }
+
+ bool isNumberConstant(CodeBlock* codeBlock)
+ {
+ bool result = isConstant() && valueOfJSConstant(codeBlock).isNumber();
+ ASSERT(result == (isInt32Constant(codeBlock) || isDoubleConstant(codeBlock)));
+ return result;
+ }
+
+ bool isBooleanConstant(CodeBlock* codeBlock)
+ {
+ return isConstant() && valueOfJSConstant(codeBlock).isBoolean();
+ }
+
+ bool hasVariableAccessData()
+ {
+ switch (op) {
+ case GetLocal:
+ case SetLocal:
+ case Phi:
+ case SetArgument:
+ case Flush:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool hasLocal()
+ {
+ return hasVariableAccessData();
+ }
+
+ VariableAccessData* variableAccessData()
+ {
+ ASSERT(hasVariableAccessData());
+ return reinterpret_cast<VariableAccessData*>(m_opInfo)->find();
+ }
+
+ VirtualRegister local()
+ {
+ return variableAccessData()->local();
+ }
+
+#ifndef NDEBUG
+ bool hasIdentifier()
+ {
+ switch (op) {
+ case GetById:
+ case PutById:
+ case PutByIdDirect:
+ case Resolve:
+ case ResolveBase:
+ case ResolveBaseStrictPut:
+ return true;
+ default:
+ return false;
+ }
+ }
+#endif
+
+ unsigned identifierNumber()
+ {
+ ASSERT(hasIdentifier());
+ return m_opInfo;
+ }
+
+ unsigned resolveGlobalDataIndex()
+ {
+ ASSERT(op == ResolveGlobal);
+ return m_opInfo;
+ }
+
+ bool hasArithNodeFlags()
+ {
+ switch (op) {
+ case ValueToNumber:
+ case ValueToDouble:
+ case UInt32ToNumber:
+ case ArithAdd:
+ case ArithSub:
+ case ArithMul:
+ case ArithAbs:
+ case ArithMin:
+ case ArithMax:
+ case ArithMod:
+ case ArithDiv:
+ case ValueAdd:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ ArithNodeFlags rawArithNodeFlags()
+ {
+ ASSERT(hasArithNodeFlags());
+ return m_opInfo;
+ }
+
+ // This corrects the arithmetic node flags, so that irrelevant bits are
+ // ignored. In particular, anything other than ArithMul does not need
+ // to know if it can speculate on negative zero.
+ ArithNodeFlags arithNodeFlags()
+ {
+ ArithNodeFlags result = rawArithNodeFlags();
+ if (op == ArithMul)
+ return result;
+ return result & ~NodeNeedsNegZero;
+ }
+
+ ArithNodeFlags arithNodeFlagsForCompare()
+ {
+ if (hasArithNodeFlags())
+ return arithNodeFlags();
+ return 0;
+ }
+
+ void setArithNodeFlag(ArithNodeFlags flags)
+ {
+ ASSERT(hasArithNodeFlags());
+ m_opInfo = flags;
+ }
+
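+ // Merge additional flags into this node's flags; returns true if anything changed.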
+ bool mergeArithNodeFlags(ArithNodeFlags flags)
+ {
+ if (!hasArithNodeFlags())
+ return false;
+ ArithNodeFlags newFlags = m_opInfo | flags;
+ if (newFlags == m_opInfo)
+ return false;
+ m_opInfo = newFlags;
+ return true;
+ }
+
+ bool hasConstantBuffer()
+ {
+ return op == NewArrayBuffer;
+ }
+
+ unsigned startConstant()
+ {
+ ASSERT(hasConstantBuffer());
+ return m_opInfo;
+ }
+
+ unsigned numConstants()
+ {
+ ASSERT(hasConstantBuffer());
+ return m_opInfo2;
+ }
+
+ bool hasRegexpIndex()
+ {
+ return op == NewRegexp;
+ }
+
+ unsigned regexpIndex()
+ {
+ ASSERT(hasRegexpIndex());
+ return m_opInfo;
+ }
+
+ bool hasVarNumber()
+ {
+ return op == GetGlobalVar || op == PutGlobalVar || op == GetScopedVar || op == PutScopedVar;
+ }
+
+ unsigned varNumber()
+ {
+ ASSERT(hasVarNumber());
+ return m_opInfo;
+ }
+
+ bool hasScopeChainDepth()
+ {
+ return op == GetScopeChain;
+ }
+
+ unsigned scopeChainDepth()
+ {
+ ASSERT(hasScopeChainDepth());
+ return m_opInfo;
+ }
+
+ bool hasResult()
+ {
+ return op & NodeResultMask;
+ }
+
+ bool hasInt32Result()
+ {
+ return (op & NodeResultMask) == NodeResultInt32;
+ }
+
+ bool hasNumberResult()
+ {
+ return (op & NodeResultMask) == NodeResultNumber;
+ }
+
+ bool hasJSResult()
+ {
+ return (op & NodeResultMask) == NodeResultJS;
+ }
+
+ bool hasBooleanResult()
+ {
+ return (op & NodeResultMask) == NodeResultBoolean;
+ }
+
+ bool isJump()
+ {
+ return op & NodeIsJump;
+ }
+
+ bool isBranch()
+ {
+ return op & NodeIsBranch;
+ }
+
+ bool isTerminal()
+ {
+ return op & NodeIsTerminal;
+ }
+
+ unsigned takenBytecodeOffsetDuringParsing()
+ {
+ ASSERT(isBranch() || isJump());
+ return m_opInfo;
+ }
+
+ unsigned notTakenBytecodeOffsetDuringParsing()
+ {
+ ASSERT(isBranch());
+ return m_opInfo2;
+ }
+
+ void setTakenBlockIndex(BlockIndex blockIndex)
+ {
+ ASSERT(isBranch() || isJump());
+ m_opInfo = blockIndex;
+ }
+
+ void setNotTakenBlockIndex(BlockIndex blockIndex)
+ {
+ ASSERT(isBranch());
+ m_opInfo2 = blockIndex;
+ }
+
+ BlockIndex takenBlockIndex()
+ {
+ ASSERT(isBranch() || isJump());
+ return m_opInfo;
+ }
+
+ BlockIndex notTakenBlockIndex()
+ {
+ ASSERT(isBranch());
+ return m_opInfo2;
+ }
+
+ bool hasHeapPrediction()
+ {
+ switch (op) {
+ case GetById:
+ case GetByVal:
+ case Call:
+ case Construct:
+ case GetByOffset:
+ case GetScopedVar:
+ case Resolve:
+ case ResolveBase:
+ case ResolveBaseStrictPut:
+ case ResolveGlobal:
+ case ArrayPop:
+ case ArrayPush:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ PredictedType getHeapPrediction()
+ {
+ ASSERT(hasHeapPrediction());
+ return static_cast<PredictedType>(m_opInfo2);
+ }
+
+ bool predictHeap(PredictedType prediction)
+ {
+ ASSERT(hasHeapPrediction());
+
+ return mergePrediction(m_opInfo2, prediction);
+ }
+
+ bool hasFunctionCheckData()
+ {
+ return op == CheckFunction;
+ }
+
+ JSFunction* function()
+ {
+ ASSERT(hasFunctionCheckData());
+ return reinterpret_cast<JSFunction*>(m_opInfo);
+ }
+
+ bool hasStructureTransitionData()
+ {
+ return op == PutStructure;
+ }
+
+ StructureTransitionData& structureTransitionData()
+ {
+ ASSERT(hasStructureTransitionData());
+ return *reinterpret_cast<StructureTransitionData*>(m_opInfo);
+ }
+
+ bool hasStructureSet()
+ {
+ return op == CheckStructure;
+ }
+
+ StructureSet& structureSet()
+ {
+ ASSERT(hasStructureSet());
+ return *reinterpret_cast<StructureSet*>(m_opInfo);
+ }
+
+ bool hasStorageAccessData()
+ {
+ return op == GetByOffset || op == PutByOffset;
+ }
+
+ unsigned storageAccessDataIndex()
+ {
+ return m_opInfo;
+ }
+
+ bool hasVirtualRegister()
+ {
+ return m_virtualRegister != InvalidVirtualRegister;
+ }
+
+ VirtualRegister virtualRegister()
+ {
+ ASSERT(hasResult());
+ ASSERT(m_virtualRegister != InvalidVirtualRegister);
+ return m_virtualRegister;
+ }
+
+ void setVirtualRegister(VirtualRegister virtualRegister)
+ {
+ ASSERT(hasResult());
+ ASSERT(m_virtualRegister == InvalidVirtualRegister);
+ m_virtualRegister = virtualRegister;
+ }
+
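+ // Only nodes that are referenced are code generated; Phi and Flush are never code
+ // generated directly.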
+ bool shouldGenerate()
+ {
+ return m_refCount && op != Phi && op != Flush;
+ }
+
+ unsigned refCount()
+ {
+ return m_refCount;
+ }
+
+ // Returns true when the ref count passes from 0 to 1.
+ bool ref()
+ {
+ return !m_refCount++;
+ }
+
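+ // The ref count excluding the extra reference held on 'must generate' nodes.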
+ unsigned adjustedRefCount()
+ {
+ return mustGenerate() ? m_refCount - 1 : m_refCount;
+ }
+
+ void setRefCount(unsigned refCount)
+ {
+ m_refCount = refCount;
+ }
+
+ // Derefs the node and returns true if the ref count reached zero.
+ // In general you don't want to use this directly; use Graph::deref
+ // instead.
+ bool deref()
+ {
+ ASSERT(m_refCount);
+ return !--m_refCount;
+ }
+
+ NodeIndex child1()
+ {
+ ASSERT(!(op & NodeHasVarArgs));
+ return children.fixed.child1;
+ }
+
+ // This is useful if you want to do a fast check on the first child
+ // before also doing a check on the opcode. Use this with care and
+ // avoid it if possible.
+ NodeIndex child1Unchecked()
+ {
+ return children.fixed.child1;
+ }
+
+ NodeIndex child2()
+ {
+ ASSERT(!(op & NodeHasVarArgs));
+ return children.fixed.child2;
+ }
+
+ NodeIndex child3()
+ {
+ ASSERT(!(op & NodeHasVarArgs));
+ return children.fixed.child3;
+ }
+
+ unsigned firstChild()
+ {
+ ASSERT(op & NodeHasVarArgs);
+ return children.variable.firstChild;
+ }
+
+ unsigned numChildren()
+ {
+ ASSERT(op & NodeHasVarArgs);
+ return children.variable.numChildren;
+ }
+
+ PredictedType prediction()
+ {
+ return m_prediction;
+ }
+
+ bool predict(PredictedType prediction)
+ {
+ return mergePrediction(m_prediction, prediction);
+ }
+
+ bool shouldSpeculateInteger()
+ {
+ return isInt32Prediction(prediction());
+ }
+
+ bool shouldSpeculateDouble()
+ {
+ return isDoublePrediction(prediction());
+ }
+
+ bool shouldSpeculateNumber()
+ {
+ return isNumberPrediction(prediction()) || prediction() == PredictNone;
+ }
+
+ bool shouldNotSpeculateInteger()
+ {
+ return !!(prediction() & PredictDouble);
+ }
+
+ bool shouldSpeculateFinalObject()
+ {
+ return isFinalObjectPrediction(prediction());
+ }
+
+ bool shouldSpeculateFinalObjectOrOther()
+ {
+ return isFinalObjectOrOtherPrediction(prediction());
+ }
+
+ bool shouldSpeculateArray()
+ {
+ return isArrayPrediction(prediction());
+ }
+
+ bool shouldSpeculateByteArray()
+ {
+ return !!(prediction() & PredictByteArray);
+ }
+
+ bool shouldSpeculateInt8Array()
+ {
+#if CPU(X86) || CPU(X86_64)
+ return isInt8ArrayPrediction(prediction());
+#else
+ return false;
+#endif
+ }
+
+ bool shouldSpeculateInt16Array()
+ {
+#if CPU(X86) || CPU(X86_64)
+ return isInt16ArrayPrediction(prediction());
+#else
+ return false;
+#endif
+ }
+
+ bool shouldSpeculateInt32Array()
+ {
+ return isInt32ArrayPrediction(prediction());
+ }
+
+ bool shouldSpeculateUint8Array()
+ {
+ return isUint8ArrayPrediction(prediction());
+ }
+
+ bool shouldSpeculateUint16Array()
+ {
+ return isUint16ArrayPrediction(prediction());
+ }
+
+ bool shouldSpeculateUint32Array()
+ {
+ return isUint32ArrayPrediction(prediction());
+ }
+
+ bool shouldSpeculateFloat32Array()
+ {
+#if CPU(X86) || CPU(X86_64)
+ return isFloat32ArrayPrediction(prediction());
+#else
+ return false;
+#endif
+ }
+
+ bool shouldSpeculateFloat64Array()
+ {
+ return isFloat64ArrayPrediction(prediction());
+ }
+
+ bool shouldSpeculateArrayOrOther()
+ {
+ return isArrayOrOtherPrediction(prediction());
+ }
+
+ bool shouldSpeculateObject()
+ {
+ return isObjectPrediction(prediction());
+ }
+
+ bool shouldSpeculateCell()
+ {
+ return isCellPrediction(prediction());
+ }
+
+ static bool shouldSpeculateInteger(Node& op1, Node& op2)
+ {
+ return op1.shouldSpeculateInteger() && op2.shouldSpeculateInteger();
+ }
+
+ static bool shouldSpeculateNumber(Node& op1, Node& op2)
+ {
+ return op1.shouldSpeculateNumber() && op2.shouldSpeculateNumber();
+ }
+
+ static bool shouldSpeculateFinalObject(Node& op1, Node& op2)
+ {
+ return (op1.shouldSpeculateFinalObject() && op2.shouldSpeculateObject())
+ || (op1.shouldSpeculateObject() && op2.shouldSpeculateFinalObject());
+ }
+
+ static bool shouldSpeculateArray(Node& op1, Node& op2)
+ {
+ return (op1.shouldSpeculateArray() && op2.shouldSpeculateObject())
+ || (op1.shouldSpeculateObject() && op2.shouldSpeculateArray());
+ }
+
+ bool canSpeculateInteger()
+ {
+ return nodeCanSpeculateInteger(arithNodeFlags());
+ }
+
+#ifndef NDEBUG
+ void dumpChildren(FILE* out)
+ {
+ if (child1() == NoNode)
+ return;
+ fprintf(out, "@%u", child1());
+ if (child2() == NoNode)
+ return;
+ fprintf(out, ", @%u", child2());
+ if (child3() == NoNode)
+ return;
+ fprintf(out, ", @%u", child3());
+ }
+#endif
+
+ // This enum value describes the type of the node.
+ NodeType op;
+ // Used to look up exception handling information (currently implemented as a bytecode index).
+ CodeOrigin codeOrigin;
+ // References to up to 3 children (0 for no child).
+ union {
+ struct {
+ NodeIndex child1, child2, child3;
+ } fixed;
+ struct {
+ unsigned firstChild;
+ unsigned numChildren;
+ } variable;
+ } children;
+
+private:
+ // The virtual register number (spill location) associated with this node.
+ VirtualRegister m_virtualRegister;
+ // The number of uses of the result of this operation (+1 for 'must generate' nodes, which have side-effects).
+ unsigned m_refCount;
+ // Immediate values, accessed via the type-checked accessors above. The first one is
+ // big enough to store a pointer.
+ uintptr_t m_opInfo;
+ unsigned m_opInfo2;
+ // The prediction ascribed to this node after propagation.
+ PredictedType m_prediction;
+};
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
new file mode 100644
index 000000000..4510ec7b9
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGOSREntry.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "DFGNode.h"
+#include "JIT.h"
+
+namespace JSC { namespace DFG {
+
+void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex)
+{
+#if DFG_ENABLE(OSR_ENTRY)
+ ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);
+ ASSERT(codeBlock->alternative());
+ ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);
+ ASSERT(!codeBlock->jitCodeMap());
+ ASSERT(codeBlock->numberOfDFGOSREntries());
+
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("OSR in %p(%p) from bc#%u\n", codeBlock, codeBlock->alternative(), bytecodeIndex);
+#endif
+
+ JSGlobalData* globalData = &exec->globalData();
+ OSREntryData* entry = codeBlock->dfgOSREntryDataForBytecodeIndex(bytecodeIndex);
+
+ ASSERT(entry->m_bytecodeIndex == bytecodeIndex);
+
+ // The code below checks if it is safe to perform OSR entry. It may find
+ // that it is unsafe to do so, for any number of reasons, which are documented
+ // below. If the code decides not to OSR then it returns 0, and it's the caller's
+ // responsibility to patch up the state in such a way as to ensure that it's
+ // both safe and efficient to continue executing baseline code for now. This
+ // should almost certainly include calling either codeBlock->optimizeAfterWarmUp()
+ // or codeBlock->dontOptimizeAnytimeSoon().
+
+ // 1) Verify predictions. If the predictions are inconsistent with the actual
+ // values, then OSR entry is not possible at this time. It's tempting to
+ // assume that we could somehow avoid this case. We can certainly avoid it
+ // for first-time loop OSR - that is, OSR into a CodeBlock that we have just
+ // compiled. Then we are almost guaranteed that all of the predictions will
+ // check out. It would be pretty easy to make that a hard guarantee. But
+ // then there would still be the case where two call frames with the same
+ // baseline CodeBlock are on the stack at the same time. The top one
+ // triggers compilation and OSR. In that case, we may no longer have
+ // accurate value profiles for the one deeper in the stack. Hence, when we
+ // pop into the CodeBlock that is deeper on the stack, we might OSR and
+ // realize that the predictions are wrong. Probably, in most cases, this is
+ // just an anomaly in the sense that the older CodeBlock simply went off
+ // into a less-likely path. So, the wisest course of action is to simply not
+ // OSR at this time.
+
+ for (size_t argument = 0; argument < entry->m_expectedValues.numberOfArguments(); ++argument) {
+ if (argument >= exec->argumentCountIncludingThis()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf(" OSR failed because argument %lu was not passed, expected ", argument);
+ entry->m_expectedValues.argument(argument).dump(stdout);
+ printf(".\n");
+#endif
+ return 0;
+ }
+
+ JSValue value;
+ if (!argument)
+ value = exec->hostThisValue();
+ else
+ value = exec->argument(argument - 1);
+
+ if (!entry->m_expectedValues.argument(argument).validate(value)) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf(" OSR failed because argument %lu is %s, expected ", argument, value.description());
+ entry->m_expectedValues.argument(argument).dump(stdout);
+ printf(".\n");
+#endif
+ return 0;
+ }
+ }
+
+ for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
+ if (entry->m_localsForcedDouble.get(local)) {
+ if (!exec->registers()[local].jsValue().isNumber()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf(" OSR failed because variable %lu is %s, expected number.\n", local, exec->registers()[local].jsValue().description());
+#endif
+ return 0;
+ }
+ continue;
+ }
+ if (!entry->m_expectedValues.local(local).validate(exec->registers()[local].jsValue())) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf(" OSR failed because variable %lu is %s, expected ", local, exec->registers()[local].jsValue().description());
+ entry->m_expectedValues.local(local).dump(stdout);
+ printf(".\n");
+#endif
+ return 0;
+ }
+ }
+
+ // 2) Check the stack height. The DFG JIT may require a taller stack than the
+ // baseline JIT, in some cases. If we can't grow the stack, then don't do
+ // OSR right now. That's the only option we have unless we want basic block
+ // boundaries to start throwing RangeErrors. Although that would be possible,
+ // it seems silly: you'd be diverting the program to error handling when it
+ // would have otherwise just kept running albeit less quickly.
+
+ if (!globalData->interpreter->registerFile().grow(&exec->registers()[codeBlock->m_numCalleeRegisters])) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf(" OSR failed because stack growth failed.\n");
+#endif
+ return 0;
+ }
+
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf(" OSR should succeed.\n");
+#endif
+
+#if USE(JSVALUE64)
+ // 3) Perform data format conversions.
+ for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
+ if (entry->m_localsForcedDouble.get(local))
+ *bitwise_cast<double*>(exec->registers() + local) = exec->registers()[local].jsValue().asNumber();
+ }
+#endif
+
+ // 4) Fix the call frame.
+
+ exec->setCodeBlock(codeBlock);
+
+ // 5) Find and return the destination machine code address.
+
+ void* result = codeBlock->getJITCode().executableAddressAtOffset(entry->m_machineCodeOffset);
+
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf(" OSR returning machine code address %p.\n", result);
+#endif
+
+ return result;
+#else // DFG_ENABLE(OSR_ENTRY)
+ UNUSED_PARAM(exec);
+ UNUSED_PARAM(codeBlock);
+ UNUSED_PARAM(bytecodeIndex);
+ return 0;
+#endif
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.h b/Source/JavaScriptCore/dfg/DFGOSREntry.h
new file mode 100644
index 000000000..e38a6ceb9
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGOSREntry.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGOSREntry_h
+#define DFGOSREntry_h
+
+#include "DFGAbstractValue.h"
+#include "DFGOperands.h"
+#include <wtf/BitVector.h>
+
+namespace JSC {
+
+class ExecState;
+class CodeBlock;
+
+namespace DFG {
+
+#if ENABLE(DFG_JIT)
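+// Describes a point at which execution may enter DFG-compiled code from the baseline
+// JIT: the bytecode index of the entry point, the offset of the corresponding machine
+// code, the abstract values expected for each argument and local, and which locals
+// are required to already be in double format.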
+struct OSREntryData {
+ unsigned m_bytecodeIndex;
+ unsigned m_machineCodeOffset;
+ Operands<AbstractValue> m_expectedValues;
+ BitVector m_localsForcedDouble;
+};
+
+inline unsigned getOSREntryDataBytecodeIndex(OSREntryData* osrEntryData)
+{
+ return osrEntryData->m_bytecodeIndex;
+}
+
+void* prepareOSREntry(ExecState*, CodeBlock*, unsigned bytecodeIndex);
+#else
+inline void* prepareOSREntry(ExecState*, CodeBlock*, unsigned) { return 0; }
+#endif
+
+} } // namespace JSC::DFG
+
+#endif // DFGOSREntry_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
new file mode 100644
index 000000000..7b1941146
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGOSRExit.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGAssemblyHelpers.h"
+#include "DFGSpeculativeJIT.h"
+
+namespace JSC { namespace DFG {
+
+OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, ValueProfile* valueProfile, MacroAssembler::Jump check, SpeculativeJIT* jit, unsigned recoveryIndex)
+ : m_jsValueSource(jsValueSource)
+ , m_valueProfile(valueProfile)
+ , m_check(check)
+ , m_nodeIndex(jit->m_compileIndex)
+ , m_codeOrigin(jit->m_codeOriginForOSR)
+ , m_recoveryIndex(recoveryIndex)
+ , m_kind(kind)
+ , m_count(0)
+ , m_arguments(jit->m_arguments.size())
+ , m_variables(jit->m_variables.size())
+ , m_lastSetOperand(jit->m_lastSetOperand)
+{
+ ASSERT(m_codeOrigin.isSet());
+ for (unsigned argument = 0; argument < m_arguments.size(); ++argument)
+ m_arguments[argument] = jit->computeValueRecoveryFor(jit->m_arguments[argument]);
+ for (unsigned variable = 0; variable < m_variables.size(); ++variable)
+ m_variables[variable] = jit->computeValueRecoveryFor(jit->m_variables[variable]);
+}
+
+#ifndef NDEBUG
+void OSRExit::dump(FILE* out) const
+{
+ for (unsigned argument = 0; argument < m_arguments.size(); ++argument)
+ m_arguments[argument].dump(out);
+ fprintf(out, " : ");
+ for (unsigned variable = 0; variable < m_variables.size(); ++variable)
+ m_variables[variable].dump(out);
+}
+#endif
+
+bool OSRExit::considerAddingAsFrequentExitSiteSlow(CodeBlock* dfgCodeBlock, CodeBlock* profiledCodeBlock)
+{
+ if (static_cast<double>(m_count) / dfgCodeBlock->speculativeFailCounter() <= Options::osrExitProminenceForFrequentExitSite)
+ return false;
+
+ return AssemblyHelpers::baselineCodeBlockForOriginAndBaselineCodeBlock(m_codeOrigin, profiledCodeBlock)->addFrequentExitSite(FrequentExitSite(m_codeOrigin.bytecodeIndex, m_kind));
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.h b/Source/JavaScriptCore/dfg/DFGOSRExit.h
new file mode 100644
index 000000000..cf96f4f3a
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGOSRExit_h
+#define DFGOSRExit_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeOrigin.h"
+#include "DFGCommon.h"
+#include "DFGCorrectableJumpPoint.h"
+#include "DFGExitProfile.h"
+#include "DFGGPRInfo.h"
+#include "DFGOperands.h"
+#include "MacroAssembler.h"
+#include "ValueProfile.h"
+#include "ValueRecovery.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace DFG {
+
+class SpeculativeJIT;
+
+// This enum describes the types of additional recovery that
+// may need be performed should a speculation check fail.
+enum SpeculationRecoveryType {
+ SpeculativeAdd,
+ BooleanSpeculationCheck
+};
+
+// === SpeculationRecovery ===
+//
+// This class provides additional information that may be associated with a
+// speculation check - for example, how to undo an arithmetic operation that has
+// already modified a register before the check failed.
+class SpeculationRecovery {
+public:
+ SpeculationRecovery(SpeculationRecoveryType type, GPRReg dest, GPRReg src)
+ : m_type(type)
+ , m_dest(dest)
+ , m_src(src)
+ {
+ }
+
+ SpeculationRecoveryType type() { return m_type; }
+ GPRReg dest() { return m_dest; }
+ GPRReg src() { return m_src; }
+
+private:
+ // Indicates the type of additional recovery to be performed.
+ SpeculationRecoveryType m_type;
+ // Different recovery types may require different additional information here.
+ GPRReg m_dest;
+ GPRReg m_src;
+};
+
+// === OSRExit ===
+//
+// This structure describes how to exit the speculative path by
+// going into baseline code.
+struct OSRExit {
+ OSRExit(ExitKind, JSValueSource, ValueProfile*, MacroAssembler::Jump, SpeculativeJIT*, unsigned recoveryIndex = 0);
+
+ MacroAssemblerCodeRef m_code;
+
+ JSValueSource m_jsValueSource;
+ ValueProfile* m_valueProfile;
+
+ CorrectableJumpPoint m_check;
+ NodeIndex m_nodeIndex;
+ CodeOrigin m_codeOrigin;
+
+ unsigned m_recoveryIndex;
+
+ ExitKind m_kind;
+ uint32_t m_count;
+
+ // Convenient way of iterating over ValueRecoveries while being
+ // generic over argument versus variable.
+ int numberOfRecoveries() const { return m_arguments.size() + m_variables.size(); }
+ const ValueRecovery& valueRecovery(int index) const
+ {
+ if (index < (int)m_arguments.size())
+ return m_arguments[index];
+ return m_variables[index - m_arguments.size()];
+ }
+ ValueRecovery& valueRecoveryForOperand(int operand)
+ {
+ if (operandIsArgument(operand))
+ return m_arguments[operandToArgument(operand)];
+ return m_variables[operand];
+ }
+ bool isArgument(int index) const { return index < (int)m_arguments.size(); }
+ bool isVariable(int index) const { return !isArgument(index); }
+ int argumentForIndex(int index) const
+ {
+ return index;
+ }
+ int variableForIndex(int index) const
+ {
+ return index - m_arguments.size();
+ }
+ int operandForIndex(int index) const
+ {
+ if (index < (int)m_arguments.size())
+ return operandToArgument(index);
+ return index - m_arguments.size();
+ }
+
+ bool considerAddingAsFrequentExitSite(CodeBlock* dfgCodeBlock, CodeBlock* profiledCodeBlock)
+ {
+ if (!m_count || !exitKindIsCountable(m_kind))
+ return false;
+ return considerAddingAsFrequentExitSiteSlow(dfgCodeBlock, profiledCodeBlock);
+ }
+
+#ifndef NDEBUG
+ void dump(FILE* out) const;
+#endif
+
+ Vector<ValueRecovery, 0> m_arguments;
+ Vector<ValueRecovery, 0> m_variables;
+ int m_lastSetOperand;
+
+private:
+ bool considerAddingAsFrequentExitSiteSlow(CodeBlock* dfgCodeBlock, CodeBlock* profiledCodeBlock);
+};
+
+#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
+struct SpeculationFailureDebugInfo {
+ CodeBlock* codeBlock;
+ NodeIndex nodeIndex;
+};
+#endif
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGOSRExit_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
new file mode 100644
index 000000000..3da8189d1
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGOSRExitCompiler.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "CallFrame.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
+
+namespace JSC { namespace DFG {
+
+extern "C" {
+
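+// Compiles the exit stub for the speculation check identified by
+// globalData->osrExitIndex, relinks the original check jump to point at the stub, and
+// records the stub's entry address for the calling thunk.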
+void compileOSRExit(ExecState* exec)
+{
+ CodeBlock* codeBlock = exec->codeBlock();
+
+ ASSERT(codeBlock);
+ ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);
+
+ JSGlobalData* globalData = &exec->globalData();
+
+ uint32_t exitIndex = globalData->osrExitIndex;
+ OSRExit& exit = codeBlock->osrExit(exitIndex);
+
+ SpeculationRecovery* recovery = 0;
+ if (exit.m_recoveryIndex)
+ recovery = &codeBlock->speculationRecovery(exit.m_recoveryIndex - 1);
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "Generating OSR exit #%u (bc#%u, @%u, %s) for code block %p.\n", exitIndex, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex, exitKindToString(exit.m_kind), codeBlock);
+#endif
+
+ {
+ AssemblyHelpers jit(globalData, codeBlock);
+ OSRExitCompiler exitCompiler(jit);
+
+ exitCompiler.compileExit(exit, recovery);
+
+ LinkBuffer patchBuffer(*globalData, &jit);
+ exit.m_code = patchBuffer.finalizeCode();
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "OSR exit code at [%p, %p).\n", patchBuffer.debugAddress(), static_cast<char*>(patchBuffer.debugAddress()) + patchBuffer.debugSize());
+#endif
+ }
+
+ {
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchBuffer.relink(exit.m_check.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
+ }
+
+ globalData->osrExitJumpDestination = exit.m_code.code().executableAddress();
+}
+
+} // extern "C"
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h
new file mode 100644
index 000000000..e08362f22
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGOSRExitCompiler_h
+#define DFGOSRExitCompiler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGAssemblyHelpers.h"
+#include "DFGOSRExit.h"
+#include "DFGOperations.h"
+
+namespace JSC {
+
+class ExecState;
+
+namespace DFG {
+
+class OSRExitCompiler {
+public:
+ OSRExitCompiler(AssemblyHelpers& jit)
+ : m_jit(jit)
+ {
+ }
+
+ void compileExit(const OSRExit&, SpeculationRecovery*);
+
+private:
+#if !ASSERT_DISABLED
+ static unsigned badIndex() { return static_cast<unsigned>(-1); };
+#endif
+
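+ // m_poisonScratchIndices maps each poisoned value to its slot in the scratch buffer.
+ // With assertions enabled the entries start out as badIndex(), so reading an index
+ // that was never assigned is caught by poisonIndex().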
+ void initializePoisoned(unsigned size)
+ {
+#if ASSERT_DISABLED
+ m_poisonScratchIndices.resize(size);
+#else
+ m_poisonScratchIndices.fill(badIndex(), size);
+#endif
+ }
+
+ unsigned poisonIndex(unsigned index)
+ {
+ unsigned result = m_poisonScratchIndices[index];
+ ASSERT(result != badIndex());
+ return result;
+ }
+
+ AssemblyHelpers& m_jit;
+ Vector<unsigned> m_poisonScratchIndices;
+};
+
+extern "C" {
+void DFG_OPERATION compileOSRExit(ExecState*);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGOSRExitCompiler_h
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
new file mode 100644
index 000000000..3d27e00e4
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
@@ -0,0 +1,662 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGOSRExitCompiler.h"
+
+#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)
+
+#include "DFGOperations.h"
+
+namespace JSC { namespace DFG {
+
+void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* recovery)
+{
+ // 1) Pro-forma stuff.
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "OSR exit for Node @%d (", (int)exit.m_nodeIndex);
+ for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
+ fprintf(stderr, "bc#%u", codeOrigin.bytecodeIndex);
+ if (!codeOrigin.inlineCallFrame)
+ break;
+ fprintf(stderr, " -> %p ", codeOrigin.inlineCallFrame->executable.get());
+ }
+ fprintf(stderr, ") at JIT offset 0x%x ", m_jit.debugOffset());
+ exit.dump(stderr);
+#endif
+#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
+ SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
+ debugInfo->codeBlock = m_jit.codeBlock();
+ debugInfo->nodeIndex = exit.m_nodeIndex;
+
+ m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
+#endif
+
+#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
+ m_jit.breakpoint();
+#endif
+
+#if DFG_ENABLE(SUCCESS_STATS)
+ static SamplingCounter counter("SpeculationFailure");
+ m_jit.emitCount(counter);
+#endif
+
+ // 2) Perform speculation recovery. This only comes into play when an operation
+ // starts mutating state before verifying the speculation it has already made.
+
+ if (recovery) {
+ switch (recovery->type()) {
+ case SpeculativeAdd:
+ m_jit.sub32(recovery->src(), recovery->dest());
+ break;
+
+ case BooleanSpeculationCheck:
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // 3) Refine some value profile, if appropriate.
+
+ if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
+ if (exit.m_jsValueSource.isAddress()) {
+ // Save a register so we can use it.
+ GPRReg scratch = GPRInfo::regT0;
+ if (scratch == exit.m_jsValueSource.base())
+ scratch = GPRInfo::regT1;
+ EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(uint32_t)));
+ m_jit.store32(scratch, scratchBuffer);
+ m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
+ m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.tag);
+ m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
+ m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.payload);
+ m_jit.load32(scratchBuffer, scratch);
+ } else if (exit.m_jsValueSource.hasKnownTag()) {
+ m_jit.store32(AssemblyHelpers::Imm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.tag);
+ m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.payload);
+ } else {
+ m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.tag);
+ m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.payload);
+ }
+ }
+
+ // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
+ // whose destination is now occupied by a DFG virtual register, and we need
+ // one for every displaced virtual register if there are more than
+ // GPRInfo::numberOfRegisters of them. Also see if there are any constants,
+ // any undefined slots, any FPR slots, and any unboxed ints.
+
+ Vector<bool> poisonedVirtualRegisters(exit.m_variables.size());
+ for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
+ poisonedVirtualRegisters[i] = false;
+
+ unsigned numberOfPoisonedVirtualRegisters = 0;
+ unsigned numberOfDisplacedVirtualRegisters = 0;
+
+ // Booleans for fast checks. We expect that most OSR exits do not have to rebox
+ // Int32s, have no FPRs, and have no constants. If there are constants, we
+ // expect most of them to be jsUndefined(); if that's true then we handle that
+ // specially to minimize code size and execution time.
+ bool haveUnboxedInt32InRegisterFile = false;
+ bool haveUnboxedCellInRegisterFile = false;
+ bool haveUnboxedBooleanInRegisterFile = false;
+ bool haveUInt32s = false;
+ bool haveFPRs = false;
+ bool haveConstants = false;
+ bool haveUndefined = false;
+
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ switch (recovery.technique()) {
+ case DisplacedInRegisterFile:
+ case Int32DisplacedInRegisterFile:
+ case CellDisplacedInRegisterFile:
+ case BooleanDisplacedInRegisterFile:
+ numberOfDisplacedVirtualRegisters++;
+ ASSERT((int)recovery.virtualRegister() >= 0);
+
+ // See if we might like to store to this virtual register before doing
+ // virtual register shuffling. If so, we say that the virtual register
+ // is poisoned: it cannot be stored to until after displaced virtual
+            // registers are handled. We track poisoned virtual registers carefully
+ // to ensure this happens efficiently. Note that we expect this case
+ // to be rare, so the handling of it is optimized for the cases in
+ // which it does not happen.
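+            // For example: if the recovery for local 5 is "reload from local 3"
+            // (DisplacedInRegisterFile), while local 3's own recovery is "take
+            // the value currently sitting in a GPR", then storing that GPR into
+            // local 3 right away would clobber the value local 5 still needs.
+            // So local 3 is poisoned and its register value is parked in the
+            // scratch buffer until the displaced shuffle has read local 3.
+            // (Illustrative example; the particular locals are arbitrary.)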
+ if (recovery.virtualRegister() < (int)exit.m_variables.size()) {
+ switch (exit.m_variables[recovery.virtualRegister()].technique()) {
+ case InGPR:
+ case UnboxedInt32InGPR:
+ case UnboxedBooleanInGPR:
+ case UInt32InGPR:
+ case InPair:
+ case InFPR:
+ if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
+ poisonedVirtualRegisters[recovery.virtualRegister()] = true;
+ numberOfPoisonedVirtualRegisters++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+
+ case UInt32InGPR:
+ haveUInt32s = true;
+ break;
+
+ case AlreadyInRegisterFileAsUnboxedInt32:
+ haveUnboxedInt32InRegisterFile = true;
+ break;
+
+ case AlreadyInRegisterFileAsUnboxedCell:
+ haveUnboxedCellInRegisterFile = true;
+ break;
+
+ case AlreadyInRegisterFileAsUnboxedBoolean:
+ haveUnboxedBooleanInRegisterFile = true;
+ break;
+
+ case InFPR:
+ haveFPRs = true;
+ break;
+
+ case Constant:
+ haveConstants = true;
+ if (recovery.constant().isUndefined())
+ haveUndefined = true;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ unsigned scratchBufferLengthBeforeUInt32s = numberOfPoisonedVirtualRegisters + ((numberOfDisplacedVirtualRegisters * 2) <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters);
+ EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * (scratchBufferLengthBeforeUInt32s + (haveUInt32s ? 2 : 0))));
+
+ // From here on, the code assumes that it is profitable to maximize the distance
+ // between when something is computed and when it is stored.
+
+ // 5) Perform all reboxing of integers and cells, except for those in registers.
+
+ if (haveUnboxedInt32InRegisterFile || haveUnboxedCellInRegisterFile || haveUnboxedBooleanInRegisterFile) {
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ switch (recovery.technique()) {
+ case AlreadyInRegisterFileAsUnboxedInt32:
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
+ break;
+
+ case AlreadyInRegisterFileAsUnboxedCell:
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
+ break;
+
+ case AlreadyInRegisterFileAsUnboxedBoolean:
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
+    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
+ // most OSR failure points will have at least one GPR that needs to be dumped.
+
+ initializePoisoned(exit.m_variables.size());
+ unsigned currentPoisonIndex = 0;
+
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ int operand = exit.operandForIndex(index);
+ switch (recovery.technique()) {
+ case InGPR:
+ case UnboxedInt32InGPR:
+ case UnboxedBooleanInGPR:
+ if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
+ currentPoisonIndex++;
+ } else {
+ uint32_t tag = JSValue::EmptyValueTag;
+ if (recovery.technique() == InGPR)
+ tag = JSValue::CellTag;
+ else if (recovery.technique() == UnboxedInt32InGPR)
+ tag = JSValue::Int32Tag;
+ else
+ tag = JSValue::BooleanTag;
+ m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
+ m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
+ }
+ break;
+ case InPair:
+ if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ m_jit.store32(recovery.tagGPR(), reinterpret_cast<char*>(scratchBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ m_jit.store32(recovery.payloadGPR(), reinterpret_cast<char*>(scratchBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
+ currentPoisonIndex++;
+ } else {
+ m_jit.store32(recovery.tagGPR(), AssemblyHelpers::tagFor((VirtualRegister)operand));
+ m_jit.store32(recovery.payloadGPR(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
+ }
+ break;
+ case UInt32InGPR: {
+ EncodedJSValue* myScratch = scratchBuffer + scratchBufferLengthBeforeUInt32s;
+
+ GPRReg addressGPR = GPRInfo::regT0;
+ if (addressGPR == recovery.gpr())
+ addressGPR = GPRInfo::regT1;
+
+ m_jit.storePtr(addressGPR, myScratch);
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
+ m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
+
+ AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));
+
+ m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
+ m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
+ if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchBuffer + currentPoisonIndex), addressGPR);
+ m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
+ } else
+ m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor((VirtualRegister)operand));
+
+ AssemblyHelpers::Jump done = m_jit.jump();
+
+ positive.link(&m_jit);
+
+ if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(scratchBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ } else {
+ m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
+ }
+
+ done.link(&m_jit);
+
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
+ m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
+ m_jit.loadPtr(myScratch, addressGPR);
+
+ if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
+ currentPoisonIndex++;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ // 7) Dump all doubles into the register file, or to the scratch storage if the
+ // destination virtual register is poisoned.
+ if (haveFPRs) {
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != InFPR)
+ continue;
+ if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ m_jit.storeDouble(recovery.fpr(), scratchBuffer + currentPoisonIndex);
+ m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
+ currentPoisonIndex++;
+ } else
+ m_jit.storeDouble(recovery.fpr(), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+ }
+ }
+
+ // At this point all GPRs are available for scratch use.
+
+ ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);
+
+ // 8) Reshuffle displaced virtual registers. Optimize for the case that
+ // the number of displaced virtual registers is not more than the number
+ // of available physical registers.
+
+ if (numberOfDisplacedVirtualRegisters) {
+ if (numberOfDisplacedVirtualRegisters * 2 <= GPRInfo::numberOfRegisters) {
+ // So far this appears to be the case that triggers all the time, but
+ // that is far from guaranteed.
+
+ unsigned displacementIndex = 0;
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ switch (recovery.technique()) {
+ case DisplacedInRegisterFile:
+ m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
+ m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
+ break;
+ case Int32DisplacedInRegisterFile:
+ m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
+ m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), GPRInfo::toRegister(displacementIndex++));
+ break;
+ case CellDisplacedInRegisterFile:
+ m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
+ m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::toRegister(displacementIndex++));
+ break;
+ case BooleanDisplacedInRegisterFile:
+ m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
+ m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), GPRInfo::toRegister(displacementIndex++));
+ break;
+ default:
+ break;
+ }
+ }
+
+ displacementIndex = 0;
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ switch (recovery.technique()) {
+ case DisplacedInRegisterFile:
+ case Int32DisplacedInRegisterFile:
+ case CellDisplacedInRegisterFile:
+ case BooleanDisplacedInRegisterFile:
+ m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
+ break;
+ default:
+ break;
+ }
+ }
+ } else {
+ // FIXME: This should use the shuffling algorithm that we use
+ // for speculative->non-speculative jumps, if we ever discover that
+ // some hot code with lots of live values that get displaced and
+ // spilled really enjoys frequently failing speculation.
+
+ // For now this code is engineered to be correct but probably not
+ // super. In particular, it correctly handles cases where for example
+ // the displacements are a permutation of the destination values, like
+ //
+ // 1 -> 2
+ // 2 -> 1
+ //
+ // It accomplishes this by simply lifting all of the virtual registers
+ // from their old (DFG JIT) locations and dropping them in a scratch
+ // location in memory, and then transferring from that scratch location
+ // to their new (old JIT) locations.
+
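+            // In outline (sketch of the two loops below):
+            //
+            //     for each displaced recovery r:   scratch[i++] = registerFile[r.source]
+            //     for each displaced recovery r:   registerFile[r.destination] = scratch[i++]
+            //
+            // where r.source is the DFG's slot (recovery.virtualRegister()) and
+            // r.destination is the baseline slot (exit.operandForIndex(index)).
+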
+ unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ switch (recovery.technique()) {
+ case DisplacedInRegisterFile:
+ m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
+ m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::regT1);
+ m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ m_jit.store32(GPRInfo::regT1, reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ scratchIndex++;
+ break;
+ case Int32DisplacedInRegisterFile:
+ case CellDisplacedInRegisterFile:
+ case BooleanDisplacedInRegisterFile:
+ m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
+ m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ break;
+ default:
+ break;
+ }
+ }
+
+ scratchIndex = numberOfPoisonedVirtualRegisters;
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ switch (recovery.technique()) {
+ case DisplacedInRegisterFile:
+ m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
+ scratchIndex++;
+ break;
+ case Int32DisplacedInRegisterFile:
+ m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ break;
+ case CellDisplacedInRegisterFile:
+ m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ break;
+ case BooleanDisplacedInRegisterFile:
+ m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ break;
+ default:
+ break;
+ }
+ }
+
+ ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
+ }
+ }
+
+ // 9) Dump all poisoned virtual registers.
+
+ if (numberOfPoisonedVirtualRegisters) {
+ for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) {
+ if (!poisonedVirtualRegisters[virtualRegister])
+ continue;
+
+ const ValueRecovery& recovery = exit.m_variables[virtualRegister];
+ switch (recovery.technique()) {
+ case InGPR:
+ case UnboxedInt32InGPR:
+ case UnboxedBooleanInGPR: {
+ m_jit.load32(reinterpret_cast<char*>(scratchBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
+ uint32_t tag = JSValue::EmptyValueTag;
+ if (recovery.technique() == InGPR)
+ tag = JSValue::CellTag;
+ else if (recovery.technique() == UnboxedInt32InGPR)
+ tag = JSValue::Int32Tag;
+ else
+ tag = JSValue::BooleanTag;
+ m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
+ break;
+ }
+
+ case InFPR:
+ case InPair:
+ case UInt32InGPR:
+ m_jit.load32(reinterpret_cast<char*>(scratchBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ m_jit.load32(reinterpret_cast<char*>(scratchBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
+ m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ // 10) Dump all constants. Optimize for Undefined, since that's a constant we see
+ // often.
+
+ if (haveConstants) {
+ if (haveUndefined) {
+ m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().payload()), GPRInfo::regT0);
+ m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().tag()), GPRInfo::regT1);
+ }
+
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != Constant)
+ continue;
+ if (recovery.constant().isUndefined()) {
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
+ } else {
+ m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().payload()), AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().tag()), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
+ }
+ }
+ }
+
+ // 11) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+ // that all new calls into this code will go to the new JIT, so the execute
+ // counter only affects call frames that performed OSR exit and call frames
+ // that were still executing the old JIT at the time of another call frame's
+ // OSR exit. We want to ensure that the following is true:
+ //
+    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
+    //         code eventually, since optimized code is faster. But we don't
+    //         want to do such reentry too aggressively (see (c) below).
+ //
+ // (b) If there is code on the call stack that is still running the old
+ // JIT's code and has never OSR'd, then it should get a chance to
+ // perform OSR entry despite the fact that we've exited.
+ //
+    //     (c) Code that performs an OSR exit should not immediately retry OSR
+ // entry, since both forms of OSR are expensive. OSR entry is
+ // particularly expensive.
+ //
+ // (d) Frequent OSR failures, even those that do not result in the code
+ // running in a hot loop, result in recompilation getting triggered.
+ //
+ // To ensure (c), we'd like to set the execute counter to
+ // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
+ // (a) and (b), since then every OSR exit would delay the opportunity for
+ // every call frame to perform OSR entry. Essentially, if OSR exit happens
+ // frequently and the function has few loops, then the counter will never
+ // become non-negative and OSR entry will never be triggered. OSR entry
+ // will only happen if a loop gets hot in the old JIT, which does a pretty
+ // good job of ensuring (a) and (b). But that doesn't take care of (d),
+ // since each speculation failure would reset the execute counter.
+ // So we check here if the number of speculation failures is significantly
+ // larger than the number of successes (we want 90% success rate), and if
+ // there have been a large enough number of failures. If so, we set the
+ // counter to 0; otherwise we set the counter to
+ // counterValueForOptimizeAfterWarmUp().
+
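+    // Equivalent C-like sketch of the adjustment emitted below (the field
+    // accesses are illustrative; the real code works through raw offsets
+    // into CodeBlock):
+    //
+    //     exit.m_count++;
+    //     unsigned fails = ++codeBlock->speculativeFailCounter;
+    //     unsigned successes = --codeBlock->speculativeSuccessCounter;
+    //     if (fails > codeBlock->largeFailCountThreshold()
+    //         && fails * Options::desiredSpeculativeSuccessFailRatio > successes)
+    //         baselineCodeBlock->executeCounter = Options::executionCounterValueForOptimizeNextInvocation;
+    //     else
+    //         baselineCodeBlock->executeCounter = baselineCodeBlock->counterValueForOptimizeAfterLongWarmUp();
+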
+ m_jit.add32(AssemblyHelpers::Imm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
+
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);
+
+ m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
+ m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
+ m_jit.add32(AssemblyHelpers::Imm32(1), GPRInfo::regT2);
+ m_jit.add32(AssemblyHelpers::Imm32(-1), GPRInfo::regT1);
+ m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
+ m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
+
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);
+
+ AssemblyHelpers::Jump fewFails = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::Imm32(m_jit.codeBlock()->largeFailCountThreshold()));
+ m_jit.mul32(AssemblyHelpers::Imm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);
+
+ AssemblyHelpers::Jump lowFailRate = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
+
+ // Reoptimize as soon as possible.
+ m_jit.store32(AssemblyHelpers::Imm32(Options::executionCounterValueForOptimizeNextInvocation), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
+ AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
+
+ fewFails.link(&m_jit);
+ lowFailRate.link(&m_jit);
+
+ m_jit.store32(AssemblyHelpers::Imm32(m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp()), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
+
+ doneAdjusting.link(&m_jit);
+
+ // 12) Load the result of the last bytecode operation into regT0.
+
+ if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) {
+ m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
+ m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
+ }
+
+    // 13) Fix call frame(s).
+
+ ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock));
+
+ for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
+ InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
+ CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
+ CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
+ Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
+ unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
+ BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), returnBytecodeIndex);
+
+ ASSERT(mapping);
+ ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);
+
+ void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);
+
+ GPRReg callerFrameGPR;
+ if (inlineCallFrame->caller.inlineCallFrame) {
+ m_jit.add32(AssemblyHelpers::Imm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
+ callerFrameGPR = GPRInfo::regT3;
+ } else
+ callerFrameGPR = GPRInfo::callFrameRegister;
+
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CodeBlock)));
+ m_jit.store32(AssemblyHelpers::Imm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
+ m_jit.store32(AssemblyHelpers::Imm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
+ m_jit.storePtr(callerFrameGPR, AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ReturnPC)));
+ m_jit.store32(AssemblyHelpers::Imm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ArgumentCount)));
+ m_jit.store32(AssemblyHelpers::Imm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
+ }
+
+ if (exit.m_codeOrigin.inlineCallFrame)
+ m_jit.addPtr(AssemblyHelpers::Imm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);
+
+ // 14) Jump into the corresponding baseline JIT code.
+
+ CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
+ Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);
+
+ BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex);
+
+ ASSERT(mapping);
+ ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);
+
+ void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);
+
+ ASSERT(GPRInfo::regT2 != GPRInfo::cachedResultRegister && GPRInfo::regT2 != GPRInfo::cachedResultRegister2);
+
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
+ m_jit.jump(GPRInfo::regT2);
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, " -> %p\n", jumpTarget);
+#endif
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT) && USE(JSVALUE32_64)
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
new file mode 100644
index 000000000..c6f4a9ed4
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
@@ -0,0 +1,633 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGOSRExitCompiler.h"
+
+#if ENABLE(DFG_JIT) && USE(JSVALUE64)
+
+#include "DFGOperations.h"
+
+namespace JSC { namespace DFG {
+
+void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* recovery)
+{
+ // 1) Pro-forma stuff.
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "OSR exit for Node @%d (", (int)exit.m_nodeIndex);
+ for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
+ fprintf(stderr, "bc#%u", codeOrigin.bytecodeIndex);
+ if (!codeOrigin.inlineCallFrame)
+ break;
+ fprintf(stderr, " -> %p ", codeOrigin.inlineCallFrame->executable.get());
+ }
+ fprintf(stderr, ") ");
+ exit.dump(stderr);
+#endif
+#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
+ SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
+ debugInfo->codeBlock = m_jit.codeBlock();
+ debugInfo->nodeIndex = exit.m_nodeIndex;
+
+ m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
+#endif
+
+#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
+ m_jit.breakpoint();
+#endif
+
+#if DFG_ENABLE(SUCCESS_STATS)
+ static SamplingCounter counter("SpeculationFailure");
+ m_jit.emitCount(counter);
+#endif
+
+ // 2) Perform speculation recovery. This only comes into play when an operation
+ // starts mutating state before verifying the speculation it has already made.
+
+ GPRReg alreadyBoxed = InvalidGPRReg;
+
+ if (recovery) {
+ switch (recovery->type()) {
+ case SpeculativeAdd:
+ m_jit.sub32(recovery->src(), recovery->dest());
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery->dest());
+ alreadyBoxed = recovery->dest();
+ break;
+
+ case BooleanSpeculationCheck:
+ m_jit.xorPtr(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // 3) Refine some value profile, if appropriate.
+
+ if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
+ if (exit.m_jsValueSource.isAddress()) {
+ // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
+ // since we know how to restore it.
+ m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
+ m_jit.storePtr(GPRInfo::tagTypeNumberRegister, exit.m_valueProfile->specFailBucket(0));
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(TagTypeNumber)), GPRInfo::tagTypeNumberRegister);
+ } else
+ m_jit.storePtr(exit.m_jsValueSource.gpr(), exit.m_valueProfile->specFailBucket(0));
+ }
+
+ // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
+ // whose destination is now occupied by a DFG virtual register, and we need
+ // one for every displaced virtual register if there are more than
+ // GPRInfo::numberOfRegisters of them. Also see if there are any constants,
+ // any undefined slots, any FPR slots, and any unboxed ints.
+
+ Vector<bool> poisonedVirtualRegisters(exit.m_variables.size());
+ for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
+ poisonedVirtualRegisters[i] = false;
+
+ unsigned numberOfPoisonedVirtualRegisters = 0;
+ unsigned numberOfDisplacedVirtualRegisters = 0;
+
+ // Booleans for fast checks. We expect that most OSR exits do not have to rebox
+ // Int32s, have no FPRs, and have no constants. If there are constants, we
+ // expect most of them to be jsUndefined(); if that's true then we handle that
+ // specially to minimize code size and execution time.
+ bool haveUnboxedInt32s = false;
+ bool haveUnboxedDoubles = false;
+ bool haveFPRs = false;
+ bool haveConstants = false;
+ bool haveUndefined = false;
+ bool haveUInt32s = false;
+
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ switch (recovery.technique()) {
+ case Int32DisplacedInRegisterFile:
+ case DoubleDisplacedInRegisterFile:
+ case DisplacedInRegisterFile:
+ numberOfDisplacedVirtualRegisters++;
+ ASSERT((int)recovery.virtualRegister() >= 0);
+
+ // See if we might like to store to this virtual register before doing
+ // virtual register shuffling. If so, we say that the virtual register
+ // is poisoned: it cannot be stored to until after displaced virtual
+            // registers are handled. We track poisoned virtual registers carefully
+ // to ensure this happens efficiently. Note that we expect this case
+ // to be rare, so the handling of it is optimized for the cases in
+ // which it does not happen.
+ if (recovery.virtualRegister() < (int)exit.m_variables.size()) {
+ switch (exit.m_variables[recovery.virtualRegister()].technique()) {
+ case InGPR:
+ case UnboxedInt32InGPR:
+ case UInt32InGPR:
+ case InFPR:
+ if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
+ poisonedVirtualRegisters[recovery.virtualRegister()] = true;
+ numberOfPoisonedVirtualRegisters++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+
+ case UnboxedInt32InGPR:
+ case AlreadyInRegisterFileAsUnboxedInt32:
+ haveUnboxedInt32s = true;
+ break;
+
+ case AlreadyInRegisterFileAsUnboxedDouble:
+ haveUnboxedDoubles = true;
+ break;
+
+ case UInt32InGPR:
+ haveUInt32s = true;
+ break;
+
+ case InFPR:
+ haveFPRs = true;
+ break;
+
+ case Constant:
+ haveConstants = true;
+ if (recovery.constant().isUndefined())
+ haveUndefined = true;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, " ");
+ if (numberOfPoisonedVirtualRegisters)
+ fprintf(stderr, "Poisoned=%u ", numberOfPoisonedVirtualRegisters);
+ if (numberOfDisplacedVirtualRegisters)
+ fprintf(stderr, "Displaced=%u ", numberOfDisplacedVirtualRegisters);
+ if (haveUnboxedInt32s)
+ fprintf(stderr, "UnboxedInt32 ");
+ if (haveUnboxedDoubles)
+ fprintf(stderr, "UnboxedDoubles ");
+ if (haveUInt32s)
+ fprintf(stderr, "UInt32 ");
+ if (haveFPRs)
+ fprintf(stderr, "FPR ");
+ if (haveConstants)
+ fprintf(stderr, "Constants ");
+ if (haveUndefined)
+ fprintf(stderr, "Undefined ");
+ fprintf(stderr, " ");
+#endif
+
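+    // Scratch sizing (sketch): one slot for every poisoned virtual register,
+    // plus one for every displaced virtual register when those outnumber the
+    // GPRs; and at least two slots when UInt32 reboxing needs to spill a
+    // pointer and fpRegT0. The max() works because that reboxing path runs
+    // before the other scratch slots are written.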
+ EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * std::max(haveUInt32s ? 2u : 0u, numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters))));
+
+ // From here on, the code assumes that it is profitable to maximize the distance
+ // between when something is computed and when it is stored.
+
+ // 5) Perform all reboxing of integers.
+
+ if (haveUnboxedInt32s || haveUInt32s) {
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ switch (recovery.technique()) {
+ case UnboxedInt32InGPR:
+ if (recovery.gpr() != alreadyBoxed)
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());
+ break;
+
+ case AlreadyInRegisterFileAsUnboxedInt32:
+ m_jit.store32(AssemblyHelpers::Imm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
+ break;
+
+ case UInt32InGPR: {
+ // This occurs when the speculative JIT left an unsigned 32-bit integer
+ // in a GPR. If it's positive, we can just box the int. Otherwise we
+ // need to turn it into a boxed double.
+
+ // We don't try to be clever with register allocation here; we assume
+ // that the program is using FPRs and we don't try to figure out which
+ // ones it is using. Instead just temporarily save fpRegT0 and then
+ // restore it. This makes sense because this path is not cheap to begin
+ // with, and should happen very rarely.
+
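+                // Worked example (illustrative): a uint32 of 0xFFFFFFFF reads
+                // as -1 when the GPR is interpreted as a signed int32, so the
+                // "positive" branch below is not taken; convertInt32ToDouble
+                // then produces -1.0, and adding 2^32 recovers 4294967295.0,
+                // which is boxed as a double.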
+ GPRReg addressGPR = GPRInfo::regT0;
+ if (addressGPR == recovery.gpr())
+ addressGPR = GPRInfo::regT1;
+
+ m_jit.storePtr(addressGPR, scratchBuffer);
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchBuffer + 1), addressGPR);
+ m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
+
+ AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));
+
+ m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
+ m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
+ m_jit.boxDouble(FPRInfo::fpRegT0, recovery.gpr());
+
+ AssemblyHelpers::Jump done = m_jit.jump();
+
+ positive.link(&m_jit);
+
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());
+
+ done.link(&m_jit);
+
+ m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
+ m_jit.loadPtr(scratchBuffer, addressGPR);
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ }
+
+ // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
+    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
+ // most OSR failure points will have at least one GPR that needs to be dumped.
+
+ initializePoisoned(exit.m_variables.size());
+ unsigned currentPoisonIndex = 0;
+
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ int operand = exit.operandForIndex(index);
+ switch (recovery.technique()) {
+ case InGPR:
+ case UnboxedInt32InGPR:
+ case UInt32InGPR:
+ if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ m_jit.storePtr(recovery.gpr(), scratchBuffer + currentPoisonIndex);
+ m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
+ currentPoisonIndex++;
+ } else
+ m_jit.storePtr(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
+ break;
+ default:
+ break;
+ }
+ }
+
+ // At this point all GPRs are available for scratch use.
+
+ if (haveFPRs) {
+ // 7) Box all doubles (relies on there being more GPRs than FPRs)
+
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != InFPR)
+ continue;
+ FPRReg fpr = recovery.fpr();
+ GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(fpr));
+ m_jit.boxDouble(fpr, gpr);
+ }
+
+ // 8) Dump all doubles into the register file, or to the scratch storage if
+ // the destination virtual register is poisoned.
+
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != InFPR)
+ continue;
+ GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
+ if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ m_jit.storePtr(gpr, scratchBuffer + currentPoisonIndex);
+ m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
+ currentPoisonIndex++;
+ } else
+ m_jit.storePtr(gpr, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+ }
+ }
+
+ // At this point all GPRs and FPRs are available for scratch use.
+
+ // 9) Box all unboxed doubles in the register file.
+ if (haveUnboxedDoubles) {
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != AlreadyInRegisterFileAsUnboxedDouble)
+ continue;
+ m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)), FPRInfo::fpRegT0);
+ m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
+ m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+ }
+ }
+
+ ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);
+
+ // 10) Reshuffle displaced virtual registers. Optimize for the case that
+ // the number of displaced virtual registers is not more than the number
+ // of available physical registers.
+
+ if (numberOfDisplacedVirtualRegisters) {
+ if (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters) {
+ // So far this appears to be the case that triggers all the time, but
+ // that is far from guaranteed.
+
+ unsigned displacementIndex = 0;
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ switch (recovery.technique()) {
+ case DisplacedInRegisterFile:
+ m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
+ break;
+
+ case Int32DisplacedInRegisterFile: {
+ GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
+ m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ break;
+ }
+
+ case DoubleDisplacedInRegisterFile: {
+ GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
+ m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
+ m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ displacementIndex = 0;
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ switch (recovery.technique()) {
+ case DisplacedInRegisterFile:
+ case Int32DisplacedInRegisterFile:
+ case DoubleDisplacedInRegisterFile:
+ m_jit.storePtr(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+ break;
+
+ default:
+ break;
+ }
+ }
+ } else {
+ // FIXME: This should use the shuffling algorithm that we use
+ // for speculative->non-speculative jumps, if we ever discover that
+ // some hot code with lots of live values that get displaced and
+ // spilled really enjoys frequently failing speculation.
+
+ // For now this code is engineered to be correct but probably not
+ // super. In particular, it correctly handles cases where for example
+ // the displacements are a permutation of the destination values, like
+ //
+ // 1 -> 2
+ // 2 -> 1
+ //
+ // It accomplishes this by simply lifting all of the virtual registers
+ // from their old (DFG JIT) locations and dropping them in a scratch
+ // location in memory, and then transferring from that scratch location
+ // to their new (old JIT) locations.
+
+ unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+
+ switch (recovery.technique()) {
+ case DisplacedInRegisterFile:
+ m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
+ m_jit.storePtr(GPRInfo::regT0, scratchBuffer + scratchIndex++);
+ break;
+
+ case Int32DisplacedInRegisterFile: {
+ m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
+ m_jit.storePtr(GPRInfo::regT0, scratchBuffer + scratchIndex++);
+ break;
+ }
+
+ case DoubleDisplacedInRegisterFile: {
+ m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
+ m_jit.subPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
+ m_jit.storePtr(GPRInfo::regT0, scratchBuffer + scratchIndex++);
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ scratchIndex = numberOfPoisonedVirtualRegisters;
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ switch (recovery.technique()) {
+ case DisplacedInRegisterFile:
+ case Int32DisplacedInRegisterFile:
+ case DoubleDisplacedInRegisterFile:
+ m_jit.loadPtr(scratchBuffer + scratchIndex++, GPRInfo::regT0);
+ m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
+ }
+ }
+
+ // 11) Dump all poisoned virtual registers.
+
+ if (numberOfPoisonedVirtualRegisters) {
+ for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) {
+ if (!poisonedVirtualRegisters[virtualRegister])
+ continue;
+
+ const ValueRecovery& recovery = exit.m_variables[virtualRegister];
+ switch (recovery.technique()) {
+ case InGPR:
+ case UnboxedInt32InGPR:
+ case UInt32InGPR:
+ case InFPR:
+ m_jit.loadPtr(scratchBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
+ m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ // 12) Dump all constants. Optimize for Undefined, since that's a constant we see
+ // often.
+
+ if (haveConstants) {
+ if (haveUndefined)
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(JSValue::encode(jsUndefined())), GPRInfo::regT0);
+
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != Constant)
+ continue;
+ if (recovery.constant().isUndefined())
+ m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+ else
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+ }
+ }
+
+ // 13) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+ // that all new calls into this code will go to the new JIT, so the execute
+ // counter only affects call frames that performed OSR exit and call frames
+ // that were still executing the old JIT at the time of another call frame's
+ // OSR exit. We want to ensure that the following is true:
+ //
+    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
+    //         code eventually, since optimized code is faster. But we don't
+    //         want to do such reentry too aggressively (see (c) below).
+ //
+ // (b) If there is code on the call stack that is still running the old
+ // JIT's code and has never OSR'd, then it should get a chance to
+ // perform OSR entry despite the fact that we've exited.
+ //
+    //     (c) Code that performs an OSR exit should not immediately retry OSR
+ // entry, since both forms of OSR are expensive. OSR entry is
+ // particularly expensive.
+ //
+ // (d) Frequent OSR failures, even those that do not result in the code
+ // running in a hot loop, result in recompilation getting triggered.
+ //
+ // To ensure (c), we'd like to set the execute counter to
+ // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
+ // (a) and (b), since then every OSR exit would delay the opportunity for
+ // every call frame to perform OSR entry. Essentially, if OSR exit happens
+ // frequently and the function has few loops, then the counter will never
+ // become non-negative and OSR entry will never be triggered. OSR entry
+ // will only happen if a loop gets hot in the old JIT, which does a pretty
+ // good job of ensuring (a) and (b). But that doesn't take care of (d),
+ // since each speculation failure would reset the execute counter.
+ // So we check here if the number of speculation failures is significantly
+ // larger than the number of successes (we want 90% success rate), and if
+ // there have been a large enough number of failures. If so, we set the
+ // counter to 0; otherwise we set the counter to
+ // counterValueForOptimizeAfterWarmUp().
+
+ m_jit.add32(AssemblyHelpers::Imm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
+
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);
+
+ m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
+ m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
+ m_jit.add32(AssemblyHelpers::Imm32(1), GPRInfo::regT2);
+ m_jit.add32(AssemblyHelpers::Imm32(-1), GPRInfo::regT1);
+ m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
+ m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
+
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);
+
+ AssemblyHelpers::Jump fewFails = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::Imm32(m_jit.codeBlock()->largeFailCountThreshold()));
+ m_jit.mul32(AssemblyHelpers::Imm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);
+
+ AssemblyHelpers::Jump lowFailRate = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
+
+ // Reoptimize as soon as possible.
+ m_jit.store32(AssemblyHelpers::Imm32(Options::executionCounterValueForOptimizeNextInvocation), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
+ AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
+
+ fewFails.link(&m_jit);
+ lowFailRate.link(&m_jit);
+
+ m_jit.store32(AssemblyHelpers::Imm32(m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp()), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
+
+ doneAdjusting.link(&m_jit);
+
+ // 14) Load the result of the last bytecode operation into regT0.
+
+ if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
+ m_jit.loadPtr(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
+
+ // 15) Fix call frame(s).
+
+ ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock));
+
+ for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
+ InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
+ CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
+ CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
+ Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
+ unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
+ BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), returnBytecodeIndex);
+
+ ASSERT(mapping);
+ ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);
+
+ void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);
+
+ GPRReg callerFrameGPR;
+ if (inlineCallFrame->caller.inlineCallFrame) {
+ m_jit.addPtr(AssemblyHelpers::Imm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
+ callerFrameGPR = GPRInfo::regT3;
+ } else
+ callerFrameGPR = GPRInfo::callFrameRegister;
+
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CodeBlock)));
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
+ m_jit.storePtr(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ReturnPC)));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ArgumentCount)));
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
+ }
+
+ if (exit.m_codeOrigin.inlineCallFrame)
+ m_jit.addPtr(AssemblyHelpers::Imm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);
+
+ // 16) Jump into the corresponding baseline JIT code.
+
+ CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
+ Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);
+
+ BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex);
+
+ ASSERT(mapping);
+ ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);
+
+ void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);
+
+ ASSERT(GPRInfo::regT1 != GPRInfo::cachedResultRegister);
+
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT1);
+
+ m_jit.jump(GPRInfo::regT1);
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "-> %p\n", jumpTarget);
+#endif
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT) && USE(JSVALUE64)
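
The jump-target lookups in step 15 (once per inline call frame) and step 16 above both binary-search a per-code-block vector that maps bytecode offsets to machine-code offsets. A minimal standalone sketch of that lookup, using std::lower_bound in place of WTF's binarySearch and a stand-in Mapping struct rather than the real BytecodeAndMachineOffset type:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Stand-in for BytecodeAndMachineOffset; field names are illustrative only.
    struct Mapping {
        unsigned bytecodeIndex;
        uint32_t machineCodeOffset;
    };

    // Returns the machine-code offset for an exact bytecode index, assuming the
    // vector is sorted by bytecodeIndex, as the decoded code map above is.
    uint32_t machineOffsetFor(const std::vector<Mapping>& map, unsigned bytecodeIndex)
    {
        auto it = std::lower_bound(
            map.begin(), map.end(), bytecodeIndex,
            [](const Mapping& entry, unsigned key) { return entry.bytecodeIndex < key; });
        assert(it != map.end() && it->bytecodeIndex == bytecodeIndex);
        return it->machineCodeOffset;
    }

    int main()
    {
        std::vector<Mapping> map = { { 0, 0x10 }, { 5, 0x48 }, { 9, 0x7c } };
        assert(machineOffsetFor(map, 5) == 0x48); // analogous to the exit's return-PC lookup
        return 0;
    }
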
diff --git a/Source/JavaScriptCore/dfg/DFGOperands.h b/Source/JavaScriptCore/dfg/DFGOperands.h
new file mode 100644
index 000000000..9ce43119c
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGOperands.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGOperands_h
+#define DFGOperands_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "CallFrame.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace DFG {
+
+// argument 0 is 'this'.
+inline bool operandIsArgument(int operand) { return operand < 0; }
+inline int operandToArgument(int operand) { return -operand + CallFrame::thisArgumentOffset(); }
+inline int argumentToOperand(int argument) { return -argument + CallFrame::thisArgumentOffset(); }
+
+template<typename T> struct OperandValueTraits;
+
+template<typename T>
+struct OperandValueTraits {
+ static T defaultValue() { return T(); }
+ static void dump(const T& value, FILE* out) { value.dump(out); }
+};
+
+template<typename T, typename Traits = OperandValueTraits<T> >
+class Operands {
+public:
+ Operands() { }
+
+ explicit Operands(size_t numArguments, size_t numLocals)
+ {
+ m_arguments.fill(Traits::defaultValue(), numArguments);
+ m_locals.fill(Traits::defaultValue(), numLocals);
+ }
+
+ size_t numberOfArguments() const { return m_arguments.size(); }
+ size_t numberOfLocals() const { return m_locals.size(); }
+
+ T& argument(size_t idx) { return m_arguments[idx]; }
+ const T& argument(size_t idx) const { return m_arguments[idx]; }
+
+ T& local(size_t idx) { return m_locals[idx]; }
+ const T& local(size_t idx) const { return m_locals[idx]; }
+
+ void ensureLocals(size_t size)
+ {
+ if (size <= m_locals.size())
+ return;
+
+ size_t oldSize = m_locals.size();
+ m_locals.resize(size);
+ for (size_t i = oldSize; i < m_locals.size(); ++i)
+ m_locals[i] = Traits::defaultValue();
+ }
+
+ void setLocal(size_t idx, const T& value)
+ {
+ ensureLocals(idx + 1);
+
+ m_locals[idx] = value;
+ }
+
+ T getLocal(size_t idx)
+ {
+ if (idx >= m_locals.size())
+ return Traits::defaultValue();
+ return m_locals[idx];
+ }
+
+ void setArgumentFirstTime(size_t idx, const T& value)
+ {
+ ASSERT(m_arguments[idx] == Traits::defaultValue());
+ argument(idx) = value;
+ }
+
+ void setLocalFirstTime(size_t idx, const T& value)
+ {
+ ASSERT(idx >= m_locals.size() || m_locals[idx] == Traits::defaultValue());
+ setLocal(idx, value);
+ }
+
+ T& operand(int operand)
+ {
+ if (operandIsArgument(operand)) {
+ int argument = operandToArgument(operand);
+ return m_arguments[argument];
+ }
+
+ return m_locals[operand];
+ }
+
+ const T& operand(int operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); }
+
+ void setOperand(int operand, const T& value)
+ {
+ if (operandIsArgument(operand)) {
+ int argument = operandToArgument(operand);
+ m_arguments[argument] = value;
+ return;
+ }
+
+ setLocal(operand, value);
+ }
+
+ void clear()
+ {
+ for (size_t i = 0; i < m_arguments.size(); ++i)
+ m_arguments[i] = Traits::defaultValue();
+ for (size_t i = 0; i < m_locals.size(); ++i)
+ m_locals[i] = Traits::defaultValue();
+ }
+
+private:
+ Vector<T, 8> m_arguments;
+ Vector<T, 16> m_locals;
+};
+
+template<typename T, typename Traits>
+void dumpOperands(Operands<T, Traits>& operands, FILE* out)
+{
+ for (size_t argument = 0; argument < operands.numberOfArguments(); ++argument) {
+ if (argument)
+ fprintf(out, " ");
+ Traits::dump(operands.argument(argument), out);
+ }
+ fprintf(out, " : ");
+ for (size_t local = 0; local < operands.numberOfLocals(); ++local) {
+ if (local)
+ fprintf(out, " ");
+ Traits::dump(operands.local(local), out);
+ }
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGOperands_h
+
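
Operands<T> above stores arguments and locals in separate vectors and dispatches on the sign of the operand: negative operands name arguments (argument 0 being 'this'), while non-negative operands index locals directly. A small standalone sketch of that mapping, using a hypothetical thisArgumentOffset constant in place of the real CallFrame::thisArgumentOffset() value:

    #include <cassert>
    #include <vector>

    // Hypothetical stand-in; the real offset depends on the call frame layout.
    static const int thisArgumentOffset = -7;

    inline bool operandIsArgument(int operand) { return operand < 0; }
    inline int operandToArgument(int operand) { return -operand + thisArgumentOffset; }
    inline int argumentToOperand(int argument) { return -argument + thisArgumentOffset; }

    int main()
    {
        std::vector<int> arguments(3, 0), locals(4, 0);

        int thisOperand = argumentToOperand(0);         // negative, so an argument
        assert(operandIsArgument(thisOperand));
        arguments[operandToArgument(thisOperand)] = 42; // writes argument 0 ('this')

        int localOperand = 2;                           // non-negative, so a local
        assert(!operandIsArgument(localOperand));
        locals[localOperand] = 7;                       // writes local 2

        assert(arguments[0] == 42 && locals[2] == 7);
        return 0;
    }
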
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp
new file mode 100644
index 000000000..7b4ef3f88
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp
@@ -0,0 +1,838 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGOperations.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+#include "DFGOSRExit.h"
+#include "DFGRepatch.h"
+#include "InlineASM.h"
+#include "Interpreter.h"
+#include "JSByteArray.h"
+#include "JSGlobalData.h"
+#include "Operations.h"
+
+#if CPU(X86_64)
+
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, register) \
+ asm( \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "mov (%rsp), %" STRINGIZE(register) "\n" \
+ "jmp " SYMBOL_STRING_RELOCATION(function##WithReturnAddress) "\n" \
+ );
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rsi)
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rcx)
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rcx)
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, r8)
+
+#elif CPU(X86)
+
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, offset) \
+ asm( \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "mov (%esp), %eax\n" \
+ "mov %eax, " STRINGIZE(offset) "(%esp)\n" \
+ "jmp " SYMBOL_STRING_RELOCATION(function##WithReturnAddress) "\n" \
+ );
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 8)
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 16)
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 20)
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 24)
+
+#elif COMPILER(GCC) && CPU(ARM_THUMB2)
+
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ ".thumb" "\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "cpy a2, lr" "\n" \
+ "b " SYMBOL_STRING_RELOCATION(function) "WithReturnAddress" "\n" \
+ );
+
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ ".thumb" "\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "cpy a4, lr" "\n" \
+ "b " SYMBOL_STRING_RELOCATION(function) "WithReturnAddress" "\n" \
+ );
+
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ ".thumb" "\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "str lr, [sp, #0]" "\n" \
+ "b " SYMBOL_STRING_RELOCATION(function) "WithReturnAddress" "\n" \
+ );
+
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function) \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ ".thumb" "\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "str lr, [sp, #4]" "\n" \
+ "b " SYMBOL_STRING_RELOCATION(function) "WithReturnAddress" "\n" \
+ );
+
+#endif
+
+#define P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+void* DFG_OPERATION function##WithReturnAddress(ExecState*, ReturnAddressPtr); \
+FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
+
+#define J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
+EncodedJSValue DFG_OPERATION function##WithReturnAddress(ExecState*, JSCell*, Identifier*, ReturnAddressPtr); \
+FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function)
+
+#define J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
+EncodedJSValue DFG_OPERATION function##WithReturnAddress(ExecState*, EncodedJSValue, Identifier*, ReturnAddressPtr); \
+FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function)
+
+#define V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function) \
+void DFG_OPERATION function##WithReturnAddress(ExecState*, EncodedJSValue, JSCell*, Identifier*, ReturnAddressPtr); \
+FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function)
+
+namespace JSC { namespace DFG {
+
+static inline void putByVal(ExecState* exec, JSValue baseValue, uint32_t index, JSValue value)
+{
+ JSGlobalData* globalData = &exec->globalData();
+
+ if (isJSArray(baseValue)) {
+        return value || 1.0 / value > 0.0;
+ if (array->canSetIndex(index)) {
+ array->setIndex(*globalData, index, value);
+ return;
+ }
+
+ JSArray::putByIndex(array, exec, index, value);
+ return;
+ }
+
+ if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(index)) {
+ JSByteArray* byteArray = asByteArray(baseValue);
+ // FIXME: the JITstub used to relink this to an optimized form!
+ if (value.isInt32()) {
+ byteArray->setIndex(index, value.asInt32());
+ return;
+ }
+
+ if (value.isNumber()) {
+ byteArray->setIndex(index, value.asNumber());
+ return;
+ }
+ }
+
+ baseValue.put(exec, index, value);
+}
+
+template<bool strict>
+ALWAYS_INLINE static void DFG_OPERATION operationPutByValInternal(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue)
+{
+ JSValue baseValue = JSValue::decode(encodedBase);
+ JSValue property = JSValue::decode(encodedProperty);
+ JSValue value = JSValue::decode(encodedValue);
+
+ if (LIKELY(property.isUInt32())) {
+ putByVal(exec, baseValue, property.asUInt32(), value);
+ return;
+ }
+
+ if (property.isDouble()) {
+ double propertyAsDouble = property.asDouble();
+ uint32_t propertyAsUInt32 = static_cast<uint32_t>(propertyAsDouble);
+ if (propertyAsDouble == propertyAsUInt32) {
+ putByVal(exec, baseValue, propertyAsUInt32, value);
+ return;
+ }
+ }
+
+ JSGlobalData* globalData = &exec->globalData();
+
+ // Don't put to an object if toString throws an exception.
+ Identifier ident(exec, property.toString(exec));
+ if (!globalData->exception) {
+ PutPropertySlot slot(strict);
+ baseValue.put(exec, ident, value, slot);
+ }
+}
+
+extern "C" {
+
+EncodedJSValue DFG_OPERATION operationConvertThis(ExecState* exec, EncodedJSValue encodedOp)
+{
+ return JSValue::encode(JSValue::decode(encodedOp).toThisObject(exec));
+}
+
+inline JSCell* createThis(ExecState* exec, JSCell* prototype, JSFunction* constructor)
+{
+#if !ASSERT_DISABLED
+ ConstructData constructData;
+ ASSERT(constructor->methodTable()->getConstructData(constructor, constructData) == ConstructTypeJS);
+#endif
+
+ JSGlobalData& globalData = exec->globalData();
+
+ Structure* structure;
+ if (prototype->isObject())
+ structure = asObject(prototype)->inheritorID(globalData);
+ else
+ structure = constructor->scope()->globalObject->emptyObjectStructure();
+
+ return constructEmptyObject(exec, structure);
+}
+
+JSCell* DFG_OPERATION operationCreateThis(ExecState* exec, JSCell* prototype)
+{
+ return createThis(exec, prototype, asFunction(exec->callee()));
+}
+
+JSCell* DFG_OPERATION operationCreateThisInlined(ExecState* exec, JSCell* prototype, JSCell* constructor)
+{
+ return createThis(exec, prototype, static_cast<JSFunction*>(constructor));
+}
+
+JSCell* DFG_OPERATION operationNewObject(ExecState* exec)
+{
+ return constructEmptyObject(exec);
+}
+
+EncodedJSValue DFG_OPERATION operationValueAdd(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ JSValue op1 = JSValue::decode(encodedOp1);
+ JSValue op2 = JSValue::decode(encodedOp2);
+
+ return JSValue::encode(jsAdd(exec, op1, op2));
+}
+
+EncodedJSValue DFG_OPERATION operationValueAddNotNumber(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ JSValue op1 = JSValue::decode(encodedOp1);
+ JSValue op2 = JSValue::decode(encodedOp2);
+
+ ASSERT(!op1.isNumber() || !op2.isNumber());
+
+ if (op1.isString()) {
+ if (op2.isString())
+ return JSValue::encode(jsString(exec, asString(op1), asString(op2)));
+ return JSValue::encode(jsString(exec, asString(op1), op2.toPrimitiveString(exec)));
+ }
+
+ return JSValue::encode(jsAddSlowCase(exec, op1, op2));
+}
+
+static inline EncodedJSValue getByVal(ExecState* exec, JSCell* base, uint32_t index)
+{
+ // FIXME: the JIT used to handle these in compiled code!
+ if (isJSArray(base) && asArray(base)->canGetIndex(index))
+ return JSValue::encode(asArray(base)->getIndex(index));
+
+ // FIXME: the JITstub used to relink this to an optimized form!
+ if (isJSString(base) && asString(base)->canGetIndex(index))
+ return JSValue::encode(asString(base)->getIndex(exec, index));
+
+ // FIXME: the JITstub used to relink this to an optimized form!
+ if (isJSByteArray(base) && asByteArray(base)->canAccessIndex(index))
+ return JSValue::encode(asByteArray(base)->getIndex(exec, index));
+
+ return JSValue::encode(JSValue(base).get(exec, index));
+}
+
+EncodedJSValue DFG_OPERATION operationGetByVal(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty)
+{
+ JSValue baseValue = JSValue::decode(encodedBase);
+ JSValue property = JSValue::decode(encodedProperty);
+
+ if (LIKELY(baseValue.isCell())) {
+ JSCell* base = baseValue.asCell();
+
+ if (property.isUInt32()) {
+ return getByVal(exec, base, property.asUInt32());
+ } else if (property.isDouble()) {
+ double propertyAsDouble = property.asDouble();
+ uint32_t propertyAsUInt32 = static_cast<uint32_t>(propertyAsDouble);
+ if (propertyAsUInt32 == propertyAsDouble)
+ return getByVal(exec, base, propertyAsUInt32);
+ } else if (property.isString()) {
+ if (JSValue result = base->fastGetOwnProperty(exec, asString(property)->value(exec)))
+ return JSValue::encode(result);
+ }
+ }
+
+ Identifier ident(exec, property.toString(exec));
+ return JSValue::encode(baseValue.get(exec, ident));
+}
+
+EncodedJSValue DFG_OPERATION operationGetByValCell(ExecState* exec, JSCell* base, EncodedJSValue encodedProperty)
+{
+ JSValue property = JSValue::decode(encodedProperty);
+
+ if (property.isUInt32())
+ return getByVal(exec, base, property.asUInt32());
+ if (property.isDouble()) {
+ double propertyAsDouble = property.asDouble();
+ uint32_t propertyAsUInt32 = static_cast<uint32_t>(propertyAsDouble);
+ if (propertyAsUInt32 == propertyAsDouble)
+ return getByVal(exec, base, propertyAsUInt32);
+ } else if (property.isString()) {
+ if (JSValue result = base->fastGetOwnProperty(exec, asString(property)->value(exec)))
+ return JSValue::encode(result);
+ }
+
+ Identifier ident(exec, property.toString(exec));
+ return JSValue::encode(JSValue(base).get(exec, ident));
+}
+
+EncodedJSValue DFG_OPERATION operationGetById(ExecState* exec, EncodedJSValue base, Identifier* propertyName)
+{
+ JSValue baseValue = JSValue::decode(base);
+ PropertySlot slot(baseValue);
+ return JSValue::encode(baseValue.get(exec, *propertyName, slot));
+}
+
+J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(operationGetByIdBuildList);
+EncodedJSValue DFG_OPERATION operationGetByIdBuildListWithReturnAddress(ExecState* exec, EncodedJSValue base, Identifier* propertyName, ReturnAddressPtr returnAddress)
+{
+ JSValue baseValue = JSValue::decode(base);
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(exec, *propertyName, slot);
+
+ StructureStubInfo& stubInfo = exec->codeBlock()->getStubInfo(returnAddress);
+ dfgBuildGetByIDList(exec, baseValue, *propertyName, slot, stubInfo);
+
+ return JSValue::encode(result);
+}
+
+J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(operationGetByIdProtoBuildList);
+EncodedJSValue DFG_OPERATION operationGetByIdProtoBuildListWithReturnAddress(ExecState* exec, EncodedJSValue base, Identifier* propertyName, ReturnAddressPtr returnAddress)
+{
+ JSValue baseValue = JSValue::decode(base);
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(exec, *propertyName, slot);
+
+ StructureStubInfo& stubInfo = exec->codeBlock()->getStubInfo(returnAddress);
+ dfgBuildGetByIDProtoList(exec, baseValue, *propertyName, slot, stubInfo);
+
+ return JSValue::encode(result);
+}
+
+J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(operationGetByIdOptimize);
+EncodedJSValue DFG_OPERATION operationGetByIdOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue base, Identifier* propertyName, ReturnAddressPtr returnAddress)
+{
+ JSValue baseValue = JSValue::decode(base);
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(exec, *propertyName, slot);
+
+ StructureStubInfo& stubInfo = exec->codeBlock()->getStubInfo(returnAddress);
+ if (stubInfo.seen)
+ dfgRepatchGetByID(exec, baseValue, *propertyName, slot, stubInfo);
+ else
+ stubInfo.seen = true;
+
+ return JSValue::encode(result);
+}
+
+void DFG_OPERATION operationPutByValStrict(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue)
+{
+ operationPutByValInternal<true>(exec, encodedBase, encodedProperty, encodedValue);
+}
+
+void DFG_OPERATION operationPutByValNonStrict(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue)
+{
+ operationPutByValInternal<false>(exec, encodedBase, encodedProperty, encodedValue);
+}
+
+void DFG_OPERATION operationPutByValCellStrict(ExecState* exec, JSCell* cell, EncodedJSValue encodedProperty, EncodedJSValue encodedValue)
+{
+ operationPutByValInternal<true>(exec, JSValue::encode(cell), encodedProperty, encodedValue);
+}
+
+void DFG_OPERATION operationPutByValCellNonStrict(ExecState* exec, JSCell* cell, EncodedJSValue encodedProperty, EncodedJSValue encodedValue)
+{
+ operationPutByValInternal<false>(exec, JSValue::encode(cell), encodedProperty, encodedValue);
+}
+
+void DFG_OPERATION operationPutByValBeyondArrayBounds(ExecState* exec, JSArray* array, int32_t index, EncodedJSValue encodedValue)
+{
+ // We should only get here if index is outside the existing vector.
+ ASSERT(!array->canSetIndex(index));
+ JSArray::putByIndex(array, exec, index, JSValue::decode(encodedValue));
+}
+
+EncodedJSValue DFG_OPERATION operationArrayPush(ExecState* exec, EncodedJSValue encodedValue, JSArray* array)
+{
+ array->push(exec, JSValue::decode(encodedValue));
+ return JSValue::encode(jsNumber(array->length()));
+}
+
+EncodedJSValue DFG_OPERATION operationArrayPop(ExecState*, JSArray* array)
+{
+ return JSValue::encode(array->pop());
+}
+
+void DFG_OPERATION operationPutByIdStrict(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName)
+{
+ PutPropertySlot slot(true);
+ base->methodTable()->put(base, exec, *propertyName, JSValue::decode(encodedValue), slot);
+}
+
+void DFG_OPERATION operationPutByIdNonStrict(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName)
+{
+ PutPropertySlot slot(false);
+ base->methodTable()->put(base, exec, *propertyName, JSValue::decode(encodedValue), slot);
+}
+
+void DFG_OPERATION operationPutByIdDirectStrict(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName)
+{
+ PutPropertySlot slot(true);
+ JSValue(base).putDirect(exec, *propertyName, JSValue::decode(encodedValue), slot);
+}
+
+void DFG_OPERATION operationPutByIdDirectNonStrict(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName)
+{
+ PutPropertySlot slot(false);
+ JSValue(base).putDirect(exec, *propertyName, JSValue::decode(encodedValue), slot);
+}
+
+V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(operationPutByIdStrictOptimize);
+void DFG_OPERATION operationPutByIdStrictOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName, ReturnAddressPtr returnAddress)
+{
+ JSValue value = JSValue::decode(encodedValue);
+ JSValue baseValue(base);
+ PutPropertySlot slot(true);
+
+ baseValue.put(exec, *propertyName, value, slot);
+
+ StructureStubInfo& stubInfo = exec->codeBlock()->getStubInfo(returnAddress);
+ if (stubInfo.seen)
+ dfgRepatchPutByID(exec, baseValue, *propertyName, slot, stubInfo, NotDirect);
+ else
+ stubInfo.seen = true;
+}
+
+V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(operationPutByIdNonStrictOptimize);
+void DFG_OPERATION operationPutByIdNonStrictOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName, ReturnAddressPtr returnAddress)
+{
+ JSValue value = JSValue::decode(encodedValue);
+ JSValue baseValue(base);
+ PutPropertySlot slot(false);
+
+ baseValue.put(exec, *propertyName, value, slot);
+
+ StructureStubInfo& stubInfo = exec->codeBlock()->getStubInfo(returnAddress);
+ if (stubInfo.seen)
+ dfgRepatchPutByID(exec, baseValue, *propertyName, slot, stubInfo, NotDirect);
+ else
+ stubInfo.seen = true;
+}
+
+V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(operationPutByIdDirectStrictOptimize);
+void DFG_OPERATION operationPutByIdDirectStrictOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName, ReturnAddressPtr returnAddress)
+{
+ JSValue value = JSValue::decode(encodedValue);
+ JSValue baseValue(base);
+ PutPropertySlot slot(true);
+
+ baseValue.putDirect(exec, *propertyName, value, slot);
+
+ StructureStubInfo& stubInfo = exec->codeBlock()->getStubInfo(returnAddress);
+ if (stubInfo.seen)
+ dfgRepatchPutByID(exec, baseValue, *propertyName, slot, stubInfo, Direct);
+ else
+ stubInfo.seen = true;
+}
+
+V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(operationPutByIdDirectNonStrictOptimize);
+void DFG_OPERATION operationPutByIdDirectNonStrictOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName, ReturnAddressPtr returnAddress)
+{
+ JSValue value = JSValue::decode(encodedValue);
+ JSValue baseValue(base);
+ PutPropertySlot slot(false);
+
+ baseValue.putDirect(exec, *propertyName, value, slot);
+
+ StructureStubInfo& stubInfo = exec->codeBlock()->getStubInfo(returnAddress);
+ if (stubInfo.seen)
+ dfgRepatchPutByID(exec, baseValue, *propertyName, slot, stubInfo, Direct);
+ else
+ stubInfo.seen = true;
+}
+
+size_t DFG_OPERATION operationCompareLess(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ return jsLess<true>(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
+}
+
+size_t DFG_OPERATION operationCompareLessEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ return jsLessEq<true>(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
+}
+
+size_t DFG_OPERATION operationCompareGreater(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ return jsLess<false>(exec, JSValue::decode(encodedOp2), JSValue::decode(encodedOp1));
+}
+
+size_t DFG_OPERATION operationCompareGreaterEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ return jsLessEq<false>(exec, JSValue::decode(encodedOp2), JSValue::decode(encodedOp1));
+}
+
+size_t DFG_OPERATION operationCompareEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ return JSValue::equalSlowCaseInline(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
+}
+
+size_t DFG_OPERATION operationCompareStrictEqCell(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ JSValue op1 = JSValue::decode(encodedOp1);
+ JSValue op2 = JSValue::decode(encodedOp2);
+
+ ASSERT(op1.isCell());
+ ASSERT(op2.isCell());
+
+ return JSValue::strictEqualSlowCaseInline(exec, op1, op2);
+}
+
+size_t DFG_OPERATION operationCompareStrictEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ return JSValue::strictEqual(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
+}
+
+EncodedJSValue DFG_OPERATION getHostCallReturnValue();
+EncodedJSValue DFG_OPERATION getHostCallReturnValueWithExecState(ExecState*);
+
+#if CPU(X86_64)
+asm (
+".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
+ "mov -40(%r13), %r13\n"
+ "mov %r13, %rdi\n"
+ "jmp " SYMBOL_STRING_RELOCATION(getHostCallReturnValueWithExecState) "\n"
+);
+#elif CPU(X86)
+asm (
+".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
+ "mov -40(%edi), %edi\n"
+ "mov %edi, 4(%esp)\n"
+ "jmp " SYMBOL_STRING_RELOCATION(getHostCallReturnValueWithExecState) "\n"
+);
+#elif CPU(ARM_THUMB2)
+asm (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+HIDE_SYMBOL(getHostCallReturnValue) "\n"
+".thumb" "\n"
+".thumb_func " THUMB_FUNC_PARAM(getHostCallReturnValue) "\n"
+SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
+ "ldr r5, [r5, #-40]" "\n"
+ "cpy r0, r5" "\n"
+ "b " SYMBOL_STRING_RELOCATION(getHostCallReturnValueWithExecState) "\n"
+);
+#endif
+
+EncodedJSValue DFG_OPERATION getHostCallReturnValueWithExecState(ExecState* exec)
+{
+ return JSValue::encode(exec->globalData().hostCallReturnValue);
+}
+
+static void* handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializationKind kind)
+{
+ ExecState* exec = execCallee->callerFrame();
+ JSGlobalData* globalData = &exec->globalData();
+
+ execCallee->setScopeChain(exec->scopeChain());
+ execCallee->setCodeBlock(0);
+
+ if (kind == CodeForCall) {
+ CallData callData;
+ CallType callType = getCallData(callee, callData);
+
+ ASSERT(callType != CallTypeJS);
+
+ if (callType == CallTypeHost) {
+ globalData->hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
+ if (globalData->exception)
+ return 0;
+
+ return reinterpret_cast<void*>(getHostCallReturnValue);
+ }
+
+ ASSERT(callType == CallTypeNone);
+ exec->globalData().exception = createNotAFunctionError(exec, callee);
+ return 0;
+ }
+
+ ASSERT(kind == CodeForConstruct);
+
+ ConstructData constructData;
+ ConstructType constructType = getConstructData(callee, constructData);
+
+ ASSERT(constructType != ConstructTypeJS);
+
+ if (constructType == ConstructTypeHost) {
+ globalData->hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
+ if (globalData->exception)
+ return 0;
+
+ return reinterpret_cast<void*>(getHostCallReturnValue);
+ }
+
+ ASSERT(constructType == ConstructTypeNone);
+ exec->globalData().exception = createNotAConstructorError(exec, callee);
+ return 0;
+}
+
+inline void* linkFor(ExecState* execCallee, ReturnAddressPtr returnAddress, CodeSpecializationKind kind)
+{
+ ExecState* exec = execCallee->callerFrame();
+ JSGlobalData* globalData = &exec->globalData();
+ JSValue calleeAsValue = execCallee->calleeAsValue();
+ JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
+ if (!calleeAsFunctionCell)
+ return handleHostCall(execCallee, calleeAsValue, kind);
+
+ JSFunction* callee = asFunction(calleeAsFunctionCell);
+ execCallee->setScopeChain(callee->scopeUnchecked());
+ ExecutableBase* executable = callee->executable();
+
+ MacroAssemblerCodePtr codePtr;
+ CodeBlock* codeBlock = 0;
+ if (executable->isHostFunction())
+ codePtr = executable->generatedJITCodeFor(kind).addressForCall();
+ else {
+ FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
+ JSObject* error = functionExecutable->compileFor(execCallee, callee->scope(), kind);
+ if (error) {
+ globalData->exception = createStackOverflowError(exec);
+ return 0;
+ }
+ codeBlock = &functionExecutable->generatedBytecodeFor(kind);
+ if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->m_numParameters))
+ codePtr = functionExecutable->generatedJITCodeWithArityCheckFor(kind);
+ else
+ codePtr = functionExecutable->generatedJITCodeFor(kind).addressForCall();
+ }
+ CallLinkInfo& callLinkInfo = exec->codeBlock()->getCallLinkInfo(returnAddress);
+ if (!callLinkInfo.seenOnce())
+ callLinkInfo.setSeen();
+ else
+ dfgLinkFor(execCallee, callLinkInfo, codeBlock, callee, codePtr, kind);
+ return codePtr.executableAddress();
+}
+
+P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(operationLinkCall);
+void* DFG_OPERATION operationLinkCallWithReturnAddress(ExecState* execCallee, ReturnAddressPtr returnAddress)
+{
+ return linkFor(execCallee, returnAddress, CodeForCall);
+}
+
+P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(operationLinkConstruct);
+void* DFG_OPERATION operationLinkConstructWithReturnAddress(ExecState* execCallee, ReturnAddressPtr returnAddress)
+{
+ return linkFor(execCallee, returnAddress, CodeForConstruct);
+}
+
+inline void* virtualFor(ExecState* execCallee, CodeSpecializationKind kind)
+{
+ ExecState* exec = execCallee->callerFrame();
+ JSValue calleeAsValue = execCallee->calleeAsValue();
+ JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
+ if (UNLIKELY(!calleeAsFunctionCell))
+ return handleHostCall(execCallee, calleeAsValue, kind);
+
+ JSFunction* function = asFunction(calleeAsFunctionCell);
+ execCallee->setScopeChain(function->scopeUnchecked());
+ ExecutableBase* executable = function->executable();
+ if (UNLIKELY(!executable->hasJITCodeFor(kind))) {
+ FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
+ JSObject* error = functionExecutable->compileFor(execCallee, function->scope(), kind);
+ if (error) {
+ exec->globalData().exception = error;
+ return 0;
+ }
+ }
+ return executable->generatedJITCodeWithArityCheckFor(kind).executableAddress();
+}
+
+void* DFG_OPERATION operationVirtualCall(ExecState* execCallee)
+{
+ return virtualFor(execCallee, CodeForCall);
+}
+
+void* DFG_OPERATION operationVirtualConstruct(ExecState* execCallee)
+{
+ return virtualFor(execCallee, CodeForConstruct);
+}
+
+EncodedJSValue DFG_OPERATION operationResolve(ExecState* exec, Identifier* propertyName)
+{
+ ScopeChainNode* scopeChain = exec->scopeChain();
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+ ASSERT(iter != end);
+
+ do {
+ JSObject* record = iter->get();
+ PropertySlot slot(record);
+ if (record->getPropertySlot(exec, *propertyName, slot))
+ return JSValue::encode(slot.getValue(exec, *propertyName));
+ } while (++iter != end);
+
+ return throwVMError(exec, createUndefinedVariableError(exec, *propertyName));
+}
+
+EncodedJSValue DFG_OPERATION operationResolveBase(ExecState* exec, Identifier* propertyName)
+{
+ return JSValue::encode(resolveBase(exec, *propertyName, exec->scopeChain(), false));
+}
+
+EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState* exec, Identifier* propertyName)
+{
+ JSValue base = resolveBase(exec, *propertyName, exec->scopeChain(), true);
+ if (!base)
+ throwError(exec, createErrorForInvalidGlobalAssignment(exec, propertyName->ustring()));
+ return JSValue::encode(base);
+}
+
+EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState* exec, GlobalResolveInfo* resolveInfo, Identifier* propertyName)
+{
+ JSGlobalObject* globalObject = exec->lexicalGlobalObject();
+
+ PropertySlot slot(globalObject);
+ if (globalObject->getPropertySlot(exec, *propertyName, slot)) {
+ JSValue result = slot.getValue(exec, *propertyName);
+
+ if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
+ resolveInfo->structure.set(exec->globalData(), exec->codeBlock()->ownerExecutable(), globalObject->structure());
+ resolveInfo->offset = slot.cachedOffset();
+ }
+
+ return JSValue::encode(result);
+ }
+
+ return throwVMError(exec, createUndefinedVariableError(exec, *propertyName));
+}
+
+EncodedJSValue DFG_OPERATION operationToPrimitive(ExecState* exec, EncodedJSValue value)
+{
+ return JSValue::encode(JSValue::decode(value).toPrimitive(exec));
+}
+
+EncodedJSValue DFG_OPERATION operationStrCat(ExecState* exec, void* start, size_t size)
+{
+ return JSValue::encode(jsString(exec, static_cast<Register*>(start), size));
+}
+
+EncodedJSValue DFG_OPERATION operationNewArray(ExecState* exec, void* start, size_t size)
+{
+ return JSValue::encode(constructArray(exec, static_cast<JSValue*>(start), size));
+}
+
+EncodedJSValue DFG_OPERATION operationNewArrayBuffer(ExecState* exec, size_t start, size_t size)
+{
+ return JSValue::encode(constructArray(exec, exec->codeBlock()->constantBuffer(start), size));
+}
+
+EncodedJSValue DFG_OPERATION operationNewRegexp(ExecState* exec, void* regexpPtr)
+{
+ RegExp* regexp = static_cast<RegExp*>(regexpPtr);
+ if (!regexp->isValid()) {
+ throwError(exec, createSyntaxError(exec, "Invalid flags supplied to RegExp constructor."));
+ return JSValue::encode(jsUndefined());
+ }
+
+ return JSValue::encode(RegExpObject::create(exec->globalData(), exec->lexicalGlobalObject(), exec->lexicalGlobalObject()->regExpStructure(), regexp));
+}
+
+DFGHandlerEncoded DFG_OPERATION lookupExceptionHandler(ExecState* exec, uint32_t callIndex)
+{
+ JSValue exceptionValue = exec->exception();
+ ASSERT(exceptionValue);
+
+ unsigned vPCIndex = exec->codeBlock()->bytecodeOffsetForCallAtIndex(callIndex);
+ HandlerInfo* handler = exec->globalData().interpreter->throwException(exec, exceptionValue, vPCIndex);
+
+ void* catchRoutine = handler ? handler->nativeCode.executableAddress() : (void*)ctiOpThrowNotCaught;
+ ASSERT(catchRoutine);
+ return dfgHandlerEncoded(exec, catchRoutine);
+}
+
+double DFG_OPERATION dfgConvertJSValueToNumber(ExecState* exec, EncodedJSValue value)
+{
+ return JSValue::decode(value).toNumber(exec);
+}
+
+size_t DFG_OPERATION dfgConvertJSValueToInt32(ExecState* exec, EncodedJSValue value)
+{
+ // toInt32/toUInt32 return the same value; we want the value zero extended to fill the register.
+ return JSValue::decode(value).toUInt32(exec);
+}
+
+size_t DFG_OPERATION dfgConvertJSValueToBoolean(ExecState* exec, EncodedJSValue encodedOp)
+{
+ return JSValue::decode(encodedOp).toBoolean(exec);
+}
+
+#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
+void DFG_OPERATION debugOperationPrintSpeculationFailure(ExecState*, void* debugInfoRaw)
+{
+ SpeculationFailureDebugInfo* debugInfo = static_cast<SpeculationFailureDebugInfo*>(debugInfoRaw);
+ CodeBlock* codeBlock = debugInfo->codeBlock;
+ CodeBlock* alternative = codeBlock->alternative();
+ printf("Speculation failure in %p at @%u with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, success/fail %u/%u\n", codeBlock, debugInfo->nodeIndex, alternative ? alternative->executeCounter() : 0, alternative ? alternative->reoptimizationRetryCounter() : 0, alternative ? alternative->optimizationDelayCounter() : 0, codeBlock->speculativeSuccessCounter(), codeBlock->speculativeFailCounter());
+}
+#endif
+
+} // extern "C"
+} } // namespace JSC::DFG
+
+#endif
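
The FUNCTION_WRAPPER_WITH_RETURN_ADDRESS thunks above exist so that a slow-path helper can see the return address of the JIT call that invoked it, which is the key later used to find the StructureStubInfo or CallLinkInfo to patch, delivered as an ordinary extra argument under each calling convention. As a rough GCC/Clang-only sketch of the underlying idea (not how the real wrappers are built), a plain C++ function can observe the same address through a compiler builtin:

    #include <cstdio>

    // Illustration only: key slow-path behavior off the caller's resume address.
    // The real DFG wrappers capture this address in a small asm thunk per ABI and
    // forward it as an explicit ReturnAddressPtr argument instead.
    __attribute__((noinline)) void slowPathHelperSketch()
    {
        void* returnAddress = __builtin_return_address(0);
        std::printf("helper invoked from %p\n", returnAddress);
    }

    int main()
    {
        slowPathHelperSketch(); // prints the address just past this call site
        return 0;
    }
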
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h
new file mode 100644
index 000000000..b4121dc21
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGOperations.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGOperations_h
+#define DFGOperations_h
+
+#if ENABLE(DFG_JIT)
+
+#include <dfg/DFGJITCompiler.h>
+
+namespace JSC {
+
+struct GlobalResolveInfo;
+
+namespace DFG {
+
+enum PutKind { Direct, NotDirect };
+
+extern "C" {
+
+#if CALLING_CONVENTION_IS_STDCALL
+#define DFG_OPERATION CDECL
+#else
+#define DFG_OPERATION
+#endif
+
+// These typedefs provide typechecking when generating calls out to helper routines;
+// this helps prevent calling a helper routine with the wrong arguments!
+/*
+ Key:
+ V: void
+ J: JSValue
+ P: pointer (void*)
+ C: JSCell*
+ A: JSArray*
+ S: size_t
+ Z: int32_t
+ D: double
+ I: Identifier*
+ G: GlobalResolveInfo*
+*/
+typedef int32_t DFG_OPERATION (*Z_DFGOperation_D)(double);
+typedef JSCell* DFG_OPERATION (*C_DFGOperation_E)(ExecState*);
+typedef JSCell* DFG_OPERATION (*C_DFGOperation_EC)(ExecState*, JSCell*);
+typedef JSCell* DFG_OPERATION (*C_DFGOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EA)(ExecState*, JSArray*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJA)(ExecState*, EncodedJSValue, JSArray*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECJ)(ExecState*, JSCell*, EncodedJSValue);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJ)(ExecState*, EncodedJSValue);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJP)(ExecState*, EncodedJSValue, void*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECI)(ExecState*, JSCell*, Identifier*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJI)(ExecState*, EncodedJSValue, Identifier*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EP)(ExecState*, void*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EPP)(ExecState*, void*, void*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EGI)(ExecState*, GlobalResolveInfo*, Identifier*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EPS)(ExecState*, void*, size_t);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ESS)(ExecState*, size_t, size_t);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EI)(ExecState*, Identifier*);
+typedef size_t DFG_OPERATION (*S_DFGOperation_EJ)(ExecState*, EncodedJSValue);
+typedef size_t DFG_OPERATION (*S_DFGOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
+typedef void DFG_OPERATION (*V_DFGOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
+typedef void DFG_OPERATION (*V_DFGOperation_ECJJ)(ExecState*, JSCell*, EncodedJSValue, EncodedJSValue);
+typedef void DFG_OPERATION (*V_DFGOperation_EJPP)(ExecState*, EncodedJSValue, EncodedJSValue, void*);
+typedef void DFG_OPERATION (*V_DFGOperation_EJCI)(ExecState*, EncodedJSValue, JSCell*, Identifier*);
+typedef void DFG_OPERATION (*V_DFGOperation_EPZJ)(ExecState*, void*, int32_t, EncodedJSValue);
+typedef void DFG_OPERATION (*V_DFGOperation_EAZJ)(ExecState*, JSArray*, int32_t, EncodedJSValue);
+typedef double DFG_OPERATION (*D_DFGOperation_DD)(double, double);
+typedef double DFG_OPERATION (*D_DFGOperation_EJ)(ExecState*, EncodedJSValue);
+typedef void* DFG_OPERATION (*P_DFGOperation_E)(ExecState*);
+
+// These routines provide callbacks out to C++ implementations of operations too complex to JIT.
+JSCell* DFG_OPERATION operationNewObject(ExecState*);
+JSCell* DFG_OPERATION operationCreateThis(ExecState*, JSCell* encodedOp1);
+JSCell* DFG_OPERATION operationCreateThisInlined(ExecState*, JSCell* encodedOp1, JSCell* constructor);
+EncodedJSValue DFG_OPERATION operationConvertThis(ExecState*, EncodedJSValue encodedOp1);
+EncodedJSValue DFG_OPERATION operationValueAdd(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+EncodedJSValue DFG_OPERATION operationValueAddNotNumber(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+EncodedJSValue DFG_OPERATION operationGetByVal(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedProperty);
+EncodedJSValue DFG_OPERATION operationGetByValCell(ExecState*, JSCell*, EncodedJSValue encodedProperty);
+EncodedJSValue DFG_OPERATION operationGetById(ExecState*, EncodedJSValue, Identifier*);
+EncodedJSValue DFG_OPERATION operationGetByIdBuildList(ExecState*, EncodedJSValue, Identifier*);
+EncodedJSValue DFG_OPERATION operationGetByIdProtoBuildList(ExecState*, EncodedJSValue, Identifier*);
+EncodedJSValue DFG_OPERATION operationGetByIdOptimize(ExecState*, EncodedJSValue, Identifier*);
+EncodedJSValue DFG_OPERATION operationGetMethodOptimize(ExecState*, EncodedJSValue, Identifier*);
+EncodedJSValue DFG_OPERATION operationResolve(ExecState*, Identifier*);
+EncodedJSValue DFG_OPERATION operationResolveBase(ExecState*, Identifier*);
+EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState*, Identifier*);
+EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState*, GlobalResolveInfo*, Identifier*);
+EncodedJSValue DFG_OPERATION operationToPrimitive(ExecState*, EncodedJSValue);
+EncodedJSValue DFG_OPERATION operationStrCat(ExecState*, void* start, size_t);
+EncodedJSValue DFG_OPERATION operationNewArray(ExecState*, void* start, size_t);
+EncodedJSValue DFG_OPERATION operationNewArrayBuffer(ExecState*, size_t, size_t);
+EncodedJSValue DFG_OPERATION operationNewRegexp(ExecState*, void*);
+void DFG_OPERATION operationPutByValStrict(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue);
+void DFG_OPERATION operationPutByValNonStrict(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue);
+void DFG_OPERATION operationPutByValCellStrict(ExecState*, JSCell*, EncodedJSValue encodedProperty, EncodedJSValue encodedValue);
+void DFG_OPERATION operationPutByValCellNonStrict(ExecState*, JSCell*, EncodedJSValue encodedProperty, EncodedJSValue encodedValue);
+void DFG_OPERATION operationPutByValBeyondArrayBounds(ExecState*, JSArray*, int32_t index, EncodedJSValue encodedValue);
+EncodedJSValue DFG_OPERATION operationArrayPush(ExecState*, EncodedJSValue encodedValue, JSArray*);
+EncodedJSValue DFG_OPERATION operationArrayPop(ExecState*, JSArray*);
+void DFG_OPERATION operationPutByIdStrict(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
+void DFG_OPERATION operationPutByIdNonStrict(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
+void DFG_OPERATION operationPutByIdDirectStrict(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
+void DFG_OPERATION operationPutByIdDirectNonStrict(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
+void DFG_OPERATION operationPutByIdStrictOptimize(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
+void DFG_OPERATION operationPutByIdNonStrictOptimize(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
+void DFG_OPERATION operationPutByIdDirectStrictOptimize(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
+void DFG_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
+// These comparisons return a boolean within a size_t such that the value is zero extended to fill the register.
+size_t DFG_OPERATION operationCompareLess(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+size_t DFG_OPERATION operationCompareLessEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+size_t DFG_OPERATION operationCompareGreater(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+size_t DFG_OPERATION operationCompareGreaterEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+size_t DFG_OPERATION operationCompareEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+size_t DFG_OPERATION operationCompareStrictEqCell(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+size_t DFG_OPERATION operationCompareStrictEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+void* DFG_OPERATION operationVirtualCall(ExecState*);
+void* DFG_OPERATION operationLinkCall(ExecState*);
+void* DFG_OPERATION operationVirtualConstruct(ExecState*);
+void* DFG_OPERATION operationLinkConstruct(ExecState*);
+
+// This method is used to look up an exception handler, keyed by faultLocation, which is
+// the return location from one of the calls out to one of the helper operations above.
+struct DFGHandler {
+ DFGHandler(ExecState* exec, void* handler)
+ {
+ u.s.exec = exec;
+ u.s.handler = handler;
+ }
+
+#if !CPU(X86_64)
+ uint64_t encoded()
+ {
+ COMPILE_ASSERT(sizeof(Union) == sizeof(uint64_t), DFGHandler_Union_is_64bit);
+ return u.encoded;
+ }
+#endif
+
+ union Union {
+ struct Struct {
+ ExecState* exec;
+ void* handler;
+ } s;
+ uint64_t encoded;
+ } u;
+};
+#if CPU(X86_64)
+typedef DFGHandler DFGHandlerEncoded;
+inline DFGHandlerEncoded dfgHandlerEncoded(ExecState* exec, void* handler)
+{
+ return DFGHandler(exec, handler);
+}
+#else
+typedef uint64_t DFGHandlerEncoded;
+inline DFGHandlerEncoded dfgHandlerEncoded(ExecState* exec, void* handler)
+{
+ return DFGHandler(exec, handler).encoded();
+}
+#endif
+DFGHandlerEncoded DFG_OPERATION lookupExceptionHandler(ExecState*, uint32_t);
+
+// These operations implement the implicitly called ToInt32, ToNumber, and ToBoolean conversions from ES5.
+double DFG_OPERATION dfgConvertJSValueToNumber(ExecState*, EncodedJSValue);
+// This conversion returns an int32_t within a size_t such that the value is zero extended to fill the register.
+size_t DFG_OPERATION dfgConvertJSValueToInt32(ExecState*, EncodedJSValue);
+size_t DFG_OPERATION dfgConvertJSValueToBoolean(ExecState*, EncodedJSValue);
+
+#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
+void DFG_OPERATION debugOperationPrintSpeculationFailure(ExecState*, void*);
+#endif
+
+} // extern "C"
+} } // namespace JSC::DFG
+
+#endif
+#endif
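
The single-letter key above spells out each helper's signature in its typedef name, return type first and then the argument types, so the call-generation code gets a compile-time check on the function pointer it hands to the assembler. A small standalone illustration of that naming scheme, using simplified stand-in types rather than the real ExecState and EncodedJSValue definitions, and a placeholder body rather than the real comparison:

    #include <cstddef>
    #include <cstdint>

    // Simplified stand-ins; the real types live elsewhere in JavaScriptCore.
    struct ExecState;
    typedef int64_t EncodedJSValue;

    // "S_DFGOperation_EJJ": returns size_t (S), takes ExecState* (E) and two
    // EncodedJSValues (J, J), the shape shared by the compare operations above.
    typedef size_t (*S_DFGOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);

    size_t operationCompareEqSketch(ExecState*, EncodedJSValue a, EncodedJSValue b)
    {
        return a == b; // placeholder; the real helper performs full JS equality
    }

    int main()
    {
        // Assigning through the typedef catches signature mismatches at compile time.
        S_DFGOperation_EJJ op = operationCompareEqSketch;
        return static_cast<int>(op(0, 1, 1)) - 1; // evaluates to 0
    }
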
diff --git a/Source/JavaScriptCore/dfg/DFGPropagator.cpp b/Source/JavaScriptCore/dfg/DFGPropagator.cpp
new file mode 100644
index 000000000..631e82830
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGPropagator.cpp
@@ -0,0 +1,1793 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGPropagator.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGAbstractState.h"
+#include "DFGGraph.h"
+#include "DFGScoreBoard.h"
+#include <wtf/FixedArray.h>
+
+namespace JSC { namespace DFG {
+
+class Propagator {
+public:
+ Propagator(Graph& graph, JSGlobalData& globalData, CodeBlock* codeBlock, CodeBlock* profiledBlock)
+ : m_graph(graph)
+ , m_globalData(globalData)
+ , m_codeBlock(codeBlock)
+ , m_profiledBlock(profiledBlock)
+ {
+ // Replacements are used to implement local common subexpression elimination.
+ m_replacements.resize(m_graph.size());
+
+ for (unsigned i = 0; i < m_graph.size(); ++i)
+ m_replacements[i] = NoNode;
+
+ for (unsigned i = 0; i < LastNodeId; ++i)
+ m_lastSeen[i] = NoNode;
+ }
+
+ void fixpoint()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ m_graph.dump(m_codeBlock);
+#endif
+
+ propagateArithNodeFlags();
+ propagatePredictions();
+ fixup();
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("Graph after propagation fixup:\n");
+ m_graph.dump(m_codeBlock);
+#endif
+
+ localCSE();
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("Graph after CSE:\n");
+ m_graph.dump(m_codeBlock);
+#endif
+
+ allocateVirtualRegisters();
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("Graph after virtual register allocation:\n");
+ m_graph.dump(m_codeBlock);
+#endif
+
+ globalCFA();
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Graph after propagation:\n");
+ m_graph.dump(m_codeBlock);
+#endif
+ }
+
+private:
+ bool isNotNegZero(NodeIndex nodeIndex)
+ {
+ if (!m_graph.isNumberConstant(m_codeBlock, nodeIndex))
+ return false;
+ double value = m_graph.valueOfNumberConstant(m_codeBlock, nodeIndex);
+ return !value && 1.0 / value < 0.0;
+ }
+
+ bool isNotZero(NodeIndex nodeIndex)
+ {
+ if (!m_graph.isNumberConstant(m_codeBlock, nodeIndex))
+ return false;
+ return !!m_graph.valueOfNumberConstant(m_codeBlock, nodeIndex);
+ }
+
+ void propagateArithNodeFlags(Node& node)
+ {
+ if (!node.shouldGenerate())
+ return;
+
+ NodeType op = node.op;
+ ArithNodeFlags flags = 0;
+
+ if (node.hasArithNodeFlags())
+ flags = node.rawArithNodeFlags();
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" %s @%u: %s ", Graph::opName(op), m_compileIndex, arithNodeFlagsAsString(flags));
+#endif
+
+ flags &= NodeUsedAsMask;
+
+ bool changed = false;
+
+ switch (op) {
+ case ValueToInt32:
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ case BitLShift:
+ case BitRShift:
+ case BitURShift: {
+ // These operations are perfectly happy with truncated integers,
+ // so we don't want to propagate anything.
+ break;
+ }
+
+ case ValueToNumber:
+ case ValueToDouble:
+ case UInt32ToNumber: {
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ break;
+ }
+
+ case ArithAdd:
+ case ValueAdd: {
+ if (isNotNegZero(node.child1()) || isNotNegZero(node.child2()))
+ flags &= ~NodeNeedsNegZero;
+
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
+ break;
+ }
+
+ case ArithSub: {
+ if (isNotZero(node.child1()) || isNotZero(node.child2()))
+ flags &= ~NodeNeedsNegZero;
+
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
+ break;
+ }
+
+ case ArithMul:
+ case ArithDiv: {
+ // As soon as a multiply happens, we can easily end up in the part
+ // of the double domain where the point at which you do truncation
+ // can change the outcome. So, ArithMul always checks for overflow
+ // no matter what, and always forces its inputs to check as well.
+
+ flags |= NodeUsedAsNumber | NodeNeedsNegZero;
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
+ break;
+ }
+
+ case ArithMin:
+ case ArithMax: {
+ flags |= NodeUsedAsNumber;
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
+ break;
+ }
+
+ case ArithAbs: {
+ flags &= ~NodeNeedsNegZero;
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ break;
+ }
+
+ case PutByVal: {
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags | NodeUsedAsNumber | NodeNeedsNegZero);
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags | NodeUsedAsNumber);
+ changed |= m_graph[node.child3()].mergeArithNodeFlags(flags | NodeUsedAsNumber | NodeNeedsNegZero);
+ break;
+ }
+
+ case GetByVal: {
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags | NodeUsedAsNumber | NodeNeedsNegZero);
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags | NodeUsedAsNumber);
+ break;
+ }
+
+ default:
+ flags |= NodeUsedAsNumber | NodeNeedsNegZero;
+ if (op & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
+ changed |= m_graph[m_graph.m_varArgChildren[childIdx]].mergeArithNodeFlags(flags);
+ } else {
+ if (node.child1() == NoNode)
+ break;
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ if (node.child2() == NoNode)
+ break;
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
+ if (node.child3() == NoNode)
+ break;
+ changed |= m_graph[node.child3()].mergeArithNodeFlags(flags);
+ }
+ break;
+ }
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("%s\n", changed ? "CHANGED" : "");
+#endif
+
+ m_changed |= changed;
+ }
+
+ void propagateArithNodeFlagsForward()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("Propagating arithmetic node flags forward [%u]\n", ++m_count);
+#endif
+ for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex)
+ propagateArithNodeFlags(m_graph[m_compileIndex]);
+ }
+
+ void propagateArithNodeFlagsBackward()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("Propagating arithmetic node flags backward [%u]\n", ++m_count);
+#endif
+ for (m_compileIndex = m_graph.size(); m_compileIndex-- > 0;)
+ propagateArithNodeFlags(m_graph[m_compileIndex]);
+ }
+
+ void propagateArithNodeFlags()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ m_count = 0;
+#endif
+ do {
+ m_changed = false;
+
+ // Up here we start with a backward pass because we suspect that to be
+ // more profitable.
+ propagateArithNodeFlagsBackward();
+ if (!m_changed)
+ break;
+
+ m_changed = false;
+ propagateArithNodeFlagsForward();
+ } while (m_changed);
+ }
+
+ bool setPrediction(PredictedType prediction)
+ {
+ ASSERT(m_graph[m_compileIndex].hasResult());
+
+ // setPrediction() is used when we know that there is no way that we can change
+ // our minds about what the prediction is going to be. There is no semantic
+ // difference between setPrediction() and mergePrediction() other than the
+ // increased checking to validate this property.
+ ASSERT(m_graph[m_compileIndex].prediction() == PredictNone || m_graph[m_compileIndex].prediction() == prediction);
+
+ return m_graph[m_compileIndex].predict(prediction);
+ }
+
+ bool mergePrediction(PredictedType prediction)
+ {
+ ASSERT(m_graph[m_compileIndex].hasResult());
+
+ return m_graph[m_compileIndex].predict(prediction);
+ }
+
+ void propagateNodePredictions(Node& node)
+ {
+ if (!node.shouldGenerate())
+ return;
+
+ NodeType op = node.op;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" %s @%u: ", Graph::opName(op), m_compileIndex);
+#endif
+
+ bool changed = false;
+
+ switch (op) {
+ case JSConstant:
+ case WeakJSConstant: {
+ changed |= setPrediction(predictionFromValue(m_graph.valueOfJSConstant(m_codeBlock, m_compileIndex)));
+ break;
+ }
+
+ case GetLocal: {
+ PredictedType prediction = node.variableAccessData()->prediction();
+ if (prediction)
+ changed |= mergePrediction(prediction);
+ break;
+ }
+
+ case SetLocal: {
+ changed |= node.variableAccessData()->predict(m_graph[node.child1()].prediction());
+ break;
+ }
+
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ case BitRShift:
+ case BitLShift:
+ case BitURShift:
+ case ValueToInt32: {
+ changed |= setPrediction(PredictInt32);
+ break;
+ }
+
+ case ArrayPop:
+ case ArrayPush: {
+ if (node.getHeapPrediction())
+ changed |= mergePrediction(node.getHeapPrediction());
+ break;
+ }
+
+ case StringCharCodeAt: {
+ changed |= mergePrediction(PredictInt32);
+ break;
+ }
+
+ case ArithMod: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ if (left && right) {
+ if (isInt32Prediction(mergePredictions(left, right)) && nodeCanSpeculateInteger(node.arithNodeFlags()))
+ changed |= mergePrediction(PredictInt32);
+ else
+ changed |= mergePrediction(PredictDouble);
+ }
+ break;
+ }
+
+ case UInt32ToNumber: {
+ if (nodeCanSpeculateInteger(node.arithNodeFlags()))
+ changed |= setPrediction(PredictInt32);
+ else
+ changed |= setPrediction(PredictNumber);
+ break;
+ }
+
+ case ValueToNumber: {
+ PredictedType prediction = m_graph[node.child1()].prediction();
+
+ if (prediction) {
+ if (!(prediction & PredictDouble) && nodeCanSpeculateInteger(node.arithNodeFlags()))
+ changed |= mergePrediction(PredictInt32);
+ else
+ changed |= mergePrediction(PredictNumber);
+ }
+
+ break;
+ }
+
+ case ValueAdd: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ if (left && right) {
+ if (isNumberPrediction(left) && isNumberPrediction(right)) {
+ if (isInt32Prediction(mergePredictions(left, right)) && nodeCanSpeculateInteger(node.arithNodeFlags()))
+ changed |= mergePrediction(PredictInt32);
+ else
+ changed |= mergePrediction(PredictDouble);
+ } else if (!(left & PredictNumber) || !(right & PredictNumber)) {
+ // left or right is definitely something other than a number.
+ changed |= mergePrediction(PredictString);
+ } else
+ changed |= mergePrediction(PredictString | PredictInt32 | PredictDouble);
+ }
+ break;
+ }
+
+ case ArithAdd:
+ case ArithSub:
+ case ArithMul:
+ case ArithMin:
+ case ArithMax:
+ case ArithDiv: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ if (left && right) {
+ if (isInt32Prediction(mergePredictions(left, right)) && nodeCanSpeculateInteger(node.arithNodeFlags()))
+ changed |= mergePrediction(PredictInt32);
+ else
+ changed |= mergePrediction(PredictDouble);
+ }
+ break;
+ }
+
+ case ArithSqrt: {
+ changed |= setPrediction(PredictDouble);
+ break;
+ }
+
+ case ArithAbs: {
+ PredictedType child = m_graph[node.child1()].prediction();
+ if (child) {
+ if (nodeCanSpeculateInteger(node.arithNodeFlags()))
+ changed |= mergePrediction(child);
+ else
+ changed |= setPrediction(PredictDouble);
+ }
+ break;
+ }
+
+ case LogicalNot:
+ case CompareLess:
+ case CompareLessEq:
+ case CompareGreater:
+ case CompareGreaterEq:
+ case CompareEq:
+ case CompareStrictEq:
+ case InstanceOf: {
+ changed |= setPrediction(PredictBoolean);
+ break;
+ }
+
+ case GetById: {
+ if (node.getHeapPrediction())
+ changed |= mergePrediction(node.getHeapPrediction());
+ else if (m_codeBlock->identifier(node.identifierNumber()) == m_globalData.propertyNames->length) {
+ // If there is no prediction from value profiles, check if we might be
+ // able to infer the type ourselves.
+ bool isArray = isArrayPrediction(m_graph[node.child1()].prediction());
+ bool isString = isStringPrediction(m_graph[node.child1()].prediction());
+ bool isByteArray = m_graph[node.child1()].shouldSpeculateByteArray();
+ bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
+ bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
+ bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
+ bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
+ bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
+ bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
+ bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
+ bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
+ if (isArray || isString || isByteArray || isInt8Array || isInt16Array || isInt32Array || isUint8Array || isUint16Array || isUint32Array || isFloat32Array || isFloat64Array)
+ changed |= mergePrediction(PredictInt32);
+ }
+ break;
+ }
+
+ case GetByVal: {
+ if (m_graph[node.child1()].shouldSpeculateUint32Array() || m_graph[node.child1()].shouldSpeculateFloat32Array() || m_graph[node.child1()].shouldSpeculateFloat64Array())
+ changed |= mergePrediction(PredictDouble);
+ else if (node.getHeapPrediction())
+ changed |= mergePrediction(node.getHeapPrediction());
+ break;
+ }
+
+ case GetPropertyStorage:
+ case GetIndexedPropertyStorage: {
+ changed |= setPrediction(PredictOther);
+ break;
+ }
+
+ case GetByOffset: {
+ if (node.getHeapPrediction())
+ changed |= mergePrediction(node.getHeapPrediction());
+ break;
+ }
+
+ case Call:
+ case Construct: {
+ if (node.getHeapPrediction())
+ changed |= mergePrediction(node.getHeapPrediction());
+ break;
+ }
+
+ case ConvertThis: {
+ PredictedType prediction = m_graph[node.child1()].prediction();
+ if (prediction) {
+ if (prediction & ~PredictObjectMask) {
+ prediction &= PredictObjectMask;
+ prediction = mergePredictions(prediction, PredictObjectOther);
+ }
+ changed |= mergePrediction(prediction);
+ }
+ break;
+ }
+
+ case GetGlobalVar: {
+ PredictedType prediction = m_graph.getGlobalVarPrediction(node.varNumber());
+ if (prediction)
+ changed |= mergePrediction(prediction);
+ break;
+ }
+
+ case PutGlobalVar: {
+ changed |= m_graph.predictGlobalVar(node.varNumber(), m_graph[node.child1()].prediction());
+ break;
+ }
+
+ case GetScopedVar:
+ case Resolve:
+ case ResolveBase:
+ case ResolveBaseStrictPut:
+ case ResolveGlobal: {
+ PredictedType prediction = node.getHeapPrediction();
+ if (prediction)
+ changed |= mergePrediction(prediction);
+ break;
+ }
+
+ case GetScopeChain: {
+ changed |= setPrediction(PredictCellOther);
+ break;
+ }
+
+ case GetCallee: {
+ changed |= setPrediction(PredictFunction);
+ break;
+ }
+
+ case CreateThis:
+ case NewObject: {
+ changed |= setPrediction(PredictFinalObject);
+ break;
+ }
+
+ case NewArray:
+ case NewArrayBuffer: {
+ changed |= setPrediction(PredictArray);
+ break;
+ }
+
+ case NewRegexp: {
+ changed |= setPrediction(PredictObjectOther);
+ break;
+ }
+
+ case StringCharAt:
+ case StrCat: {
+ changed |= setPrediction(PredictString);
+ break;
+ }
+
+ case ToPrimitive: {
+ PredictedType child = m_graph[node.child1()].prediction();
+ if (child) {
+ if (isObjectPrediction(child)) {
+ // I'd love to fold this case into the case below, but I can't, because
+ // removing PredictObjectMask from something that only has an object
+ // prediction and nothing else means we have an ill-formed PredictedType
+ // (strong predict-none). This should be killed once we remove all traces
+ // of static (aka weak) predictions.
+ changed |= mergePrediction(PredictString);
+ } else if (child & PredictObjectMask) {
+ // Objects get turned into strings. So if the input has hints of objectness,
+                    // the output will have hints of stringiness. For example, an input predicted
+                    // as (FinalObject | Int32) comes out predicted as (Int32 | String).
+ changed |= mergePrediction(mergePredictions(child & ~PredictObjectMask, PredictString));
+ } else
+ changed |= mergePrediction(child);
+ }
+ break;
+ }
+
+ case ValueToDouble:
+ case GetArrayLength:
+ case GetByteArrayLength:
+ case GetInt8ArrayLength:
+ case GetInt16ArrayLength:
+ case GetInt32ArrayLength:
+ case GetUint8ArrayLength:
+ case GetUint16ArrayLength:
+ case GetUint32ArrayLength:
+ case GetFloat32ArrayLength:
+ case GetFloat64ArrayLength:
+ case GetStringLength: {
+            // These nodes should never be visible at this stage of compilation. They are
+            // inserted by fixup(), which follows this phase.
+ ASSERT_NOT_REACHED();
+ break;
+ }
+
+#ifndef NDEBUG
+ // These get ignored because they don't return anything.
+ case PutScopedVar:
+ case DFG::Jump:
+ case Branch:
+ case Breakpoint:
+ case Return:
+ case CheckHasInstance:
+ case Phi:
+ case Flush:
+ case Throw:
+ case ThrowReferenceError:
+ case ForceOSRExit:
+ case SetArgument:
+ case PutByVal:
+ case PutByValAlias:
+ case PutById:
+ case PutByIdDirect:
+ case CheckStructure:
+ case CheckFunction:
+ case PutStructure:
+ case PutByOffset:
+ break;
+
+        // These get ignored because they don't do anything.
+ case Phantom:
+ case InlineStart:
+ case Nop:
+ break;
+#else
+ default:
+ break;
+#endif
+ }
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("%s\n", predictionToString(m_graph[m_compileIndex].prediction()));
+#endif
+
+ m_changed |= changed;
+ }
+
+ void propagatePredictionsForward()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("Propagating predictions forward [%u]\n", ++m_count);
+#endif
+ for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex)
+ propagateNodePredictions(m_graph[m_compileIndex]);
+ }
+
+ void propagatePredictionsBackward()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("Propagating predictions backward [%u]\n", ++m_count);
+#endif
+ for (m_compileIndex = m_graph.size(); m_compileIndex-- > 0;)
+ propagateNodePredictions(m_graph[m_compileIndex]);
+ }
+
+ void vote(NodeIndex nodeIndex, VariableAccessData::Ballot ballot)
+ {
+ switch (m_graph[nodeIndex].op) {
+ case ValueToNumber:
+ case ValueToDouble:
+ case ValueToInt32:
+ case UInt32ToNumber:
+ nodeIndex = m_graph[nodeIndex].child1();
+ break;
+ default:
+ break;
+ }
+
+ if (m_graph[nodeIndex].op == GetLocal)
+ m_graph[nodeIndex].variableAccessData()->vote(ballot);
+ }
+
+ void vote(Node& node, VariableAccessData::Ballot ballot)
+ {
+ if (node.op & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
+ vote(m_graph.m_varArgChildren[childIdx], ballot);
+ return;
+ }
+
+ if (node.child1() == NoNode)
+ return;
+ vote(node.child1(), ballot);
+ if (node.child2() == NoNode)
+ return;
+ vote(node.child2(), ballot);
+ if (node.child3() == NoNode)
+ return;
+ vote(node.child3(), ballot);
+ }
+
+ void doRoundOfDoubleVoting()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("Voting on double uses of locals [%u]\n", m_count);
+#endif
+ for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i)
+ m_graph.m_variableAccessData[i].find()->clearVotes();
+ for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex) {
+ Node& node = m_graph[m_compileIndex];
+ switch (node.op) {
+ case ValueAdd:
+ case ArithAdd:
+ case ArithSub:
+ case ArithMul:
+ case ArithMin:
+ case ArithMax:
+ case ArithMod:
+ case ArithDiv: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ VariableAccessData::Ballot ballot;
+
+                if (isNumberPrediction(left) && isNumberPrediction(right) && !(Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]) && node.canSpeculateInteger()))
+ ballot = VariableAccessData::VoteDouble;
+ else
+ ballot = VariableAccessData::VoteValue;
+
+ vote(node.child1(), ballot);
+ vote(node.child2(), ballot);
+ break;
+ }
+
+ case ArithAbs:
+ VariableAccessData::Ballot ballot;
+ if (!(m_graph[node.child1()].shouldSpeculateInteger() && node.canSpeculateInteger()))
+ ballot = VariableAccessData::VoteDouble;
+ else
+ ballot = VariableAccessData::VoteValue;
+
+ vote(node.child1(), ballot);
+ break;
+
+ case ArithSqrt:
+ vote(node.child1(), VariableAccessData::VoteDouble);
+ break;
+
+ case ValueToNumber:
+ case ValueToDouble:
+ // Don't vote.
+ break;
+
+ case SetLocal: {
+ PredictedType prediction = m_graph[node.child1()].prediction();
+ if (isDoublePrediction(prediction))
+ node.variableAccessData()->vote(VariableAccessData::VoteDouble);
+ else if (!isNumberPrediction(prediction) || isInt32Prediction(prediction))
+ node.variableAccessData()->vote(VariableAccessData::VoteValue);
+ break;
+ }
+
+ default:
+ vote(node, VariableAccessData::VoteValue);
+ break;
+ }
+ }
+ for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i)
+ m_changed |= m_graph.m_variableAccessData[i].find()->tallyVotesForShouldUseDoubleFormat();
+ }
+
+ void propagatePredictions()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ m_count = 0;
+#endif
+ // Two stage process: first propagate predictions, then propagate while doing double voting.
+
+ do {
+ m_changed = false;
+
+ // Forward propagation is near-optimal for both topologically-sorted and
+ // DFS-sorted code.
+ propagatePredictionsForward();
+ if (!m_changed)
+ break;
+
+ // Backward propagation reduces the likelihood that pathological code will
+ // cause slowness. Loops (especially nested ones) resemble backward flow.
+ // This pass captures two cases: (1) it detects if the forward fixpoint
+ // found a sound solution and (2) short-circuits backward flow.
+ m_changed = false;
+ propagatePredictionsBackward();
+ } while (m_changed);
+
+ do {
+ m_changed = false;
+ doRoundOfDoubleVoting();
+ propagatePredictionsForward();
+ if (!m_changed)
+ break;
+
+ m_changed = false;
+ doRoundOfDoubleVoting();
+ propagatePredictionsBackward();
+ } while (m_changed);
+ }
+
+ void toDouble(NodeIndex nodeIndex)
+ {
+ if (m_graph[nodeIndex].op == ValueToNumber) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" @%u -> ValueToDouble", nodeIndex);
+#endif
+ m_graph[nodeIndex].op = ValueToDouble;
+ }
+ }
+
+ void fixupNode(Node& node)
+ {
+ if (!node.shouldGenerate())
+ return;
+
+ NodeType op = node.op;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" %s @%u: ", Graph::opName(op), m_compileIndex);
+#endif
+
+ switch (op) {
+ case ValueAdd: {
+ if (!nodeCanSpeculateInteger(node.arithNodeFlags())) {
+ toDouble(node.child1());
+ toDouble(node.child2());
+ break;
+ }
+
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ if (left && right
+ && isNumberPrediction(left) && isNumberPrediction(right)
+ && ((left & PredictDouble) || (right & PredictDouble))) {
+ toDouble(node.child1());
+ toDouble(node.child2());
+ }
+ break;
+ }
+
+ case ArithAdd:
+ case ArithSub:
+ case ArithMul:
+ case ArithMin:
+ case ArithMax:
+ case ArithMod:
+ case ArithDiv: {
+ if (!nodeCanSpeculateInteger(node.arithNodeFlags())) {
+ toDouble(node.child1());
+ toDouble(node.child2());
+ break;
+ }
+
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ if (left && right
+ && ((left & PredictDouble) || (right & PredictDouble))) {
+ toDouble(node.child1());
+ toDouble(node.child2());
+ }
+ break;
+ }
+
+ case ArithAbs: {
+ if (!nodeCanSpeculateInteger(node.arithNodeFlags())) {
+ toDouble(node.child1());
+ break;
+ }
+
+ PredictedType prediction = m_graph[node.child1()].prediction();
+ if (prediction & PredictDouble)
+ toDouble(node.child1());
+ break;
+ }
+
+ case ArithSqrt: {
+ toDouble(node.child1());
+ break;
+ }
+
+ case GetById: {
+ if (!isInt32Prediction(m_graph[m_compileIndex].prediction()))
+ break;
+ if (m_codeBlock->identifier(node.identifierNumber()) != m_globalData.propertyNames->length)
+ break;
+ bool isArray = isArrayPrediction(m_graph[node.child1()].prediction());
+ bool isString = isStringPrediction(m_graph[node.child1()].prediction());
+ bool isByteArray = m_graph[node.child1()].shouldSpeculateByteArray();
+ bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
+ bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
+ bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
+ bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
+ bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
+ bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
+ bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
+ bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
+ if (!isArray && !isString && !isByteArray && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
+ break;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" @%u -> %s", m_compileIndex, isArray ? "GetArrayLength" : "GetStringLength");
+#endif
+ if (isArray)
+ node.op = GetArrayLength;
+ else if (isString)
+ node.op = GetStringLength;
+ else if (isByteArray)
+ node.op = GetByteArrayLength;
+ else if (isInt8Array)
+ node.op = GetInt8ArrayLength;
+ else if (isInt16Array)
+ node.op = GetInt16ArrayLength;
+ else if (isInt32Array)
+ node.op = GetInt32ArrayLength;
+ else if (isUint8Array)
+ node.op = GetUint8ArrayLength;
+ else if (isUint16Array)
+ node.op = GetUint16ArrayLength;
+ else if (isUint32Array)
+ node.op = GetUint32ArrayLength;
+ else if (isFloat32Array)
+ node.op = GetFloat32ArrayLength;
+ else if (isFloat64Array)
+ node.op = GetFloat64ArrayLength;
+ else
+ ASSERT_NOT_REACHED();
+ m_graph.deref(m_compileIndex); // No longer MustGenerate
+ break;
+ }
+ case GetIndexedPropertyStorage: {
+ PredictedType basePrediction = m_graph[node.child2()].prediction();
+ if (!(basePrediction & PredictInt32) && basePrediction) {
+ node.op = Nop;
+ m_graph.clearAndDerefChild1(node);
+ m_graph.clearAndDerefChild2(node);
+ m_graph.clearAndDerefChild3(node);
+ node.setRefCount(0);
+ }
+ break;
+ }
+ case GetByVal:
+ case StringCharAt:
+ case StringCharCodeAt: {
+ if (node.child3() != NoNode && m_graph[node.child3()].op == Nop)
+ node.children.fixed.child3 = NoNode;
+ break;
+ }
+ default:
+ break;
+ }
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("\n");
+#endif
+ }
+
+ void fixup()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("Performing Fixup\n");
+#endif
+ for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex)
+ fixupNode(m_graph[m_compileIndex]);
+ }
+
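+    // CSE helper: looks through value-conversion nodes so that a value and its
+    // ValueToNumber/ValueToInt32 conversion are treated as matching operands.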
+ NodeIndex canonicalize(NodeIndex nodeIndex)
+ {
+ if (nodeIndex == NoNode)
+ return NoNode;
+
+ if (m_graph[nodeIndex].op == ValueToNumber)
+ nodeIndex = m_graph[nodeIndex].child1();
+
+ if (m_graph[nodeIndex].op == ValueToInt32)
+ nodeIndex = m_graph[nodeIndex].child1();
+
+ return nodeIndex;
+ }
+
+ // Computes where the search for a candidate for CSE should start. Don't call
+ // this directly; call startIndex() instead as it does logging in debug mode.
+ NodeIndex computeStartIndexForChildren(NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
+ {
+ const unsigned limit = 300;
+
+ NodeIndex start = m_start;
+ if (m_compileIndex - start > limit)
+ start = m_compileIndex - limit;
+
+ ASSERT(start >= m_start);
+
+ NodeIndex child = canonicalize(child1);
+ if (child == NoNode)
+ return start;
+
+ if (start < child)
+ start = child;
+
+ child = canonicalize(child2);
+ if (child == NoNode)
+ return start;
+
+ if (start < child)
+ start = child;
+
+ child = canonicalize(child3);
+ if (child == NoNode)
+ return start;
+
+ if (start < child)
+ start = child;
+
+ return start;
+ }
+
+ NodeIndex startIndexForChildren(NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
+ {
+ NodeIndex result = computeStartIndexForChildren(child1, child2, child3);
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" lookback %u: ", result);
+#endif
+ return result;
+ }
+
+ NodeIndex startIndex()
+ {
+ Node& node = m_graph[m_compileIndex];
+ return startIndexForChildren(node.child1(), node.child2(), node.child3());
+ }
+
+ NodeIndex endIndexForPureCSE()
+ {
+ NodeIndex result = m_lastSeen[m_graph[m_compileIndex].op & NodeIdMask];
+ if (result == NoNode)
+ result = 0;
+ else
+ result++;
+ ASSERT(result <= m_compileIndex);
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" limit %u: ", result);
+#endif
+ return result;
+ }
+
+ NodeIndex pureCSE(Node& node)
+ {
+ NodeIndex child1 = canonicalize(node.child1());
+ NodeIndex child2 = canonicalize(node.child2());
+ NodeIndex child3 = canonicalize(node.child3());
+
+ NodeIndex start = startIndex();
+ for (NodeIndex index = endIndexForPureCSE(); index-- > start;) {
+ Node& otherNode = m_graph[index];
+ if (node.op != otherNode.op)
+ continue;
+
+ if (node.arithNodeFlagsForCompare() != otherNode.arithNodeFlagsForCompare())
+ continue;
+
+ NodeIndex otherChild = canonicalize(otherNode.child1());
+ if (otherChild == NoNode)
+ return index;
+ if (otherChild != child1)
+ continue;
+
+ otherChild = canonicalize(otherNode.child2());
+ if (otherChild == NoNode)
+ return index;
+ if (otherChild != child2)
+ continue;
+
+ otherChild = canonicalize(otherNode.child3());
+ if (otherChild == NoNode)
+ return index;
+ if (otherChild != child3)
+ continue;
+
+ return index;
+ }
+ return NoNode;
+ }
+
+ bool isPredictedNumerical(Node& node)
+ {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+ return isNumberPrediction(left) && isNumberPrediction(right);
+ }
+
+ bool logicalNotIsPure(Node& node)
+ {
+ PredictedType prediction = m_graph[node.child1()].prediction();
+ return isBooleanPrediction(prediction) || !prediction;
+ }
+
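+    // A by-val access is treated as pure only if the index is predicted to be an
+    // integer and the base is predicted to be an actionable array type (a mutable
+    // one, in the case of puts); otherwise it may have arbitrary side effects.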
+ bool byValIsPure(Node& node)
+ {
+ return m_graph[node.child2()].shouldSpeculateInteger()
+ && ((node.op == PutByVal || node.op == PutByValAlias)
+ ? isActionableMutableArrayPrediction(m_graph[node.child1()].prediction())
+ : isActionableArrayPrediction(m_graph[node.child1()].prediction()));
+ }
+
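+    // Conservatively answers whether the node at nodeIndex may invalidate our
+    // knowledge of the heap (clobber the world), in which case CSE must not look
+    // past it.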
+ bool clobbersWorld(NodeIndex nodeIndex)
+ {
+ Node& node = m_graph[nodeIndex];
+ if (node.op & NodeClobbersWorld)
+ return true;
+ if (!(node.op & NodeMightClobber))
+ return false;
+ switch (node.op) {
+ case ValueAdd:
+ case CompareLess:
+ case CompareLessEq:
+ case CompareGreater:
+ case CompareGreaterEq:
+ case CompareEq:
+ return !isPredictedNumerical(node);
+ case LogicalNot:
+ return !logicalNotIsPure(node);
+ case GetByVal:
+ return !byValIsPure(node);
+ default:
+ ASSERT_NOT_REACHED();
+            return true; // If by some oddity we hit this case in a release build, it's safer to have CSE assume the worst.
+ }
+ }
+
+ NodeIndex impureCSE(Node& node)
+ {
+ NodeIndex child1 = canonicalize(node.child1());
+ NodeIndex child2 = canonicalize(node.child2());
+ NodeIndex child3 = canonicalize(node.child3());
+
+ NodeIndex start = startIndex();
+ for (NodeIndex index = m_compileIndex; index-- > start;) {
+ Node& otherNode = m_graph[index];
+ if (node.op == otherNode.op
+ && node.arithNodeFlagsForCompare() == otherNode.arithNodeFlagsForCompare()) {
+ NodeIndex otherChild = canonicalize(otherNode.child1());
+ if (otherChild == NoNode)
+ return index;
+ if (otherChild == child1) {
+ otherChild = canonicalize(otherNode.child2());
+ if (otherChild == NoNode)
+ return index;
+ if (otherChild == child2) {
+ otherChild = canonicalize(otherNode.child3());
+ if (otherChild == NoNode)
+ return index;
+ if (otherChild == child3)
+ return index;
+ }
+ }
+ }
+ if (clobbersWorld(index))
+ break;
+ }
+ return NoNode;
+ }
+
+ NodeIndex globalVarLoadElimination(unsigned varNumber, JSGlobalObject* globalObject)
+ {
+ NodeIndex start = startIndexForChildren();
+ for (NodeIndex index = m_compileIndex; index-- > start;) {
+ Node& node = m_graph[index];
+ switch (node.op) {
+ case GetGlobalVar:
+ if (node.varNumber() == varNumber && m_codeBlock->globalObjectFor(node.codeOrigin) == globalObject)
+ return index;
+ break;
+ case PutGlobalVar:
+ if (node.varNumber() == varNumber && m_codeBlock->globalObjectFor(node.codeOrigin) == globalObject)
+ return node.child1();
+ break;
+ default:
+ break;
+ }
+ if (clobbersWorld(index))
+ break;
+ }
+ return NoNode;
+ }
+
+ NodeIndex getByValLoadElimination(NodeIndex child1, NodeIndex child2)
+ {
+ NodeIndex start = startIndexForChildren(child1, child2);
+ for (NodeIndex index = m_compileIndex; index-- > start;) {
+ Node& node = m_graph[index];
+ switch (node.op) {
+ case GetByVal:
+ if (!byValIsPure(node))
+ return NoNode;
+ if (node.child1() == child1 && canonicalize(node.child2()) == canonicalize(child2))
+ return index;
+ break;
+ case PutByVal:
+ case PutByValAlias:
+ if (!byValIsPure(node))
+ return NoNode;
+ if (node.child1() == child1 && canonicalize(node.child2()) == canonicalize(child2))
+ return node.child3();
+ // We must assume that the PutByVal will clobber the location we're getting from.
+ // FIXME: We can do better; if we know that the PutByVal is accessing an array of a
+ // different type than the GetByVal, then we know that they won't clobber each other.
+ return NoNode;
+ case PutStructure:
+ case PutByOffset:
+ // GetByVal currently always speculates that it's accessing an
+ // array with an integer index, which means that it's impossible
+ // for a structure change or a put to property storage to affect
+ // the GetByVal.
+ break;
+ case ArrayPush:
+ // A push cannot affect previously existing elements in the array.
+ break;
+ default:
+ if (clobbersWorld(index))
+ return NoNode;
+ break;
+ }
+ }
+ return NoNode;
+ }
+
+ bool checkFunctionElimination(JSFunction* function, NodeIndex child1)
+ {
+ NodeIndex start = startIndexForChildren(child1);
+ for (NodeIndex index = endIndexForPureCSE(); index-- > start;) {
+ Node& node = m_graph[index];
+ if (node.op == CheckFunction && node.child1() == child1 && node.function() == function)
+ return true;
+ }
+ return false;
+ }
+
+ bool checkStructureLoadElimination(const StructureSet& structureSet, NodeIndex child1)
+ {
+ NodeIndex start = startIndexForChildren(child1);
+ for (NodeIndex index = m_compileIndex; index-- > start;) {
+ Node& node = m_graph[index];
+ switch (node.op) {
+ case CheckStructure:
+ if (node.child1() == child1
+ && structureSet.isSupersetOf(node.structureSet()))
+ return true;
+ break;
+
+ case PutStructure:
+ if (node.child1() == child1
+ && structureSet.contains(node.structureTransitionData().newStructure))
+ return true;
+ if (structureSet.contains(node.structureTransitionData().previousStructure))
+ return false;
+ break;
+
+ case PutByOffset:
+ // Setting a property cannot change the structure.
+ break;
+
+ case PutByVal:
+ case PutByValAlias:
+ if (byValIsPure(node)) {
+ // If PutByVal speculates that it's accessing an array with an
+ // integer index, then it's impossible for it to cause a structure
+ // change.
+ break;
+ }
+ return false;
+
+ default:
+ if (clobbersWorld(index))
+ return false;
+ break;
+ }
+ }
+ return false;
+ }
+
+ NodeIndex getByOffsetLoadElimination(unsigned identifierNumber, NodeIndex child1)
+ {
+ NodeIndex start = startIndexForChildren(child1);
+ for (NodeIndex index = m_compileIndex; index-- > start;) {
+ Node& node = m_graph[index];
+ switch (node.op) {
+ case GetByOffset:
+ if (node.child1() == child1
+ && m_graph.m_storageAccessData[node.storageAccessDataIndex()].identifierNumber == identifierNumber)
+ return index;
+ break;
+
+ case PutByOffset:
+ if (m_graph.m_storageAccessData[node.storageAccessDataIndex()].identifierNumber == identifierNumber) {
+ if (node.child2() == child1)
+ return node.child3();
+ return NoNode;
+ }
+ break;
+
+ case PutStructure:
+ // Changing the structure cannot change the outcome of a property get.
+ break;
+
+ case PutByVal:
+ case PutByValAlias:
+ if (byValIsPure(node)) {
+ // If PutByVal speculates that it's accessing an array with an
+ // integer index, then it's impossible for it to cause a structure
+ // change.
+ break;
+ }
+ return NoNode;
+
+ default:
+ if (clobbersWorld(index))
+ return NoNode;
+ break;
+ }
+ }
+ return NoNode;
+ }
+
+ NodeIndex getPropertyStorageLoadElimination(NodeIndex child1)
+ {
+ NodeIndex start = startIndexForChildren(child1);
+ for (NodeIndex index = m_compileIndex; index-- > start;) {
+ Node& node = m_graph[index];
+ switch (node.op) {
+ case GetPropertyStorage:
+ if (node.child1() == child1)
+ return index;
+ break;
+
+ case PutByOffset:
+ case PutStructure:
+ // Changing the structure or putting to the storage cannot
+ // change the property storage pointer.
+ break;
+
+ case PutByVal:
+ case PutByValAlias:
+ if (byValIsPure(node)) {
+ // If PutByVal speculates that it's accessing an array with an
+ // integer index, then it's impossible for it to cause a structure
+ // change.
+ break;
+ }
+ return NoNode;
+
+ default:
+ if (clobbersWorld(index))
+ return NoNode;
+ break;
+ }
+ }
+ return NoNode;
+ }
+
+ NodeIndex getIndexedPropertyStorageLoadElimination(NodeIndex child1, bool hasIntegerIndexPrediction)
+ {
+ NodeIndex start = startIndexForChildren(child1);
+ for (NodeIndex index = m_compileIndex; index-- > start;) {
+ Node& node = m_graph[index];
+ switch (node.op) {
+ case GetIndexedPropertyStorage: {
+ PredictedType basePrediction = m_graph[node.child2()].prediction();
+ bool nodeHasIntegerIndexPrediction = !(!(basePrediction & PredictInt32) && basePrediction);
+ if (node.child1() == child1 && hasIntegerIndexPrediction == nodeHasIntegerIndexPrediction)
+ return index;
+ break;
+ }
+
+ case PutByOffset:
+ case PutStructure:
+ // Changing the structure or putting to the storage cannot
+ // change the property storage pointer.
+ break;
+
+ case PutByValAlias:
+ // PutByValAlias can't change the indexed storage pointer
+ break;
+
+ case PutByVal:
+ if (isFixedIndexedStorageObjectPrediction(m_graph[node.child1()].prediction()) && byValIsPure(node))
+ break;
+ return NoNode;
+
+ default:
+ if (clobbersWorld(index))
+ return NoNode;
+ break;
+ }
+ }
+ return NoNode;
+ }
+
+ NodeIndex getScopeChainLoadElimination(unsigned depth)
+ {
+ NodeIndex start = startIndexForChildren();
+ for (NodeIndex index = endIndexForPureCSE(); index-- > start;) {
+ Node& node = m_graph[index];
+ if (node.op == GetScopeChain
+ && node.scopeChainDepth() == depth)
+ return index;
+ }
+ return NoNode;
+ }
+
+ void performSubstitution(NodeIndex& child, bool addRef = true)
+ {
+ // Check if this operand is actually unused.
+ if (child == NoNode)
+ return;
+
+ // Check if there is any replacement.
+ NodeIndex replacement = m_replacements[child];
+ if (replacement == NoNode)
+ return;
+
+ child = replacement;
+
+ // There is definitely a replacement. Assert that the replacement does not
+ // have a replacement.
+ ASSERT(m_replacements[child] == NoNode);
+
+ if (addRef)
+ m_graph[child].ref();
+ }
+
+ void setReplacement(NodeIndex replacement)
+ {
+ if (replacement == NoNode)
+ return;
+
+ // Be safe. Don't try to perform replacements if the predictions don't
+ // agree.
+ if (m_graph[m_compileIndex].prediction() != m_graph[replacement].prediction())
+ return;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Replacing @%u -> @%u", m_compileIndex, replacement);
+#endif
+
+ Node& node = m_graph[m_compileIndex];
+ node.op = Phantom;
+ node.setRefCount(1);
+
+ // At this point we will eliminate all references to this node.
+ m_replacements[m_compileIndex] = replacement;
+ }
+
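+    // Turns the current node into a Phantom. Used when an earlier node (e.g. a
+    // previous CheckStructure or CheckFunction) already performed this node's check,
+    // making it redundant; the children stay referenced via the Phantom.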
+ void eliminate()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Eliminating @%u", m_compileIndex);
+#endif
+
+ Node& node = m_graph[m_compileIndex];
+ ASSERT(node.refCount() == 1);
+ ASSERT(node.mustGenerate());
+ node.op = Phantom;
+ }
+
+ void performNodeCSE(Node& node)
+ {
+ bool shouldGenerate = node.shouldGenerate();
+
+ if (node.op & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
+ performSubstitution(m_graph.m_varArgChildren[childIdx], shouldGenerate);
+ } else {
+ performSubstitution(node.children.fixed.child1, shouldGenerate);
+ performSubstitution(node.children.fixed.child2, shouldGenerate);
+ performSubstitution(node.children.fixed.child3, shouldGenerate);
+ }
+
+ if (!shouldGenerate)
+ return;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" %s @%u: ", Graph::opName(m_graph[m_compileIndex].op), m_compileIndex);
+#endif
+
+ // NOTE: there are some nodes that we deliberately don't CSE even though we
+ // probably could, like StrCat and ToPrimitive. That's because there is no
+ // evidence that doing CSE on these nodes would result in a performance
+ // progression. Hence considering these nodes in CSE would just mean that this
+ // code does more work with no win. Of course, we may want to reconsider this,
+ // since StrCat is trivially CSE-able. It's not trivially doable for
+ // ToPrimitive, but we could change that with some speculations if we really
+ // needed to.
+
+ switch (node.op) {
+
+ // Handle the pure nodes. These nodes never have any side-effects.
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ case BitRShift:
+ case BitLShift:
+ case BitURShift:
+ case ArithAdd:
+ case ArithSub:
+ case ArithMul:
+ case ArithMod:
+ case ArithDiv:
+ case ArithAbs:
+ case ArithMin:
+ case ArithMax:
+ case ArithSqrt:
+ case GetByteArrayLength:
+ case GetInt8ArrayLength:
+ case GetInt16ArrayLength:
+ case GetInt32ArrayLength:
+ case GetUint8ArrayLength:
+ case GetUint16ArrayLength:
+ case GetUint32ArrayLength:
+ case GetFloat32ArrayLength:
+ case GetFloat64ArrayLength:
+ case GetCallee:
+ case GetStringLength:
+ case StringCharAt:
+ case StringCharCodeAt:
+ setReplacement(pureCSE(node));
+ break;
+
+ case GetArrayLength:
+ setReplacement(impureCSE(node));
+ break;
+
+ case GetScopeChain:
+ setReplacement(getScopeChainLoadElimination(node.scopeChainDepth()));
+ break;
+
+ // Handle nodes that are conditionally pure: these are pure, and can
+ // be CSE'd, so long as the prediction is the one we want.
+ case ValueAdd:
+ case CompareLess:
+ case CompareLessEq:
+ case CompareGreater:
+ case CompareGreaterEq:
+ case CompareEq: {
+ if (isPredictedNumerical(node)) {
+ NodeIndex replacementIndex = pureCSE(node);
+ if (replacementIndex != NoNode && isPredictedNumerical(m_graph[replacementIndex]))
+ setReplacement(replacementIndex);
+ }
+ break;
+ }
+
+ case LogicalNot: {
+ if (logicalNotIsPure(node)) {
+ NodeIndex replacementIndex = pureCSE(node);
+ if (replacementIndex != NoNode && logicalNotIsPure(m_graph[replacementIndex]))
+ setReplacement(replacementIndex);
+ }
+ break;
+ }
+
+ // Finally handle heap accesses. These are not quite pure, but we can still
+ // optimize them provided that some subtle conditions are met.
+ case GetGlobalVar:
+ setReplacement(globalVarLoadElimination(node.varNumber(), m_codeBlock->globalObjectFor(node.codeOrigin)));
+ break;
+
+ case GetByVal:
+ if (byValIsPure(node))
+ setReplacement(getByValLoadElimination(node.child1(), node.child2()));
+ break;
+
+ case PutByVal:
+ if (byValIsPure(node) && getByValLoadElimination(node.child1(), node.child2()) != NoNode)
+ node.op = PutByValAlias;
+ break;
+
+ case CheckStructure:
+ if (checkStructureLoadElimination(node.structureSet(), node.child1()))
+ eliminate();
+ break;
+
+ case CheckFunction:
+ if (checkFunctionElimination(node.function(), node.child1()))
+ eliminate();
+ break;
+
+ case GetIndexedPropertyStorage: {
+ PredictedType basePrediction = m_graph[node.child2()].prediction();
+ bool nodeHasIntegerIndexPrediction = !(!(basePrediction & PredictInt32) && basePrediction);
+ setReplacement(getIndexedPropertyStorageLoadElimination(node.child1(), nodeHasIntegerIndexPrediction));
+ break;
+ }
+
+ case GetPropertyStorage:
+ setReplacement(getPropertyStorageLoadElimination(node.child1()));
+ break;
+
+ case GetByOffset:
+ setReplacement(getByOffsetLoadElimination(m_graph.m_storageAccessData[node.storageAccessDataIndex()].identifierNumber, node.child1()));
+ break;
+
+ default:
+ // do nothing.
+ break;
+ }
+
+ m_lastSeen[node.op & NodeIdMask] = m_compileIndex;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("\n");
+#endif
+ }
+
+ void performBlockCSE(BasicBlock& block)
+ {
+ m_start = block.begin;
+ NodeIndex end = block.end;
+ for (m_compileIndex = m_start; m_compileIndex < end; ++m_compileIndex)
+ performNodeCSE(m_graph[m_compileIndex]);
+ }
+
+ void localCSE()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("Performing local CSE:");
+#endif
+ for (unsigned block = 0; block < m_graph.m_blocks.size(); ++block)
+ performBlockCSE(*m_graph.m_blocks[block]);
+ }
+
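+    // Assigns a VirtualRegister to every node that produces a result, walking the
+    // graph in order and reusing registers that the ScoreBoard frees once a value's
+    // last use has passed.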
+ void allocateVirtualRegisters()
+ {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Preserved vars: ");
+ m_graph.m_preservedVars.dump(stdout);
+ printf("\n");
+#endif
+ ScoreBoard scoreBoard(m_graph, m_graph.m_preservedVars);
+ unsigned sizeExcludingPhiNodes = m_graph.m_blocks.last()->end;
+ for (size_t i = 0; i < sizeExcludingPhiNodes; ++i) {
+ Node& node = m_graph[i];
+
+ if (!node.shouldGenerate())
+ continue;
+
+ // GetLocal nodes are effectively phi nodes in the graph, referencing
+ // results from prior blocks.
+ if (node.op != GetLocal) {
+ // First, call use on all of the current node's children, then
+ // allocate a VirtualRegister for this node. We do so in this
+ // order so that if a child is on its last use, and a
+                // VirtualRegister is freed, then it may be reused for this node.
+ if (node.op & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
+ scoreBoard.use(m_graph.m_varArgChildren[childIdx]);
+ } else {
+ scoreBoard.use(node.child1());
+ scoreBoard.use(node.child2());
+ scoreBoard.use(node.child3());
+ }
+ }
+
+ if (!node.hasResult())
+ continue;
+
+ node.setVirtualRegister(scoreBoard.allocate());
+            // 'mustGenerate' nodes have their useCount artificially elevated;
+            // call use() now to account for this.
+ if (node.mustGenerate())
+ scoreBoard.use(i);
+ }
+
+ // 'm_numCalleeRegisters' is the number of locals and temporaries allocated
+ // for the function (and checked for on entry). Since we perform a new and
+ // different allocation of temporaries, more registers may now be required.
+ unsigned calleeRegisters = scoreBoard.highWatermark() + m_graph.m_parameterSlots;
+ if ((unsigned)m_codeBlock->m_numCalleeRegisters < calleeRegisters)
+ m_codeBlock->m_numCalleeRegisters = calleeRegisters;
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ printf("Num callee registers: %u\n", calleeRegisters);
+#endif
+ }
+
+ void performBlockCFA(AbstractState& state, BlockIndex blockIndex)
+ {
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+ if (!block->cfaShouldRevisit)
+ return;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" Block #%u (bc#%u):\n", blockIndex, block->bytecodeBegin);
+#endif
+ state.beginBasicBlock(block);
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" head vars: ");
+ dumpOperands(block->valuesAtHead, stdout);
+ printf("\n");
+#endif
+ for (NodeIndex nodeIndex = block->begin; nodeIndex < block->end; ++nodeIndex) {
+ if (!m_graph[nodeIndex].shouldGenerate())
+ continue;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" %s @%u: ", Graph::opName(m_graph[nodeIndex].op), nodeIndex);
+ state.dump(stdout);
+ printf("\n");
+#endif
+ if (!state.execute(nodeIndex))
+ break;
+ }
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" tail regs: ");
+ state.dump(stdout);
+ printf("\n");
+#endif
+ m_changed |= state.endBasicBlock(AbstractState::MergeToSuccessors);
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf(" tail vars: ");
+ dumpOperands(block->valuesAtTail, stdout);
+ printf("\n");
+#endif
+ }
+
+ void performForwardCFA(AbstractState& state)
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ printf("CFA [%u]\n", ++m_count);
+#endif
+
+ for (BlockIndex block = 0; block < m_graph.m_blocks.size(); ++block)
+ performBlockCFA(state, block);
+ }
+
+ void globalCFA()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ m_count = 0;
+#endif
+
+ // This implements a pseudo-worklist-based forward CFA, except that the visit order
+ // of blocks is the bytecode program order (which is nearly topological), and
+ // instead of a worklist we just walk all basic blocks checking if cfaShouldRevisit
+ // is set to true. This is likely to balance the efficiency properties of both
+ // worklist-based and forward fixpoint-based approaches. Like a worklist-based
+ // approach, it won't visit code if it's meaningless to do so (nothing changed at
+ // the head of the block or the predecessors have not been visited). Like a forward
+ // fixpoint-based approach, it has a high probability of only visiting a block
+ // after all predecessors have been visited. Only loops will cause this analysis to
+ // revisit blocks, and the amount of revisiting is proportional to loop depth.
+
+ AbstractState::initialize(m_graph);
+
+ AbstractState state(m_codeBlock, m_graph);
+
+ do {
+ m_changed = false;
+ performForwardCFA(state);
+ } while (m_changed);
+ }
+
+ Graph& m_graph;
+ JSGlobalData& m_globalData;
+ CodeBlock* m_codeBlock;
+ CodeBlock* m_profiledBlock;
+
+ NodeIndex m_start;
+ NodeIndex m_compileIndex;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ unsigned m_count;
+#endif
+
+ bool m_changed;
+
+ Vector<NodeIndex, 16> m_replacements;
+ FixedArray<NodeIndex, LastNodeId> m_lastSeen;
+};
+
+void propagate(Graph& graph, JSGlobalData* globalData, CodeBlock* codeBlock)
+{
+ ASSERT(codeBlock);
+ CodeBlock* profiledBlock = codeBlock->alternative();
+ ASSERT(profiledBlock);
+
+ Propagator propagator(graph, *globalData, codeBlock, profiledBlock);
+ propagator.fixpoint();
+
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
diff --git a/Source/JavaScriptCore/dfg/DFGPropagator.h b/Source/JavaScriptCore/dfg/DFGPropagator.h
new file mode 100644
index 000000000..e24c06b2b
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGPropagator.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGPropagator_h
+#define DFGPropagator_h
+
+#if ENABLE(DFG_JIT)
+
+#include <dfg/DFGGraph.h>
+
+namespace JSC {
+
+class CodeBlock;
+class JSGlobalData;
+
+namespace DFG {
+
+// Propagate dynamic predictions from value sources to variables.
+void propagate(Graph&, JSGlobalData*, CodeBlock*);
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGRegisterBank.h b/Source/JavaScriptCore/dfg/DFGRegisterBank.h
new file mode 100644
index 000000000..11cc70931
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGRegisterBank.h
@@ -0,0 +1,383 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGRegisterBank_h
+#define DFGRegisterBank_h
+
+#if ENABLE(DFG_JIT)
+
+#include <dfg/DFGCommon.h>
+
+namespace JSC { namespace DFG {
+
+// === RegisterBank ===
+//
+// This class is used to implement the GPR and FPR register banks.
+// All registers have two pieces of state associated with them:
+// a lock count (used to indicate this register is already in use
+// in code generation of the current node, and cannot be spilled or
+// allocated as a temporary), and VirtualRegister 'name', recording
+// which value (if any) a machine register currently holds.
+// Either or both of these pieces of information may be valid for a
+// given register. A register may be:
+//
+// - unlocked, and unnamed: Available for allocation.
+// - locked, but unnamed: Already allocated as a temporary or
+// result for the current node.
+// - unlocked, but named: Contains the result of a prior operation,
+//                             not yet in use for this node.
+// - locked, but named: Contains the result of a prior operation,
+//                             already allocated as an operand to the
+// current operation.
+//
+// For every named register we also record a hint value indicating
+// the order in which registers should be selected to be spilled;
+// registers that can be more cheaply spilled and/or filled should
+// be selected first.
+//
+// Locking a register is a strong retention mechanism; a locked register
+// will never be reallocated (this is used to ensure the operands to
+// the current node are in registers). Naming, conversely, is a weak
+// retention mechanism - allocating a register may force a named value
+// to be spilled.
+//
+// All named values must be given a hint that is greater than Min and
+// less than Max.
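+//
+// A minimal, illustrative usage sketch (assuming the GPR bank, i.e. BankInfo = GPRInfo;
+// 'name' and 'spillOrder' stand for a value's VirtualRegister and its spill hint):
+//
+//     RegisterBank<GPRInfo> gprs;
+//     VirtualRegister spillMe;
+//     GPRReg reg = gprs.allocate(spillMe);  // locks 'reg'; 'spillMe' names any evicted value
+//     // ... if spillMe != InvalidVirtualRegister, spill that value first ...
+//     gprs.retain(reg, name, spillOrder);   // associate 'name' with the locked register
+//     gprs.unlock(reg);                     // 'reg' may now be spilled, but keeps its name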
+template<class BankInfo>
+class RegisterBank {
+ typedef typename BankInfo::RegisterType RegID;
+ static const size_t NUM_REGS = BankInfo::numberOfRegisters;
+
+ typedef uint32_t SpillHint;
+ static const SpillHint SpillHintInvalid = 0xffffffff;
+
+public:
+ RegisterBank()
+ : m_lastAllocated(NUM_REGS - 1)
+ {
+ }
+
+ // Attempt to allocate a register - this function finds an unlocked
+ // register, locks it, and returns it. If none can be found, this
+ // returns -1 (InvalidGPRReg or InvalidFPRReg).
+ RegID tryAllocate()
+ {
+ VirtualRegister ignored;
+
+ for (uint32_t i = m_lastAllocated + 1; i < NUM_REGS; ++i) {
+ if (!m_data[i].lockCount && m_data[i].name == InvalidVirtualRegister)
+ return allocateInternal(i, ignored);
+ }
+ // Loop over the remaining entries.
+ for (uint32_t i = 0; i <= m_lastAllocated; ++i) {
+ if (!m_data[i].lockCount && m_data[i].name == InvalidVirtualRegister)
+ return allocateInternal(i, ignored);
+ }
+
+ return (RegID)-1;
+ }
+
+ // Allocate a register - this function finds an unlocked register,
+    // locks it, and returns it. If any unlocked, unnamed registers exist,
+    // one of these should be selected to be allocated. If all unlocked
+ // registers are named, then one of the named registers will need
+ // to be spilled. In this case the register selected to be spilled
+ // will be one of the registers that has the lowest 'spillOrder'
+ // cost associated with it.
+ //
+    // This method selects the register to be allocated, and calls the
+ // private 'allocateInternal' method to update internal data
+ // structures accordingly.
+ RegID allocate(VirtualRegister &spillMe)
+ {
+ uint32_t currentLowest = NUM_REGS;
+ SpillHint currentSpillOrder = SpillHintInvalid;
+
+        // Scan through all registers, starting at the last allocated and looping around.
+ ASSERT(m_lastAllocated < NUM_REGS);
+
+ // This loop is broken into two halves, looping from the last allocated
+ // register (the register returned last time this method was called) to
+ // the maximum register value, then from 0 to the last allocated.
+        // This implements a simple round-robin-like approach to try to reduce
+        // thrashing and minimize time spent scanning locked registers during allocation.
+        // If an unlocked and unnamed register is found, return it immediately.
+ // Otherwise, find the first unlocked register with the lowest spillOrder.
+ for (uint32_t i = m_lastAllocated + 1; i < NUM_REGS; ++i) {
+ // (1) If the current register is locked, it is not a candidate.
+ if (m_data[i].lockCount)
+ continue;
+            // (2) If the current register is unnamed (its spillOrder is SpillHintInvalid), pick it immediately.
+ SpillHint spillOrder = m_data[i].spillOrder;
+ if (spillOrder == SpillHintInvalid)
+ return allocateInternal(i, spillMe);
+ // If this register is better (has a lower spill order value) than any prior
+ // candidate, then record it.
+ if (spillOrder < currentSpillOrder) {
+ currentSpillOrder = spillOrder;
+ currentLowest = i;
+ }
+ }
+ // Loop over the remaining entries.
+ for (uint32_t i = 0; i <= m_lastAllocated; ++i) {
+ if (m_data[i].lockCount)
+ continue;
+ SpillHint spillOrder = m_data[i].spillOrder;
+ if (spillOrder == SpillHintInvalid)
+ return allocateInternal(i, spillMe);
+ if (spillOrder < currentSpillOrder) {
+ currentSpillOrder = spillOrder;
+ currentLowest = i;
+ }
+ }
+
+        // Deadlock check - this could only occur if all registers are locked!
+ ASSERT(currentLowest != NUM_REGS && currentSpillOrder != SpillHintInvalid);
+        // No unlocked, unnamed register was found; the value in currentLowest (the
+        // unlocked register with the lowest spill order) will need to be spilled.
+ return allocateInternal(currentLowest, spillMe);
+ }
+
+    // Allocates the given register, even if this will force a spill. Returns the
+    // name (VirtualRegister) of any value that must now be spilled, or
+    // InvalidVirtualRegister if the register was unnamed.
+ VirtualRegister allocateSpecific(RegID reg)
+ {
+ unsigned index = BankInfo::toIndex(reg);
+
+ ++m_data[index].lockCount;
+ VirtualRegister name = nameAtIndex(index);
+ if (name != InvalidVirtualRegister)
+ releaseAtIndex(index);
+
+ return name;
+ }
+
+ // retain/release - these methods are used to associate/disassociate names
+ // with values in registers. retain should only be called on locked registers.
+ void retain(RegID reg, VirtualRegister name, SpillHint spillOrder)
+ {
+ unsigned index = BankInfo::toIndex(reg);
+
+ // SpillHint must be valid.
+ ASSERT(spillOrder != SpillHintInvalid);
+ // 'index' must be a valid, locked register.
+ ASSERT(index < NUM_REGS);
+ ASSERT(m_data[index].lockCount);
+        // 'index' should not currently be named; the new name must be valid.
+ ASSERT(m_data[index].name == InvalidVirtualRegister);
+ ASSERT(name != InvalidVirtualRegister);
+ // 'index' should not currently have a spillOrder.
+ ASSERT(m_data[index].spillOrder == SpillHintInvalid);
+
+ m_data[index].name = name;
+ m_data[index].spillOrder = spillOrder;
+ }
+ void release(RegID reg)
+ {
+ releaseAtIndex(BankInfo::toIndex(reg));
+ }
+
+ // lock/unlock register, ensures that they are not spilled.
+ void lock(RegID reg)
+ {
+ unsigned index = BankInfo::toIndex(reg);
+
+ ASSERT(index < NUM_REGS);
+ ++m_data[index].lockCount;
+ ASSERT(m_data[index].lockCount);
+ }
+ void unlock(RegID reg)
+ {
+ unsigned index = BankInfo::toIndex(reg);
+
+ ASSERT(index < NUM_REGS);
+ ASSERT(m_data[index].lockCount);
+ --m_data[index].lockCount;
+ }
+ bool isLocked(RegID reg) const
+ {
+ return isLockedAtIndex(BankInfo::toIndex(reg));
+ }
+
+ // Get the name (VirtualRegister) associated with the
+ // given register (or InvalidVirtualRegister for none).
+ VirtualRegister name(RegID reg) const
+ {
+ return nameAtIndex(BankInfo::toIndex(reg));
+ }
+
+#ifndef NDEBUG
+ void dump()
+ {
+ // For each register, print the VirtualRegister 'name'.
+        for (uint32_t i = 0; i < NUM_REGS; ++i) {
+ if (m_data[i].name != InvalidVirtualRegister)
+ fprintf(stderr, "[%02d]", m_data[i].name);
+ else
+ fprintf(stderr, "[--]");
+ }
+ fprintf(stderr, "\n");
+ }
+#endif
+
+ class iterator {
+ friend class RegisterBank<BankInfo>;
+ public:
+ VirtualRegister name() const
+ {
+ return m_bank->nameAtIndex(m_index);
+ }
+
+ bool isLocked() const
+ {
+ return m_bank->isLockedAtIndex(m_index);
+ }
+
+ void release() const
+ {
+ m_bank->releaseAtIndex(m_index);
+ }
+
+ RegID regID() const
+ {
+ return BankInfo::toRegister(m_index);
+ }
+
+#ifndef NDEBUG
+ const char* debugName() const
+ {
+ return BankInfo::debugName(regID());
+ }
+#endif
+
+ iterator& operator++()
+ {
+ ++m_index;
+ return *this;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ ASSERT(m_bank == other.m_bank);
+ return m_index != other.m_index;
+ }
+
+ unsigned index() const
+ {
+ return m_index;
+ }
+
+ private:
+ iterator(RegisterBank<BankInfo>* bank, unsigned index)
+ : m_bank(bank)
+ , m_index(index)
+ {
+ }
+
+ RegisterBank<BankInfo>* m_bank;
+ unsigned m_index;
+ };
+
+ iterator begin()
+ {
+ return iterator(this, 0);
+ }
+
+ iterator end()
+ {
+ return iterator(this, NUM_REGS);
+ }
+
+private:
+ bool isLockedAtIndex(unsigned index) const
+ {
+ ASSERT(index < NUM_REGS);
+ return m_data[index].lockCount;
+ }
+
+ VirtualRegister nameAtIndex(unsigned index) const
+ {
+ ASSERT(index < NUM_REGS);
+ return m_data[index].name;
+ }
+
+ void releaseAtIndex(unsigned index)
+ {
+ // 'index' must be a valid register.
+ ASSERT(index < NUM_REGS);
+ // 'index' should currently be named.
+ ASSERT(m_data[index].name != InvalidVirtualRegister);
+ // 'index' should currently have a valid spill order.
+ ASSERT(m_data[index].spillOrder != SpillHintInvalid);
+
+ m_data[index].name = InvalidVirtualRegister;
+ m_data[index].spillOrder = SpillHintInvalid;
+ }
+
+    // Used by 'allocate', above, to update information in the map.
+ RegID allocateInternal(uint32_t i, VirtualRegister &spillMe)
+ {
+ // 'i' must be a valid, unlocked register.
+ ASSERT(i < NUM_REGS && !m_data[i].lockCount);
+
+ // Return the VirtualRegister of the named value currently stored in
+ // the register being returned - or InvalidVirtualRegister if none.
+ spillMe = m_data[i].name;
+
+        // Clear any name/spillOrder currently associated with the register.
+ m_data[i] = MapEntry();
+ // Mark the register as locked (with a lock count of 1).
+ m_data[i].lockCount = 1;
+
+ m_lastAllocated = i;
+ return BankInfo::toRegister(i);
+ }
+
+ // === MapEntry ===
+ //
+ // This structure provides information for an individual machine register
+ // being managed by the RegisterBank. For each register we track a lock
+ // count, name and spillOrder hint.
+ struct MapEntry {
+ MapEntry()
+ : name(InvalidVirtualRegister)
+ , spillOrder(SpillHintInvalid)
+ , lockCount(0)
+ {
+ }
+
+ VirtualRegister name;
+ SpillHint spillOrder;
+ uint32_t lockCount;
+ };
+
+ // Holds the current status of all registers.
+ MapEntry m_data[NUM_REGS];
+    // Used to implement a simple round-robin-like allocation scheme.
+ uint32_t m_lastAllocated;
+};
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGRepatch.cpp b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
new file mode 100644
index 000000000..ae4a44ffe
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
@@ -0,0 +1,637 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGRepatch.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGSpeculativeJIT.h"
+#include "LinkBuffer.h"
+#include "Operations.h"
+#include "RepatchBuffer.h"
+
+namespace JSC { namespace DFG {
+
+static void dfgRepatchCall(CodeBlock* codeblock, CodeLocationCall call, FunctionPtr newCalleeFunction)
+{
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relink(call, newCalleeFunction);
+}
+
+static void dfgRepatchByIdSelfAccess(CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure, size_t offset, const FunctionPtr &slowPathFunction, bool compact)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // Only optimize once!
+ repatchBuffer.relink(stubInfo.callReturnLocation, slowPathFunction);
+
+ // Patch the structure check & the offset of the load.
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.deltaCheckImmToCall), structure);
+#if USE(JSVALUE64)
+ if (compact)
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.deltaCallToLoadOrStore), sizeof(JSValue) * offset);
+ else
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.deltaCallToLoadOrStore), sizeof(JSValue) * offset);
+#elif USE(JSVALUE32_64)
+ if (compact) {
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.deltaCallToTagLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.deltaCallToPayloadLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ } else {
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.deltaCallToTagLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.deltaCallToPayloadLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+#endif
+}
+
+static void emitRestoreScratch(MacroAssembler& stubJit, bool needToRestoreScratch, GPRReg scratchGPR, MacroAssembler::Jump& success, MacroAssembler::Jump& fail, MacroAssembler::JumpList failureCases)
+{
+ if (needToRestoreScratch) {
+ stubJit.pop(scratchGPR);
+
+ success = stubJit.jump();
+
+ // link failure cases here, so we can pop scratchGPR, and then jump back.
+ failureCases.link(&stubJit);
+
+ stubJit.pop(scratchGPR);
+
+ fail = stubJit.jump();
+ return;
+ }
+
+ success = stubJit.jump();
+}
+
+static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratch, MacroAssembler::Jump success, MacroAssembler::Jump fail, MacroAssembler::JumpList failureCases, CodeLocationLabel successLabel, CodeLocationLabel slowCaseBegin)
+{
+ patchBuffer.link(success, successLabel);
+
+ if (needToRestoreScratch) {
+ patchBuffer.link(fail, slowCaseBegin);
+ return;
+ }
+
+ // link failure cases directly back to normal path
+ patchBuffer.link(failureCases, slowCaseBegin);
+}
+
+static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratch, StructureStubInfo& stubInfo, MacroAssembler::Jump success, MacroAssembler::Jump fail, MacroAssembler::JumpList failureCases)
+{
+ linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
+}
+
+static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stubInfo, StructureChain* chain, size_t count, size_t offset, Structure* structure, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel, MacroAssemblerCodeRef& stubRoutine)
+{
+ JSGlobalData* globalData = &exec->globalData();
+
+ MacroAssembler stubJit;
+
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+#endif
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.valueGPR);
+ GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
+ bool needToRestoreScratch = false;
+
+ if (scratchGPR == InvalidGPRReg) {
+ scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, resultGPR);
+ stubJit.push(scratchGPR);
+ needToRestoreScratch = true;
+ }
+
+ MacroAssembler::JumpList failureCases;
+
+ failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure)));
+
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(exec));
+ stubJit.move(MacroAssembler::TrustedImmPtr(protoObject), scratchGPR);
+ failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(scratchGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(protoObject->structure())));
+ currStructure = it->get();
+ }
+
+ stubJit.loadPtr(protoObject->addressOfPropertyStorage(), resultGPR);
+#if USE(JSVALUE64)
+ stubJit.loadPtr(MacroAssembler::Address(resultGPR, offset * sizeof(WriteBarrier<Unknown>)), resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.load32(MacroAssembler::Address(resultGPR, offset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, offset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+#endif
+
+ MacroAssembler::Jump success, fail;
+
+ emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
+
+ LinkBuffer patchBuffer(*globalData, &stubJit);
+
+ linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
+
+ stubRoutine = patchBuffer.finalizeCode();
+}
+
+static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ // FIXME: Write a test that proves we need to check for recursion here just
+ // like the interpreter does, then add a check for recursion.
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ JSGlobalData* globalData = &exec->globalData();
+
+ if (isJSArray(baseValue) && propertyName == exec->propertyNames().length) {
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+#endif
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.valueGPR);
+ GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
+ bool needToRestoreScratch = false;
+
+ MacroAssembler stubJit;
+
+ if (scratchGPR == InvalidGPRReg) {
+ scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, resultGPR);
+ stubJit.push(scratchGPR);
+ needToRestoreScratch = true;
+ }
+
+ MacroAssembler::JumpList failureCases;
+
+ failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), scratchGPR);
+ stubJit.load32(MacroAssembler::Address(scratchGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), scratchGPR);
+ failureCases.append(stubJit.branch32(MacroAssembler::LessThan, scratchGPR, MacroAssembler::TrustedImm32(0)));
+
+#if USE(JSVALUE64)
+ stubJit.orPtr(GPRInfo::tagTypeNumberRegister, scratchGPR, resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.move(scratchGPR, resultGPR);
+ stubJit.move(JITCompiler::TrustedImm32(0xffffffff), resultTagGPR); // JSValue::Int32Tag
+#endif
+
+ MacroAssembler::Jump success, fail;
+
+ emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
+
+ LinkBuffer patchBuffer(*globalData, &stubJit);
+
+ linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases);
+
+ stubInfo.stubRoutine = patchBuffer.finalizeCode();
+
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code()));
+ repatchBuffer.relink(stubInfo.callReturnLocation, operationGetById);
+
+ return true;
+ }
+
+ // FIXME: should support length access for String.
+
+ // FIXME: Cache property access for immediates.
+ if (!baseValue.isCell())
+ return false;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+ if (!slot.isCacheable())
+ return false;
+ if (structure->isUncacheableDictionary() || structure->typeInfo().prohibitsPropertyCaching())
+ return false;
+
+ // Optimize self access.
+ if (slot.slotBase() == baseValue) {
+ if ((slot.cachedPropertyType() != PropertySlot::Value) || ((slot.cachedOffset() * sizeof(JSValue)) > (unsigned)MacroAssembler::MaximumCompactPtrAlignedAddressOffset))
+ return false;
+
+ dfgRepatchByIdSelfAccess(codeBlock, stubInfo, structure, slot.cachedOffset(), operationGetByIdBuildList, true);
+ stubInfo.initGetByIdSelf(*globalData, codeBlock->ownerExecutable(), structure);
+ return true;
+ }
+
+ if (structure->isDictionary())
+ return false;
+
+ // FIXME: optimize getters and setters
+ if (slot.cachedPropertyType() != PropertySlot::Value)
+ return false;
+
+ size_t offset = slot.cachedOffset();
+ size_t count = normalizePrototypeChain(exec, baseValue, slot.slotBase(), propertyName, offset);
+ if (!count)
+ return false;
+
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+
+ ASSERT(slot.slotBase().isObject());
+
+ generateProtoChainAccessStub(exec, stubInfo, prototypeChain, count, offset, structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase), stubInfo.stubRoutine);
+
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code()));
+ repatchBuffer.relink(stubInfo.callReturnLocation, operationGetByIdProtoBuildList);
+
+ stubInfo.initGetByIdChain(*globalData, codeBlock->ownerExecutable(), structure, prototypeChain);
+ return true;
+}
+
+void dfgRepatchGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ bool cached = tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo);
+ if (!cached)
+ dfgRepatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
+}
+
+static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier&, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ if (!baseValue.isCell()
+ || !slot.isCacheable()
+ || baseValue.asCell()->structure()->isUncacheableDictionary()
+ || slot.slotBase() != baseValue
+ || slot.cachedPropertyType() != PropertySlot::Value
+ || (slot.cachedOffset() * sizeof(JSValue)) > (unsigned)MacroAssembler::MaximumCompactPtrAlignedAddressOffset)
+ return false;
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+ JSGlobalData* globalData = &exec->globalData();
+
+ ASSERT(slot.slotBase().isObject());
+
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex = 1;
+
+ if (stubInfo.accessType == access_get_by_id_self) {
+ ASSERT(!stubInfo.stubRoutine);
+ polymorphicStructureList = new PolymorphicAccessStructureList(*globalData, codeBlock->ownerExecutable(), MacroAssemblerCodeRef::createSelfManagedCodeRef(stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase)), stubInfo.u.getByIdSelf.baseObjectStructure.get(), true);
+ stubInfo.initGetByIdSelfList(polymorphicStructureList, 1);
+ } else {
+ polymorphicStructureList = stubInfo.u.getByIdSelfList.structureList;
+ listIndex = stubInfo.u.getByIdSelfList.listSize;
+ }
+
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
+ stubInfo.u.getByIdSelfList.listSize++;
+
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+#endif
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.valueGPR);
+
+ MacroAssembler stubJit;
+
+ MacroAssembler::Jump wrongStruct = stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure));
+
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
+#if USE(JSVALUE64)
+ stubJit.loadPtr(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue)), resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.load32(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+#endif
+
+ MacroAssembler::Jump success = stubJit.jump();
+
+ LinkBuffer patchBuffer(*globalData, &stubJit);
+
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine.code());
+ ASSERT(!!lastProtoBegin);
+
+ patchBuffer.link(wrongStruct, lastProtoBegin);
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone));
+
+ MacroAssemblerCodeRef stubRoutine = patchBuffer.finalizeCode();
+
+ polymorphicStructureList->list[listIndex].set(*globalData, codeBlock->ownerExecutable(), stubRoutine, structure, true);
+
+ CodeLocationJump jumpLocation = stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck);
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+
+ if (listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ return true;
+ }
+
+ return false;
+}
+
+void dfgBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ bool dontChangeCall = tryBuildGetByIDList(exec, baseValue, propertyName, slot, stubInfo);
+ if (!dontChangeCall)
+ dfgRepatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
+}
+
+static bool tryBuildGetByIDProtoList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ if (!baseValue.isCell()
+ || !slot.isCacheable()
+ || baseValue.asCell()->structure()->isDictionary()
+ || baseValue.asCell()->structure()->typeInfo().prohibitsPropertyCaching()
+ || slot.slotBase() == baseValue
+ || slot.cachedPropertyType() != PropertySlot::Value)
+ return false;
+
+ ASSERT(slot.slotBase().isObject());
+
+ size_t offset = slot.cachedOffset();
+ size_t count = normalizePrototypeChain(exec, baseValue, slot.slotBase(), propertyName, offset);
+ if (!count)
+ return false;
+
+ Structure* structure = baseValue.asCell()->structure();
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+ CodeBlock* codeBlock = exec->codeBlock();
+ JSGlobalData* globalData = &exec->globalData();
+
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex = 1;
+
+ if (stubInfo.accessType == access_get_by_id_chain) {
+ ASSERT(!!stubInfo.stubRoutine);
+ polymorphicStructureList = new PolymorphicAccessStructureList(*globalData, codeBlock->ownerExecutable(), stubInfo.stubRoutine, stubInfo.u.getByIdChain.baseObjectStructure.get(), stubInfo.u.getByIdChain.chain.get(), true);
+ stubInfo.stubRoutine = MacroAssemblerCodeRef();
+ stubInfo.initGetByIdProtoList(polymorphicStructureList, 1);
+ } else {
+ ASSERT(stubInfo.accessType == access_get_by_id_proto_list);
+ polymorphicStructureList = stubInfo.u.getByIdProtoList.structureList;
+ listIndex = stubInfo.u.getByIdProtoList.listSize;
+ }
+
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
+ stubInfo.u.getByIdProtoList.listSize++;
+
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine.code());
+ ASSERT(!!lastProtoBegin);
+
+ MacroAssemblerCodeRef stubRoutine;
+
+ generateProtoChainAccessStub(exec, stubInfo, prototypeChain, count, offset, structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone), lastProtoBegin, stubRoutine);
+
+ polymorphicStructureList->list[listIndex].set(*globalData, codeBlock->ownerExecutable(), stubRoutine, structure, true);
+
+ CodeLocationJump jumpLocation = stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck);
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+
+ if (listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ return true;
+ }
+
+ return false;
+}
+
+void dfgBuildGetByIDProtoList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ bool dontChangeCall = tryBuildGetByIDProtoList(exec, baseValue, propertyName, slot, stubInfo);
+ if (!dontChangeCall)
+ dfgRepatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
+}
+
+static V_DFGOperation_EJCI appropriatePutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
+{
+ if (slot.isStrictMode()) {
+ if (putKind == Direct)
+ return operationPutByIdDirectStrict;
+ return operationPutByIdStrict;
+ }
+ if (putKind == Direct)
+ return operationPutByIdDirectNonStrict;
+ return operationPutByIdNonStrict;
+}
+
+static void testPrototype(MacroAssembler &stubJit, GPRReg scratchGPR, JSValue prototype, MacroAssembler::JumpList& failureCases)
+{
+ if (prototype.isNull())
+ return;
+
+ ASSERT(prototype.isCell());
+
+ stubJit.move(MacroAssembler::TrustedImmPtr(prototype.asCell()), scratchGPR);
+ failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(scratchGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(prototype.asCell()->structure())));
+}
+
+static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier&, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+{
+ CodeBlock* codeBlock = exec->codeBlock();
+ JSGlobalData* globalData = &exec->globalData();
+
+ if (!baseValue.isCell())
+ return false;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+ Structure* oldStructure = structure->previousID();
+
+ if (!slot.isCacheable())
+ return false;
+ if (structure->isUncacheableDictionary())
+ return false;
+
+ // Optimize self access.
+ if (slot.base() == baseValue) {
+ if (slot.type() == PutPropertySlot::NewProperty) {
+ if (structure->isDictionary())
+ return false;
+
+ // skip optimizing the case where we need a realloc
+ if (oldStructure->propertyStorageCapacity() != structure->propertyStorageCapacity())
+ return false;
+
+ normalizePrototypeChain(exec, baseCell);
+
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+#endif
+ GPRReg valueGPR = static_cast<GPRReg>(stubInfo.valueGPR);
+ GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
+ bool needToRestoreScratch = false;
+
+ ASSERT(scratchGPR != baseGPR);
+
+ MacroAssembler stubJit;
+
+ MacroAssembler::JumpList failureCases;
+
+ if (scratchGPR == InvalidGPRReg) {
+ scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR);
+ stubJit.push(scratchGPR);
+ needToRestoreScratch = true;
+ }
+
+ failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(oldStructure)));
+
+ testPrototype(stubJit, scratchGPR, oldStructure->storedPrototype(), failureCases);
+
+ if (putKind == NotDirect) {
+ for (WriteBarrier<Structure>* it = prototypeChain->head(); *it; ++it)
+ testPrototype(stubJit, scratchGPR, (*it)->storedPrototype(), failureCases);
+ }
+
+#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ // Must always emit this write barrier as the structure transition itself requires it
+ GPRReg scratch2 = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR, scratchGPR);
+ stubJit.push(scratch2);
+ SpeculativeJIT::writeBarrier(stubJit, baseGPR, scratchGPR, scratch2, WriteBarrierForPropertyAccess);
+ stubJit.pop(scratch2);
+#endif
+
+ stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
+#if USE(JSVALUE64)
+ if (structure->isUsingInlineStorage())
+ stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue)));
+ else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
+ stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue)));
+ }
+#elif USE(JSVALUE32_64)
+ if (structure->isUsingInlineStorage()) {
+ stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ } else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
+ stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ }
+#endif
+
+ MacroAssembler::Jump success;
+ MacroAssembler::Jump failure;
+
+ if (needToRestoreScratch) {
+ stubJit.pop(scratchGPR);
+ success = stubJit.jump();
+
+ failureCases.link(&stubJit);
+ stubJit.pop(scratchGPR);
+ failure = stubJit.jump();
+ } else
+ success = stubJit.jump();
+
+ LinkBuffer patchBuffer(*globalData, &stubJit);
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone));
+ if (needToRestoreScratch)
+ patchBuffer.link(failure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
+ else
+ patchBuffer.link(failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
+
+ stubInfo.stubRoutine = patchBuffer.finalizeCode();
+
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code()));
+ repatchBuffer.relink(stubInfo.callReturnLocation, appropriatePutByIdFunction(slot, putKind));
+
+ stubInfo.initPutByIdTransition(*globalData, codeBlock->ownerExecutable(), oldStructure, structure, prototypeChain, putKind == Direct);
+
+ return true;
+ }
+
+ dfgRepatchByIdSelfAccess(codeBlock, stubInfo, structure, slot.cachedOffset(), appropriatePutByIdFunction(slot, putKind), false);
+ stubInfo.initPutByIdReplace(*globalData, codeBlock->ownerExecutable(), structure);
+ return true;
+ }
+
+ // FIXME: should support the transition case!
+ return false;
+}
+
+void dfgRepatchPutByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+{
+ bool cached = tryCachePutByID(exec, baseValue, propertyName, slot, stubInfo, putKind);
+ if (!cached)
+ dfgRepatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriatePutByIdFunction(slot, putKind));
+}
+
+void dfgLinkFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, JSFunction* callee, MacroAssemblerCodePtr codePtr, CodeSpecializationKind kind)
+{
+ CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
+
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
+ ASSERT(!callLinkInfo.isLinked());
+ callLinkInfo.callee.set(exec->callerFrame()->globalData(), callLinkInfo.hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
+ callLinkInfo.lastSeenCallee.set(exec->callerFrame()->globalData(), callerCodeBlock->ownerExecutable(), callee);
+ repatchBuffer.relink(callLinkInfo.hotPathOther, codePtr);
+
+ if (calleeCodeBlock)
+ calleeCodeBlock->linkIncomingCall(&callLinkInfo);
+
+ if (kind == CodeForCall) {
+ repatchBuffer.relink(CodeLocationCall(callLinkInfo.callReturnLocation), operationVirtualCall);
+ return;
+ }
+ ASSERT(kind == CodeForConstruct);
+ repatchBuffer.relink(CodeLocationCall(callLinkInfo.callReturnLocation), operationVirtualConstruct);
+}
+
+void dfgResetGetByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+{
+ repatchBuffer.relink(stubInfo.callReturnLocation, operationGetByIdOptimize);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(uintptr_t)stubInfo.deltaCheckImmToCall), reinterpret_cast<void*>(-1));
+#if USE(JSVALUE64)
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.deltaCallToLoadOrStore), 0);
+#else
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.deltaCallToTagLoadOrStore), 0);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.deltaCallToPayloadLoadOrStore), 0);
+#endif
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
+}
+
+void dfgResetPutByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+{
+ V_DFGOperation_EJCI unoptimizedFunction = bitwise_cast<V_DFGOperation_EJCI>(MacroAssembler::readCallTarget(stubInfo.callReturnLocation).executableAddress());
+ V_DFGOperation_EJCI optimizedFunction;
+ if (unoptimizedFunction == operationPutByIdStrict)
+ optimizedFunction = operationPutByIdStrictOptimize;
+ else if (unoptimizedFunction == operationPutByIdNonStrict)
+ optimizedFunction = operationPutByIdNonStrictOptimize;
+ else if (unoptimizedFunction == operationPutByIdDirectStrict)
+ optimizedFunction = operationPutByIdDirectStrictOptimize;
+ else {
+ ASSERT(unoptimizedFunction == operationPutByIdDirectNonStrict);
+ optimizedFunction = operationPutByIdDirectNonStrictOptimize;
+ }
+ repatchBuffer.relink(stubInfo.callReturnLocation, optimizedFunction);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(uintptr_t)stubInfo.deltaCheckImmToCall), reinterpret_cast<void*>(-1));
+#if USE(JSVALUE64)
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.deltaCallToLoadOrStore), 0);
+#else
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.deltaCallToTagLoadOrStore), 0);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.deltaCallToPayloadLoadOrStore), 0);
+#endif
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
+}
+
+} } // namespace JSC::DFG
+
+#endif
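
tryCacheGetByID and dfgRepatchByIdSelfAccess above implement a classic monomorphic inline cache: the emitted stub compares the object's Structure against a patched immediate and, on a hit, loads from a patched offset; a miss falls back to the slow path, which fills the cache. The following is a rough standalone analogy in plain C++, not WebKit code — Structure, Object, and GetByIdStub are invented here, and the "patchable" pieces are ordinary fields rather than patched machine code.

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    struct Structure {
        std::unordered_map<std::string, size_t> offsets; // property name -> slot index
    };

    struct Object {
        const Structure* structure;
        double slots[8];
    };

    struct GetByIdStub {
        const Structure* expectedStructure = nullptr; // stands in for the patched TrustedImmPtr
        size_t cachedOffset = 0;                      // stands in for the patched load offset

        double get(const Object& o, const std::string& name)
        {
            // Fast path: structure check, then a direct load at the cached offset.
            if (o.structure == expectedStructure)
                return o.slots[cachedOffset];

            // Slow path: generic lookup, then "repatch" the stub so the next access
            // with the same structure takes the fast path.
            std::unordered_map<std::string, size_t>::const_iterator it = o.structure->offsets.find(name);
            if (it == o.structure->offsets.end())
                return 0; // pretend "undefined" for the sake of the sketch
            expectedStructure = o.structure;
            cachedOffset = it->second;
            return o.slots[cachedOffset];
        }
    };

    int main()
    {
        Structure s;
        s.offsets["x"] = 0;
        s.offsets["y"] = 1;
        Object o = { &s, { 3.0, 4.0 } };

        GetByIdStub stub;
        printf("%g (slow path, cache filled)\n", stub.get(o, "x"));
        printf("%g (fast path, structure check hit)\n", stub.get(o, "x"));
        return 0;
    }
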
diff --git a/Source/JavaScriptCore/dfg/DFGRepatch.h b/Source/JavaScriptCore/dfg/DFGRepatch.h
new file mode 100644
index 000000000..f146128fb
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGRepatch.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGRepatch_h
+#define DFGRepatch_h
+
+#if ENABLE(DFG_JIT)
+
+#include <dfg/DFGJITCompiler.h>
+#include <dfg/DFGOperations.h>
+
+namespace JSC { namespace DFG {
+
+void dfgRepatchGetByID(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
+void dfgBuildGetByIDList(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
+void dfgBuildGetByIDProtoList(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
+void dfgRepatchPutByID(ExecState*, JSValue, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind);
+void dfgLinkFor(ExecState*, CallLinkInfo&, CodeBlock*, JSFunction* callee, MacroAssemblerCodePtr, CodeSpecializationKind);
+void dfgResetGetByID(RepatchBuffer&, StructureStubInfo&);
+void dfgResetPutByID(RepatchBuffer&, StructureStubInfo&);
+
+} } // namespace JSC::DFG
+
+#else // ENABLE(DFG_JIT)
+
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+class RepatchBuffer;
+struct StructureStubInfo;
+
+namespace DFG {
+
+inline NO_RETURN_DUE_TO_ASSERT void dfgResetGetByID(RepatchBuffer&, StructureStubInfo&) { ASSERT_NOT_REACHED(); }
+inline NO_RETURN_DUE_TO_ASSERT void dfgResetPutByID(RepatchBuffer&, StructureStubInfo&) { ASSERT_NOT_REACHED(); }
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+#endif // DFGRepatch_h
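
A note on the #else branch above: when the DFG is compiled out, dfgResetGetByID and dfgResetPutByID keep their signatures but become assert-only stubs, so callers do not need their own feature guards. A tiny sketch of the same pattern, with FEATURE_FANCY_CACHE and resetFancyCache invented for the example:

    #include <cassert>
    #include <cstdio>

    // Flip to 1 to get the real implementation; leave at 0 for the assert-only stub.
    #define FEATURE_FANCY_CACHE 0

    #if FEATURE_FANCY_CACHE
    inline void resetFancyCache(int* cache) { *cache = 0; }
    #else
    inline void resetFancyCache(int*) { assert(0 && "resetFancyCache should never be reached"); }
    #endif

    int main()
    {
        int cache = 42;
    #if FEATURE_FANCY_CACHE
        resetFancyCache(&cache); // only called when the feature is compiled in
    #endif
        printf("cache = %d\n", cache);
        return 0;
    }
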
diff --git a/Source/JavaScriptCore/dfg/DFGScoreBoard.h b/Source/JavaScriptCore/dfg/DFGScoreBoard.h
new file mode 100644
index 000000000..7f9211a26
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGScoreBoard.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGScoreBoard_h
+#define DFGScoreBoard_h
+
+#if ENABLE(DFG_JIT)
+
+#include <dfg/DFGGraph.h>
+#include <wtf/BitVector.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace DFG {
+
+// === ScoreBoard ===
+//
+// This class is used to perform virtual register allocation over the graph.
+// VirtualRegisters are allocated to nodes, with a use count for each virtual
+// register tracking the lifespan of the value; after the final use of a node,
+// the associated VirtualRegister is freed so that it can be reused for
+// another node.
+class ScoreBoard {
+public:
+ ScoreBoard(Graph& graph, const BitVector& usedVars)
+ : m_graph(graph)
+ , m_highWatermark(0)
+ {
+ m_used.fill(0, usedVars.size());
+ m_free.reserveCapacity(usedVars.size());
+ for (size_t i = usedVars.size(); i-- > 0;) {
+ if (usedVars.get(i)) {
+ m_used[i] = max(); // This is mostly for debugging and sanity.
+ m_highWatermark = std::max(m_highWatermark, static_cast<unsigned>(i) + 1);
+ } else
+ m_free.append(i);
+ }
+ }
+
+#if DFG_ENABLE(CONSISTENCY_CHECK)
+ ~ScoreBoard()
+ {
+ // For every entry in the used list the use count of the virtual register should be zero.
+ for (size_t i = 0; i < m_free.size(); ++i)
+ ASSERT(!m_used[i] || m_used[i] == max());
+ }
+#endif
+
+ VirtualRegister allocate()
+ {
+        // Do we have any VirtualRegisters in the free list that were used by
+        // prior nodes, but are now available?
+ if (!m_free.isEmpty()) {
+ uint32_t index = m_free.last();
+ m_free.removeLast();
+ // Use count must have hit zero for it to have been added to the free list!
+ ASSERT(!m_used[index]);
+ m_highWatermark = std::max(m_highWatermark, static_cast<unsigned>(index) + 1);
+ return (VirtualRegister)index;
+ }
+
+ // Allocate a new VirtualRegister, and add a corresponding entry to m_used.
+ size_t next = m_used.size();
+ m_used.append(0);
+ m_highWatermark = std::max(m_highWatermark, static_cast<unsigned>(next) + 1);
+ return (VirtualRegister)next;
+ }
+
+    // Increment the use count for the VirtualRegister associated with 'child';
+    // if it reaches the node's refCount, free the VirtualRegister.
+ void use(NodeIndex child)
+ {
+ if (child == NoNode)
+ return;
+
+ // Find the virtual register number for this child, increment its use count.
+ Node& node = m_graph[child];
+ uint32_t index = node.virtualRegister();
+ ASSERT(m_used[index] != max());
+ if (node.refCount() == ++m_used[index]) {
+ // If the use count in the scoreboard reaches the use count for the node,
+ // then this was its last use; the virtual register is now free.
+ // Clear the use count & add to the free list.
+ m_used[index] = 0;
+ m_free.append(index);
+ }
+ }
+
+ unsigned highWatermark()
+ {
+ return m_highWatermark;
+ }
+
+#ifndef NDEBUG
+ void dump()
+ {
+ printf(" USED: [ ");
+ for (unsigned i = 0; i < m_used.size(); ++i) {
+ if (!m_free.contains(i)) {
+ printf("%d:", i);
+ if (m_used[i] == max())
+ printf("local ");
+ else
+ printf("%d ", m_used[i]);
+ }
+ }
+ printf("]\n");
+
+ printf(" FREE: [ ");
+ for (unsigned i = 0; i < m_used.size(); ++i) {
+ if (m_free.contains(i) && m_used[i] != max()) {
+ ASSERT(!m_used[i]);
+ printf("%d ", i);
+ }
+ }
+ printf("]\n");
+ }
+
+#endif
+
+private:
+ static uint32_t max() { return std::numeric_limits<uint32_t>::max(); }
+
+ // The graph, so we can get refCounts for nodes, to determine when values are dead.
+ Graph& m_graph;
+
+ // The size of the span of virtual registers that this code block will use.
+ unsigned m_highWatermark;
+
+ // For every virtual register that has been allocated (either currently alive, or in
+ // the free list), we keep a count of the number of remaining uses until it is dead
+ // (0, in the case of entries in the free list). Since there is an entry for every
+ // allocated VirtualRegister, the length of this array conveniently provides the
+ // next available VirtualRegister number.
+ Vector<uint32_t, 64> m_used;
+    // A free list of VirtualRegisters no longer alive.
+ Vector<uint32_t, 64> m_free;
+};
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
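
The ScoreBoard above drives virtual register reuse by counting uses of each value until its refCount is reached, at which point the register returns to a free list. Below is a compact standalone sketch of the same idea; MiniScoreBoard and its members are invented names, and it counts remaining uses down instead of counting up to refCount, which is equivalent.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct MiniScoreBoard {
        std::vector<uint32_t> remainingUses; // one entry per virtual register
        std::vector<uint32_t> freeList;      // registers whose value is dead
        unsigned highWatermark;
        MiniScoreBoard() : highWatermark(0) { }

        unsigned allocate(uint32_t refCount)
        {
            unsigned vr;
            if (!freeList.empty()) {
                vr = freeList.back();       // prefer recycling a dead register
                freeList.pop_back();
            } else {
                vr = static_cast<unsigned>(remainingUses.size());
                remainingUses.push_back(0); // grow the register span
            }
            remainingUses[vr] = refCount;
            highWatermark = std::max(highWatermark, vr + 1);
            return vr;
        }

        void use(unsigned vr)
        {
            assert(remainingUses[vr]);
            if (!--remainingUses[vr])
                freeList.push_back(vr); // last use: recycle this register
        }
    };

    int main()
    {
        MiniScoreBoard board;
        unsigned a = board.allocate(2); // 'a' has two uses
        unsigned b = board.allocate(1); // 'b' has one use
        board.use(b);                   // b dies; its register goes on the free list
        unsigned c = board.allocate(1); // reuses b's register
        board.use(a);
        board.use(a);                   // a dies
        board.use(c);
        printf("a=vr%u b=vr%u c=vr%u highWatermark=%u\n", a, b, c, board.highWatermark);
        return 0;
    }
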
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
new file mode 100644
index 000000000..939fef669
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -0,0 +1,2484 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGSpeculativeJIT.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "JSByteArray.h"
+#include "LinkBuffer.h"
+
+namespace JSC { namespace DFG {
+
+// On Windows we need to wrap fmod; on other platforms we can call it directly.
+// On ARMv7 we assert that all function pointers have the low bit set (point to Thumb code).
+#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
+static double DFG_OPERATION fmodAsDFGOperation(double x, double y)
+{
+ return fmod(x, y);
+}
+#else
+#define fmodAsDFGOperation fmod
+#endif
+
+void SpeculativeJIT::clearGenerationInfo()
+{
+ for (unsigned i = 0; i < m_generationInfo.size(); ++i)
+ m_generationInfo[i] = GenerationInfo();
+ m_gprs = RegisterBank<GPRInfo>();
+ m_fprs = RegisterBank<FPRInfo>();
+}
+
+GPRReg SpeculativeJIT::fillStorage(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+ GPRReg gpr = allocate();
+ ASSERT(info.spillFormat() == DataFormatStorage);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillStorage(gpr);
+ return gpr;
+ }
+
+ case DataFormatStorage: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ return gpr;
+ }
+
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ return InvalidGPRReg;
+}
+
+void SpeculativeJIT::useChildren(Node& node)
+{
+ if (node.op & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
+ use(m_jit.graph().m_varArgChildren[childIdx]);
+ } else {
+ NodeIndex child1 = node.child1();
+ if (child1 == NoNode) {
+ ASSERT(node.child2() == NoNode && node.child3() == NoNode);
+ return;
+ }
+ use(child1);
+
+ NodeIndex child2 = node.child2();
+ if (child2 == NoNode) {
+ ASSERT(node.child3() == NoNode);
+ return;
+ }
+ use(child2);
+
+ NodeIndex child3 = node.child3();
+ if (child3 == NoNode)
+ return;
+ use(child3);
+ }
+}
+
+bool SpeculativeJIT::isStrictInt32(NodeIndex nodeIndex)
+{
+ if (isInt32Constant(nodeIndex))
+ return true;
+
+ Node& node = m_jit.graph()[nodeIndex];
+ GenerationInfo& info = m_generationInfo[node.virtualRegister()];
+
+ return info.registerFormat() == DataFormatInteger;
+}
+
+bool SpeculativeJIT::isKnownInteger(NodeIndex nodeIndex)
+{
+ if (isInt32Constant(nodeIndex))
+ return true;
+
+ Node& node = m_jit.graph()[nodeIndex];
+
+ if (node.hasInt32Result())
+ return true;
+
+ GenerationInfo& info = m_generationInfo[node.virtualRegister()];
+
+ return info.isJSInteger();
+}
+
+bool SpeculativeJIT::isKnownNumeric(NodeIndex nodeIndex)
+{
+ if (isInt32Constant(nodeIndex) || isNumberConstant(nodeIndex))
+ return true;
+
+ Node& node = m_jit.graph()[nodeIndex];
+
+ if (node.hasNumberResult())
+ return true;
+
+ GenerationInfo& info = m_generationInfo[node.virtualRegister()];
+
+ return info.isJSInteger() || info.isJSDouble();
+}
+
+bool SpeculativeJIT::isKnownCell(NodeIndex nodeIndex)
+{
+ return m_generationInfo[m_jit.graph()[nodeIndex].virtualRegister()].isJSCell();
+}
+
+bool SpeculativeJIT::isKnownNotCell(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (node.hasConstant() && !valueOfJSConstant(nodeIndex).isCell())
+ return true;
+ return !(info.isJSCell() || info.isUnknownJS());
+}
+
+bool SpeculativeJIT::isKnownNotInteger(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ return info.isJSDouble() || info.isJSCell() || info.isJSBoolean()
+ || (node.hasConstant() && !valueOfJSConstant(nodeIndex).isInt32());
+}
+
+bool SpeculativeJIT::isKnownNotNumber(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ return (!info.isJSDouble() && !info.isJSInteger() && !info.isUnknownJS())
+ || (node.hasConstant() && !isNumberConstant(nodeIndex));
+}
+
+bool SpeculativeJIT::isKnownBoolean(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ if (node.hasBooleanResult())
+ return true;
+
+ if (isBooleanConstant(nodeIndex))
+ return true;
+
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ return info.isJSBoolean();
+}
+
+bool SpeculativeJIT::isKnownNotBoolean(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (node.hasConstant() && !valueOfJSConstant(nodeIndex).isBoolean())
+ return true;
+ return !(info.isJSBoolean() || info.isUnknownJS());
+}
+
+void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
+{
+ UNUSED_PARAM(jit);
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+ ASSERT(owner != scratch1);
+ ASSERT(owner != scratch2);
+ ASSERT(scratch1 != scratch2);
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+ markCellCard(jit, owner, scratch1, scratch2);
+}
+
+void SpeculativeJIT::markCellCard(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2)
+{
+ UNUSED_PARAM(jit);
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+
+#if ENABLE(GGC)
+ jit.move(owner, scratch1);
+ jit.andPtr(TrustedImm32(static_cast<int32_t>(MarkedBlock::blockMask)), scratch1);
+ jit.move(owner, scratch2);
+    // Consume an additional 8 bits of the address, as we're using an approximate filter.
+ jit.rshift32(TrustedImm32(MarkedBlock::atomShift + 8), scratch2);
+ jit.andPtr(TrustedImm32(MarkedBlock::atomMask >> 8), scratch2);
+ MacroAssembler::Jump filter = jit.branchTest8(MacroAssembler::Zero, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::TimesOne, MarkedBlock::offsetOfMarks()));
+ jit.move(owner, scratch2);
+ jit.rshift32(TrustedImm32(MarkedBlock::cardShift), scratch2);
+ jit.andPtr(TrustedImm32(MarkedBlock::cardMask), scratch2);
+ jit.store8(TrustedImm32(1), MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::TimesOne, MarkedBlock::offsetOfCards()));
+ filter.link(&jit);
+#endif
+}
+
+void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
+{
+ UNUSED_PARAM(ownerGPR);
+ UNUSED_PARAM(valueGPR);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+
+ if (isKnownNotCell(valueIndex))
+ return;
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+
+#if ENABLE(GGC)
+ GPRTemporary temp1;
+ GPRTemporary temp2;
+ if (scratch1 == InvalidGPRReg) {
+ GPRTemporary scratchGPR(this);
+ temp1.adopt(scratchGPR);
+ scratch1 = temp1.gpr();
+ }
+ if (scratch2 == InvalidGPRReg) {
+ GPRTemporary scratchGPR(this);
+ temp2.adopt(scratchGPR);
+ scratch2 = temp2.gpr();
+ }
+
+ JITCompiler::Jump rhsNotCell;
+ bool hadCellCheck = false;
+ if (!isKnownCell(valueIndex) && !isCellPrediction(m_jit.getPrediction(valueIndex))) {
+ hadCellCheck = true;
+ rhsNotCell = m_jit.branchIfNotCell(valueGPR);
+ }
+
+ markCellCard(m_jit, ownerGPR, scratch1, scratch2);
+
+ if (hadCellCheck)
+ rhsNotCell.link(&m_jit);
+#endif
+}
+
+void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
+{
+ UNUSED_PARAM(ownerGPR);
+ UNUSED_PARAM(value);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+
+ if (Heap::isMarked(value))
+ return;
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+
+#if ENABLE(GGC)
+ GPRTemporary temp1;
+ GPRTemporary temp2;
+ if (scratch1 == InvalidGPRReg) {
+ GPRTemporary scratchGPR(this);
+ temp1.adopt(scratchGPR);
+ scratch1 = temp1.gpr();
+ }
+ if (scratch2 == InvalidGPRReg) {
+ GPRTemporary scratchGPR(this);
+ temp2.adopt(scratchGPR);
+ scratch2 = temp2.gpr();
+ }
+
+ markCellCard(m_jit, ownerGPR, scratch1, scratch2);
+#endif
+}
+
+void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind useKind, GPRReg scratch)
+{
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(valueGPR);
+ UNUSED_PARAM(scratch);
+ UNUSED_PARAM(useKind);
+
+ if (isKnownNotCell(valueIndex))
+ return;
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+
+#if ENABLE(GGC)
+ JITCompiler::Jump rhsNotCell;
+ bool hadCellCheck = false;
+ if (!isKnownCell(valueIndex) && !isCellPrediction(m_jit.getPrediction(valueIndex))) {
+ hadCellCheck = true;
+ rhsNotCell = m_jit.branchIfNotCell(valueGPR);
+ }
+
+ GPRTemporary temp;
+ if (scratch == InvalidGPRReg) {
+ GPRTemporary scratchGPR(this);
+ temp.adopt(scratchGPR);
+ scratch = temp.gpr();
+ }
+
+ uint8_t* cardAddress = Heap::addressOfCardFor(owner);
+ m_jit.move(JITCompiler::TrustedImmPtr(cardAddress), scratch);
+ m_jit.store8(JITCompiler::TrustedImm32(1), JITCompiler::Address(scratch));
+
+ if (hadCellCheck)
+ rhsNotCell.link(&m_jit);
+#endif
+}
+
+bool SpeculativeJIT::nonSpeculativeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
+{
+ NodeIndex branchNodeIndex = detectPeepHoleBranch();
+ if (branchNodeIndex != NoNode) {
+ ASSERT(node.adjustedRefCount() == 1);
+
+ nonSpeculativePeepholeBranch(node, branchNodeIndex, cond, helperFunction);
+
+ m_compileIndex = branchNodeIndex;
+
+ return true;
+ }
+
+ nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
+
+ return false;
+}
+
+bool SpeculativeJIT::nonSpeculativeStrictEq(Node& node, bool invert)
+{
+ if (!invert && (isKnownNumeric(node.child1()) || isKnownNumeric(node.child2())))
+ return nonSpeculativeCompare(node, MacroAssembler::Equal, operationCompareStrictEq);
+
+ NodeIndex branchNodeIndex = detectPeepHoleBranch();
+ if (branchNodeIndex != NoNode) {
+ ASSERT(node.adjustedRefCount() == 1);
+
+ nonSpeculativePeepholeStrictEq(node, branchNodeIndex, invert);
+
+ m_compileIndex = branchNodeIndex;
+
+ return true;
+ }
+
+ nonSpeculativeNonPeepholeStrictEq(node, invert);
+
+ return false;
+}
+
+#ifndef NDEBUG
+static const char* dataFormatString(DataFormat format)
+{
+ // These values correspond to the DataFormat enum.
+ const char* strings[] = {
+ "[ ]",
+ "[ i]",
+ "[ d]",
+ "[ c]",
+ "Err!",
+ "Err!",
+ "Err!",
+ "Err!",
+ "[J ]",
+ "[Ji]",
+ "[Jd]",
+ "[Jc]",
+ "Err!",
+ "Err!",
+ "Err!",
+ "Err!",
+ };
+ return strings[format];
+}
+
+void SpeculativeJIT::dump(const char* label)
+{
+ if (label)
+ fprintf(stderr, "<%s>\n", label);
+
+ fprintf(stderr, " gprs:\n");
+ m_gprs.dump();
+ fprintf(stderr, " fprs:\n");
+ m_fprs.dump();
+ fprintf(stderr, " VirtualRegisters:\n");
+ for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
+ GenerationInfo& info = m_generationInfo[i];
+ if (info.alive())
+ fprintf(stderr, " % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
+ else
+ fprintf(stderr, " % 3d:[__][__]", i);
+ if (info.registerFormat() == DataFormatDouble)
+ fprintf(stderr, ":fpr%d\n", info.fpr());
+ else if (info.registerFormat() != DataFormatNone
+#if USE(JSVALUE32_64)
+ && !(info.registerFormat() & DataFormatJS)
+#endif
+ ) {
+ ASSERT(info.gpr() != InvalidGPRReg);
+ fprintf(stderr, ":%s\n", GPRInfo::debugName(info.gpr()));
+ } else
+ fprintf(stderr, "\n");
+ }
+ if (label)
+ fprintf(stderr, "</%s>\n", label);
+}
+#endif
+
+
+#if DFG_ENABLE(CONSISTENCY_CHECK)
+void SpeculativeJIT::checkConsistency()
+{
+ bool failed = false;
+
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ if (iter.isLocked()) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
+ failed = true;
+ }
+ }
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ if (iter.isLocked()) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
+ failed = true;
+ }
+ }
+
+ for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
+ VirtualRegister virtualRegister = (VirtualRegister)i;
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (!info.alive())
+ continue;
+ switch (info.registerFormat()) {
+ case DataFormatNone:
+ break;
+ case DataFormatJS:
+ case DataFormatJSInteger:
+ case DataFormatJSDouble:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean:
+#if USE(JSVALUE32_64)
+ break;
+#endif
+ case DataFormatInteger:
+ case DataFormatCell:
+ case DataFormatBoolean:
+ case DataFormatStorage: {
+ GPRReg gpr = info.gpr();
+ ASSERT(gpr != InvalidGPRReg);
+ if (m_gprs.name(gpr) != virtualRegister) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
+ failed = true;
+ }
+ break;
+ }
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ ASSERT(fpr != InvalidFPRReg);
+ if (m_fprs.name(fpr) != virtualRegister) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
+ failed = true;
+ }
+ break;
+ }
+ }
+ }
+
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ VirtualRegister virtualRegister = iter.name();
+ if (virtualRegister == InvalidVirtualRegister)
+ continue;
+
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+#if USE(JSVALUE64)
+ if (iter.regID() != info.gpr()) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+#else
+ if (!(info.registerFormat() & DataFormatJS)) {
+ if (iter.regID() != info.gpr()) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+ } else {
+ if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+ }
+#endif
+ }
+
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ VirtualRegister virtualRegister = iter.name();
+ if (virtualRegister == InvalidVirtualRegister)
+ continue;
+
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (iter.regID() != info.fpr()) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+ }
+
+ if (failed) {
+ dump();
+ CRASH();
+ }
+}
+#endif
+
+GPRTemporary::GPRTemporary()
+ : m_jit(0)
+ , m_gpr(InvalidGPRReg)
+{
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ m_gpr = m_jit->allocate(specific);
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1, SpeculateIntegerOperand& op2)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else if (m_jit->canReuse(op2.index()))
+ m_gpr = m_jit->reuse(op2.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateStrictInt32Operand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1, IntegerOperand& op2)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else if (m_jit->canReuse(op2.index()))
+ m_gpr = m_jit->reuse(op2.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateCellOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateBooleanOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+#if USE(JSVALUE64)
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+#else
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1, bool tag)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (!op1.isDouble() && m_jit->canReuse(op1.index()))
+ m_gpr = m_jit->reuse(tag ? op1.tagGPR() : op1.payloadGPR());
+ else
+ m_gpr = m_jit->allocate();
+}
+#endif
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, StorageOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+void GPRTemporary::adopt(GPRTemporary& other)
+{
+ ASSERT(!m_jit);
+ ASSERT(m_gpr == InvalidGPRReg);
+ ASSERT(other.m_jit);
+ ASSERT(other.m_gpr != InvalidGPRReg);
+ m_jit = other.m_jit;
+ m_gpr = other.m_gpr;
+ other.m_jit = 0;
+ other.m_gpr = InvalidGPRReg;
+}
+
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ m_fpr = m_jit->fprAllocate();
+}
+
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit, DoubleOperand& op1)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_fpr = m_jit->reuse(op1.fpr());
+ else
+ m_fpr = m_jit->fprAllocate();
+}
+
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit, DoubleOperand& op1, DoubleOperand& op2)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_fpr = m_jit->reuse(op1.fpr());
+ else if (m_jit->canReuse(op2.index()))
+ m_fpr = m_jit->reuse(op2.fpr());
+ else
+ m_fpr = m_jit->fprAllocate();
+}
+
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_fpr = m_jit->reuse(op1.fpr());
+ else
+ m_fpr = m_jit->fprAllocate();
+}
+
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_fpr = m_jit->reuse(op1.fpr());
+ else if (m_jit->canReuse(op2.index()))
+ m_fpr = m_jit->reuse(op2.fpr());
+ else
+ m_fpr = m_jit->fprAllocate();
+}
+
+#if USE(JSVALUE32_64)
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ if (op1.isDouble() && m_jit->canReuse(op1.index()))
+ m_fpr = m_jit->reuse(op1.fpr());
+ else
+ m_fpr = m_jit->fprAllocate();
+}
+#endif
+
+#ifndef NDEBUG
+void ValueSource::dump(FILE* out) const
+{
+ switch (kind()) {
+ case SourceNotSet:
+ fprintf(out, "NotSet");
+ break;
+ case ValueInRegisterFile:
+ fprintf(out, "InRegFile");
+ break;
+ case Int32InRegisterFile:
+ fprintf(out, "Int32");
+ break;
+ case CellInRegisterFile:
+ fprintf(out, "Cell");
+ break;
+ case BooleanInRegisterFile:
+ fprintf(out, "Bool");
+ break;
+ case DoubleInRegisterFile:
+ fprintf(out, "Double");
+ break;
+ case HaveNode:
+ fprintf(out, "Node(%d)", m_nodeIndex);
+ break;
+ }
+}
+#endif
+
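+// Emit a fused double compare-and-branch: branch straight to the taken block,
+// and only emit a jump to the not-taken block if it is not the next block.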
+void SpeculativeJIT::compilePeepHoleDoubleBranch(Node& node, NodeIndex branchNodeIndex, JITCompiler::DoubleCondition condition)
+{
+ Node& branchNode = at(branchNodeIndex);
+ BlockIndex taken = branchNode.takenBlockIndex();
+ BlockIndex notTaken = branchNode.notTakenBlockIndex();
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+
+ addBranch(m_jit.branchDouble(condition, op1.fpr(), op2.fpr()), taken);
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
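+// Fused object-equality compare-and-branch. Both operands are speculated to be
+// cells of the expected class (a classInfo check is emitted unless the abstract
+// state already proves the prediction), and then compared by pointer.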
+void SpeculativeJIT::compilePeepHoleObjectEquality(Node& node, NodeIndex branchNodeIndex, const ClassInfo* classInfo, PredictionChecker predictionCheck)
+{
+ Node& branchNode = at(branchNodeIndex);
+ BlockIndex taken = branchNode.takenBlockIndex();
+ BlockIndex notTaken = branchNode.notTakenBlockIndex();
+
+ MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
+
+ if (taken == (m_block + 1)) {
+ condition = MacroAssembler::NotEqual;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ SpeculateCellOperand op1(this, node.child1());
+ SpeculateCellOperand op2(this, node.child2());
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+
+ if (!predictionCheck(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
+ if (!predictionCheck(m_state.forNode(node.child2()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node.child2(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op2GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
+
+ addBranch(m_jit.branchPtr(condition, op1GPR, op2GPR), taken);
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
+void SpeculativeJIT::compilePeepHoleIntegerBranch(Node& node, NodeIndex branchNodeIndex, JITCompiler::RelationalCondition condition)
+{
+ Node& branchNode = at(branchNodeIndex);
+ BlockIndex taken = branchNode.takenBlockIndex();
+ BlockIndex notTaken = branchNode.notTakenBlockIndex();
+
+ // The branch instruction will branch to the taken block.
+ // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+ if (taken == (m_block + 1)) {
+ condition = JITCompiler::invert(condition);
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ if (isInt32Constant(node.child1())) {
+ int32_t imm = valueOfInt32Constant(node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ addBranch(m_jit.branch32(condition, JITCompiler::Imm32(imm), op2.gpr()), taken);
+ } else if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ int32_t imm = valueOfInt32Constant(node.child2());
+ addBranch(m_jit.branch32(condition, op1.gpr(), JITCompiler::Imm32(imm)), taken);
+ } else {
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ addBranch(m_jit.branch32(condition, op1.gpr(), op2.gpr()), taken);
+ }
+
+ // Check for fall through, otherwise we need to jump.
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
+// Returns true if the compare is fused with a subsequent branch.
+bool SpeculativeJIT::compilePeepHoleBranch(Node& node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
+{
+ // Fused compare & branch.
+ NodeIndex branchNodeIndex = detectPeepHoleBranch();
+ if (branchNodeIndex != NoNode) {
+ // detectPeepHoleBranch currently only permits the branch to be the very next node,
+        // so there can be no intervening nodes that also reference the compare.
+ ASSERT(node.adjustedRefCount() == 1);
+
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2()))) {
+ compilePeepHoleIntegerBranch(node, branchNodeIndex, condition);
+ use(node.child1());
+ use(node.child2());
+ } else if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2()))) {
+ compilePeepHoleDoubleBranch(node, branchNodeIndex, doubleCondition);
+ use(node.child1());
+ use(node.child2());
+ } else if (node.op == CompareEq && Node::shouldSpeculateFinalObject(at(node.child1()), at(node.child2()))) {
+ compilePeepHoleObjectEquality(node, branchNodeIndex, &JSFinalObject::s_info, isFinalObjectPrediction);
+ use(node.child1());
+ use(node.child2());
+ } else if (node.op == CompareEq && Node::shouldSpeculateArray(at(node.child1()), at(node.child2()))) {
+ compilePeepHoleObjectEquality(node, branchNodeIndex, &JSArray::s_info, isArrayPrediction);
+ use(node.child1());
+ use(node.child2());
+ } else
+ nonSpeculativePeepholeBranch(node, branchNodeIndex, condition, operation);
+
+ m_compileIndex = branchNodeIndex;
+ return true;
+ }
+ return false;
+}
+
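+// Even when a SetLocal is skipped (it generates no code), we must record which
+// node currently holds the local's value so that OSR exit can recover it.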
+void SpeculativeJIT::compileMovHint(Node& node)
+{
+ ASSERT(node.op == SetLocal);
+
+ setNodeIndexForOperand(node.child1(), node.local());
+ m_lastSetOperand = node.local();
+}
+
+void SpeculativeJIT::compile(BasicBlock& block)
+{
+ ASSERT(m_compileOkay);
+ ASSERT(m_compileIndex == block.begin);
+
+ if (!block.isReachable) {
+ m_compileIndex = block.end;
+ return;
+ }
+
+ m_blockHeads[m_block] = m_jit.label();
+#if DFG_ENABLE(JIT_BREAK_ON_EVERY_BLOCK)
+ m_jit.breakpoint();
+#endif
+
+ ASSERT(m_arguments.size() == block.variablesAtHead.numberOfArguments());
+ for (size_t i = 0; i < m_arguments.size(); ++i) {
+ NodeIndex nodeIndex = block.variablesAtHead.argument(i);
+ if (nodeIndex == NoNode)
+ m_arguments[i] = ValueSource(ValueInRegisterFile);
+ else
+ m_arguments[i] = ValueSource::forPrediction(at(nodeIndex).variableAccessData()->prediction());
+ }
+
+ m_state.reset();
+ m_state.beginBasicBlock(&block);
+
+ ASSERT(m_variables.size() == block.variablesAtHead.numberOfLocals());
+ for (size_t i = 0; i < m_variables.size(); ++i) {
+ NodeIndex nodeIndex = block.variablesAtHead.local(i);
+ if (nodeIndex == NoNode)
+ m_variables[i] = ValueSource(ValueInRegisterFile);
+ else if (at(nodeIndex).variableAccessData()->shouldUseDoubleFormat())
+ m_variables[i] = ValueSource(DoubleInRegisterFile);
+ else
+ m_variables[i] = ValueSource::forPrediction(at(nodeIndex).variableAccessData()->prediction());
+ }
+
+ m_lastSetOperand = std::numeric_limits<int>::max();
+ m_codeOriginForOSR = CodeOrigin();
+
+ for (; m_compileIndex < block.end; ++m_compileIndex) {
+ Node& node = at(m_compileIndex);
+ m_codeOriginForOSR = node.codeOrigin;
+ if (!node.shouldGenerate()) {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex, m_jit.debugOffset());
+#endif
+ switch (node.op) {
+ case SetLocal:
+ compileMovHint(node);
+ break;
+
+ case InlineStart: {
+ InlineCallFrame* inlineCallFrame = node.codeOrigin.inlineCallFrame;
+ int argumentCountIncludingThis = inlineCallFrame->arguments.size();
+ for (int i = 0; i < argumentCountIncludingThis; ++i) {
+ ValueRecovery recovery = computeValueRecoveryFor(m_variables[inlineCallFrame->stackOffset + CallFrame::argumentOffsetIncludingThis(i)]);
+ // The recovery cannot point to registers, since the call frame reification isn't
+ // as smart as OSR, so it can't handle that. The exception is the this argument,
+ // which we don't really need to be able to recover.
+ ASSERT(!i || !recovery.isInRegisters());
+ inlineCallFrame->arguments[i] = recovery;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ } else {
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex, m_jit.debugOffset());
+#endif
+#if DFG_ENABLE(JIT_BREAK_ON_EVERY_NODE)
+ m_jit.breakpoint();
+#endif
+#if DFG_ENABLE(XOR_DEBUG_AID)
+ m_jit.xorPtr(JITCompiler::TrustedImm32(m_compileIndex), GPRInfo::regT0);
+ m_jit.xorPtr(JITCompiler::TrustedImm32(m_compileIndex), GPRInfo::regT0);
+#endif
+ checkConsistency();
+ compile(node);
+ if (!m_compileOkay) {
+ m_compileOkay = true;
+ m_compileIndex = block.end;
+ clearGenerationInfo();
+ return;
+ }
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ if (node.hasResult()) {
+ GenerationInfo& info = m_generationInfo[node.virtualRegister()];
+ fprintf(stderr, "-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)node.virtualRegister());
+ if (info.registerFormat() != DataFormatNone) {
+ if (info.registerFormat() == DataFormatDouble)
+ fprintf(stderr, ", %s", FPRInfo::debugName(info.fpr()));
+#if USE(JSVALUE32_64)
+ else if (info.registerFormat() & DataFormatJS)
+ fprintf(stderr, ", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
+#endif
+ else
+ fprintf(stderr, ", %s", GPRInfo::debugName(info.gpr()));
+ }
+ fprintf(stderr, " ");
+ } else
+ fprintf(stderr, " ");
+#endif
+ }
+
+#if DFG_ENABLE(VERBOSE_VALUE_RECOVERIES)
+ for (size_t i = 0; i < m_arguments.size(); ++i)
+ computeValueRecoveryFor(argumentToOperand(i)).dump(stderr);
+
+ fprintf(stderr, " : ");
+
+ for (int operand = 0; operand < (int)m_variables.size(); ++operand)
+ computeValueRecoveryFor(operand).dump(stderr);
+#endif
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "\n");
+#endif
+
+ // Make sure that the abstract state is rematerialized for the next node.
+ m_state.execute(m_compileIndex);
+
+ if (node.shouldGenerate())
+ checkConsistency();
+ }
+
+ // Perform the most basic verification that children have been used correctly.
+#if !ASSERT_DISABLED
+ for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
+ GenerationInfo& info = m_generationInfo[index];
+ ASSERT(!info.alive());
+ }
+#endif
+}
+
+// If we are making type predictions about our arguments then
+// we need to check that they are correct on function entry.
+void SpeculativeJIT::checkArgumentTypes()
+{
+ ASSERT(!m_compileIndex);
+ m_codeOriginForOSR = CodeOrigin(0);
+
+ for (size_t i = 0; i < m_arguments.size(); ++i)
+ m_arguments[i] = ValueSource(ValueInRegisterFile);
+ for (size_t i = 0; i < m_variables.size(); ++i)
+ m_variables[i] = ValueSource(ValueInRegisterFile);
+
+ for (int i = 0; i < m_jit.codeBlock()->m_numParameters; ++i) {
+ VariableAccessData* variableAccessData = at(m_jit.graph().m_arguments[i]).variableAccessData();
+ VirtualRegister virtualRegister = variableAccessData->local();
+ PredictedType predictedType = variableAccessData->prediction();
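+        // On 64-bit (JSVALUE64) the check is performed on the boxed value loaded
+        // from the register file; on 32-bit (JSVALUE32_64) the tag word is
+        // inspected directly, followed by a classInfo check where needed.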
+#if USE(JSVALUE64)
+ if (isInt32Prediction(predictedType))
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
+ else if (isArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ } else if (isByteArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
+ } else if (isBooleanPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
+ } else if (isInt8ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int8ArrayDescriptor().m_classInfo)));
+ } else if (isInt16ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int16ArrayDescriptor().m_classInfo)));
+ } else if (isInt32ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int32ArrayDescriptor().m_classInfo)));
+ } else if (isUint8ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ArrayDescriptor().m_classInfo)));
+ } else if (isUint16ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint16ArrayDescriptor().m_classInfo)));
+ } else if (isUint32ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint32ArrayDescriptor().m_classInfo)));
+ } else if (isFloat32ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float32ArrayDescriptor().m_classInfo)));
+ } else if (isFloat64ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float64ArrayDescriptor().m_classInfo)));
+ }
+#else
+ if (isInt32Prediction(predictedType))
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
+ else if (isArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ } else if (isByteArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
+ } else if (isBooleanPrediction(predictedType))
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
+ else if (isInt8ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int8ArrayDescriptor().m_classInfo)));
+ } else if (isInt16ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int16ArrayDescriptor().m_classInfo)));
+ } else if (isInt32ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int32ArrayDescriptor().m_classInfo)));
+ } else if (isUint8ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ArrayDescriptor().m_classInfo)));
+ } else if (isUint16ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint16ArrayDescriptor().m_classInfo)));
+ } else if (isUint32ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint32ArrayDescriptor().m_classInfo)));
+ } else if (isFloat32ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float32ArrayDescriptor().m_classInfo)));
+ } else if (isFloat64ArrayPrediction(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float64ArrayDescriptor().m_classInfo)));
+ }
+#endif
+ }
+}
+
+bool SpeculativeJIT::compile()
+{
+ checkArgumentTypes();
+
+ ASSERT(!m_compileIndex);
+ for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block)
+ compile(*m_jit.graph().m_blocks[m_block]);
+ linkBranches();
+ return true;
+}
+
+void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
+{
+ for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
+ BasicBlock& block = *m_jit.graph().m_blocks[blockIndex];
+ if (block.isOSRTarget)
+ m_jit.noticeOSREntry(block, m_blockHeads[blockIndex], linkBuffer);
+ }
+}
+
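+// Translate a ValueSource into the ValueRecovery that OSR exit will use to
+// reconstruct the value: either it is already in the register file in some
+// format, or it lives in a register or spill slot described by a node's
+// GenerationInfo (possibly via a still-live conversion of the original node).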
+ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
+{
+ switch (valueSource.kind()) {
+ case ValueInRegisterFile:
+ return ValueRecovery::alreadyInRegisterFile();
+
+ case Int32InRegisterFile:
+ return ValueRecovery::alreadyInRegisterFileAsUnboxedInt32();
+
+ case CellInRegisterFile:
+ return ValueRecovery::alreadyInRegisterFileAsUnboxedCell();
+
+ case BooleanInRegisterFile:
+ return ValueRecovery::alreadyInRegisterFileAsUnboxedBoolean();
+
+ case DoubleInRegisterFile:
+ return ValueRecovery::alreadyInRegisterFileAsUnboxedDouble();
+
+ case HaveNode: {
+ if (m_jit.isConstant(valueSource.nodeIndex()))
+ return ValueRecovery::constant(m_jit.valueOfJSConstant(valueSource.nodeIndex()));
+
+ Node* nodePtr = &at(valueSource.nodeIndex());
+ if (!nodePtr->shouldGenerate()) {
+ // It's legitimately dead. As in, nobody will ever use this node, or operand,
+ // ever. Set it to Undefined to make the GC happy after the OSR.
+ return ValueRecovery::constant(jsUndefined());
+ }
+
+ GenerationInfo* infoPtr = &m_generationInfo[nodePtr->virtualRegister()];
+ if (!infoPtr->alive() || infoPtr->nodeIndex() != valueSource.nodeIndex()) {
+ // Try to see if there is an alternate node that would contain the value we want.
+ // There are four possibilities:
+ //
+ // ValueToNumber: If the only live version of the value is a ValueToNumber node
+ // then it means that all remaining uses of the value would have performed a
+ // ValueToNumber conversion anyway. Thus, we can substitute ValueToNumber.
+ //
+ // ValueToInt32: Likewise, if the only remaining live version of the value is
+ // ValueToInt32, then we can use it. But if there is both a ValueToInt32
+ // and a ValueToNumber, then we better go with ValueToNumber because it
+ // means that some remaining uses would have converted to number while
+ // others would have converted to Int32.
+ //
+ // UInt32ToNumber: If the only live version of the value is a UInt32ToNumber
+ // then the only remaining uses are ones that want a properly formed number
+ // rather than a UInt32 intermediate.
+ //
+ // The reverse of the above: This node could be a UInt32ToNumber, but its
+ // alternative is still alive. This means that the only remaining uses of
+ // the number would be fine with a UInt32 intermediate.
+
+ bool found = false;
+
+ if (nodePtr->op == UInt32ToNumber) {
+ NodeIndex nodeIndex = nodePtr->child1();
+ nodePtr = &at(nodeIndex);
+ infoPtr = &m_generationInfo[nodePtr->virtualRegister()];
+ if (infoPtr->alive() && infoPtr->nodeIndex() == nodeIndex)
+ found = true;
+ }
+
+ if (!found) {
+ NodeIndex valueToNumberIndex = NoNode;
+ NodeIndex valueToInt32Index = NoNode;
+ NodeIndex uint32ToNumberIndex = NoNode;
+
+ for (unsigned virtualRegister = 0; virtualRegister < m_generationInfo.size(); ++virtualRegister) {
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (!info.alive())
+ continue;
+ if (info.nodeIndex() == NoNode)
+ continue;
+ Node& node = at(info.nodeIndex());
+ if (node.child1Unchecked() != valueSource.nodeIndex())
+ continue;
+ switch (node.op) {
+ case ValueToNumber:
+ case ValueToDouble:
+ valueToNumberIndex = info.nodeIndex();
+ break;
+ case ValueToInt32:
+ valueToInt32Index = info.nodeIndex();
+ break;
+ case UInt32ToNumber:
+ uint32ToNumberIndex = info.nodeIndex();
+ break;
+ default:
+ break;
+ }
+ }
+
+ NodeIndex nodeIndexToUse;
+ if (valueToNumberIndex != NoNode)
+ nodeIndexToUse = valueToNumberIndex;
+ else if (valueToInt32Index != NoNode)
+ nodeIndexToUse = valueToInt32Index;
+ else if (uint32ToNumberIndex != NoNode)
+ nodeIndexToUse = uint32ToNumberIndex;
+ else
+ nodeIndexToUse = NoNode;
+
+ if (nodeIndexToUse != NoNode) {
+ nodePtr = &at(nodeIndexToUse);
+ infoPtr = &m_generationInfo[nodePtr->virtualRegister()];
+ ASSERT(infoPtr->alive() && infoPtr->nodeIndex() == nodeIndexToUse);
+ found = true;
+ }
+ }
+
+ if (!found)
+ return ValueRecovery::constant(jsUndefined());
+ }
+
+ ASSERT(infoPtr->alive());
+
+ if (infoPtr->registerFormat() != DataFormatNone) {
+ if (infoPtr->registerFormat() == DataFormatDouble)
+ return ValueRecovery::inFPR(infoPtr->fpr());
+#if USE(JSVALUE32_64)
+ if (infoPtr->registerFormat() & DataFormatJS)
+ return ValueRecovery::inPair(infoPtr->tagGPR(), infoPtr->payloadGPR());
+#endif
+ return ValueRecovery::inGPR(infoPtr->gpr(), infoPtr->registerFormat());
+ }
+ if (infoPtr->spillFormat() != DataFormatNone)
+ return ValueRecovery::displacedInRegisterFile(static_cast<VirtualRegister>(nodePtr->virtualRegister()), infoPtr->spillFormat());
+
+ ASSERT_NOT_REACHED();
+ return ValueRecovery();
+ }
+
+ default:
+ ASSERT_NOT_REACHED();
+ return ValueRecovery();
+ }
+}
+
+void SpeculativeJIT::compileGetCharCodeAt(Node& node)
+{
+ SpeculateCellOperand string(this, node.child1());
+ SpeculateStrictInt32Operand index(this, node.child2());
+ StorageOperand storage(this, node.child3());
+
+ GPRReg stringReg = string.gpr();
+ GPRReg indexReg = index.gpr();
+ GPRReg storageReg = storage.gpr();
+
+ if (!isStringPrediction(m_state.forNode(node.child1()).m_type)) {
+ ASSERT(!(at(node.child1()).prediction() & PredictString));
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ noResult(m_compileIndex);
+ return;
+ }
+
+ // unsigned comparison so we can filter out negative indices and indices that are too large
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
+
+ GPRTemporary scratch(this);
+ GPRReg scratchReg = scratch.gpr();
+
+ m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
+
+ // Load the character into scratchReg
+ JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
+
+ m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
+ JITCompiler::Jump cont8Bit = m_jit.jump();
+
+ is16Bit.link(&m_jit);
+
+ m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
+
+ cont8Bit.link(&m_jit);
+
+ integerResult(scratchReg, m_compileIndex);
+}
+
+void SpeculativeJIT::compileGetByValOnString(Node& node)
+{
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+ StorageOperand storage(this, node.child3());
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
+
+ if (!isStringPrediction(m_state.forNode(node.child1()).m_type)) {
+ ASSERT(!(at(node.child1()).prediction() & PredictString));
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ noResult(m_compileIndex);
+ return;
+ }
+
+ // unsigned comparison so we can filter out negative indices and indices that are too large
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSString::offsetOfLength())));
+
+ GPRTemporary scratch(this);
+ GPRReg scratchReg = scratch.gpr();
+
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
+
+ // Load the character into scratchReg
+ JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
+
+ m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
+ JITCompiler::Jump cont8Bit = m_jit.jump();
+
+ is16Bit.link(&m_jit);
+
+ m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
+
+    // We only support ASCII characters.
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100)));
+
+    // 8-bit string values don't need the ASCII check above.
+ cont8Bit.link(&m_jit);
+
+ GPRTemporary smallStrings(this);
+ GPRReg smallStringsReg = smallStrings.gpr();
+ m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalData()->smallStrings.singleCharacterStrings()), smallStringsReg);
+ m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, scratchReg, MacroAssembler::ScalePtr, 0), scratchReg);
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
+ cellResult(scratchReg, m_compileIndex);
+}
+
+void SpeculativeJIT::compileValueToInt32(Node& node)
+{
+ if (at(node.child1()).shouldNotSpeculateInteger()) {
+ if (at(node.child1()).shouldSpeculateDouble()) {
+ SpeculateDoubleOperand op1(this, node.child1());
+ GPRTemporary result(this);
+ FPRReg fpr = op1.fpr();
+ GPRReg gpr = result.gpr();
+ JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful);
+
+ silentSpillAllRegisters(gpr);
+ callOperation(toInt32, gpr, fpr);
+ silentFillAllRegisters(gpr);
+
+ truncatedToInteger.link(&m_jit);
+ integerResult(gpr, m_compileIndex);
+ return;
+ }
+ // Do it the safe way.
+ nonSpeculativeValueToInt32(node);
+ return;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ m_jit.move(op1.gpr(), result.gpr());
+ integerResult(result.gpr(), m_compileIndex, op1.format());
+}
+
+void SpeculativeJIT::compileUInt32ToNumber(Node& node)
+{
+ if (!nodeCanSpeculateInteger(node.arithNodeFlags())) {
+ // We know that this sometimes produces doubles. So produce a double every
+ // time. This at least allows subsequent code to not have weird conditionals.
+
+ IntegerOperand op1(this, node.child1());
+ FPRTemporary result(this);
+
+ GPRReg inputGPR = op1.gpr();
+ FPRReg outputFPR = result.fpr();
+
+ m_jit.convertInt32ToDouble(inputGPR, outputFPR);
+
+ JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
+ m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
+ positive.link(&m_jit);
+
+ doubleResult(outputFPR, m_compileIndex);
+ return;
+ }
+
+ IntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+
+ // Test the operand is positive. This is a very special speculation check - we actually
+ // use roll-forward speculation here, where if this fails, we jump to the baseline
+ // instruction that follows us, rather than the one we're executing right now. We have
+ // to do this because by this point, the original values necessary to compile whatever
+ // operation the UInt32ToNumber originated from might be dead.
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, op1.gpr(), TrustedImm32(0)));
+
+ // Verify that we can do roll forward.
+ ASSERT(at(m_compileIndex + 1).op == SetLocal);
+ ASSERT(at(m_compileIndex + 1).codeOrigin == node.codeOrigin);
+ ASSERT(at(m_compileIndex + 2).codeOrigin != node.codeOrigin);
+
+ // Now do the magic.
+ OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
+ Node& setLocal = at(m_compileIndex + 1);
+ exit.m_codeOrigin = at(m_compileIndex + 2).codeOrigin;
+ exit.m_lastSetOperand = setLocal.local();
+
+ // Create the value recovery, and stuff it into the right place.
+ exit.valueRecoveryForOperand(setLocal.local()) = ValueRecovery::uint32InGPR(op1.gpr());
+
+ m_jit.move(op1.gpr(), result.gpr());
+ integerResult(result.gpr(), m_compileIndex, op1.format());
+}
+
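+// Clamp a double to the byte range: NaN and values <= 0 produce 0, values > 255
+// produce 255, and everything else is rounded by adding 0.5 and truncating.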
+static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
+{
+ // Unordered compare so we pick up NaN
+ static const double zero = 0;
+ static const double byteMax = 255;
+ static const double half = 0.5;
+ jit.loadDouble(&zero, scratch);
+ MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
+ jit.loadDouble(&byteMax, scratch);
+ MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
+
+ jit.loadDouble(&half, scratch);
+ // FIXME: This should probably just use a floating point round!
+ // https://bugs.webkit.org/show_bug.cgi?id=72054
+ jit.addDouble(source, scratch);
+ jit.truncateDoubleToInt32(scratch, result);
+ MacroAssembler::Jump truncatedInt = jit.jump();
+
+ tooSmall.link(&jit);
+ jit.xorPtr(result, result);
+ MacroAssembler::Jump zeroed = jit.jump();
+
+ tooBig.link(&jit);
+ jit.move(JITCompiler::TrustedImm32(255), result);
+
+ truncatedInt.link(&jit);
+ zeroed.link(&jit);
+}
+
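+// Store a value into a JSByteArray. Constants, speculated integers and
+// speculated doubles are all clamped to [0, 255]; an out-of-bounds index
+// simply skips the store.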
+void SpeculativeJIT::compilePutByValForByteArray(GPRReg base, GPRReg property, Node& node)
+{
+ NodeIndex baseIndex = node.child1();
+ NodeIndex valueIndex = node.child3();
+
+ if (!isByteArrayPrediction(m_state.forNode(baseIndex).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(base), baseIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(base, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
+ GPRTemporary value;
+ GPRReg valueGPR;
+
+ if (at(valueIndex).isConstant()) {
+ JSValue jsValue = valueOfJSConstant(valueIndex);
+ if (!jsValue.isNumber()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ noResult(m_compileIndex);
+ return;
+ }
+ double d = jsValue.asNumber();
+ d += 0.5;
+ if (!(d > 0))
+ d = 0;
+ else if (d > 255)
+ d = 255;
+ GPRTemporary scratch(this);
+ GPRReg scratchReg = scratch.gpr();
+ m_jit.move(Imm32((int)d), scratchReg);
+ value.adopt(scratch);
+ valueGPR = scratchReg;
+ } else if (!at(valueIndex).shouldNotSpeculateInteger()) {
+ SpeculateIntegerOperand valueOp(this, valueIndex);
+ GPRTemporary scratch(this);
+ GPRReg scratchReg = scratch.gpr();
+ m_jit.move(valueOp.gpr(), scratchReg);
+ MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::BelowOrEqual, scratchReg, TrustedImm32(0xff));
+ MacroAssembler::Jump tooBig = m_jit.branch32(MacroAssembler::GreaterThan, scratchReg, TrustedImm32(0xff));
+ m_jit.xorPtr(scratchReg, scratchReg);
+ MacroAssembler::Jump clamped = m_jit.jump();
+ tooBig.link(&m_jit);
+ m_jit.move(TrustedImm32(255), scratchReg);
+ clamped.link(&m_jit);
+ inBounds.link(&m_jit);
+ value.adopt(scratch);
+ valueGPR = scratchReg;
+ } else {
+ SpeculateDoubleOperand valueOp(this, valueIndex);
+ GPRTemporary result(this);
+ FPRTemporary floatScratch(this);
+ FPRReg fpr = valueOp.fpr();
+ GPRReg gpr = result.gpr();
+ compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
+ value.adopt(result);
+ valueGPR = gpr;
+ }
+ ASSERT_UNUSED(valueGPR, valueGPR != property);
+ ASSERT(valueGPR != base);
+ GPRTemporary storage(this);
+ GPRReg storageReg = storage.gpr();
+ ASSERT(valueGPR != storageReg);
+ m_jit.loadPtr(MacroAssembler::Address(base, JSByteArray::offsetOfStorage()), storageReg);
+ MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(storageReg, ByteArray::offsetOfSize()));
+ m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne, ByteArray::offsetOfData()));
+ outOfBounds.link(&m_jit);
+ noResult(m_compileIndex);
+}
+
+void SpeculativeJIT::compileGetByValOnByteArray(Node& node)
+{
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+
+ if (!isByteArrayPrediction(m_state.forNode(node.child1()).m_type)) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ noResult(m_compileIndex);
+ return;
+ }
+
+    // Load the byte array's backing storage pointer.
+ GPRTemporary storage(this);
+ GPRReg storageReg = storage.gpr();
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSByteArray::offsetOfStorage()), storageReg);
+
+ // unsigned comparison so we can filter out negative indices and indices that are too large
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ByteArray::offsetOfSize())));
+
+ m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, ByteArray::offsetOfData()), storageReg);
+ integerResult(storageReg, m_compileIndex);
+}
+
+void SpeculativeJIT::compileGetTypedArrayLength(const TypedArrayDescriptor& descriptor, Node& node, bool needsSpeculationCheck)
+{
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary result(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ if (needsSpeculationCheck)
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
+
+ m_jit.load32(MacroAssembler::Address(baseGPR, descriptor.m_lengthOffset), resultGPR);
+
+ integerResult(resultGPR, m_compileIndex);
+}
+
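+// Load from an integer typed array. An out-of-bounds index yields 0. Unsigned
+// 32-bit loads are returned as doubles (adding 2^32 when the value appears
+// negative as an int32); everything else fits in an integer result.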
+void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor& descriptor, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements, TypedArraySignedness signedness)
+{
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+ StorageOperand storage(this, node.child3());
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
+
+ GPRTemporary result(this);
+ GPRReg resultReg = result.gpr();
+
+ if (speculationRequirements != NoTypedArrayTypeSpecCheck) {
+ ASSERT_NOT_REACHED();
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ noResult(m_compileIndex);
+ return;
+ }
+
+ MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(baseReg, descriptor.m_lengthOffset));
+ m_jit.xorPtr(resultReg, resultReg);
+ MacroAssembler::Jump outOfBounds = m_jit.jump();
+ inBounds.link(&m_jit);
+ switch (elementSize) {
+ case 1:
+ if (signedness == SignedTypedArray)
+ m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
+ else
+ m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
+ break;
+ case 2:
+ if (signedness == SignedTypedArray)
+ m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
+ else
+ m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
+ break;
+ case 4:
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ outOfBounds.link(&m_jit);
+ if (elementSize < 4 || signedness == SignedTypedArray)
+ integerResult(resultReg, m_compileIndex);
+ else {
+ FPRTemporary fresult(this);
+ m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
+ JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
+ m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
+ positive.link(&m_jit);
+ doubleResult(fresult.fpr(), m_compileIndex);
+ }
+}
+
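+// Store into an integer typed array. Constant and speculated integer values are
+// stored directly; doubles are truncated (NaN becomes 0, and the toInt32
+// operation is called if the inline truncation fails). When a bounds check is
+// required, out-of-bounds stores are skipped.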
+void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements, TypedArraySignedness signedness)
+{
+ NodeIndex baseIndex = node.child1();
+ NodeIndex valueIndex = node.child3();
+
+ if (speculationRequirements != NoTypedArrayTypeSpecCheck)
+ speculationCheck(BadType, JSValueSource::unboxedCell(base), baseIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(base, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
+ GPRTemporary value;
+ GPRReg valueGPR;
+
+ if (at(valueIndex).isConstant()) {
+ JSValue jsValue = valueOfJSConstant(valueIndex);
+ if (!jsValue.isNumber()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ noResult(m_compileIndex);
+ return;
+ }
+ double d = jsValue.asNumber();
+ GPRTemporary scratch(this);
+ GPRReg scratchReg = scratch.gpr();
+ m_jit.move(Imm32((int)d), scratchReg);
+ value.adopt(scratch);
+ valueGPR = scratchReg;
+ } else if (!at(valueIndex).shouldNotSpeculateInteger()) {
+ SpeculateIntegerOperand valueOp(this, valueIndex);
+ GPRTemporary scratch(this);
+ GPRReg scratchReg = scratch.gpr();
+ m_jit.move(valueOp.gpr(), scratchReg);
+ value.adopt(scratch);
+ valueGPR = scratchReg;
+ } else {
+ SpeculateDoubleOperand valueOp(this, valueIndex);
+ GPRTemporary result(this);
+ FPRReg fpr = valueOp.fpr();
+ GPRReg gpr = result.gpr();
+ MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
+ m_jit.xorPtr(gpr, gpr);
+ MacroAssembler::Jump fixed = m_jit.jump();
+ notNaN.link(&m_jit);
+
+ MacroAssembler::Jump done;
+ if (signedness == SignedTypedArray)
+ done = m_jit.branchTruncateDoubleToInt32(fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful);
+ else
+ done = m_jit.branchTruncateDoubleToUint32(fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful);
+
+ silentSpillAllRegisters(gpr);
+ callOperation(toInt32, gpr, fpr);
+ silentFillAllRegisters(gpr);
+
+ done.link(&m_jit);
+ fixed.link(&m_jit);
+ value.adopt(result);
+ valueGPR = gpr;
+ }
+ ASSERT_UNUSED(valueGPR, valueGPR != property);
+ ASSERT(valueGPR != base);
+ GPRTemporary storage(this);
+ GPRReg storageReg = storage.gpr();
+ ASSERT(valueGPR != storageReg);
+ m_jit.loadPtr(MacroAssembler::Address(base, descriptor.m_storageOffset), storageReg);
+ MacroAssembler::Jump outOfBounds;
+ if (speculationRequirements != NoTypedArraySpecCheck)
+ outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, descriptor.m_lengthOffset));
+
+ switch (elementSize) {
+ case 1:
+ m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
+ break;
+ case 2:
+ m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
+ break;
+ case 4:
+ m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ if (speculationRequirements != NoTypedArraySpecCheck)
+ outOfBounds.link(&m_jit);
+ noResult(m_compileIndex);
+}
+
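+// Load from a float typed array. An out-of-bounds index yields 0.0; Float32
+// elements are widened to double, and Float64 loads replace any NaN with the
+// canonical quiet NaN.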
+void SpeculativeJIT::compileGetByValOnFloatTypedArray(const TypedArrayDescriptor& descriptor, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements)
+{
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+ StorageOperand storage(this, node.child3());
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
+
+ if (speculationRequirements != NoTypedArrayTypeSpecCheck) {
+ ASSERT_NOT_REACHED();
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ noResult(m_compileIndex);
+ return;
+ }
+
+ FPRTemporary result(this);
+ FPRReg resultReg = result.fpr();
+ ASSERT(speculationRequirements != NoTypedArraySpecCheck);
+ MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(baseReg, descriptor.m_lengthOffset));
+ static const double zero = 0;
+ m_jit.loadDouble(&zero, resultReg);
+ MacroAssembler::Jump outOfBounds = m_jit.jump();
+ inBounds.link(&m_jit);
+ switch (elementSize) {
+ case 4:
+ m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
+ m_jit.convertFloatToDouble(resultReg, resultReg);
+ break;
+ case 8: {
+ m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
+ MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg);
+ static const double NaN = std::numeric_limits<double>::quiet_NaN();
+ m_jit.loadDouble(&NaN, resultReg);
+ notNaN.link(&m_jit);
+ break;
+ }
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ outOfBounds.link(&m_jit);
+ doubleResult(resultReg, m_compileIndex);
+}
+
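+// Store into a float typed array, narrowing to single precision for 4-byte
+// elements. When a bounds check is required, out-of-bounds stores are skipped.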
+void SpeculativeJIT::compilePutByValForFloatTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements)
+{
+ NodeIndex baseIndex = node.child1();
+ NodeIndex valueIndex = node.child3();
+
+ SpeculateDoubleOperand valueOp(this, valueIndex);
+
+ if (speculationRequirements != NoTypedArrayTypeSpecCheck)
+ speculationCheck(BadType, JSValueSource::unboxedCell(base), baseIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(base, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
+
+ GPRTemporary result(this);
+
+ GPRTemporary storage(this);
+ GPRReg storageReg = storage.gpr();
+
+ m_jit.loadPtr(MacroAssembler::Address(base, descriptor.m_storageOffset), storageReg);
+ MacroAssembler::Jump outOfBounds;
+ if (speculationRequirements != NoTypedArraySpecCheck)
+ outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, descriptor.m_lengthOffset));
+
+ switch (elementSize) {
+ case 4: {
+ FPRTemporary scratch(this);
+ m_jit.moveDouble(valueOp.fpr(), scratch.fpr());
+ m_jit.convertDoubleToFloat(valueOp.fpr(), scratch.fpr());
+ m_jit.storeFloat(scratch.fpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
+ break;
+ }
+ case 8:
+ m_jit.storeDouble(valueOp.fpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ if (speculationRequirements != NoTypedArraySpecCheck)
+ outOfBounds.link(&m_jit);
+ noResult(m_compileIndex);
+}
+
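+// Walk the prototype chain of the value in valueReg, comparing each prototype
+// against prototypeReg; scratchReg ends up holding the boolean result (boxed on
+// JSVALUE64, an unboxed 0/1 payload on JSVALUE32_64).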
+void SpeculativeJIT::compileInstanceOfForObject(Node&, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg)
+{
+ // Check that prototype is an object.
+ m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
+ speculationCheck(BadType, JSValueRegs(), NoNode, m_jit.branchIfNotObject(scratchReg));
+
+ // Initialize scratchReg with the value being checked.
+ m_jit.move(valueReg, scratchReg);
+
+ // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
+ MacroAssembler::Label loop(&m_jit);
+ m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
+#if USE(JSVALUE64)
+ m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
+#else
+ m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
+#endif
+ MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
+#if USE(JSVALUE64)
+ m_jit.branchTestPtr(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
+#else
+ m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
+#endif
+
+ // No match - result is false.
+#if USE(JSVALUE64)
+ m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), scratchReg);
+#else
+ m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
+#endif
+ MacroAssembler::Jump putResult = m_jit.jump();
+
+ isInstance.link(&m_jit);
+#if USE(JSVALUE64)
+ m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), scratchReg);
+#else
+ m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
+#endif
+
+ putResult.link(&m_jit);
+}
+
+void SpeculativeJIT::compileInstanceOf(Node& node)
+{
+ if (!!(at(node.child1()).prediction() & ~PredictCell) && !!(m_state.forNode(node.child1()).m_type & ~PredictCell)) {
+ // It might not be a cell. Speculate less aggressively.
+
+ JSValueOperand value(this, node.child1());
+ SpeculateCellOperand prototype(this, node.child3());
+ GPRTemporary scratch(this);
+
+ GPRReg prototypeReg = prototype.gpr();
+ GPRReg scratchReg = scratch.gpr();
+
+#if USE(JSVALUE64)
+ GPRReg valueReg = value.gpr();
+ MacroAssembler::Jump isCell = m_jit.branchTestPtr(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
+ m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), scratchReg);
+#else
+ GPRReg valueTagReg = value.tagGPR();
+ GPRReg valueReg = value.payloadGPR();
+ MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag));
+ m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
+#endif
+
+ MacroAssembler::Jump done = m_jit.jump();
+
+ isCell.link(&m_jit);
+
+ compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
+
+ done.link(&m_jit);
+
+#if USE(JSVALUE64)
+ jsValueResult(scratchReg, m_compileIndex, DataFormatJSBoolean);
+#else
+ booleanResult(scratchReg, m_compileIndex);
+#endif
+ return;
+ }
+
+ SpeculateCellOperand value(this, node.child1());
+ // Base unused since we speculate default InstanceOf behaviour in CheckHasInstance.
+ SpeculateCellOperand prototype(this, node.child3());
+
+ GPRTemporary scratch(this);
+
+ GPRReg valueReg = value.gpr();
+ GPRReg prototypeReg = prototype.gpr();
+ GPRReg scratchReg = scratch.gpr();
+
+ compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
+
+#if USE(JSVALUE64)
+ jsValueResult(scratchReg, m_compileIndex, DataFormatJSBoolean);
+#else
+ booleanResult(scratchReg, m_compileIndex);
+#endif
+}
+
+static bool isPowerOfTwo(int32_t num)
+{
+ return num && !(num & (num - 1));
+}
+
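+// Software integer modulo. Power-of-two divisors are handled with a mask; on
+// x86 the general case uses idiv, and on ARMv7 a shift-and-subtract sequence.
+// Per ECMA-262 the result takes the sign of the dividend, so the sign is saved
+// up front and reapplied at the end.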
+void SpeculativeJIT::compileSoftModulo(Node& node)
+{
+ bool shouldGeneratePowerOfTwoCheck = true;
+
+ // In the fast path, the dividend value could be the final result
+ // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
+ SpeculateStrictInt32Operand op1(this, node.child1());
+ GPRReg op1Gpr = op1.gpr();
+
+ if (isInt32Constant(node.child2())) {
+ int32_t divisor = valueOfInt32Constant(node.child2());
+ if (divisor < 0)
+ divisor = -divisor;
+
+ if (isPowerOfTwo(divisor)) {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+ m_jit.move(op1Gpr, resultGPR);
+ JITCompiler::Jump positiveDividend = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1Gpr, TrustedImm32(0));
+ m_jit.neg32(resultGPR);
+ m_jit.and32(TrustedImm32(divisor - 1), resultGPR);
+ m_jit.neg32(resultGPR);
+ JITCompiler::Jump done = m_jit.jump();
+
+ positiveDividend.link(&m_jit);
+ m_jit.and32(TrustedImm32(divisor - 1), resultGPR);
+
+ done.link(&m_jit);
+ integerResult(resultGPR, m_compileIndex);
+ return;
+ }
+#if CPU(X86) || CPU(X86_64)
+ if (divisor) {
+ GPRTemporary eax(this, X86Registers::eax);
+ GPRTemporary edx(this, X86Registers::edx);
+ GPRTemporary scratch(this);
+ GPRReg scratchGPR = scratch.gpr();
+
+ m_jit.move(op1Gpr, eax.gpr());
+ m_jit.move(TrustedImm32(divisor), scratchGPR);
+ m_jit.assembler().cdq();
+ m_jit.assembler().idivl_r(scratchGPR);
+ integerResult(edx.gpr(), m_compileIndex);
+ return;
+ }
+#endif
+        // Fall back to the non-constant case, but skip the power-of-two check
+        // since the constant divisor is known not to be a power of two.
+ shouldGeneratePowerOfTwoCheck = false;
+ }
+
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRReg op2Gpr = op2.gpr();
+
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, op2Gpr));
+
+#if CPU(X86) || CPU(X86_64)
+ GPRTemporary eax(this, X86Registers::eax);
+ GPRTemporary edx(this, X86Registers::edx);
+ GPRReg temp2 = InvalidGPRReg;
+ if (op2Gpr == X86Registers::eax || op2Gpr == X86Registers::edx) {
+ temp2 = allocate();
+ m_jit.move(op2Gpr, temp2);
+ op2Gpr = temp2;
+ }
+ GPRReg resultGPR = edx.gpr();
+ GPRReg scratchGPR = eax.gpr();
+#else
+ GPRTemporary result(this);
+ GPRTemporary scratch(this);
+ GPRTemporary scratch3(this);
+ GPRReg scratchGPR3 = scratch3.gpr();
+ GPRReg resultGPR = result.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+#endif
+
+ GPRTemporary scratch2(this);
+ GPRReg scratchGPR2 = scratch2.gpr();
+ JITCompiler::JumpList exitBranch;
+
+    // resultGPR holds the absolute value of the dividend until the final result is produced.
+ m_jit.move(op1Gpr, resultGPR);
+    // scratchGPR2 holds the absolute value of the divisor.
+ m_jit.move(op2Gpr, scratchGPR2);
+
+    // Check whether the remainder will be negative.
+ // According to ECMA-262, the sign of the result equals the sign of the dividend
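+    // (e.g. (-7) % 3 evaluates to -1, while 7 % (-3) evaluates to 1).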
+ JITCompiler::Jump positiveDividend = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1Gpr, TrustedImm32(0));
+ m_jit.neg32(resultGPR);
+ m_jit.move(TrustedImm32(1), scratchGPR);
+ JITCompiler::Jump saveCondition = m_jit.jump();
+
+ positiveDividend.link(&m_jit);
+ m_jit.move(TrustedImm32(0), scratchGPR);
+
+ // Save the condition for negative remainder
+ saveCondition.link(&m_jit);
+ m_jit.push(scratchGPR);
+
+ JITCompiler::Jump positiveDivisor = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op2Gpr, TrustedImm32(0));
+ m_jit.neg32(scratchGPR2);
+
+ positiveDivisor.link(&m_jit);
+ exitBranch.append(m_jit.branch32(JITCompiler::LessThan, resultGPR, scratchGPR2));
+
+ // Power of two fast case
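+    // For a non-negative x and a power-of-two divisor d, x % d == x & (d - 1);
+    // both operands have been made non-negative above.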
+ if (shouldGeneratePowerOfTwoCheck) {
+ m_jit.move(scratchGPR2, scratchGPR);
+ m_jit.sub32(TrustedImm32(1), scratchGPR);
+ JITCompiler::Jump notPowerOfTwo = m_jit.branchTest32(JITCompiler::NonZero, scratchGPR, scratchGPR2);
+ m_jit.and32(scratchGPR, resultGPR);
+ exitBranch.append(m_jit.jump());
+
+ notPowerOfTwo.link(&m_jit);
+ }
+
+#if CPU(X86) || CPU(X86_64)
+ m_jit.move(resultGPR, eax.gpr());
+ m_jit.assembler().cdq();
+ m_jit.assembler().idivl_r(scratchGPR2);
+#elif CPU(ARM_THUMB2)
+ m_jit.countLeadingZeros32(scratchGPR2, scratchGPR);
+ m_jit.countLeadingZeros32(resultGPR, scratchGPR3);
+ m_jit.sub32(scratchGPR3, scratchGPR);
+
+ JITCompiler::Jump useFullTable = m_jit.branch32(JITCompiler::Equal, scratchGPR, TrustedImm32(31));
+
+ m_jit.neg32(scratchGPR);
+ m_jit.add32(TrustedImm32(31), scratchGPR);
+
+    int elementSizeByShift = 3;
+ m_jit.relativeTableJump(scratchGPR, elementSizeByShift);
+
+ useFullTable.link(&m_jit);
+ // Modulo table
+ for (int i = 31; i > 0; --i) {
+ ShiftTypeAndAmount shift(SRType_LSL, i);
+ m_jit.assembler().sub_S(scratchGPR, resultGPR, scratchGPR2, shift);
+ m_jit.assembler().it(ARMv7Assembler::ConditionCS);
+ m_jit.assembler().mov(resultGPR, scratchGPR);
+ }
+
+ JITCompiler::Jump lower = m_jit.branch32(JITCompiler::Below, resultGPR, scratchGPR2);
+ m_jit.sub32(scratchGPR2, resultGPR);
+ lower.link(&m_jit);
+#endif // CPU(X86) || CPU(X86_64)
+
+ exitBranch.link(&m_jit);
+
+ // Check for negative remainder
+ m_jit.pop(scratchGPR);
+ JITCompiler::Jump positiveResult = m_jit.branch32(JITCompiler::Equal, scratchGPR, TrustedImm32(0));
+ m_jit.neg32(resultGPR);
+ positiveResult.link(&m_jit);
+
+ integerResult(resultGPR, m_compileIndex);
+
+#if CPU(X86) || CPU(X86_64)
+ if (temp2 != InvalidGPRReg)
+ unlock(temp2);
+#endif
+}
+
+void SpeculativeJIT::compileArithMul(Node& node)
+{
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this);
+
+ GPRReg reg1 = op1.gpr();
+ GPRReg reg2 = op2.gpr();
+
+ // What is unfortunate is that we cannot take advantage of nodeCanTruncateInteger()
+ // here. A multiply on integers performed in the double domain and then truncated to
+ // an integer will give a different result than a multiply performed in the integer
+ // domain and then truncated, if the integer domain result would have resulted in
+ // something bigger than what a 32-bit integer can hold. JavaScript mandates that
+ // the semantics are always as if the multiply had been performed in the double
+ // domain.
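+        // (Doubles have only 53 significand bits, so a large product is rounded before
+        // any truncation, and that rounding can change its low 32 bits.)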
+
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
+
+ // Check for negative zero, if the users of this node care about such things.
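+        // An integer zero result stands for -0 if either operand was negative
+        // (e.g. 0 * -5 is -0 in JS), and -0 is not representable as an int32.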
+ if (!nodeCanIgnoreNegativeZero(node.arithNodeFlags())) {
+ MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
+ speculationCheck(NegativeZero, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
+ speculationCheck(NegativeZero, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
+ resultNonZero.link(&m_jit);
+ }
+
+ integerResult(result.gpr(), m_compileIndex);
+ return;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1, op2);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+
+ m_jit.mulDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+}
+
+void SpeculativeJIT::compileArithMod(Node& node)
+{
+ if (!at(node.child1()).shouldNotSpeculateInteger() && !at(node.child2()).shouldNotSpeculateInteger()
+ && node.canSpeculateInteger()) {
+ compileSoftModulo(node);
+ return;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+
+ FPRReg op1FPR = op1.fpr();
+ FPRReg op2FPR = op2.fpr();
+
+ flushRegisters();
+
+ FPRResult result(this);
+
+ callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
+
+ doubleResult(result.fpr(), m_compileIndex);
+}
+
+// Returns true if the compare is fused with a subsequent branch.
+bool SpeculativeJIT::compare(Node& node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
+{
+ if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
+ return true;
+
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())))
+ compileIntegerCompare(node, condition);
+ else if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2())))
+ compileDoubleCompare(node, doubleCondition);
+ else if (node.op == CompareEq && Node::shouldSpeculateFinalObject(at(node.child1()), at(node.child2())))
+ compileObjectEquality(node, &JSFinalObject::s_info, isFinalObjectPrediction);
+ else if (node.op == CompareEq && Node::shouldSpeculateArray(at(node.child1()), at(node.child2())))
+ compileObjectEquality(node, &JSArray::s_info, isArrayPrediction);
+ else
+ nonSpeculativeNonPeepholeCompare(node, condition, operation);
+
+ return false;
+}
+
+bool SpeculativeJIT::compileStrictEqForConstant(Node& node, NodeIndex value, JSValue constant)
+{
+ JSValueOperand op1(this, value);
+
+ NodeIndex branchNodeIndex = detectPeepHoleBranch();
+ if (branchNodeIndex != NoNode) {
+ Node& branchNode = at(branchNodeIndex);
+ BlockIndex taken = branchNode.takenBlockIndex();
+ BlockIndex notTaken = branchNode.notTakenBlockIndex();
+ MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
+
+ // The branch instruction will branch to the taken block.
+ // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+ if (taken == (m_block + 1)) {
+ condition = MacroAssembler::NotEqual;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+#if USE(JSVALUE64)
+ addBranch(m_jit.branchPtr(condition, op1.gpr(), MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(constant)))), taken);
+#else
+ GPRReg payloadGPR = op1.payloadGPR();
+ GPRReg tagGPR = op1.tagGPR();
+ if (condition == MacroAssembler::Equal) {
+ // Drop down if not equal, go elsewhere if equal.
+ MacroAssembler::Jump notEqual = m_jit.branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()));
+ addBranch(m_jit.branch32(MacroAssembler::Equal, payloadGPR, MacroAssembler::Imm32(constant.payload())), taken);
+ notEqual.link(&m_jit);
+ } else {
+            // Drop down if equal, go elsewhere if not equal.
+ addBranch(m_jit.branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag())), taken);
+ addBranch(m_jit.branch32(MacroAssembler::NotEqual, payloadGPR, MacroAssembler::Imm32(constant.payload())), taken);
+ }
+#endif
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+
+ use(node.child1());
+ use(node.child2());
+ m_compileIndex = branchNodeIndex;
+ return true;
+ }
+
+ GPRTemporary result(this);
+
+#if USE(JSVALUE64)
+ GPRReg op1GPR = op1.gpr();
+ GPRReg resultGPR = result.gpr();
+ m_jit.move(MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(ValueFalse)), resultGPR);
+ MacroAssembler::Jump notEqual = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(constant))));
+ m_jit.or32(MacroAssembler::Imm32(1), resultGPR);
+ notEqual.link(&m_jit);
+ jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean);
+#else
+ GPRReg op1PayloadGPR = op1.payloadGPR();
+ GPRReg op1TagGPR = op1.tagGPR();
+ GPRReg resultGPR = result.gpr();
+ m_jit.move(Imm32(0), resultGPR);
+ MacroAssembler::JumpList notEqual;
+ notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, MacroAssembler::Imm32(constant.tag())));
+ notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1PayloadGPR, MacroAssembler::Imm32(constant.payload())));
+ m_jit.move(Imm32(1), resultGPR);
+ notEqual.link(&m_jit);
+ booleanResult(resultGPR, m_compileIndex);
+#endif
+
+ return false;
+}
+
+bool SpeculativeJIT::compileStrictEq(Node& node)
+{
+ // 1) If either operand is a constant and that constant is not a double, integer,
+ // or string, then do a JSValue comparison.
+
+ if (isJSConstant(node.child1())) {
+ JSValue value = valueOfJSConstant(node.child1());
+ if (!value.isNumber() && !value.isString())
+ return compileStrictEqForConstant(node, node.child2(), value);
+ }
+
+ if (isJSConstant(node.child2())) {
+ JSValue value = valueOfJSConstant(node.child2());
+ if (!value.isNumber() && !value.isString())
+ return compileStrictEqForConstant(node, node.child1(), value);
+ }
+
+ // 2) If the operands are predicted integer, do an integer comparison.
+
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2()))) {
+ NodeIndex branchNodeIndex = detectPeepHoleBranch();
+ if (branchNodeIndex != NoNode) {
+ compilePeepHoleIntegerBranch(node, branchNodeIndex, MacroAssembler::Equal);
+ use(node.child1());
+ use(node.child2());
+ m_compileIndex = branchNodeIndex;
+ return true;
+ }
+ compileIntegerCompare(node, MacroAssembler::Equal);
+ return false;
+ }
+
+ // 3) If the operands are predicted double, do a double comparison.
+
+ if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2()))) {
+ NodeIndex branchNodeIndex = detectPeepHoleBranch();
+ if (branchNodeIndex != NoNode) {
+ compilePeepHoleDoubleBranch(node, branchNodeIndex, MacroAssembler::DoubleEqual);
+ use(node.child1());
+ use(node.child2());
+ m_compileIndex = branchNodeIndex;
+ return true;
+ }
+ compileDoubleCompare(node, MacroAssembler::DoubleEqual);
+ return false;
+ }
+
+ // 4) If the operands are predicted final object or array, then do a final object
+ // or array comparison.
+
+ if (Node::shouldSpeculateFinalObject(at(node.child1()), at(node.child2()))) {
+ NodeIndex branchNodeIndex = detectPeepHoleBranch();
+ if (branchNodeIndex != NoNode) {
+ compilePeepHoleObjectEquality(node, branchNodeIndex, &JSFinalObject::s_info, isFinalObjectPrediction);
+ use(node.child1());
+ use(node.child2());
+ m_compileIndex = branchNodeIndex;
+ return true;
+ }
+ compileObjectEquality(node, &JSFinalObject::s_info, isFinalObjectPrediction);
+ return false;
+ }
+
+ if (Node::shouldSpeculateArray(at(node.child1()), at(node.child2()))) {
+ NodeIndex branchNodeIndex = detectPeepHoleBranch();
+ if (branchNodeIndex != NoNode) {
+ compilePeepHoleObjectEquality(node, branchNodeIndex, &JSArray::s_info, isArrayPrediction);
+ use(node.child1());
+ use(node.child2());
+ m_compileIndex = branchNodeIndex;
+ return true;
+ }
+ compileObjectEquality(node, &JSArray::s_info, isArrayPrediction);
+ return false;
+ }
+
+ // 5) Fall back to non-speculative strict equality.
+
+ return nonSpeculativeStrictEq(node);
+}
+
+void SpeculativeJIT::compileGetIndexedPropertyStorage(Node& node)
+{
+ if (!node.prediction() || !at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return;
+ }
+
+ SpeculateCellOperand base(this, node.child1());
+ GPRReg baseReg = base.gpr();
+
+ PredictedType basePrediction = at(node.child2()).prediction();
+ if (!(basePrediction & PredictInt32) && basePrediction) {
+ ASSERT_NOT_REACHED();
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ noResult(m_compileIndex);
+ return;
+ }
+
+ GPRTemporary storage(this);
+ GPRReg storageReg = storage.gpr();
+ if (at(node.child1()).prediction() == PredictString) {
+ if (!isStringPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info)));
+
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), storageReg);
+
+ // Speculate that we're not accessing a rope
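+        // (the value pointer is still zero for an unresolved rope).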
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTest32(MacroAssembler::Zero, storageReg));
+
+ m_jit.loadPtr(MacroAssembler::Address(storageReg, StringImpl::dataOffset()), storageReg);
+ } else if (at(node.child1()).shouldSpeculateByteArray()) {
+ if (!isByteArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSByteArray::offsetOfStorage()), storageReg);
+ } else if (at(node.child1()).shouldSpeculateInt8Array()) {
+ const TypedArrayDescriptor& descriptor = m_jit.globalData()->int8ArrayDescriptor();
+ if (!isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
+ } else if (at(node.child1()).shouldSpeculateInt16Array()) {
+ const TypedArrayDescriptor& descriptor = m_jit.globalData()->int16ArrayDescriptor();
+ if (!isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
+ } else if (at(node.child1()).shouldSpeculateInt32Array()) {
+ const TypedArrayDescriptor& descriptor = m_jit.globalData()->int32ArrayDescriptor();
+ if (!isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
+ } else if (at(node.child1()).shouldSpeculateUint8Array()) {
+ const TypedArrayDescriptor& descriptor = m_jit.globalData()->uint8ArrayDescriptor();
+ if (!isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
+ } else if (at(node.child1()).shouldSpeculateUint16Array()) {
+ const TypedArrayDescriptor& descriptor = m_jit.globalData()->uint16ArrayDescriptor();
+ if (!isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
+ } else if (at(node.child1()).shouldSpeculateUint32Array()) {
+ const TypedArrayDescriptor& descriptor = m_jit.globalData()->uint32ArrayDescriptor();
+ if (!isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
+ } else if (at(node.child1()).shouldSpeculateFloat32Array()) {
+ const TypedArrayDescriptor& descriptor = m_jit.globalData()->float32ArrayDescriptor();
+ if (!isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
+ } else if (at(node.child1()).shouldSpeculateFloat64Array()) {
+ const TypedArrayDescriptor& descriptor = m_jit.globalData()->float64ArrayDescriptor();
+ if (!isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
+ } else {
+ if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg);
+ }
+ storageResult(storageReg, m_compileIndex);
+}
+
+} } // namespace JSC::DFG
+
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
new file mode 100644
index 000000000..da48d3a2c
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -0,0 +1,2758 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGSpeculativeJIT_h
+#define DFGSpeculativeJIT_h
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGAbstractState.h"
+#include "DFGGenerationInfo.h"
+#include "DFGJITCompiler.h"
+#include "DFGOSRExit.h"
+#include "DFGOperations.h"
+#include "ValueRecovery.h"
+
+namespace JSC { namespace DFG {
+
+class JSValueOperand;
+class SpeculativeJIT;
+class SpeculateIntegerOperand;
+class SpeculateStrictInt32Operand;
+class SpeculateDoubleOperand;
+class SpeculateCellOperand;
+class SpeculateBooleanOperand;
+
+
+enum ValueSourceKind {
+ SourceNotSet,
+ ValueInRegisterFile,
+ Int32InRegisterFile,
+ CellInRegisterFile,
+ BooleanInRegisterFile,
+ DoubleInRegisterFile,
+ HaveNode
+};
+
+class ValueSource {
+public:
+ ValueSource()
+ : m_nodeIndex(nodeIndexFromKind(SourceNotSet))
+ {
+ }
+
+ explicit ValueSource(ValueSourceKind valueSourceKind)
+ : m_nodeIndex(nodeIndexFromKind(valueSourceKind))
+ {
+ ASSERT(kind() != SourceNotSet);
+ ASSERT(kind() != HaveNode);
+ }
+
+ explicit ValueSource(NodeIndex nodeIndex)
+ : m_nodeIndex(nodeIndex)
+ {
+ ASSERT(kind() == HaveNode);
+ }
+
+ static ValueSource forPrediction(PredictedType prediction)
+ {
+ if (isInt32Prediction(prediction))
+ return ValueSource(Int32InRegisterFile);
+ if (isArrayPrediction(prediction) || isByteArrayPrediction(prediction))
+ return ValueSource(CellInRegisterFile);
+ if (isBooleanPrediction(prediction))
+ return ValueSource(BooleanInRegisterFile);
+ return ValueSource(ValueInRegisterFile);
+ }
+
+ bool isSet() const
+ {
+ return kindFromNodeIndex(m_nodeIndex) != SourceNotSet;
+ }
+
+ ValueSourceKind kind() const
+ {
+ return kindFromNodeIndex(m_nodeIndex);
+ }
+
+ NodeIndex nodeIndex() const
+ {
+ ASSERT(kind() == HaveNode);
+ return m_nodeIndex;
+ }
+
+#ifndef NDEBUG
+ void dump(FILE* out) const;
+#endif
+
+private:
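+    // Kinds other than HaveNode are encoded in the largest NodeIndex values
+    // (NoNode, NoNode - 1, ...), so any smaller index denotes an actual node.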
+ static NodeIndex nodeIndexFromKind(ValueSourceKind kind)
+ {
+ ASSERT(kind >= SourceNotSet && kind < HaveNode);
+ return NoNode - kind;
+ }
+
+ static ValueSourceKind kindFromNodeIndex(NodeIndex nodeIndex)
+ {
+ unsigned kind = static_cast<unsigned>(NoNode - nodeIndex);
+ if (kind >= static_cast<unsigned>(HaveNode))
+ return HaveNode;
+ return static_cast<ValueSourceKind>(kind);
+ }
+
+ NodeIndex m_nodeIndex;
+};
+
+// === SpeculativeJIT ===
+//
+// The SpeculativeJIT is used to generate a fast, but potentially
+// incomplete code path for the dataflow. When generating code we may
+// make assumptions about operand types, dynamically check them, and
+// bail out to an alternate code path if these checks fail.
+// Importantly, the speculative code path cannot be reentered once
+// a speculative check has failed. This allows the SpeculativeJIT
+// to propagate type information (including information that has
+// only speculatively been asserted) through the dataflow.
+class SpeculativeJIT {
+ friend struct OSRExit;
+private:
+ typedef JITCompiler::TrustedImm32 TrustedImm32;
+ typedef JITCompiler::Imm32 Imm32;
+ typedef JITCompiler::TrustedImmPtr TrustedImmPtr;
+ typedef JITCompiler::ImmPtr ImmPtr;
+
+ // These constants are used to set priorities for spill order for
+ // the register allocator.
+#if USE(JSVALUE64)
+ enum SpillOrder {
+ SpillOrderConstant = 1, // no spill, and cheap fill
+ SpillOrderSpilled = 2, // no spill
+ SpillOrderJS = 4, // needs spill
+ SpillOrderCell = 4, // needs spill
+ SpillOrderStorage = 4, // needs spill
+ SpillOrderInteger = 5, // needs spill and box
+ SpillOrderBoolean = 5, // needs spill and box
+ SpillOrderDouble = 6, // needs spill and convert
+ };
+#elif USE(JSVALUE32_64)
+ enum SpillOrder {
+ SpillOrderConstant = 1, // no spill, and cheap fill
+ SpillOrderSpilled = 2, // no spill
+ SpillOrderJS = 4, // needs spill
+ SpillOrderStorage = 4, // needs spill
+ SpillOrderDouble = 4, // needs spill
+ SpillOrderInteger = 5, // needs spill and box
+ SpillOrderCell = 5, // needs spill and box
+ SpillOrderBoolean = 5, // needs spill and box
+ };
+#endif
+
+ enum UseChildrenMode { CallUseChildren, UseChildrenCalledExplicitly };
+
+public:
+ SpeculativeJIT(JITCompiler&);
+
+ bool compile();
+ void linkOSREntries(LinkBuffer&);
+
+ Node& at(NodeIndex nodeIndex)
+ {
+ return m_jit.graph()[nodeIndex];
+ }
+
+ GPRReg fillInteger(NodeIndex, DataFormat& returnFormat);
+ FPRReg fillDouble(NodeIndex);
+#if USE(JSVALUE64)
+ GPRReg fillJSValue(NodeIndex);
+#elif USE(JSVALUE32_64)
+ bool fillJSValue(NodeIndex, GPRReg&, GPRReg&, FPRReg&);
+#endif
+ GPRReg fillStorage(NodeIndex);
+
+ // lock and unlock GPR & FPR registers.
+ void lock(GPRReg reg)
+ {
+ m_gprs.lock(reg);
+ }
+ void lock(FPRReg reg)
+ {
+ m_fprs.lock(reg);
+ }
+ void unlock(GPRReg reg)
+ {
+ m_gprs.unlock(reg);
+ }
+ void unlock(FPRReg reg)
+ {
+ m_fprs.unlock(reg);
+ }
+
+    // Used to check whether a child node is on its last use,
+    // so that its machine registers may be reused.
+ bool canReuse(NodeIndex nodeIndex)
+ {
+ VirtualRegister virtualRegister = at(nodeIndex).virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ return info.canReuse();
+ }
+ GPRReg reuse(GPRReg reg)
+ {
+ m_gprs.lock(reg);
+ return reg;
+ }
+ FPRReg reuse(FPRReg reg)
+ {
+ m_fprs.lock(reg);
+ return reg;
+ }
+
+ // Allocate a gpr/fpr.
+ GPRReg allocate()
+ {
+ VirtualRegister spillMe;
+ GPRReg gpr = m_gprs.allocate(spillMe);
+ if (spillMe != InvalidVirtualRegister) {
+#if USE(JSVALUE32_64)
+ GenerationInfo& info = m_generationInfo[spillMe];
+ ASSERT(info.registerFormat() != DataFormatJSDouble);
+ if ((info.registerFormat() & DataFormatJS))
+ m_gprs.release(info.tagGPR() == gpr ? info.payloadGPR() : info.tagGPR());
+#endif
+ spill(spillMe);
+ }
+ return gpr;
+ }
+ GPRReg allocate(GPRReg specific)
+ {
+ VirtualRegister spillMe = m_gprs.allocateSpecific(specific);
+ if (spillMe != InvalidVirtualRegister) {
+#if USE(JSVALUE32_64)
+ GenerationInfo& info = m_generationInfo[spillMe];
+ ASSERT(info.registerFormat() != DataFormatJSDouble);
+ if ((info.registerFormat() & DataFormatJS))
+ m_gprs.release(info.tagGPR() == specific ? info.payloadGPR() : info.tagGPR());
+#endif
+ spill(spillMe);
+ }
+ return specific;
+ }
+ GPRReg tryAllocate()
+ {
+ return m_gprs.tryAllocate();
+ }
+ FPRReg fprAllocate()
+ {
+ VirtualRegister spillMe;
+ FPRReg fpr = m_fprs.allocate(spillMe);
+ if (spillMe != InvalidVirtualRegister)
+ spill(spillMe);
+ return fpr;
+ }
+
+    // Check whether a VirtualRegister is currently in a machine register.
+    // We use this when filling operands, to fill those that are already in
+    // machine registers first (by locking VirtualRegisters that are already
+    // in machine registers before filling those that are not, we attempt to
+    // avoid spilling values we will need immediately).
+ bool isFilled(NodeIndex nodeIndex)
+ {
+ VirtualRegister virtualRegister = at(nodeIndex).virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ return info.registerFormat() != DataFormatNone;
+ }
+ bool isFilledDouble(NodeIndex nodeIndex)
+ {
+ VirtualRegister virtualRegister = at(nodeIndex).virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ return info.registerFormat() == DataFormatDouble;
+ }
+
+ // Called on an operand once it has been consumed by a parent node.
+ void use(NodeIndex nodeIndex)
+ {
+ VirtualRegister virtualRegister = at(nodeIndex).virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ // use() returns true when the value becomes dead, and any
+ // associated resources may be freed.
+ if (!info.use())
+ return;
+
+ // Release the associated machine registers.
+ DataFormat registerFormat = info.registerFormat();
+#if USE(JSVALUE64)
+ if (registerFormat == DataFormatDouble)
+ m_fprs.release(info.fpr());
+ else if (registerFormat != DataFormatNone)
+ m_gprs.release(info.gpr());
+#elif USE(JSVALUE32_64)
+ if (registerFormat == DataFormatDouble || registerFormat == DataFormatJSDouble)
+ m_fprs.release(info.fpr());
+ else if (registerFormat & DataFormatJS) {
+ m_gprs.release(info.tagGPR());
+ m_gprs.release(info.payloadGPR());
+ } else if (registerFormat != DataFormatNone)
+ m_gprs.release(info.gpr());
+#endif
+ }
+
+ static void markCellCard(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2);
+ static void writeBarrier(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, WriteBarrierUseKind);
+
+ void writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
+ void writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
+ void writeBarrier(JSCell* owner, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg);
+
+ static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg)
+ {
+ if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0)
+ return GPRInfo::regT0;
+
+ if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1)
+ return GPRInfo::regT1;
+
+ if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2)
+ return GPRInfo::regT2;
+
+ return GPRInfo::regT3;
+ }
+
+    // Called by the speculative operand types, below, to fill operands into
+ // machine registers, implicitly generating speculation checks as needed.
+ GPRReg fillSpeculateInt(NodeIndex, DataFormat& returnFormat);
+ GPRReg fillSpeculateIntStrict(NodeIndex);
+ FPRReg fillSpeculateDouble(NodeIndex);
+ GPRReg fillSpeculateCell(NodeIndex);
+ GPRReg fillSpeculateBoolean(NodeIndex);
+
+private:
+ void compile(Node&);
+ void compileMovHint(Node&);
+ void compile(BasicBlock&);
+
+ void checkArgumentTypes();
+
+ void clearGenerationInfo();
+
+ // These methods are used when generating 'unexpected'
+ // calls out from JIT code to C++ helper routines -
+ // they spill all live values to the appropriate
+ // slots in the RegisterFile without changing any state
+ // in the GenerationInfo.
+ void silentSpillGPR(VirtualRegister spillMe, GPRReg source)
+ {
+ GenerationInfo& info = m_generationInfo[spillMe];
+ ASSERT(info.registerFormat() != DataFormatNone);
+ ASSERT(info.registerFormat() != DataFormatDouble);
+
+ if (!info.needsSpill())
+ return;
+
+ DataFormat registerFormat = info.registerFormat();
+
+#if USE(JSVALUE64)
+ ASSERT(info.gpr() == source);
+ if (registerFormat == DataFormatInteger)
+ m_jit.store32(source, JITCompiler::addressFor(spillMe));
+ else {
+ ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell || registerFormat == DataFormatStorage);
+ m_jit.storePtr(source, JITCompiler::addressFor(spillMe));
+ }
+#elif USE(JSVALUE32_64)
+ if (registerFormat & DataFormatJS) {
+ ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
+ m_jit.store32(source, source == info.tagGPR() ? JITCompiler::tagFor(spillMe) : JITCompiler::payloadFor(spillMe));
+ } else {
+ ASSERT(info.gpr() == source);
+ m_jit.store32(source, JITCompiler::payloadFor(spillMe));
+ }
+#endif
+ }
+ void silentSpillFPR(VirtualRegister spillMe, FPRReg source)
+ {
+ GenerationInfo& info = m_generationInfo[spillMe];
+ ASSERT(info.registerFormat() == DataFormatDouble);
+
+ if (!info.needsSpill()) {
+ // it's either a constant or it's already been spilled
+ ASSERT(at(info.nodeIndex()).hasConstant() || info.spillFormat() != DataFormatNone);
+ return;
+ }
+
+ // it's neither a constant nor has it been spilled.
+ ASSERT(!at(info.nodeIndex()).hasConstant());
+ ASSERT(info.spillFormat() == DataFormatNone);
+ ASSERT(info.fpr() == source);
+
+ m_jit.storeDouble(source, JITCompiler::addressFor(spillMe));
+ }
+
+ void silentFillGPR(VirtualRegister spillMe, GPRReg target)
+ {
+ GenerationInfo& info = m_generationInfo[spillMe];
+
+ NodeIndex nodeIndex = info.nodeIndex();
+ Node& node = at(nodeIndex);
+ ASSERT(info.registerFormat() != DataFormatNone);
+ ASSERT(info.registerFormat() != DataFormatDouble);
+ DataFormat registerFormat = info.registerFormat();
+
+ if (registerFormat == DataFormatInteger) {
+ ASSERT(info.gpr() == target);
+ ASSERT(isJSInteger(info.registerFormat()));
+ if (node.hasConstant()) {
+ ASSERT(isInt32Constant(nodeIndex));
+ m_jit.move(Imm32(valueOfInt32Constant(nodeIndex)), target);
+ } else
+ m_jit.load32(JITCompiler::payloadFor(spillMe), target);
+ return;
+ }
+
+ if (registerFormat == DataFormatBoolean) {
+#if USE(JSVALUE64)
+ ASSERT_NOT_REACHED();
+#elif USE(JSVALUE32_64)
+ ASSERT(info.gpr() == target);
+ if (node.hasConstant()) {
+ ASSERT(isBooleanConstant(nodeIndex));
+ m_jit.move(Imm32(valueOfBooleanConstant(nodeIndex)), target);
+ } else
+ m_jit.load32(JITCompiler::payloadFor(spillMe), target);
+#endif
+ return;
+ }
+
+ if (registerFormat == DataFormatCell) {
+ ASSERT(info.gpr() == target);
+ if (node.isConstant()) {
+ JSValue value = valueOfJSConstant(nodeIndex);
+ ASSERT(value.isCell());
+ m_jit.move(ImmPtr(value.asCell()), target);
+ } else
+ m_jit.loadPtr(JITCompiler::payloadFor(spillMe), target);
+ return;
+ }
+
+ if (registerFormat == DataFormatStorage) {
+ ASSERT(info.gpr() == target);
+ m_jit.loadPtr(JITCompiler::addressFor(spillMe), target);
+ return;
+ }
+
+ ASSERT(registerFormat & DataFormatJS);
+#if USE(JSVALUE64)
+ ASSERT(info.gpr() == target);
+ if (node.hasConstant())
+ m_jit.move(valueOfJSConstantAsImmPtr(nodeIndex), target);
+ else if (info.spillFormat() == DataFormatInteger) {
+ ASSERT(registerFormat == DataFormatJSInteger);
+ m_jit.load32(JITCompiler::payloadFor(spillMe), target);
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, target);
+ } else if (info.spillFormat() == DataFormatDouble) {
+ ASSERT(registerFormat == DataFormatJSDouble);
+ m_jit.loadPtr(JITCompiler::addressFor(spillMe), target);
+ m_jit.subPtr(GPRInfo::tagTypeNumberRegister, target);
+ } else
+ m_jit.loadPtr(JITCompiler::addressFor(spillMe), target);
+#else
+ ASSERT(info.tagGPR() == target || info.payloadGPR() == target);
+ if (node.hasConstant()) {
+ JSValue v = valueOfJSConstant(nodeIndex);
+ m_jit.move(info.tagGPR() == target ? Imm32(v.tag()) : Imm32(v.payload()), target);
+ } else if (info.payloadGPR() == target)
+ m_jit.load32(JITCompiler::payloadFor(spillMe), target);
+ else { // Fill the Tag
+ switch (info.spillFormat()) {
+ case DataFormatInteger:
+ ASSERT(registerFormat == DataFormatJSInteger);
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), target);
+ break;
+ case DataFormatCell:
+ ASSERT(registerFormat == DataFormatJSCell);
+ m_jit.move(TrustedImm32(JSValue::CellTag), target);
+ break;
+ case DataFormatBoolean:
+ ASSERT(registerFormat == DataFormatJSBoolean);
+ m_jit.move(TrustedImm32(JSValue::BooleanTag), target);
+ break;
+ default:
+ m_jit.load32(JITCompiler::tagFor(spillMe), target);
+ break;
+ }
+ }
+#endif
+ }
+
+ void silentFillFPR(VirtualRegister spillMe, GPRReg canTrample, FPRReg target)
+ {
+ GenerationInfo& info = m_generationInfo[spillMe];
+ ASSERT(info.fpr() == target);
+
+ NodeIndex nodeIndex = info.nodeIndex();
+ Node& node = at(nodeIndex);
+#if USE(JSVALUE64)
+ ASSERT(info.registerFormat() == DataFormatDouble);
+
+ if (node.hasConstant()) {
+ ASSERT(isNumberConstant(nodeIndex));
+ m_jit.move(ImmPtr(bitwise_cast<void*>(valueOfNumberConstant(nodeIndex))), canTrample);
+ m_jit.movePtrToDouble(canTrample, target);
+ return;
+ }
+
+ if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
+ // it was already spilled previously and not as a double, which means we need unboxing.
+ ASSERT(info.spillFormat() & DataFormatJS);
+ m_jit.loadPtr(JITCompiler::addressFor(spillMe), canTrample);
+ unboxDouble(canTrample, target);
+ return;
+ }
+
+ m_jit.loadDouble(JITCompiler::addressFor(spillMe), target);
+#elif USE(JSVALUE32_64)
+ UNUSED_PARAM(canTrample);
+ ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
+ if (node.hasConstant()) {
+ ASSERT(isNumberConstant(nodeIndex));
+ m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), target);
+ } else
+ m_jit.loadDouble(JITCompiler::addressFor(spillMe), target);
+#endif
+ }
+
+ void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg)
+ {
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ GPRReg gpr = iter.regID();
+ if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2)
+ silentSpillGPR(iter.name(), gpr);
+ }
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ if (iter.name() != InvalidVirtualRegister)
+ silentSpillFPR(iter.name(), iter.regID());
+ }
+ }
+ void silentSpillAllRegisters(FPRReg exclude)
+ {
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ if (iter.name() != InvalidVirtualRegister)
+ silentSpillGPR(iter.name(), iter.regID());
+ }
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ FPRReg fpr = iter.regID();
+ if (iter.name() != InvalidVirtualRegister && fpr != exclude)
+ silentSpillFPR(iter.name(), fpr);
+ }
+ }
+
+ void silentFillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg)
+ {
+ GPRReg canTrample = GPRInfo::regT0;
+ if (exclude == GPRInfo::regT0)
+ canTrample = GPRInfo::regT1;
+
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ if (iter.name() != InvalidVirtualRegister)
+ silentFillFPR(iter.name(), canTrample, iter.regID());
+ }
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ GPRReg gpr = iter.regID();
+ if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2)
+ silentFillGPR(iter.name(), gpr);
+ }
+ }
+ void silentFillAllRegisters(FPRReg exclude)
+ {
+ GPRReg canTrample = GPRInfo::regT0;
+
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ FPRReg fpr = iter.regID();
+ if (iter.name() != InvalidVirtualRegister && fpr != exclude)
+ silentFillFPR(iter.name(), canTrample, fpr);
+ }
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ if (iter.name() != InvalidVirtualRegister)
+ silentFillGPR(iter.name(), iter.regID());
+ }
+ }
+
+    // These methods convert between unboxed doubles and doubles boxed as JSValues.
+#if USE(JSVALUE64)
+ GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
+ {
+ return m_jit.boxDouble(fpr, gpr);
+ }
+ FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
+ {
+ return m_jit.unboxDouble(gpr, fpr);
+ }
+ GPRReg boxDouble(FPRReg fpr)
+ {
+ return boxDouble(fpr, allocate());
+ }
+#elif USE(JSVALUE32_64)
+ void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
+ {
+ m_jit.boxDouble(fpr, tagGPR, payloadGPR);
+ }
+ void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
+ {
+ m_jit.unboxDouble(tagGPR, payloadGPR, fpr, scratchFPR);
+ }
+#endif
+
+ // Spill a VirtualRegister to the RegisterFile.
+ void spill(VirtualRegister spillMe)
+ {
+ GenerationInfo& info = m_generationInfo[spillMe];
+
+#if USE(JSVALUE32_64)
+        if (info.registerFormat() == DataFormatNone) // It has already been spilled; JS values that occupy two GPRs can reach here.
+ return;
+#endif
+        // Check the GenerationInfo to see if this value needs writing
+ // to the RegisterFile - if not, mark it as spilled & return.
+ if (!info.needsSpill()) {
+ info.setSpilled();
+ return;
+ }
+
+ DataFormat spillFormat = info.registerFormat();
+ switch (spillFormat) {
+ case DataFormatStorage: {
+            // This is special, since it is not a JS value and so is never visible to JS code.
+ m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
+ info.spill(DataFormatStorage);
+ return;
+ }
+
+ case DataFormatInteger: {
+ m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
+ info.spill(DataFormatInteger);
+ return;
+ }
+
+#if USE(JSVALUE64)
+ case DataFormatDouble: {
+ m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
+ info.spill(DataFormatDouble);
+ return;
+ }
+
+ default:
+ // The following code handles JSValues, int32s, and cells.
+ ASSERT(spillFormat == DataFormatCell || spillFormat & DataFormatJS);
+
+ GPRReg reg = info.gpr();
+ // We need to box int32 and cell values ...
+ // but on JSVALUE64 boxing a cell is a no-op!
+ if (spillFormat == DataFormatInteger)
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, reg);
+
+ // Spill the value, and record it as spilled in its boxed form.
+ m_jit.storePtr(reg, JITCompiler::addressFor(spillMe));
+ info.spill((DataFormat)(spillFormat | DataFormatJS));
+ return;
+#elif USE(JSVALUE32_64)
+ case DataFormatCell:
+ case DataFormatBoolean: {
+ m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
+ info.spill(spillFormat);
+ return;
+ }
+
+ case DataFormatDouble:
+ case DataFormatJSDouble: {
+ // On JSVALUE32_64 boxing a double is a no-op.
+ m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
+ info.spill(DataFormatJSDouble);
+ return;
+ }
+
+ default:
+ // The following code handles JSValues.
+ ASSERT(spillFormat & DataFormatJS);
+ m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe));
+ m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe));
+ info.spill(spillFormat);
+ return;
+#endif
+ }
+ }
+
+ bool isStrictInt32(NodeIndex);
+
+ bool isKnownInteger(NodeIndex);
+ bool isKnownNumeric(NodeIndex);
+ bool isKnownCell(NodeIndex);
+
+ bool isKnownNotInteger(NodeIndex);
+ bool isKnownNotNumber(NodeIndex);
+
+ bool isKnownBoolean(NodeIndex);
+ bool isKnownNotBoolean(NodeIndex);
+
+ bool isKnownNotCell(NodeIndex);
+
+ // Checks/accessors for constant values.
+ bool isConstant(NodeIndex nodeIndex) { return m_jit.isConstant(nodeIndex); }
+ bool isJSConstant(NodeIndex nodeIndex) { return m_jit.isJSConstant(nodeIndex); }
+ bool isInt32Constant(NodeIndex nodeIndex) { return m_jit.isInt32Constant(nodeIndex); }
+ bool isDoubleConstant(NodeIndex nodeIndex) { return m_jit.isDoubleConstant(nodeIndex); }
+ bool isNumberConstant(NodeIndex nodeIndex) { return m_jit.isNumberConstant(nodeIndex); }
+ bool isBooleanConstant(NodeIndex nodeIndex) { return m_jit.isBooleanConstant(nodeIndex); }
+ bool isFunctionConstant(NodeIndex nodeIndex) { return m_jit.isFunctionConstant(nodeIndex); }
+ int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return m_jit.valueOfInt32Constant(nodeIndex); }
+ double valueOfNumberConstant(NodeIndex nodeIndex) { return m_jit.valueOfNumberConstant(nodeIndex); }
+#if USE(JSVALUE32_64)
+ void* addressOfDoubleConstant(NodeIndex nodeIndex) { return m_jit.addressOfDoubleConstant(nodeIndex); }
+#endif
+ JSValue valueOfJSConstant(NodeIndex nodeIndex) { return m_jit.valueOfJSConstant(nodeIndex); }
+ bool valueOfBooleanConstant(NodeIndex nodeIndex) { return m_jit.valueOfBooleanConstant(nodeIndex); }
+ JSFunction* valueOfFunctionConstant(NodeIndex nodeIndex) { return m_jit.valueOfFunctionConstant(nodeIndex); }
+ bool isNullConstant(NodeIndex nodeIndex)
+ {
+ if (!isConstant(nodeIndex))
+ return false;
+ return valueOfJSConstant(nodeIndex).isNull();
+ }
+
+ Identifier* identifier(unsigned index)
+ {
+ return &m_jit.codeBlock()->identifier(index);
+ }
+
+ // Spill all VirtualRegisters back to the RegisterFile.
+ void flushRegisters()
+ {
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ if (iter.name() != InvalidVirtualRegister) {
+ spill(iter.name());
+ iter.release();
+ }
+ }
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ if (iter.name() != InvalidVirtualRegister) {
+ spill(iter.name());
+ iter.release();
+ }
+ }
+ }
+
+#ifndef NDEBUG
+ // Used to ASSERT flushRegisters() has been called prior to
+ // calling out from JIT code to a C helper function.
+ bool isFlushed()
+ {
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ if (iter.name() != InvalidVirtualRegister)
+ return false;
+ }
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ if (iter.name() != InvalidVirtualRegister)
+ return false;
+ }
+ return true;
+ }
+#endif
+
+#if USE(JSVALUE64)
+ MacroAssembler::ImmPtr valueOfJSConstantAsImmPtr(NodeIndex nodeIndex)
+ {
+ return MacroAssembler::ImmPtr(JSValue::encode(valueOfJSConstant(nodeIndex)));
+ }
+#endif
+
+ // Helper functions to enable code sharing in implementations of bit/shift ops.
+ void bitOp(NodeType op, int32_t imm, GPRReg op1, GPRReg result)
+ {
+ switch (op) {
+ case BitAnd:
+ m_jit.and32(Imm32(imm), op1, result);
+ break;
+ case BitOr:
+ m_jit.or32(Imm32(imm), op1, result);
+ break;
+ case BitXor:
+ m_jit.xor32(Imm32(imm), op1, result);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+ void bitOp(NodeType op, GPRReg op1, GPRReg op2, GPRReg result)
+ {
+ switch (op) {
+ case BitAnd:
+ m_jit.and32(op1, op2, result);
+ break;
+ case BitOr:
+ m_jit.or32(op1, op2, result);
+ break;
+ case BitXor:
+ m_jit.xor32(op1, op2, result);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+ void shiftOp(NodeType op, GPRReg op1, int32_t shiftAmount, GPRReg result)
+ {
+ switch (op) {
+ case BitRShift:
+ m_jit.rshift32(op1, Imm32(shiftAmount), result);
+ break;
+ case BitLShift:
+ m_jit.lshift32(op1, Imm32(shiftAmount), result);
+ break;
+ case BitURShift:
+ m_jit.urshift32(op1, Imm32(shiftAmount), result);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+ void shiftOp(NodeType op, GPRReg op1, GPRReg shiftAmount, GPRReg result)
+ {
+ switch (op) {
+ case BitRShift:
+ m_jit.rshift32(op1, shiftAmount, result);
+ break;
+ case BitLShift:
+ m_jit.lshift32(op1, shiftAmount, result);
+ break;
+ case BitURShift:
+ m_jit.urshift32(op1, shiftAmount, result);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+
+ // Returns the node index of the branch node if peephole is okay, NoNode otherwise.
+ NodeIndex detectPeepHoleBranch()
+ {
+ NodeIndex lastNodeIndex = m_jit.graph().m_blocks[m_block]->end - 1;
+
+ // Check that no intervening nodes will be generated.
+ for (NodeIndex index = m_compileIndex + 1; index < lastNodeIndex; ++index) {
+ if (at(index).shouldGenerate())
+ return NoNode;
+ }
+
+ // Check if the lastNode is a branch on this node.
+ Node& lastNode = at(lastNodeIndex);
+ return lastNode.op == Branch && lastNode.child1() == m_compileIndex ? lastNodeIndex : NoNode;
+ }
+
+ void nonSpeculativeValueToNumber(Node&);
+ void nonSpeculativeValueToInt32(Node&);
+ void nonSpeculativeUInt32ToNumber(Node&);
+
+#if USE(JSVALUE64)
+ JITCompiler::Call cachedGetById(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
+ void cachedPutById(GPRReg base, GPRReg value, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
+#elif USE(JSVALUE32_64)
+ JITCompiler::Call cachedGetById(GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
+ void cachedPutById(GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
+#endif
+
+ void nonSpeculativeNonPeepholeCompareNull(NodeIndex operand, bool invert = false);
+ void nonSpeculativePeepholeBranchNull(NodeIndex operand, NodeIndex branchNodeIndex, bool invert = false);
+ bool nonSpeculativeCompareNull(Node&, NodeIndex operand, bool invert = false);
+
+ void nonSpeculativePeepholeBranch(Node&, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
+ void nonSpeculativeNonPeepholeCompare(Node&, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
+ bool nonSpeculativeCompare(Node&, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
+
+ void nonSpeculativePeepholeStrictEq(Node&, NodeIndex branchNodeIndex, bool invert = false);
+ void nonSpeculativeNonPeepholeStrictEq(Node&, bool invert = false);
+ bool nonSpeculativeStrictEq(Node&, bool invert = false);
+
+ void compileInstanceOfForObject(Node&, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchAndResultReg);
+ void compileInstanceOf(Node&);
+
+ // Access to our fixed callee CallFrame.
+ MacroAssembler::Address callFrameSlot(int slot)
+ {
+ return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)));
+ }
+
+ // Access to our fixed callee CallFrame.
+ MacroAssembler::Address argumentSlot(int argument)
+ {
+ return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)));
+ }
+
+ MacroAssembler::Address callFrameTagSlot(int slot)
+ {
+ return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ }
+
+ MacroAssembler::Address callFramePayloadSlot(int slot)
+ {
+ return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+
+ MacroAssembler::Address argumentTagSlot(int argument)
+ {
+ return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ }
+
+ MacroAssembler::Address argumentPayloadSlot(int argument)
+ {
+ return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+
+ void emitCall(Node&);
+
+ // Called once a node has completed code generation but prior to setting
+ // its result, to free up its children. (This must happen prior to setting
+    // the node's result, since the node may have the same VirtualRegister as
+    // a child, and as such will use the same GenerationInfo).
+ void useChildren(Node&);
+
+    // These methods are called to initialize the GenerationInfo
+ // to describe the result of an operation.
+ void integerResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatInteger, UseChildrenMode mode = CallUseChildren)
+ {
+ Node& node = at(nodeIndex);
+ if (mode == CallUseChildren)
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ if (format == DataFormatInteger) {
+ m_jit.jitAssertIsInt32(reg);
+ m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
+ info.initInteger(nodeIndex, node.refCount(), reg);
+ } else {
+#if USE(JSVALUE64)
+ ASSERT(format == DataFormatJSInteger);
+ m_jit.jitAssertIsJSInt32(reg);
+ m_gprs.retain(reg, virtualRegister, SpillOrderJS);
+ info.initJSValue(nodeIndex, node.refCount(), reg, format);
+#elif USE(JSVALUE32_64)
+ ASSERT_NOT_REACHED();
+#endif
+ }
+ }
+ void integerResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode)
+ {
+ integerResult(reg, nodeIndex, DataFormatInteger, mode);
+ }
+ void noResult(NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
+ {
+ if (mode == UseChildrenCalledExplicitly)
+ return;
+ Node& node = at(nodeIndex);
+ useChildren(node);
+ }
+ void cellResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
+ {
+ Node& node = at(nodeIndex);
+ if (mode == CallUseChildren)
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_gprs.retain(reg, virtualRegister, SpillOrderCell);
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ info.initCell(nodeIndex, node.refCount(), reg);
+ }
+ void booleanResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
+ {
+ Node& node = at(nodeIndex);
+ if (mode == CallUseChildren)
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_gprs.retain(reg, virtualRegister, SpillOrderBoolean);
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ info.initBoolean(nodeIndex, node.refCount(), reg);
+ }
+#if USE(JSVALUE64)
+ void jsValueResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
+ {
+ if (format == DataFormatJSInteger)
+ m_jit.jitAssertIsJSInt32(reg);
+
+ Node& node = at(nodeIndex);
+ if (mode == CallUseChildren)
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_gprs.retain(reg, virtualRegister, SpillOrderJS);
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ info.initJSValue(nodeIndex, node.refCount(), reg, format);
+ }
+ void jsValueResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode)
+ {
+ jsValueResult(reg, nodeIndex, DataFormatJS, mode);
+ }
+#elif USE(JSVALUE32_64)
+ void jsValueResult(GPRReg tag, GPRReg payload, NodeIndex nodeIndex, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
+ {
+ Node& node = at(nodeIndex);
+ if (mode == CallUseChildren)
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_gprs.retain(tag, virtualRegister, SpillOrderJS);
+ m_gprs.retain(payload, virtualRegister, SpillOrderJS);
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ info.initJSValue(nodeIndex, node.refCount(), tag, payload, format);
+ }
+ void jsValueResult(GPRReg tag, GPRReg payload, NodeIndex nodeIndex, UseChildrenMode mode)
+ {
+ jsValueResult(tag, payload, nodeIndex, DataFormatJS, mode);
+ }
+#endif
+ void storageResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
+ {
+ Node& node = at(nodeIndex);
+ if (mode == CallUseChildren)
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_gprs.retain(reg, virtualRegister, SpillOrderStorage);
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ info.initStorage(nodeIndex, node.refCount(), reg);
+ }
+ void doubleResult(FPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
+ {
+ Node& node = at(nodeIndex);
+ if (mode == CallUseChildren)
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_fprs.retain(reg, virtualRegister, SpillOrderDouble);
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ info.initDouble(nodeIndex, node.refCount(), reg);
+ }
+ void initConstantInfo(NodeIndex nodeIndex)
+ {
+ ASSERT(isInt32Constant(nodeIndex) || isNumberConstant(nodeIndex) || isJSConstant(nodeIndex));
+ Node& node = at(nodeIndex);
+ m_generationInfo[node.virtualRegister()].initConstant(nodeIndex, node.refCount());
+ }
+
+    // These methods are used to sort arguments into the correct registers.
+ // On X86 we use cdecl calling conventions, which pass all arguments on the
+ // stack. On other architectures we may need to sort values into the
+ // correct registers.
+#if !NUMBER_OF_ARGUMENT_REGISTERS
+ unsigned m_callArgumentOffset;
+ void resetCallArguments() { m_callArgumentOffset = 0; }
+
+    // These methods are used internally to implement the callOperation methods.
+ void addCallArgument(GPRReg value)
+ {
+ m_jit.poke(value, m_callArgumentOffset++);
+ }
+ void addCallArgument(TrustedImm32 imm)
+ {
+ m_jit.poke(imm, m_callArgumentOffset++);
+ }
+ void addCallArgument(TrustedImmPtr pointer)
+ {
+ m_jit.poke(pointer, m_callArgumentOffset++);
+ }
+ void addCallArgument(FPRReg value)
+ {
+ m_jit.storeDouble(value, JITCompiler::Address(JITCompiler::stackPointerRegister, m_callArgumentOffset * sizeof(void*)));
+ m_callArgumentOffset += sizeof(double) / sizeof(void*);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsExecState()
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+#endif // !NUMBER_OF_ARGUMENT_REGISTERS
+ // These methods are suitable for any calling convention that provides for
+ // at least 4 argument registers, e.g. X86_64, ARMv7.
+#if NUMBER_OF_ARGUMENT_REGISTERS >= 4
+ template<GPRReg destA, GPRReg destB>
+ void setupTwoStubArgs(GPRReg srcA, GPRReg srcB)
+ {
+ // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
+ // (1) both are already in arg regs, the right way around.
+ // (2) both are already in arg regs, the wrong way around.
+ // (3) neither is currently in an arg register.
+ // (4) srcA is in its correct reg.
+ // (5) srcA is in the incorrect reg.
+ // (6) srcB is in its correct reg.
+ // (7) srcB is in the incorrect reg.
+ //
+ // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
+ // place (the MacroAssembler will omit redundant moves). This approach is safe in
+ // cases 1, 3, 4, 5, 6, and in cases where srcA == srcB. The two problem cases are 2
+ // (which requires a swap) and 7 (where srcB must be moved first, to avoid trampling it).
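+ //
+ // For example (illustrative only), with destA == argumentGPR1 and destB == argumentGPR2:
+ // srcA == argumentGPR2, srcB == argumentGPR1 is case (2) and is handled by the swap below,
+ // while srcB == argumentGPR1 with srcA in some other register is case (7) and is handled
+ // by moving srcB into destB before srcA is moved.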
+
+ if (srcB != destA) {
+ // Handle the easy cases - two simple moves.
+ m_jit.move(srcA, destA);
+ m_jit.move(srcB, destB);
+ } else if (srcA != destB) {
+ // Handle the non-swap case - just put srcB in place first.
+ m_jit.move(srcB, destB);
+ m_jit.move(srcA, destA);
+ } else
+ m_jit.swap(destA, destB);
+ }
+#if CPU(X86_64)
+ template<FPRReg destA, FPRReg destB>
+ void setupTwoStubArgs(FPRReg srcA, FPRReg srcB)
+ {
+ // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
+ // (1) both are already in arg regs, the right way around.
+ // (2) both are already in arg regs, the wrong way around.
+ // (3) neither is currently in an arg register.
+ // (4) srcA is in its correct reg.
+ // (5) srcA is in the incorrect reg.
+ // (6) srcB is in its correct reg.
+ // (7) srcB is in the incorrect reg.
+ //
+ // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
+ // place (the MacroAssembler will omit redundant moves). This approach is safe in
+ // cases 1, 3, 4, 5, 6, and in cases where srcA == srcB. The two problem cases are 2
+ // (which requires a swap) and 7 (where srcB must be moved first, to avoid trampling it).
+
+ if (srcB != destA) {
+ // Handle the easy cases - two simple moves.
+ m_jit.moveDouble(srcA, destA);
+ m_jit.moveDouble(srcB, destB);
+ return;
+ }
+
+ if (srcA != destB) {
+ // Handle the non-swap case - just put srcB in place first.
+ m_jit.moveDouble(srcB, destB);
+ m_jit.moveDouble(srcA, destA);
+ return;
+ }
+
+ ASSERT(srcB == destA && srcA == destB);
+ // Need to swap; pick a temporary register.
+ FPRReg temp;
+ if (destA != FPRInfo::argumentFPR3 && destB != FPRInfo::argumentFPR3)
+ temp = FPRInfo::argumentFPR3;
+ else if (destA != FPRInfo::argumentFPR2 && destB != FPRInfo::argumentFPR2)
+ temp = FPRInfo::argumentFPR2;
+ else {
+ ASSERT(destA != FPRInfo::argumentFPR1 && destB != FPRInfo::argumentFPR1);
+ temp = FPRInfo::argumentFPR1;
+ }
+ m_jit.moveDouble(destA, temp);
+ m_jit.moveDouble(destB, destA);
+ m_jit.moveDouble(temp, destB);
+ }
+#endif
+ void setupStubArguments(GPRReg arg1, GPRReg arg2)
+ {
+ setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
+ }
+ void setupStubArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ // If neither of arg2/arg3 are in our way, then we can move arg1 into place.
+ // Then we can use setupTwoStubArgs to fix arg2/arg3.
+ if (arg2 != GPRInfo::argumentGPR1 && arg3 != GPRInfo::argumentGPR1) {
+ m_jit.move(arg1, GPRInfo::argumentGPR1);
+ setupTwoStubArgs<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3);
+ return;
+ }
+
+ // If neither of arg1/arg3 are in our way, then we can move arg2 into place.
+ // Then we can use setupTwoStubArgs to fix arg1/arg3.
+ if (arg1 != GPRInfo::argumentGPR2 && arg3 != GPRInfo::argumentGPR2) {
+ m_jit.move(arg2, GPRInfo::argumentGPR2);
+ setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3>(arg1, arg3);
+ return;
+ }
+
+ // If neither of arg1/arg2 are in our way, then we can move arg3 into place.
+ // Then we can use setupTwoStubArgs to fix arg1/arg2.
+ if (arg1 != GPRInfo::argumentGPR3 && arg2 != GPRInfo::argumentGPR3) {
+ m_jit.move(arg3, GPRInfo::argumentGPR3);
+ setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
+ return;
+ }
+
+ // If we get here, we haven't been able to move any of arg1/arg2/arg3.
+ // Since all three are blocked, then all three must already be in the argument register.
+ // But are they in the right ones?
+
+ // First, ensure arg1 is in place.
+ if (arg1 != GPRInfo::argumentGPR1) {
+ m_jit.swap(arg1, GPRInfo::argumentGPR1);
+
+ // If arg1 wasn't in argumentGPR1, one of arg2/arg3 must be.
+ ASSERT(arg2 == GPRInfo::argumentGPR1 || arg3 == GPRInfo::argumentGPR1);
+ // If arg2 was in argumentGPR1 it no longer is (due to the swap).
+ // Otherwise arg3 must have been; mark it as moved.
+ if (arg2 == GPRInfo::argumentGPR1)
+ arg2 = arg1;
+ else
+ arg3 = arg1;
+ }
+
+ // Either arg2 & arg3 need swapping, or we're all done.
+ ASSERT((arg2 == GPRInfo::argumentGPR2 && arg3 == GPRInfo::argumentGPR3)
+ || (arg2 == GPRInfo::argumentGPR3 && arg3 == GPRInfo::argumentGPR2));
+
+ if (arg2 != GPRInfo::argumentGPR2)
+ m_jit.swap(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3);
+ }
+
+#if CPU(X86_64)
+ ALWAYS_INLINE void setupArguments(FPRReg arg1)
+ {
+ m_jit.moveDouble(arg1, FPRInfo::argumentFPR0);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
+ {
+ setupTwoStubArgs<FPRInfo::argumentFPR0, FPRInfo::argumentFPR1>(arg1, arg2);
+ }
+#else
+ ALWAYS_INLINE void setupArguments(FPRReg arg1)
+ {
+ m_jit.assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
+ {
+ m_jit.assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1);
+ m_jit.assembler().vmov(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, arg2);
+ }
+#endif
+
+ ALWAYS_INLINE void setupArgumentsExecState()
+ {
+ m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1)
+ {
+ m_jit.move(arg1, GPRInfo::argumentGPR1);
+ m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1)
+ {
+ m_jit.move(arg1, GPRInfo::argumentGPR1);
+ m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2)
+ {
+ setupStubArguments(arg1, arg2);
+ m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2)
+ {
+ m_jit.move(arg1, GPRInfo::argumentGPR1);
+ m_jit.move(arg2, GPRInfo::argumentGPR2);
+ m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2)
+ {
+ m_jit.move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample!
+ m_jit.move(arg1, GPRInfo::argumentGPR1);
+ m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2)
+ {
+ m_jit.move(arg1, GPRInfo::argumentGPR1);
+ m_jit.move(arg2, GPRInfo::argumentGPR2);
+ m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2)
+ {
+ m_jit.move(arg1, GPRInfo::argumentGPR1);
+ m_jit.move(arg2, GPRInfo::argumentGPR2);
+ m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ setupStubArguments(arg1, arg2, arg3);
+ m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3)
+ {
+ setupStubArguments(arg1, arg2);
+ m_jit.move(arg3, GPRInfo::argumentGPR3);
+ m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3)
+ {
+ m_jit.move(arg1, GPRInfo::argumentGPR1);
+ m_jit.move(arg2, GPRInfo::argumentGPR2);
+ m_jit.move(arg3, GPRInfo::argumentGPR3);
+ m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3)
+ {
+ setupStubArguments(arg1, arg2);
+ m_jit.move(arg3, GPRInfo::argumentGPR3);
+ m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3)
+ {
+ m_jit.move(arg1, GPRInfo::argumentGPR1);
+ m_jit.move(arg2, GPRInfo::argumentGPR2);
+ m_jit.move(arg3, GPRInfo::argumentGPR3);
+ m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+#endif // NUMBER_OF_ARGUMENT_REGISTERS >= 4
+ // These methods are suitable for any calling convention that provides for
+ // exactly 4 argument registers, e.g. ARMv7.
+#if NUMBER_OF_ARGUMENT_REGISTERS == 4
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
+ {
+ m_jit.poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
+ {
+ m_jit.poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
+ {
+ m_jit.poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ m_jit.poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
+ {
+ m_jit.poke(arg5, 1);
+ m_jit.poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+#endif // NUMBER_OF_ARGUMENT_REGISTERS == 4
+
+ // These methods add calls to C++ helper functions.
+ // These methods are broadly value-representation specific (i.e. they
+ // deal with the fact that a JSValue may be passed in one or two
+ // machine registers), and delegate the calling-convention-specific
+ // decision as to how to fill the registers to the setupArguments* methods.
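+ //
+ // For example (an illustrative sketch only, assuming a 64-bit build; operationGetById is
+ // declared in DFGOperations.h, and baseGPR / identifier stand in for whatever register and
+ // Identifier* the calling node supplies):
+ //
+ //     GPRResult result(this);
+ //     callOperation(operationGetById, result.gpr(), baseGPR, identifier);
+ //     jsValueResult(result.gpr(), m_compileIndex);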
+#if USE(JSVALUE64)
+ JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, void* pointer)
+ {
+ setupArgumentsWithExecState(TrustedImmPtr(pointer));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1)
+ {
+ setupArguments(arg1);
+ JITCompiler::Call call = m_jit.appendCall(operation);
+ m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
+ return call;
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EGI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
+ {
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg result, Identifier* identifier)
+ {
+ setupArgumentsWithExecState(TrustedImmPtr(identifier));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg result, GPRReg arg1)
+ {
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg result, void* pointer, size_t size)
+ {
+ setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg result, int startConstant, int numConstants)
+ {
+ setupArgumentsWithExecState(Imm32(startConstant), Imm32(numConstants));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, void* pointer)
+ {
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
+ {
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
+ {
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, GPRReg arg1)
+ {
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result)
+ {
+ setupArgumentsExecState();
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, GPRReg arg1)
+ {
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell)
+ {
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
+ {
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler::Imm32 imm)
+ {
+ setupArgumentsWithExecState(arg1, MacroAssembler::ImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, MacroAssembler::Imm32 imm, GPRReg arg2)
+ {
+ setupArgumentsWithExecState(MacroAssembler::ImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))), arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1, GPRReg arg2, void* pointer)
+ {
+ setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(pointer));
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_DFGOperation_EJCI operation, GPRReg arg1, GPRReg arg2, Identifier* identifier)
+ {
+ setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(identifier));
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_DFGOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_DFGOperation_EAZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_DFGOperation_ECJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1)
+ {
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
+ {
+ setupArguments(arg1, arg2);
+ return appendCallSetResult(operation, result);
+ }
+#else
+ JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1)
+ {
+ setupArguments(arg1);
+ JITCompiler::Call call = m_jit.appendCall(operation);
+ m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
+ return call;
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, void* pointer)
+ {
+ setupArgumentsWithExecState(TrustedImmPtr(pointer));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, void* pointer)
+ {
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EGI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, Identifier* identifier)
+ {
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
+ {
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier)
+ {
+ setupArgumentsWithExecState(TrustedImmPtr(identifier));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
+ {
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg resultTag, GPRReg resultPayload, void* pointer, size_t size)
+ {
+ setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg resultTag, GPRReg resultPayload, int startConstant, int numConstants)
+ {
+ setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, void* pointer)
+ {
+ setupArgumentsWithExecState(arg1Payload, arg1Tag, ImmPtr(pointer));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
+ {
+ setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, Identifier* identifier)
+ {
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, Identifier* identifier)
+ {
+ setupArgumentsWithExecState(arg1Payload, arg1Tag, TrustedImmPtr(identifier));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1Tag, GPRReg arg1Payload, Identifier* identifier)
+ {
+ setupArgumentsWithExecState(arg1Payload, TrustedImm32(arg1Tag), TrustedImmPtr(identifier));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
+ {
+ setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload)
+ {
+ setupArgumentsWithExecState(arg1Payload, arg1Tag);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result)
+ {
+ setupArgumentsExecState();
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, GPRReg arg1)
+ {
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell)
+ {
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
+ {
+ setupArgumentsWithExecState(arg1Payload, arg1Tag);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
+ {
+ setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
+ {
+ setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, MacroAssembler::Imm32 imm)
+ {
+ setupArgumentsWithExecState(arg1Payload, arg1Tag, imm, TrustedImm32(JSValue::Int32Tag));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, MacroAssembler::Imm32 imm, GPRReg arg2Tag, GPRReg arg2Payload)
+ {
+ setupArgumentsWithExecState(imm, TrustedImm32(JSValue::Int32Tag), arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload)
+ {
+ setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, void* pointer)
+ {
+ setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2, TrustedImmPtr(pointer));
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_DFGOperation_EJCI operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, Identifier* identifier)
+ {
+ setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2, TrustedImmPtr(identifier));
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_DFGOperation_ECJJ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload)
+ {
+ setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag);
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
+ {
+ setupArgumentsWithExecState(arg1, arg2, arg3Payload, arg3Tag);
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_DFGOperation_EAZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
+ {
+ setupArgumentsWithExecState(arg1, arg2, arg3Payload, arg3Tag);
+ return appendCallWithExceptionCheck(operation);
+ }
+
+ JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
+ {
+ setupArgumentsWithExecState(arg1Payload, arg1Tag);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
+ {
+ setupArguments(arg1, arg2);
+ return appendCallSetResult(operation, result);
+ }
+#endif
+
+ // These methods add call instructions, with optional exception checks & setting results.
+ JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function)
+ {
+ return m_jit.addExceptionCheck(m_jit.appendCall(function), at(m_compileIndex).codeOrigin);
+ }
+ JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result)
+ {
+ JITCompiler::Call call = appendCallWithExceptionCheck(function);
+ m_jit.move(GPRInfo::returnValueGPR, result);
+ return call;
+ }
+ void setupResults(GPRReg destA, GPRReg destB)
+ {
+ GPRReg srcA = GPRInfo::returnValueGPR;
+ GPRReg srcB = GPRInfo::returnValueGPR2;
+
+ if (srcB != destA) {
+ // Handle the easy cases - two simple moves.
+ m_jit.move(srcA, destA);
+ m_jit.move(srcB, destB);
+ } else if (srcA != destB) {
+ // Handle the non-swap case - just put srcB in place first.
+ m_jit.move(srcB, destB);
+ m_jit.move(srcA, destA);
+ } else
+ m_jit.swap(destA, destB);
+ }
+ JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result1, GPRReg result2)
+ {
+ JITCompiler::Call call = appendCallWithExceptionCheck(function);
+ setupResults(result1, result2);
+ return call;
+ }
+#if CPU(X86)
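+ // On X86, C functions return doubles on the x87 floating-point stack; pop the x87
+ // result into the slot at the stack pointer and reload it into the requested FPR.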
+ JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
+ {
+ JITCompiler::Call call = appendCallWithExceptionCheck(function);
+ m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
+ m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
+ return call;
+ }
+ JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
+ {
+ JITCompiler::Call call = m_jit.appendCall(function);
+ m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
+ m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
+ return call;
+ }
+#elif CPU(ARM)
+ JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
+ {
+ JITCompiler::Call call = appendCallWithExceptionCheck(function);
+ m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
+ return call;
+ }
+ JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
+ {
+ JITCompiler::Call call = m_jit.appendCall(function);
+ m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
+ return call;
+ }
+#else
+ JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
+ {
+ JITCompiler::Call call = appendCallWithExceptionCheck(function);
+ m_jit.moveDouble(FPRInfo::returnValueFPR, result);
+ return call;
+ }
+ JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
+ {
+ JITCompiler::Call call = m_jit.appendCall(function);
+ m_jit.moveDouble(FPRInfo::returnValueFPR, result);
+ return call;
+ }
+#endif
+
+ void addBranch(const MacroAssembler::Jump& jump, BlockIndex destination)
+ {
+ m_branches.append(BranchRecord(jump, destination));
+ }
+
+ void linkBranches()
+ {
+ for (size_t i = 0; i < m_branches.size(); ++i) {
+ BranchRecord& branch = m_branches[i];
+ branch.jump.linkTo(m_blockHeads[branch.destination], &m_jit);
+ }
+ }
+
+ BasicBlock* block()
+ {
+ return m_jit.graph().m_blocks[m_block].get();
+ }
+
+#ifndef NDEBUG
+ void dump(const char* label = 0);
+#endif
+
+#if DFG_ENABLE(CONSISTENCY_CHECK)
+ void checkConsistency();
+#else
+ void checkConsistency() { }
+#endif
+
+ bool isInteger(NodeIndex nodeIndex)
+ {
+ Node& node = at(nodeIndex);
+ if (node.hasInt32Result())
+ return true;
+
+ if (isInt32Constant(nodeIndex))
+ return true;
+
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ return info.isJSInteger();
+ }
+
+ bool compare(Node&, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_DFGOperation_EJJ);
+ bool compilePeepHoleBranch(Node&, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_DFGOperation_EJJ);
+ void compilePeepHoleIntegerBranch(Node&, NodeIndex branchNodeIndex, JITCompiler::RelationalCondition);
+ void compilePeepHoleDoubleBranch(Node&, NodeIndex branchNodeIndex, JITCompiler::DoubleCondition);
+ void compilePeepHoleObjectEquality(Node&, NodeIndex branchNodeIndex, const ClassInfo*, PredictionChecker);
+ void compileObjectEquality(Node&, const ClassInfo*, PredictionChecker);
+ void compileValueAdd(Node&);
+ void compileObjectOrOtherLogicalNot(NodeIndex value, const ClassInfo*, bool needSpeculationCheck);
+ void compileLogicalNot(Node&);
+ void emitObjectOrOtherBranch(NodeIndex value, BlockIndex taken, BlockIndex notTaken, const ClassInfo*, bool needSpeculationCheck);
+ void emitBranch(Node&);
+
+ void compileIntegerCompare(Node&, MacroAssembler::RelationalCondition);
+ void compileDoubleCompare(Node&, MacroAssembler::DoubleCondition);
+
+ bool compileStrictEqForConstant(Node&, NodeIndex value, JSValue constant);
+
+ bool compileStrictEq(Node&);
+
+ void compileGetCharCodeAt(Node&);
+ void compileGetByValOnString(Node&);
+ void compileValueToInt32(Node&);
+ void compileUInt32ToNumber(Node&);
+ void compileGetByValOnByteArray(Node&);
+ void compilePutByValForByteArray(GPRReg base, GPRReg property, Node&);
+ void compileArithMul(Node&);
+ void compileArithMod(Node&);
+ void compileSoftModulo(Node&);
+ void compileGetTypedArrayLength(const TypedArrayDescriptor&, Node&, bool needsSpeculationCheck);
+ enum TypedArraySpeculationRequirements {
+ NoTypedArraySpecCheck,
+ NoTypedArrayTypeSpecCheck,
+ AllTypedArraySpecChecks
+ };
+ enum TypedArraySignedness {
+ SignedTypedArray,
+ UnsignedTypedArray
+ };
+ void compileGetIndexedPropertyStorage(Node&);
+ void compileGetByValOnIntTypedArray(const TypedArrayDescriptor&, Node&, size_t elementSize, TypedArraySpeculationRequirements, TypedArraySignedness);
+ void compilePutByValForIntTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node&, size_t elementSize, TypedArraySpeculationRequirements, TypedArraySignedness);
+ void compileGetByValOnFloatTypedArray(const TypedArrayDescriptor&, Node&, size_t elementSize, TypedArraySpeculationRequirements);
+ void compilePutByValForFloatTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node&, size_t elementSize, TypedArraySpeculationRequirements);
+
+ // It is acceptable to have structure be equal to scratch, so long as you're fine
+ // with the structure GPR being clobbered.
+ template<typename T>
+ void emitAllocateJSFinalObject(T structure, GPRReg resultGPR, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
+ {
+ MarkedSpace::SizeClass* sizeClass = &m_jit.globalData()->heap.sizeClassForObject(sizeof(JSFinalObject));
+
+ m_jit.loadPtr(&sizeClass->firstFreeCell, resultGPR);
+ slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultGPR));
+
+ // The object is half-allocated: we have what we know is a fresh object, but
+ // it's still on the GC's free list.
+
+ // Ditch the structure by placing it into the structure slot, so that we can reuse
+ // scratchGPR.
+ m_jit.storePtr(structure, MacroAssembler::Address(resultGPR, JSObject::structureOffset()));
+
+ // Now that we have scratchGPR back, remove the object from the free list
+ m_jit.loadPtr(MacroAssembler::Address(resultGPR), scratchGPR);
+ m_jit.storePtr(scratchGPR, &sizeClass->firstFreeCell);
+
+ // Initialize the object's classInfo pointer
+ m_jit.storePtr(MacroAssembler::TrustedImmPtr(&JSFinalObject::s_info), MacroAssembler::Address(resultGPR, JSCell::classInfoOffset()));
+
+ // Initialize the object's inheritorID.
+ m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::Address(resultGPR, JSObject::offsetOfInheritorID()));
+
+ // Initialize the object's property storage pointer.
+ m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(JSObject)), resultGPR, scratchGPR);
+ m_jit.storePtr(scratchGPR, MacroAssembler::Address(resultGPR, JSFinalObject::offsetOfPropertyStorage()));
+ }
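+
+ // The jumps appended to slowPath fire when the size class's free list is empty; the
+ // caller is responsible for emitting the out-of-line allocation path (typically a call
+ // to a C++ operation).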
+
+#if USE(JSVALUE64)
+ JITCompiler::Jump convertToDouble(GPRReg value, FPRReg result, GPRReg tmp);
+#elif USE(JSVALUE32_64)
+ JITCompiler::Jump convertToDouble(JSValueOperand&, FPRReg result);
+#endif
+
+ // Add a speculation check without additional recovery.
+ void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail)
+ {
+ if (!m_compileOkay)
+ return;
+ m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.valueProfileFor(nodeIndex), jumpToFail, this));
+ }
+ // Add a set of speculation checks without additional recovery.
+ void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail)
+ {
+ Vector<MacroAssembler::Jump, 16> jumpVector = jumpsToFail.jumps();
+ for (unsigned i = 0; i < jumpVector.size(); ++i)
+ speculationCheck(kind, jsValueSource, nodeIndex, jumpVector[i]);
+ }
+ // Add a speculation check with additional recovery.
+ void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
+ {
+ if (!m_compileOkay)
+ return;
+ m_jit.codeBlock()->appendSpeculationRecovery(recovery);
+ m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.valueProfileFor(nodeIndex), jumpToFail, this, m_jit.codeBlock()->numberOfSpeculationRecoveries()));
+ }
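+
+ // For example (an illustrative sketch only; op2GPR and resultGPR stand in for whatever
+ // registers the node's operands occupy), an overflow check on an add can be registered as:
+ //
+ //     speculationCheck(Overflow, JSValueRegs(), NoNode,
+ //         m_jit.branchAdd32(MacroAssembler::Overflow, op2GPR, resultGPR));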
+
+ // Called when we statically determine that a speculation will fail.
+ void terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, NodeIndex nodeIndex)
+ {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "SpeculativeJIT was terminated.\n");
+#endif
+ if (!m_compileOkay)
+ return;
+ speculationCheck(kind, jsValueRegs, nodeIndex, m_jit.jump());
+ m_compileOkay = false;
+ }
+
+ template<bool strict>
+ GPRReg fillSpeculateIntInternal(NodeIndex, DataFormat& returnFormat);
+
+ // It is possible, during speculative generation, to reach a situation in which we
+ // can statically determine a speculation will fail (for example, when two nodes
+ // will make conflicting speculations about the same operand). In such cases this
+ // flag is cleared, indicating no further code generation should take place.
+ bool m_compileOkay;
+
+ // Tracking of which nodes are currently holding the values of arguments and bytecode
+ // operand-indexed variables.
+
+ ValueSource valueSourceForOperand(int operand)
+ {
+ return valueSourceReferenceForOperand(operand);
+ }
+
+ void setNodeIndexForOperand(NodeIndex nodeIndex, int operand)
+ {
+ valueSourceReferenceForOperand(operand) = ValueSource(nodeIndex);
+ }
+
+ // Call this with care, since it both returns a reference into an array
+ // and potentially resizes the array. So it would not be right to call this
+ // twice and then operate on both references, since the one from
+ // the first call may no longer be valid (see the sketch after this method).
+ ValueSource& valueSourceReferenceForOperand(int operand)
+ {
+ if (operandIsArgument(operand)) {
+ int argument = operandToArgument(operand);
+ return m_arguments[argument];
+ }
+
+ if ((unsigned)operand >= m_variables.size())
+ m_variables.resize(operand + 1);
+
+ return m_variables[operand];
+ }
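+
+ // For example (illustrative only), the following would be unsafe, because the second
+ // call may resize m_variables and leave the first reference dangling:
+ //
+ //     ValueSource& a = valueSourceReferenceForOperand(0);
+ //     ValueSource& b = valueSourceReferenceForOperand(1); // may reallocate m_variables
+ //     a = ValueSource(nodeIndex); // potentially writes through a dangling reference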
+
+ // The JIT, which also provides MacroAssembler functionality.
+ JITCompiler& m_jit;
+ // The current block and node being generated.
+ BlockIndex m_block;
+ NodeIndex m_compileIndex;
+ // Virtual and physical register maps.
+ Vector<GenerationInfo, 32> m_generationInfo;
+ RegisterBank<GPRInfo> m_gprs;
+ RegisterBank<FPRInfo> m_fprs;
+
+ Vector<MacroAssembler::Label> m_blockHeads;
+ struct BranchRecord {
+ BranchRecord(MacroAssembler::Jump jump, BlockIndex destination)
+ : jump(jump)
+ , destination(destination)
+ {
+ }
+
+ MacroAssembler::Jump jump;
+ BlockIndex destination;
+ };
+ Vector<BranchRecord, 8> m_branches;
+
+ Vector<ValueSource, 0> m_arguments;
+ Vector<ValueSource, 0> m_variables;
+ int m_lastSetOperand;
+ CodeOrigin m_codeOriginForOSR;
+
+ AbstractState m_state;
+
+ ValueRecovery computeValueRecoveryFor(const ValueSource&);
+
+ ValueRecovery computeValueRecoveryFor(int operand)
+ {
+ return computeValueRecoveryFor(valueSourceForOperand(operand));
+ }
+};
+
+
+// === Operand types ===
+//
+// IntegerOperand, DoubleOperand and JSValueOperand.
+//
+// These classes are used to lock the operands to a node into machine
+// registers. These classes implement a pattern of locking a value
+// into a register at the point of construction only if it is already in
+// registers, and otherwise loading it lazily at the point it is first
+// used. We do so in order to attempt to avoid spilling one operand
+// in order to make space available for another.
+
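+// For example (an illustrative sketch only; the surrounding compile method supplies
+// 'node' and 'm_compileIndex', and integerResult is the helper declared earlier in
+// SpeculativeJIT), an integer-consuming node might be generated as:
+//
+//     IntegerOperand op1(this, node.child1());
+//     GPRTemporary result(this, op1);
+//     m_jit.move(op1.gpr(), result.gpr());
+//     integerResult(result.gpr(), m_compileIndex);
+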
+class IntegerOperand {
+public:
+ explicit IntegerOperand(SpeculativeJIT* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+ , m_gprOrInvalid(InvalidGPRReg)
+#ifndef NDEBUG
+ , m_format(DataFormatNone)
+#endif
+ {
+ ASSERT(m_jit);
+ if (jit->isFilled(index))
+ gpr();
+ }
+
+ ~IntegerOperand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+ DataFormat format()
+ {
+ gpr(); // m_format is set when m_gpr is locked.
+ ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
+ return m_format;
+ }
+
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillInteger(index(), m_format);
+ return m_gprOrInvalid;
+ }
+
+ void use()
+ {
+ m_jit->use(m_index);
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ NodeIndex m_index;
+ GPRReg m_gprOrInvalid;
+ DataFormat m_format;
+};
+
+class DoubleOperand {
+public:
+ explicit DoubleOperand(SpeculativeJIT* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+ , m_fprOrInvalid(InvalidFPRReg)
+ {
+ ASSERT(m_jit);
+ if (jit->isFilledDouble(index))
+ fpr();
+ }
+
+ ~DoubleOperand()
+ {
+ ASSERT(m_fprOrInvalid != InvalidFPRReg);
+ m_jit->unlock(m_fprOrInvalid);
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+ FPRReg fpr()
+ {
+ if (m_fprOrInvalid == InvalidFPRReg)
+ m_fprOrInvalid = m_jit->fillDouble(index());
+ return m_fprOrInvalid;
+ }
+
+ void use()
+ {
+ m_jit->use(m_index);
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ NodeIndex m_index;
+ FPRReg m_fprOrInvalid;
+};
+
+class JSValueOperand {
+public:
+ explicit JSValueOperand(SpeculativeJIT* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+#if USE(JSVALUE64)
+ , m_gprOrInvalid(InvalidGPRReg)
+#elif USE(JSVALUE32_64)
+ , m_isDouble(false)
+#endif
+ {
+ ASSERT(m_jit);
+#if USE(JSVALUE64)
+ if (jit->isFilled(index))
+ gpr();
+#elif USE(JSVALUE32_64)
+ m_register.pair.tagGPR = InvalidGPRReg;
+ m_register.pair.payloadGPR = InvalidGPRReg;
+ if (jit->isFilled(index))
+ fill();
+#endif
+ }
+
+ ~JSValueOperand()
+ {
+#if USE(JSVALUE64)
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+#elif USE(JSVALUE32_64)
+ if (m_isDouble) {
+ ASSERT(m_register.fpr != InvalidFPRReg);
+ m_jit->unlock(m_register.fpr);
+ } else {
+ ASSERT(m_register.pair.tagGPR != InvalidGPRReg && m_register.pair.payloadGPR != InvalidGPRReg);
+ m_jit->unlock(m_register.pair.tagGPR);
+ m_jit->unlock(m_register.pair.payloadGPR);
+ }
+#endif
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+#if USE(JSVALUE64)
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillJSValue(index());
+ return m_gprOrInvalid;
+ }
+ JSValueRegs jsValueRegs()
+ {
+ return JSValueRegs(gpr());
+ }
+#elif USE(JSVALUE32_64)
+ bool isDouble() { return m_isDouble; }
+
+ void fill()
+ {
+ if (m_register.pair.tagGPR == InvalidGPRReg && m_register.pair.payloadGPR == InvalidGPRReg)
+ m_isDouble = !m_jit->fillJSValue(index(), m_register.pair.tagGPR, m_register.pair.payloadGPR, m_register.fpr);
+ }
+
+ GPRReg tagGPR()
+ {
+ fill();
+ ASSERT(!m_isDouble);
+ return m_register.pair.tagGPR;
+ }
+
+ GPRReg payloadGPR()
+ {
+ fill();
+ ASSERT(!m_isDouble);
+ return m_register.pair.payloadGPR;
+ }
+
+ JSValueRegs jsValueRegs()
+ {
+ return JSValueRegs(tagGPR(), payloadGPR());
+ }
+
+ FPRReg fpr()
+ {
+ fill();
+ ASSERT(m_isDouble);
+ return m_register.fpr;
+ }
+#endif
+
+ void use()
+ {
+ m_jit->use(m_index);
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ NodeIndex m_index;
+#if USE(JSVALUE64)
+ GPRReg m_gprOrInvalid;
+#elif USE(JSVALUE32_64)
+ union {
+ struct {
+ GPRReg tagGPR;
+ GPRReg payloadGPR;
+ } pair;
+ FPRReg fpr;
+ } m_register;
+ bool m_isDouble;
+#endif
+};
+
+class StorageOperand {
+public:
+ explicit StorageOperand(SpeculativeJIT* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+ , m_gprOrInvalid(InvalidGPRReg)
+ {
+ ASSERT(m_jit);
+ if (jit->isFilled(index))
+ gpr();
+ }
+
+ ~StorageOperand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillStorage(index());
+ return m_gprOrInvalid;
+ }
+
+ void use()
+ {
+ m_jit->use(m_index);
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ NodeIndex m_index;
+ GPRReg m_gprOrInvalid;
+};
+
+
+// === Temporaries ===
+//
+// These classes are used to allocate temporary registers.
+// A mechanism is provided to attempt to reuse the registers
+// currently allocated to child nodes whose value is consumed
+// by, and not live after, this operation.
+
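+// For example (illustrative only), GPRTemporary(this, op1) will reuse op1's register
+// when this node is the last use of op1, and otherwise allocates a fresh GPR.
+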
+class GPRTemporary {
+public:
+ GPRTemporary();
+ GPRTemporary(SpeculativeJIT*);
+ GPRTemporary(SpeculativeJIT*, GPRReg specific);
+ GPRTemporary(SpeculativeJIT*, SpeculateIntegerOperand&);
+ GPRTemporary(SpeculativeJIT*, SpeculateIntegerOperand&, SpeculateIntegerOperand&);
+ GPRTemporary(SpeculativeJIT*, SpeculateStrictInt32Operand&);
+ GPRTemporary(SpeculativeJIT*, IntegerOperand&);
+ GPRTemporary(SpeculativeJIT*, IntegerOperand&, IntegerOperand&);
+ GPRTemporary(SpeculativeJIT*, SpeculateCellOperand&);
+ GPRTemporary(SpeculativeJIT*, SpeculateBooleanOperand&);
+#if USE(JSVALUE64)
+ GPRTemporary(SpeculativeJIT*, JSValueOperand&);
+#elif USE(JSVALUE32_64)
+ GPRTemporary(SpeculativeJIT*, JSValueOperand&, bool tag = true);
+#endif
+ GPRTemporary(SpeculativeJIT*, StorageOperand&);
+
+ void adopt(GPRTemporary&);
+
+ ~GPRTemporary()
+ {
+ if (m_jit && m_gpr != InvalidGPRReg)
+ m_jit->unlock(gpr());
+ }
+
+ GPRReg gpr()
+ {
+ return m_gpr;
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ GPRReg m_gpr;
+};
+
+class FPRTemporary {
+public:
+ FPRTemporary(SpeculativeJIT*);
+ FPRTemporary(SpeculativeJIT*, DoubleOperand&);
+ FPRTemporary(SpeculativeJIT*, DoubleOperand&, DoubleOperand&);
+ FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&);
+ FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&, SpeculateDoubleOperand&);
+#if USE(JSVALUE32_64)
+ FPRTemporary(SpeculativeJIT*, JSValueOperand&);
+#endif
+
+ ~FPRTemporary()
+ {
+ m_jit->unlock(fpr());
+ }
+
+ FPRReg fpr() const
+ {
+ ASSERT(m_fpr != InvalidFPRReg);
+ return m_fpr;
+ }
+
+protected:
+ FPRTemporary(SpeculativeJIT* jit, FPRReg lockedFPR)
+ : m_jit(jit)
+ , m_fpr(lockedFPR)
+ {
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ FPRReg m_fpr;
+};
+
+
+// === Results ===
+//
+// These classes lock the result of a call to a C++ helper function.
+
+class GPRResult : public GPRTemporary {
+public:
+ GPRResult(SpeculativeJIT* jit)
+ : GPRTemporary(jit, GPRInfo::returnValueGPR)
+ {
+ }
+};
+
+#if USE(JSVALUE32_64)
+class GPRResult2 : public GPRTemporary {
+public:
+ GPRResult2(SpeculativeJIT* jit)
+ : GPRTemporary(jit, GPRInfo::returnValueGPR2)
+ {
+ }
+};
+#endif
+
+class FPRResult : public FPRTemporary {
+public:
+ FPRResult(SpeculativeJIT* jit)
+ : FPRTemporary(jit, lockedResult(jit))
+ {
+ }
+
+private:
+ static FPRReg lockedResult(SpeculativeJIT* jit)
+ {
+ jit->lock(FPRInfo::returnValueFPR);
+ return FPRInfo::returnValueFPR;
+ }
+};
+
+
+// === Speculative Operand types ===
+//
+// SpeculateIntegerOperand, SpeculateStrictInt32Operand and SpeculateCellOperand.
+//
+// These are used to lock the operands to a node into machine registers within the
+// SpeculativeJIT. The classes operate like those above; however, these will
+// perform a speculative check for a more restrictive type than we can statically
+// determine the operand to have. If the operand does not have the requested type,
+// a bail-out to the non-speculative path will be taken.
+
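+// For example (an illustrative sketch only):
+//
+//     SpeculateIntegerOperand op(this, node.child1());
+//     GPRReg gpr = op.gpr(); // may emit a speculation check if the value is not known to be an int32
+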
+class SpeculateIntegerOperand {
+public:
+ explicit SpeculateIntegerOperand(SpeculativeJIT* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+ , m_gprOrInvalid(InvalidGPRReg)
+#ifndef NDEBUG
+ , m_format(DataFormatNone)
+#endif
+ {
+ ASSERT(m_jit);
+ if (jit->isFilled(index))
+ gpr();
+ }
+
+ ~SpeculateIntegerOperand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+ DataFormat format()
+ {
+ gpr(); // m_format is set when m_gpr is locked.
+ ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
+ return m_format;
+ }
+
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillSpeculateInt(index(), m_format);
+ return m_gprOrInvalid;
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ NodeIndex m_index;
+ GPRReg m_gprOrInvalid;
+ DataFormat m_format;
+};
+
+class SpeculateStrictInt32Operand {
+public:
+ explicit SpeculateStrictInt32Operand(SpeculativeJIT* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+ , m_gprOrInvalid(InvalidGPRReg)
+ {
+ ASSERT(m_jit);
+ if (jit->isFilled(index))
+ gpr();
+ }
+
+ ~SpeculateStrictInt32Operand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillSpeculateIntStrict(index());
+ return m_gprOrInvalid;
+ }
+
+ void use()
+ {
+ m_jit->use(m_index);
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ NodeIndex m_index;
+ GPRReg m_gprOrInvalid;
+};
+
+class SpeculateDoubleOperand {
+public:
+ explicit SpeculateDoubleOperand(SpeculativeJIT* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+ , m_fprOrInvalid(InvalidFPRReg)
+ {
+ ASSERT(m_jit);
+ if (jit->isFilled(index))
+ fpr();
+ }
+
+ ~SpeculateDoubleOperand()
+ {
+ ASSERT(m_fprOrInvalid != InvalidFPRReg);
+ m_jit->unlock(m_fprOrInvalid);
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+ FPRReg fpr()
+ {
+ if (m_fprOrInvalid == InvalidFPRReg)
+ m_fprOrInvalid = m_jit->fillSpeculateDouble(index());
+ return m_fprOrInvalid;
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ NodeIndex m_index;
+ FPRReg m_fprOrInvalid;
+};
+
+class SpeculateCellOperand {
+public:
+ explicit SpeculateCellOperand(SpeculativeJIT* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+ , m_gprOrInvalid(InvalidGPRReg)
+ {
+ ASSERT(m_jit);
+ if (jit->isFilled(index))
+ gpr();
+ }
+
+ ~SpeculateCellOperand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillSpeculateCell(index());
+ return m_gprOrInvalid;
+ }
+
+ void use()
+ {
+ m_jit->use(m_index);
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ NodeIndex m_index;
+ GPRReg m_gprOrInvalid;
+};
+
+class SpeculateBooleanOperand {
+public:
+ explicit SpeculateBooleanOperand(SpeculativeJIT* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+ , m_gprOrInvalid(InvalidGPRReg)
+ {
+ ASSERT(m_jit);
+ if (jit->isFilled(index))
+ gpr();
+ }
+
+ ~SpeculateBooleanOperand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillSpeculateBoolean(index());
+ return m_gprOrInvalid;
+ }
+
+ void use()
+ {
+ m_jit->use(m_index);
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ NodeIndex m_index;
+ GPRReg m_gprOrInvalid;
+};
+
+inline SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
+ : m_compileOkay(true)
+ , m_jit(jit)
+ , m_compileIndex(0)
+ , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
+ , m_blockHeads(jit.graph().m_blocks.size())
+ , m_arguments(jit.codeBlock()->m_numParameters)
+ , m_variables(jit.graph().m_localVars)
+ , m_lastSetOperand(std::numeric_limits<int>::max())
+ , m_state(m_jit.codeBlock(), m_jit.graph())
+{
+}
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
+
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
new file mode 100644
index 000000000..bbe6171eb
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
@@ -0,0 +1,3561 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGSpeculativeJIT.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "JSByteArray.h"
+
+namespace JSC { namespace DFG {
+
+#if USE(JSVALUE32_64)
+
+GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat)
+{
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ if (info.registerFormat() == DataFormatNone) {
+ GPRReg gpr = allocate();
+
+ if (node.hasConstant()) {
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ if (isInt32Constant(nodeIndex))
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ else if (isNumberConstant(nodeIndex))
+ ASSERT_NOT_REACHED();
+ else {
+ ASSERT(isJSConstant(nodeIndex));
+ JSValue jsValue = valueOfJSConstant(nodeIndex);
+ m_jit.move(MacroAssembler::Imm32(jsValue.payload()), gpr);
+ }
+ } else {
+ ASSERT(info.spillFormat() == DataFormatJS || info.spillFormat() == DataFormatJSInteger || info.spillFormat() == DataFormatInteger);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
+ }
+
+ info.fillInteger(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+
+ switch (info.registerFormat()) {
+ case DataFormatNone:
+        // Should have been filled above.
+ case DataFormatJSDouble:
+ case DataFormatDouble:
+ case DataFormatJS:
+ case DataFormatCell:
+ case DataFormatJSCell:
+ case DataFormatBoolean:
+ case DataFormatJSBoolean:
+ case DataFormatStorage:
+        // Should only be calling this function if we know this operand to be an integer.
+ ASSERT_NOT_REACHED();
+
+ case DataFormatJSInteger: {
+ GPRReg tagGPR = info.tagGPR();
+ GPRReg payloadGPR = info.payloadGPR();
+ m_gprs.lock(tagGPR);
+ m_jit.jitAssertIsJSInt32(tagGPR);
+ m_gprs.unlock(tagGPR);
+ m_gprs.lock(payloadGPR);
+ m_gprs.release(tagGPR);
+ m_gprs.release(payloadGPR);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger);
+ info.fillInteger(payloadGPR);
+ returnFormat = DataFormatInteger;
+ return payloadGPR;
+ }
+
+ case DataFormatInteger: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.jitAssertIsInt32(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+}
+
+FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex)
+{
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ if (info.registerFormat() == DataFormatNone) {
+
+ if (node.hasConstant()) {
+ if (isInt32Constant(nodeIndex)) {
+ // FIXME: should not be reachable?
+ GPRReg gpr = allocate();
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ info.fillInteger(gpr);
+ unlock(gpr);
+ } else if (isNumberConstant(nodeIndex)) {
+ FPRReg fpr = fprAllocate();
+ m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ return fpr;
+ } else {
+ // FIXME: should not be reachable?
+ ASSERT_NOT_REACHED();
+ }
+ } else {
+ DataFormat spillFormat = info.spillFormat();
+ ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);
+ if (spillFormat == DataFormatJSDouble) {
+ FPRReg fpr = fprAllocate();
+ m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
+ info.fillDouble(fpr);
+ return fpr;
+ }
+
+ FPRReg fpr = fprAllocate();
+ JITCompiler::Jump hasUnboxedDouble;
+
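+            // The spilled value is either a boxed double or an int32: branch on the tag
+            // and convert from int32 when it is Int32Tag, otherwise load the double bits.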
+ if (spillFormat != DataFormatJSInteger && spillFormat != DataFormatInteger) {
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag));
+ m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
+ hasUnboxedDouble = m_jit.jump();
+ isInteger.link(&m_jit);
+ }
+
+ m_jit.convertInt32ToDouble(JITCompiler::payloadFor(virtualRegister), fpr);
+
+ if (hasUnboxedDouble.isSet())
+ hasUnboxedDouble.link(&m_jit);
+
+ m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
+ info.fillDouble(fpr);
+ return fpr;
+ }
+ }
+
+ switch (info.registerFormat()) {
+ case DataFormatNone:
+        // Should have been filled above.
+ case DataFormatCell:
+ case DataFormatJSCell:
+ case DataFormatBoolean:
+ case DataFormatJSBoolean:
+ case DataFormatStorage:
+ // Should only be calling this function if we know this operand to be numeric.
+ ASSERT_NOT_REACHED();
+
+ case DataFormatJSInteger:
+ case DataFormatJS: {
+ GPRReg tagGPR = info.tagGPR();
+ GPRReg payloadGPR = info.payloadGPR();
+ FPRReg fpr = fprAllocate();
+ m_gprs.lock(tagGPR);
+ m_gprs.lock(payloadGPR);
+
+ JITCompiler::Jump hasUnboxedDouble;
+
+ if (info.registerFormat() != DataFormatJSInteger) {
+ FPRTemporary scratch(this);
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
+ m_jit.jitAssertIsJSDouble(tagGPR);
+ unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
+ hasUnboxedDouble = m_jit.jump();
+ isInteger.link(&m_jit);
+ }
+
+ m_jit.convertInt32ToDouble(payloadGPR, fpr);
+
+ if (hasUnboxedDouble.isSet())
+ hasUnboxedDouble.link(&m_jit);
+
+ m_gprs.release(tagGPR);
+ m_gprs.release(payloadGPR);
+ m_gprs.unlock(tagGPR);
+ m_gprs.unlock(payloadGPR);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ info.killSpilled();
+ return fpr;
+ }
+
+ case DataFormatInteger: {
+ FPRReg fpr = fprAllocate();
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.convertInt32ToDouble(gpr, fpr);
+ m_gprs.unlock(gpr);
+ return fpr;
+ }
+
+ case DataFormatJSDouble:
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ m_fprs.lock(fpr);
+ return fpr;
+ }
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidFPRReg;
+}
+
+bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& payloadGPR, FPRReg& fpr)
+{
+    // FIXME: For double we could fill with an FPR.
+ UNUSED_PARAM(fpr);
+
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+
+ if (node.hasConstant()) {
+ tagGPR = allocate();
+ payloadGPR = allocate();
+ m_jit.move(Imm32(valueOfJSConstant(nodeIndex).tag()), tagGPR);
+ m_jit.move(Imm32(valueOfJSConstant(nodeIndex).payload()), payloadGPR);
+ m_gprs.retain(tagGPR, virtualRegister, SpillOrderConstant);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderConstant);
+ info.fillJSValue(tagGPR, payloadGPR, isInt32Constant(nodeIndex) ? DataFormatJSInteger : DataFormatJS);
+ } else {
+ DataFormat spillFormat = info.spillFormat();
+ ASSERT(spillFormat != DataFormatNone && spillFormat != DataFormatStorage);
+ tagGPR = allocate();
+ payloadGPR = allocate();
+ switch (spillFormat) {
+ case DataFormatInteger:
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), tagGPR);
+ spillFormat = DataFormatJSInteger; // This will be used as the new register format.
+ break;
+ case DataFormatCell:
+ m_jit.move(TrustedImm32(JSValue::CellTag), tagGPR);
+ spillFormat = DataFormatJSCell; // This will be used as the new register format.
+ break;
+ case DataFormatBoolean:
+ m_jit.move(TrustedImm32(JSValue::BooleanTag), tagGPR);
+ spillFormat = DataFormatJSBoolean; // This will be used as the new register format.
+ break;
+ default:
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), tagGPR);
+ break;
+ }
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), payloadGPR);
+ m_gprs.retain(tagGPR, virtualRegister, SpillOrderSpilled);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderSpilled);
+ info.fillJSValue(tagGPR, payloadGPR, spillFormat == DataFormatJSDouble ? DataFormatJS : spillFormat);
+ }
+
+ return true;
+ }
+
+ case DataFormatInteger:
+ case DataFormatCell:
+ case DataFormatBoolean: {
+ GPRReg gpr = info.gpr();
+ // If the register has already been locked we need to take a copy.
+ if (m_gprs.isLocked(gpr)) {
+ payloadGPR = allocate();
+ m_jit.move(gpr, payloadGPR);
+ } else {
+ payloadGPR = gpr;
+ m_gprs.lock(gpr);
+ }
+ tagGPR = allocate();
+ uint32_t tag = JSValue::EmptyValueTag;
+ DataFormat fillFormat = DataFormatJS;
+ switch (info.registerFormat()) {
+ case DataFormatInteger:
+ tag = JSValue::Int32Tag;
+ fillFormat = DataFormatJSInteger;
+ break;
+ case DataFormatCell:
+ tag = JSValue::CellTag;
+ fillFormat = DataFormatJSCell;
+ break;
+ case DataFormatBoolean:
+ tag = JSValue::BooleanTag;
+ fillFormat = DataFormatJSBoolean;
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+ m_jit.move(TrustedImm32(tag), tagGPR);
+ m_gprs.release(gpr);
+ m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS);
+ info.fillJSValue(tagGPR, payloadGPR, fillFormat);
+ return true;
+ }
+
+ case DataFormatJSDouble:
+ case DataFormatDouble: {
+ FPRReg oldFPR = info.fpr();
+ m_fprs.lock(oldFPR);
+ tagGPR = allocate();
+ payloadGPR = allocate();
+ boxDouble(oldFPR, tagGPR, payloadGPR);
+ m_fprs.unlock(oldFPR);
+ m_fprs.release(oldFPR);
+ m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS);
+ info.fillJSValue(tagGPR, payloadGPR, DataFormatJS);
+ return true;
+ }
+
+ case DataFormatJS:
+ case DataFormatJSInteger:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean: {
+ tagGPR = info.tagGPR();
+ payloadGPR = info.payloadGPR();
+ m_gprs.lock(tagGPR);
+ m_gprs.lock(payloadGPR);
+ return true;
+ }
+
+ case DataFormatStorage:
+        // This type currently never occurs.
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_NOT_REACHED();
+ return true;
+}
+
+void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node)
+{
+ if (isKnownNumeric(node.child1())) {
+ JSValueOperand op1(this, node.child1());
+ op1.fill();
+ if (op1.isDouble()) {
+ FPRTemporary result(this, op1);
+ m_jit.moveDouble(op1.fpr(), result.fpr());
+ doubleResult(result.fpr(), m_compileIndex);
+ } else {
+ GPRTemporary resultTag(this, op1);
+ GPRTemporary resultPayload(this, op1, false);
+ m_jit.move(op1.tagGPR(), resultTag.gpr());
+ m_jit.move(op1.payloadGPR(), resultPayload.gpr());
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ }
+ return;
+ }
+
+ JSValueOperand op1(this, node.child1());
+ GPRTemporary resultTag(this, op1);
+ GPRTemporary resultPayload(this, op1, false);
+
+ ASSERT(!isInt32Constant(node.child1()));
+ ASSERT(!isNumberConstant(node.child1()));
+
+ GPRReg tagGPR = op1.tagGPR();
+ GPRReg payloadGPR = op1.payloadGPR();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ op1.use();
+
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
+ JITCompiler::Jump nonNumeric = m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag));
+
+    // First, the fall-through case: we have a double encoded as a JSValue.
+ JITCompiler::Jump hasUnboxedDouble = m_jit.jump();
+
+ // Next handle cells (& other JS immediates)
+ nonNumeric.link(&m_jit);
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ callOperation(dfgConvertJSValueToNumber, FPRInfo::returnValueFPR, tagGPR, payloadGPR);
+ boxDouble(FPRInfo::returnValueFPR, resultTagGPR, resultPayloadGPR);
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+ JITCompiler::Jump hasCalledToNumber = m_jit.jump();
+
+ // Finally, handle integers.
+ isInteger.link(&m_jit);
+ hasUnboxedDouble.link(&m_jit);
+ m_jit.move(tagGPR, resultTagGPR);
+ m_jit.move(payloadGPR, resultPayloadGPR);
+ hasCalledToNumber.link(&m_jit);
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+}
+
+void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node)
+{
+ ASSERT(!isInt32Constant(node.child1()));
+
+ if (isKnownInteger(node.child1())) {
+ IntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ m_jit.move(op1.gpr(), result.gpr());
+ integerResult(result.gpr(), m_compileIndex);
+ return;
+ }
+
+ GenerationInfo& childInfo = m_generationInfo[at(node.child1()).virtualRegister()];
+ if (childInfo.isJSDouble()) {
+ DoubleOperand op1(this, node.child1());
+ GPRTemporary result(this);
+ FPRReg fpr = op1.fpr();
+ GPRReg gpr = result.gpr();
+ op1.use();
+ JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful);
+
+ silentSpillAllRegisters(gpr);
+ callOperation(toInt32, gpr, fpr);
+ silentFillAllRegisters(gpr);
+
+ truncatedToInteger.link(&m_jit);
+ integerResult(gpr, m_compileIndex, UseChildrenCalledExplicitly);
+ return;
+ }
+
+ JSValueOperand op1(this, node.child1());
+ GPRTemporary result(this);
+ GPRReg tagGPR = op1.tagGPR();
+ GPRReg payloadGPR = op1.payloadGPR();
+ GPRReg resultGPR = result.gpr();
+ op1.use();
+
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
+
+ // First handle non-integers
+ silentSpillAllRegisters(resultGPR);
+ callOperation(dfgConvertJSValueToInt32, GPRInfo::returnValueGPR, tagGPR, payloadGPR);
+ m_jit.move(GPRInfo::returnValueGPR, resultGPR);
+ silentFillAllRegisters(resultGPR);
+ JITCompiler::Jump hasCalledToInt32 = m_jit.jump();
+
+ // Then handle integers.
+ isInteger.link(&m_jit);
+ m_jit.move(payloadGPR, resultGPR);
+ hasCalledToInt32.link(&m_jit);
+ integerResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
+}
+
+void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
+{
+ IntegerOperand op1(this, node.child1());
+ FPRTemporary boxer(this);
+ GPRTemporary resultTag(this, op1);
+ GPRTemporary resultPayload(this);
+
+ JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, op1.gpr(), TrustedImm32(0));
+
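+    // A negative int32 here means the unsigned value has its top bit set; converting the
+    // signed value and then adding 2^32 (twoToThe32) yields the correct unsigned double.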
+ m_jit.convertInt32ToDouble(op1.gpr(), boxer.fpr());
+ m_jit.move(JITCompiler::TrustedImmPtr(&AssemblyHelpers::twoToThe32), resultPayload.gpr()); // reuse resultPayload register here.
+ m_jit.addDouble(JITCompiler::Address(resultPayload.gpr(), 0), boxer.fpr());
+
+ boxDouble(boxer.fpr(), resultTag.gpr(), resultPayload.gpr());
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ positive.link(&m_jit);
+
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTag.gpr());
+ m_jit.move(op1.gpr(), resultPayload.gpr());
+
+ done.link(&m_jit);
+
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+}
+
+JITCompiler::Call SpeculativeJIT::cachedGetById(GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget)
+{
+ m_jit.beginUninterruptedSequence();
+ JITCompiler::DataLabelPtr structureToCompare;
+ JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
+ m_jit.endUninterruptedSequence();
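+    // The structure compared against starts out as the placeholder -1; once the access
+    // is cached, the repatching code fills in the real structure and the load offsets.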
+
+ m_jit.loadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfPropertyStorage()), resultPayloadGPR);
+ JITCompiler::DataLabelCompact tagLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ JITCompiler::DataLabelCompact payloadLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ structureCheck.link(&m_jit);
+
+ if (slowPathTarget.isSet())
+ slowPathTarget.link(&m_jit);
+
+ JITCompiler::Label slowCase = m_jit.label();
+
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ JITCompiler::Call functionCall;
+ if (baseTagGPROrNone == InvalidGPRReg)
+ functionCall = callOperation(operationGetByIdOptimize, resultTagGPR, resultPayloadGPR, JSValue::CellTag, basePayloadGPR, identifier(identifierNumber));
+ else
+ functionCall = callOperation(operationGetByIdOptimize, resultTagGPR, resultPayloadGPR, baseTagGPROrNone, basePayloadGPR, identifier(identifierNumber));
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+
+ done.link(&m_jit);
+
+ JITCompiler::Label doneLabel = m_jit.label();
+
+ m_jit.addPropertyAccess(PropertyAccessRecord(structureToCompare, functionCall, structureCheck, tagLoadWithPatch, payloadLoadWithPatch, slowCase, doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(resultTagGPR), safeCast<int8_t>(resultPayloadGPR), safeCast<int8_t>(scratchGPR)));
+
+ return functionCall;
+}
+
+void SpeculativeJIT::cachedPutById(GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
+{
+ m_jit.beginUninterruptedSequence();
+ JITCompiler::DataLabelPtr structureToCompare;
+ JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
+ m_jit.endUninterruptedSequence();
+
+ writeBarrier(basePayloadGPR, valueTagGPR, valueIndex, WriteBarrierForPropertyAccess, scratchGPR);
+
+ m_jit.loadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
+ JITCompiler::DataLabel32 tagStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valueTagGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ JITCompiler::DataLabel32 payloadStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valuePayloadGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ structureCheck.link(&m_jit);
+
+ if (slowPathTarget.isSet())
+ slowPathTarget.link(&m_jit);
+
+ JITCompiler::Label slowCase = m_jit.label();
+
+ silentSpillAllRegisters(InvalidGPRReg);
+ V_DFGOperation_EJCI optimizedCall;
+ if (m_jit.strictModeFor(at(m_compileIndex).codeOrigin)) {
+ if (putKind == Direct)
+ optimizedCall = operationPutByIdDirectStrictOptimize;
+ else
+ optimizedCall = operationPutByIdStrictOptimize;
+ } else {
+ if (putKind == Direct)
+ optimizedCall = operationPutByIdDirectNonStrictOptimize;
+ else
+ optimizedCall = operationPutByIdNonStrictOptimize;
+ }
+ JITCompiler::Call functionCall = callOperation(optimizedCall, valueTagGPR, valuePayloadGPR, basePayloadGPR, identifier(identifierNumber));
+ silentFillAllRegisters(InvalidGPRReg);
+
+ done.link(&m_jit);
+ JITCompiler::Label doneLabel = m_jit.label();
+
+ m_jit.addPropertyAccess(PropertyAccessRecord(structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(tagStoreWithPatch.label()), JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(valueTagGPR), safeCast<int8_t>(valuePayloadGPR), safeCast<int8_t>(scratchGPR)));
+}
+
+void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(NodeIndex operand, bool invert)
+{
+ JSValueOperand arg(this, operand);
+ GPRReg argTagGPR = arg.tagGPR();
+ GPRReg argPayloadGPR = arg.payloadGPR();
+
+ GPRTemporary resultPayload(this, arg, false);
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ JITCompiler::Jump notCell;
+ if (!isKnownCell(operand))
+ notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));
+
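+    // A cell compares equal to null/undefined only if its structure has the
+    // MasqueradesAsUndefined flag set (e.g. document.all), so test that flag.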
+ m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultPayloadGPR);
+ m_jit.test8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultPayloadGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), resultPayloadGPR);
+
+ if (!isKnownCell(operand)) {
+ JITCompiler::Jump done = m_jit.jump();
+
+ notCell.link(&m_jit);
+ // null or undefined?
+ COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
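+        // OR-ing 1 into the tag maps UndefinedTag onto NullTag, so a single compare
+        // against NullTag covers both null and undefined.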
+ m_jit.move(argTagGPR, resultPayloadGPR);
+ m_jit.or32(TrustedImm32(1), resultPayloadGPR);
+ m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultPayloadGPR, TrustedImm32(JSValue::NullTag), resultPayloadGPR);
+
+ done.link(&m_jit);
+ }
+
+ booleanResult(resultPayloadGPR, m_compileIndex);
+}
+
+void SpeculativeJIT::nonSpeculativePeepholeBranchNull(NodeIndex operand, NodeIndex branchNodeIndex, bool invert)
+{
+ Node& branchNode = at(branchNodeIndex);
+ BlockIndex taken = branchNode.takenBlockIndex();
+ BlockIndex notTaken = branchNode.notTakenBlockIndex();
+
+ if (taken == (m_block + 1)) {
+ invert = !invert;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ JSValueOperand arg(this, operand);
+ GPRReg argTagGPR = arg.tagGPR();
+ GPRReg argPayloadGPR = arg.payloadGPR();
+
+ GPRTemporary result(this, arg);
+ GPRReg resultGPR = result.gpr();
+
+ JITCompiler::Jump notCell;
+
+ if (!isKnownCell(operand))
+ notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));
+
+ m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultGPR);
+ addBranch(m_jit.branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined)), taken);
+
+ if (!isKnownCell(operand)) {
+ addBranch(m_jit.jump(), notTaken);
+
+ notCell.link(&m_jit);
+ // null or undefined?
+ COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
+ m_jit.move(argTagGPR, resultGPR);
+ m_jit.or32(TrustedImm32(1), resultGPR);
+ addBranch(m_jit.branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(JSValue::NullTag)), taken);
+ }
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
+bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, NodeIndex operand, bool invert)
+{
+ NodeIndex branchNodeIndex = detectPeepHoleBranch();
+ if (branchNodeIndex != NoNode) {
+ ASSERT(node.adjustedRefCount() == 1);
+
+ nonSpeculativePeepholeBranchNull(operand, branchNodeIndex, invert);
+
+ use(node.child1());
+ use(node.child2());
+ m_compileIndex = branchNodeIndex;
+
+ return true;
+ }
+
+ nonSpeculativeNonPeepholeCompareNull(operand, invert);
+
+ return false;
+}
+
+void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
+{
+ Node& branchNode = at(branchNodeIndex);
+ BlockIndex taken = branchNode.takenBlockIndex();
+ BlockIndex notTaken = branchNode.notTakenBlockIndex();
+
+ JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;
+
+ // The branch instruction will branch to the taken block.
+ // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+ if (taken == (m_block + 1)) {
+ cond = JITCompiler::invert(cond);
+ callResultCondition = JITCompiler::Zero;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ JSValueOperand arg1(this, node.child1());
+ JSValueOperand arg2(this, node.child2());
+ GPRReg arg1TagGPR = arg1.tagGPR();
+ GPRReg arg1PayloadGPR = arg1.payloadGPR();
+ GPRReg arg2TagGPR = arg2.tagGPR();
+ GPRReg arg2PayloadGPR = arg2.payloadGPR();
+
+ JITCompiler::JumpList slowPath;
+
+ if (isKnownNotInteger(node.child1()) || isKnownNotInteger(node.child2())) {
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ flushRegisters();
+ callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
+
+ addBranch(m_jit.branchTest32(callResultCondition, resultGPR), taken);
+ } else {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ if (!isKnownInteger(node.child1()))
+ slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
+ if (!isKnownInteger(node.child2()))
+ slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
+
+ addBranch(m_jit.branch32(cond, arg1PayloadGPR, arg2PayloadGPR), taken);
+
+ if (!isKnownInteger(node.child1()) || !isKnownInteger(node.child2())) {
+ addBranch(m_jit.jump(), notTaken);
+
+ slowPath.link(&m_jit);
+
+ silentSpillAllRegisters(resultGPR);
+ callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
+ silentFillAllRegisters(resultGPR);
+
+ addBranch(m_jit.branchTest32(callResultCondition, resultGPR), taken);
+ }
+ }
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
+void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
+{
+ JSValueOperand arg1(this, node.child1());
+ JSValueOperand arg2(this, node.child2());
+ GPRReg arg1TagGPR = arg1.tagGPR();
+ GPRReg arg1PayloadGPR = arg1.payloadGPR();
+ GPRReg arg2TagGPR = arg2.tagGPR();
+ GPRReg arg2PayloadGPR = arg2.payloadGPR();
+
+ JITCompiler::JumpList slowPath;
+
+ if (isKnownNotInteger(node.child1()) || isKnownNotInteger(node.child2())) {
+ GPRResult result(this);
+ GPRReg resultPayloadGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ flushRegisters();
+ callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
+
+ booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ } else {
+ GPRTemporary resultPayload(this, arg1, false);
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ if (!isKnownInteger(node.child1()))
+ slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
+ if (!isKnownInteger(node.child2()))
+ slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
+
+ m_jit.compare32(cond, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR);
+
+ if (!isKnownInteger(node.child1()) || !isKnownInteger(node.child2())) {
+ JITCompiler::Jump haveResult = m_jit.jump();
+
+ slowPath.link(&m_jit);
+
+ silentSpillAllRegisters(resultPayloadGPR);
+ callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
+ silentFillAllRegisters(resultPayloadGPR);
+
+ m_jit.andPtr(TrustedImm32(1), resultPayloadGPR);
+
+ haveResult.link(&m_jit);
+ }
+
+ booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ }
+}
+
+void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branchNodeIndex, bool invert)
+{
+ Node& branchNode = at(branchNodeIndex);
+ BlockIndex taken = branchNode.takenBlockIndex();
+ BlockIndex notTaken = branchNode.notTakenBlockIndex();
+
+ // The branch instruction will branch to the taken block.
+ // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+ if (taken == (m_block + 1)) {
+ invert = !invert;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ JSValueOperand arg1(this, node.child1());
+ JSValueOperand arg2(this, node.child2());
+ GPRReg arg1TagGPR = arg1.tagGPR();
+ GPRReg arg1PayloadGPR = arg1.payloadGPR();
+ GPRReg arg2TagGPR = arg2.tagGPR();
+ GPRReg arg2PayloadGPR = arg2.payloadGPR();
+
+ GPRTemporary resultPayload(this, arg1, false);
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ if (isKnownCell(node.child1()) && isKnownCell(node.child2())) {
+ // see if we get lucky: if the arguments are cells and they reference the same
+ // cell, then they must be strictly equal.
+ addBranch(m_jit.branchPtr(JITCompiler::Equal, arg1PayloadGPR, arg2PayloadGPR), invert ? notTaken : taken);
+
+ silentSpillAllRegisters(resultPayloadGPR);
+ callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
+ silentFillAllRegisters(resultPayloadGPR);
+
+ addBranch(m_jit.branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR), taken);
+ } else {
+ // FIXME: Add fast paths for twoCells, number etc.
+
+ silentSpillAllRegisters(resultPayloadGPR);
+ callOperation(operationCompareStrictEq, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
+ silentFillAllRegisters(resultPayloadGPR);
+
+ addBranch(m_jit.branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR), taken);
+ }
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
+void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
+{
+ JSValueOperand arg1(this, node.child1());
+ JSValueOperand arg2(this, node.child2());
+ GPRReg arg1TagGPR = arg1.tagGPR();
+ GPRReg arg1PayloadGPR = arg1.payloadGPR();
+ GPRReg arg2TagGPR = arg2.tagGPR();
+ GPRReg arg2PayloadGPR = arg2.payloadGPR();
+
+ GPRTemporary resultPayload(this, arg1, false);
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ if (isKnownCell(node.child1()) && isKnownCell(node.child2())) {
+ // see if we get lucky: if the arguments are cells and they reference the same
+ // cell, then they must be strictly equal.
+ JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1PayloadGPR, arg2PayloadGPR);
+
+ m_jit.move(JITCompiler::TrustedImm32(!invert), resultPayloadGPR);
+ JITCompiler::Jump done = m_jit.jump();
+
+ notEqualCase.link(&m_jit);
+
+ silentSpillAllRegisters(resultPayloadGPR);
+ callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
+ silentFillAllRegisters(resultPayloadGPR);
+
+ m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR);
+
+ done.link(&m_jit);
+ } else {
+ // FIXME: Add fast paths.
+
+ silentSpillAllRegisters(resultPayloadGPR);
+ callOperation(operationCompareStrictEq, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
+ silentFillAllRegisters(resultPayloadGPR);
+
+ m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR);
+ }
+
+ booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+}
+
+void SpeculativeJIT::emitCall(Node& node)
+{
+ P_DFGOperation_E slowCallFunction;
+
+ if (node.op == Call)
+ slowCallFunction = operationLinkCall;
+ else {
+ ASSERT(node.op == Construct);
+ slowCallFunction = operationLinkConstruct;
+ }
+
+ // For constructors, the this argument is not passed but we have to make space
+ // for it.
+ int dummyThisArgument = node.op == Call ? 0 : 1;
+
+ CallLinkInfo::CallType callType = node.op == Call ? CallLinkInfo::Call : CallLinkInfo::Construct;
+
+ NodeIndex calleeNodeIndex = m_jit.graph().m_varArgChildren[node.firstChild()];
+ JSValueOperand callee(this, calleeNodeIndex);
+ GPRReg calleeTagGPR = callee.tagGPR();
+ GPRReg calleePayloadGPR = callee.payloadGPR();
+ use(calleeNodeIndex);
+
+ // The call instruction's first child is either the function (normal call) or the
+    // receiver (method call). Subsequent children are the arguments.
+ int numPassedArgs = node.numChildren() - 1;
+
+ m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(RegisterFile::ArgumentCount));
+ m_jit.storePtr(GPRInfo::callFrameRegister, callFramePayloadSlot(RegisterFile::CallerFrame));
+ m_jit.store32(calleePayloadGPR, callFramePayloadSlot(RegisterFile::Callee));
+ m_jit.store32(calleeTagGPR, callFrameTagSlot(RegisterFile::Callee));
+
+ for (int i = 0; i < numPassedArgs; i++) {
+ NodeIndex argNodeIndex = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i];
+ JSValueOperand arg(this, argNodeIndex);
+ GPRReg argTagGPR = arg.tagGPR();
+ GPRReg argPayloadGPR = arg.payloadGPR();
+ use(argNodeIndex);
+
+ m_jit.store32(argTagGPR, argumentTagSlot(i + dummyThisArgument));
+ m_jit.store32(argPayloadGPR, argumentPayloadSlot(i + dummyThisArgument));
+ }
+
+ flushRegisters();
+
+ GPRResult resultPayload(this);
+ GPRResult2 resultTag(this);
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+
+ JITCompiler::DataLabelPtr targetToCheck;
+ JITCompiler::JumpList slowPath;
+
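+    // The patchable pointer compare implements call linking: it is initially unpatched,
+    // so the first call takes the slow path (operationLinkCall / operationLinkConstruct),
+    // which binds the callee and repatches this check for subsequent calls.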
+ slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck));
+ slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, calleeTagGPR, TrustedImm32(JSValue::CellTag)));
+ m_jit.loadPtr(MacroAssembler::Address(calleePayloadGPR, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), resultPayloadGPR);
+ m_jit.storePtr(resultPayloadGPR, callFramePayloadSlot(RegisterFile::ScopeChain));
+ m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), callFrameTagSlot(RegisterFile::ScopeChain));
+
+ m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
+
+ JITCompiler::Call fastCall = m_jit.nearCall();
+ m_jit.notifyCall(fastCall, at(m_compileIndex).codeOrigin);
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ slowPath.link(&m_jit);
+
+ m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ m_jit.poke(GPRInfo::argumentGPR0);
+ JITCompiler::Call slowCall = m_jit.addFastExceptionCheck(m_jit.appendCall(slowCallFunction), at(m_compileIndex).codeOrigin);
+ m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
+ m_jit.notifyCall(m_jit.call(GPRInfo::returnValueGPR), at(m_compileIndex).codeOrigin);
+
+ done.link(&m_jit);
+
+ setupResults(resultPayloadGPR, resultTagGPR);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, DataFormatJS, UseChildrenCalledExplicitly);
+
+ m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, at(m_compileIndex).codeOrigin);
+}
+
+template<bool strict>
+GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& returnFormat)
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "SpecInt@%d ", nodeIndex);
+#endif
+ if (isKnownNotInteger(nodeIndex)) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ returnFormat = DataFormatInteger;
+ return allocate();
+ }
+
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+
+ if (node.hasConstant()) {
+ ASSERT(isInt32Constant(nodeIndex));
+ GPRReg gpr = allocate();
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ info.fillInteger(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+
+ DataFormat spillFormat = info.spillFormat();
+ ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);
+
+ // If we know this was spilled as an integer we can fill without checking.
+ if (spillFormat != DataFormatJSInteger && spillFormat != DataFormatInteger)
+ speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
+
+ GPRReg gpr = allocate();
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ info.fillInteger(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+
+ case DataFormatJSInteger:
+ case DataFormatJS: {
+ // Check the value is an integer.
+ GPRReg tagGPR = info.tagGPR();
+ GPRReg payloadGPR = info.payloadGPR();
+ m_gprs.lock(tagGPR);
+ m_gprs.lock(payloadGPR);
+ if (info.registerFormat() != DataFormatJSInteger)
+ speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag)));
+ m_gprs.unlock(tagGPR);
+ m_gprs.release(tagGPR);
+ m_gprs.release(payloadGPR);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger);
+ info.fillInteger(payloadGPR);
+ // If !strict we're done, return.
+ returnFormat = DataFormatInteger;
+ return payloadGPR;
+ }
+
+ case DataFormatInteger: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+
+ case DataFormatDouble:
+ case DataFormatCell:
+ case DataFormatBoolean:
+ case DataFormatJSDouble:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean:
+ case DataFormatStorage:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+}
+
+GPRReg SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex, DataFormat& returnFormat)
+{
+ return fillSpeculateIntInternal<false>(nodeIndex, returnFormat);
+}
+
+GPRReg SpeculativeJIT::fillSpeculateIntStrict(NodeIndex nodeIndex)
+{
+ DataFormat mustBeDataFormatInteger;
+ GPRReg result = fillSpeculateIntInternal<true>(nodeIndex, mustBeDataFormatInteger);
+ ASSERT(mustBeDataFormatInteger == DataFormatInteger);
+ return result;
+}
+
+FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "SpecDouble@%d ", nodeIndex);
+#endif
+ if (isKnownNotNumber(nodeIndex)) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return fprAllocate();
+ }
+
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ if (info.registerFormat() == DataFormatNone) {
+
+ if (node.hasConstant()) {
+ if (isInt32Constant(nodeIndex)) {
+ GPRReg gpr = allocate();
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ info.fillInteger(gpr);
+ unlock(gpr);
+ } else if (isNumberConstant(nodeIndex)) {
+ FPRReg fpr = fprAllocate();
+ m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderConstant);
+ info.fillDouble(fpr);
+ return fpr;
+ } else
+ ASSERT_NOT_REACHED();
+ } else {
+ DataFormat spillFormat = info.spillFormat();
+ ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);
+ if (spillFormat == DataFormatJSDouble) {
+ FPRReg fpr = fprAllocate();
+ m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
+ info.fillDouble(fpr);
+ return fpr;
+ }
+
+ FPRReg fpr = fprAllocate();
+ JITCompiler::Jump hasUnboxedDouble;
+
+ if (spillFormat != DataFormatJSInteger && spillFormat != DataFormatInteger) {
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag));
+ speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::AboveOrEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::LowestTag)));
+ m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
+ hasUnboxedDouble = m_jit.jump();
+
+ isInteger.link(&m_jit);
+ }
+
+ m_jit.convertInt32ToDouble(JITCompiler::payloadFor(virtualRegister), fpr);
+
+ if (hasUnboxedDouble.isSet())
+ hasUnboxedDouble.link(&m_jit);
+
+ m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
+ info.fillDouble(fpr);
+ return fpr;
+ }
+ }
+
+ switch (info.registerFormat()) {
+ case DataFormatJS:
+ case DataFormatJSInteger: {
+ GPRReg tagGPR = info.tagGPR();
+ GPRReg payloadGPR = info.payloadGPR();
+ FPRReg fpr = fprAllocate();
+
+ m_gprs.lock(tagGPR);
+ m_gprs.lock(payloadGPR);
+
+ JITCompiler::Jump hasUnboxedDouble;
+
+ if (info.registerFormat() != DataFormatJSInteger) {
+ FPRTemporary scratch(this);
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
+ speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
+ unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
+ hasUnboxedDouble = m_jit.jump();
+ isInteger.link(&m_jit);
+ }
+
+ m_jit.convertInt32ToDouble(payloadGPR, fpr);
+
+ if (hasUnboxedDouble.isSet())
+ hasUnboxedDouble.link(&m_jit);
+
+ m_gprs.release(tagGPR);
+ m_gprs.release(payloadGPR);
+ m_gprs.unlock(tagGPR);
+ m_gprs.unlock(payloadGPR);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ info.killSpilled();
+ return fpr;
+ }
+
+ case DataFormatInteger: {
+ FPRReg fpr = fprAllocate();
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.convertInt32ToDouble(gpr, fpr);
+ m_gprs.unlock(gpr);
+ return fpr;
+ }
+
+ case DataFormatJSDouble:
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ m_fprs.lock(fpr);
+ return fpr;
+ }
+
+ case DataFormatNone:
+ case DataFormatStorage:
+ case DataFormatCell:
+ case DataFormatJSCell:
+ case DataFormatBoolean:
+ case DataFormatJSBoolean:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidFPRReg;
+}
+
+GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "SpecCell@%d ", nodeIndex);
+#endif
+ if (isKnownNotCell(nodeIndex)) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return allocate();
+ }
+
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+
+ if (node.hasConstant()) {
+ JSValue jsValue = valueOfJSConstant(nodeIndex);
+ ASSERT(jsValue.isCell());
+ GPRReg gpr = allocate();
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr);
+ info.fillCell(gpr);
+ return gpr;
+ }
+
+ ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatCell);
+ if (info.spillFormat() != DataFormatJSCell && info.spillFormat() != DataFormatCell)
+ speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
+ GPRReg gpr = allocate();
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ info.fillCell(gpr);
+ return gpr;
+ }
+
+ case DataFormatCell: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ return gpr;
+ }
+
+ case DataFormatJSCell:
+ case DataFormatJS: {
+ GPRReg tagGPR = info.tagGPR();
+ GPRReg payloadGPR = info.payloadGPR();
+ m_gprs.lock(tagGPR);
+ m_gprs.lock(payloadGPR);
+ if (info.spillFormat() != DataFormatJSCell)
+ speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::CellTag)));
+ m_gprs.unlock(tagGPR);
+ m_gprs.release(tagGPR);
+ m_gprs.release(payloadGPR);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderCell);
+ info.fillCell(payloadGPR);
+ return payloadGPR;
+ }
+
+ case DataFormatJSInteger:
+ case DataFormatInteger:
+ case DataFormatJSDouble:
+ case DataFormatDouble:
+ case DataFormatJSBoolean:
+ case DataFormatBoolean:
+ case DataFormatStorage:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+}
+
+GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "SpecBool@%d ", nodeIndex);
+#endif
+ if (isKnownNotBoolean(nodeIndex)) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return allocate();
+ }
+
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+
+ if (node.hasConstant()) {
+ JSValue jsValue = valueOfJSConstant(nodeIndex);
+ ASSERT(jsValue.isBoolean());
+ GPRReg gpr = allocate();
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ m_jit.move(MacroAssembler::TrustedImm32(jsValue.asBoolean()), gpr);
+ info.fillBoolean(gpr);
+ return gpr;
+ }
+
+ ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatBoolean);
+
+ if (info.spillFormat() != DataFormatJSBoolean && info.spillFormat() != DataFormatBoolean)
+ speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
+
+ GPRReg gpr = allocate();
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ info.fillBoolean(gpr);
+ return gpr;
+ }
+
+ case DataFormatBoolean: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ return gpr;
+ }
+
+ case DataFormatJSBoolean:
+ case DataFormatJS: {
+ GPRReg tagGPR = info.tagGPR();
+ GPRReg payloadGPR = info.payloadGPR();
+ m_gprs.lock(tagGPR);
+ m_gprs.lock(payloadGPR);
+ if (info.registerFormat() != DataFormatJSBoolean)
+ speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::BooleanTag)));
+
+ m_gprs.unlock(tagGPR);
+ m_gprs.release(tagGPR);
+ m_gprs.release(payloadGPR);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderBoolean);
+ info.fillBoolean(payloadGPR);
+ return payloadGPR;
+ }
+
+ case DataFormatJSInteger:
+ case DataFormatInteger:
+ case DataFormatJSDouble:
+ case DataFormatDouble:
+ case DataFormatJSCell:
+ case DataFormatCell:
+ case DataFormatStorage:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+}
+
+JITCompiler::Jump SpeculativeJIT::convertToDouble(JSValueOperand& op, FPRReg result)
+{
+ FPRTemporary scratch(this);
+
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, op.tagGPR(), TrustedImm32(JSValue::Int32Tag));
+    JITCompiler::Jump notNumber = m_jit.branch32(MacroAssembler::AboveOrEqual, op.tagGPR(), TrustedImm32(JSValue::LowestTag));
+
+ unboxDouble(op.tagGPR(), op.payloadGPR(), result, scratch.fpr());
+ JITCompiler::Jump done = m_jit.jump();
+
+ isInteger.link(&m_jit);
+ m_jit.convertInt32ToDouble(op.payloadGPR(), result);
+
+ done.link(&m_jit);
+
+ return notNumber;
+}
+
+void SpeculativeJIT::compileObjectEquality(Node& node, const ClassInfo* classInfo, PredictionChecker predictionCheck)
+{
+ SpeculateCellOperand op1(this, node.child1());
+ SpeculateCellOperand op2(this, node.child2());
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+
+ if (!predictionCheck(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
+ if (!predictionCheck(m_state.forNode(node.child2()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node.child2(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op2GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
+
+ GPRTemporary resultPayload(this, op2);
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR);
+ m_jit.move(Imm32(1), resultPayloadGPR);
+ MacroAssembler::Jump done = m_jit.jump();
+ falseCase.link(&m_jit);
+ m_jit.move(Imm32(0), resultPayloadGPR);
+ done.link(&m_jit);
+
+ booleanResult(resultPayloadGPR, m_compileIndex);
+}
+
+void SpeculativeJIT::compileIntegerCompare(Node& node, MacroAssembler::RelationalCondition condition)
+{
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary resultPayload(this);
+
+ m_jit.compare32(condition, op1.gpr(), op2.gpr(), resultPayload.gpr());
+
+ // If we add a DataFormatBool, we should use it here.
+ booleanResult(resultPayload.gpr(), m_compileIndex);
+}
+
+void SpeculativeJIT::compileDoubleCompare(Node& node, MacroAssembler::DoubleCondition condition)
+{
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ GPRTemporary resultPayload(this);
+
+ m_jit.move(Imm32(1), resultPayload.gpr());
+ MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr());
+ m_jit.move(Imm32(0), resultPayload.gpr());
+ trueCase.link(&m_jit);
+
+ booleanResult(resultPayload.gpr(), m_compileIndex);
+}
+
+void SpeculativeJIT::compileValueAdd(Node& node)
+{
+ JSValueOperand op1(this, node.child1());
+ JSValueOperand op2(this, node.child2());
+
+ GPRReg op1TagGPR = op1.tagGPR();
+ GPRReg op1PayloadGPR = op1.payloadGPR();
+ GPRReg op2TagGPR = op2.tagGPR();
+ GPRReg op2PayloadGPR = op2.payloadGPR();
+
+ flushRegisters();
+
+ GPRResult2 resultTag(this);
+ GPRResult resultPayload(this);
+ if (isKnownNotNumber(node.child1()) || isKnownNotNumber(node.child2()))
+ callOperation(operationValueAddNotNumber, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR);
+ else
+ callOperation(operationValueAdd, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR);
+
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+}
+
+void SpeculativeJIT::compileObjectOrOtherLogicalNot(NodeIndex nodeIndex, const ClassInfo* classInfo, bool needSpeculationCheck)
+{
+ JSValueOperand value(this, nodeIndex);
+ GPRTemporary resultPayload(this);
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ MacroAssembler::Jump notCell = m_jit.branch32(MacroAssembler::NotEqual, valueTagGPR, TrustedImm32(JSValue::CellTag));
+ if (needSpeculationCheck)
+ speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(valuePayloadGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
+ m_jit.move(TrustedImm32(0), resultPayloadGPR);
+ MacroAssembler::Jump done = m_jit.jump();
+
+ notCell.link(&m_jit);
+
+ COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
+ if (needSpeculationCheck) {
+ m_jit.move(valueTagGPR, resultPayloadGPR);
+ m_jit.or32(TrustedImm32(1), resultPayloadGPR);
+ speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, resultPayloadGPR, TrustedImm32(JSValue::NullTag)));
+ }
+ m_jit.move(TrustedImm32(1), resultPayloadGPR);
+
+ done.link(&m_jit);
+
+ booleanResult(resultPayloadGPR, m_compileIndex);
+}
+
+void SpeculativeJIT::compileLogicalNot(Node& node)
+{
+ if (isKnownBoolean(node.child1()) || isBooleanPrediction(m_jit.getPrediction(node.child1()))) {
+ SpeculateBooleanOperand value(this, node.child1());
+ GPRTemporary result(this, value);
+ m_jit.xor32(TrustedImm32(1), value.gpr(), result.gpr());
+ booleanResult(result.gpr(), m_compileIndex);
+ return;
+ }
+ if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) {
+ compileObjectOrOtherLogicalNot(node.child1(), &JSFinalObject::s_info, !isFinalObjectOrOtherPrediction(m_state.forNode(node.child1()).m_type));
+ return;
+ }
+ if (at(node.child1()).shouldSpeculateArrayOrOther()) {
+ compileObjectOrOtherLogicalNot(node.child1(), &JSArray::s_info, !isArrayOrOtherPrediction(m_state.forNode(node.child1()).m_type));
+ return;
+ }
+ if (at(node.child1()).shouldSpeculateInteger()) {
+ SpeculateIntegerOperand value(this, node.child1());
+ GPRTemporary resultPayload(this, value);
+ m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), resultPayload.gpr());
+ booleanResult(resultPayload.gpr(), m_compileIndex);
+ return;
+ }
+ if (at(node.child1()).shouldSpeculateNumber()) {
+ SpeculateDoubleOperand value(this, node.child1());
+ FPRTemporary scratch(this);
+ GPRTemporary resultPayload(this);
+ m_jit.move(TrustedImm32(0), resultPayload.gpr());
+ MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr());
+ m_jit.move(TrustedImm32(1), resultPayload.gpr());
+ nonZero.link(&m_jit);
+ booleanResult(resultPayload.gpr(), m_compileIndex);
+ return;
+ }
+
+ JSValueOperand arg1(this, node.child1());
+ GPRTemporary resultPayload(this, arg1, false);
+ GPRReg arg1TagGPR = arg1.tagGPR();
+ GPRReg arg1PayloadGPR = arg1.payloadGPR();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ arg1.use();
+
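+    // Fast path: a boolean payload can be flipped directly; any other value is converted
+    // by calling dfgConvertJSValueToBoolean with registers spilled around the call.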
+ JITCompiler::Jump fastCase = m_jit.branch32(JITCompiler::Equal, arg1TagGPR, TrustedImm32(JSValue::BooleanTag));
+
+ silentSpillAllRegisters(resultPayloadGPR);
+ callOperation(dfgConvertJSValueToBoolean, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR);
+ silentFillAllRegisters(resultPayloadGPR);
+ JITCompiler::Jump doNot = m_jit.jump();
+
+ fastCase.link(&m_jit);
+ m_jit.move(arg1PayloadGPR, resultPayloadGPR);
+
+ doNot.link(&m_jit);
+ m_jit.xor32(TrustedImm32(1), resultPayloadGPR);
+ booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+}
+
+void SpeculativeJIT::emitObjectOrOtherBranch(NodeIndex nodeIndex, BlockIndex taken, BlockIndex notTaken, const ClassInfo* classInfo, bool needSpeculationCheck)
+{
+ JSValueOperand value(this, nodeIndex);
+ GPRTemporary scratch(this);
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRReg scratchGPR = scratch.gpr();
+
+ MacroAssembler::Jump notCell = m_jit.branch32(MacroAssembler::NotEqual, valueTagGPR, TrustedImm32(JSValue::CellTag));
+ if (needSpeculationCheck)
+ speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(valuePayloadGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
+ addBranch(m_jit.jump(), taken);
+
+ notCell.link(&m_jit);
+
+ COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
+ if (needSpeculationCheck) {
+ m_jit.move(valueTagGPR, scratchGPR);
+ m_jit.or32(TrustedImm32(1), scratchGPR);
+ speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag)));
+ }
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+
+ noResult(m_compileIndex);
+}
+
+void SpeculativeJIT::emitBranch(Node& node)
+{
+ BlockIndex taken = node.takenBlockIndex();
+ BlockIndex notTaken = node.notTakenBlockIndex();
+
+ if (isKnownBoolean(node.child1())) {
+ SpeculateBooleanOperand value(this, node.child1());
+ MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
+
+ if (taken == (m_block + 1)) {
+ condition = MacroAssembler::Zero;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ addBranch(m_jit.branchTest32(condition, value.gpr(), TrustedImm32(1)), taken);
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+
+ noResult(m_compileIndex);
+ } else if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) {
+ emitObjectOrOtherBranch(node.child1(), taken, notTaken, &JSFinalObject::s_info, !isFinalObjectOrOtherPrediction(m_state.forNode(node.child1()).m_type));
+ } else if (at(node.child1()).shouldSpeculateArrayOrOther()) {
+ emitObjectOrOtherBranch(node.child1(), taken, notTaken, &JSArray::s_info, !isArrayOrOtherPrediction(m_state.forNode(node.child1()).m_type));
+ } else if (at(node.child1()).shouldSpeculateNumber()) {
+ if (at(node.child1()).shouldSpeculateInteger()) {
+ bool invert = false;
+
+ if (taken == (m_block + 1)) {
+ invert = true;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ SpeculateIntegerOperand value(this, node.child1());
+ addBranch(m_jit.branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr()), taken);
+ } else {
+ SpeculateDoubleOperand value(this, node.child1());
+ FPRTemporary scratch(this);
+ addBranch(m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr()), taken);
+ }
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+
+ noResult(m_compileIndex);
+ } else {
+ JSValueOperand value(this, node.child1());
+ value.fill();
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ use(node.child1());
+
+ JITCompiler::Jump fastPath = m_jit.branch32(JITCompiler::Equal, valueTagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag));
+ JITCompiler::Jump slowPath = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::BooleanTag));
+
+ fastPath.link(&m_jit);
+ addBranch(m_jit.branchTest32(JITCompiler::Zero, valuePayloadGPR), notTaken);
+ addBranch(m_jit.jump(), taken);
+
+ slowPath.link(&m_jit);
+ silentSpillAllRegisters(resultGPR);
+ callOperation(dfgConvertJSValueToBoolean, resultGPR, valueTagGPR, valuePayloadGPR);
+ silentFillAllRegisters(resultGPR);
+
+ addBranch(m_jit.branchTest32(JITCompiler::NonZero, resultGPR), taken);
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+
+ noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ }
+}
+
+void SpeculativeJIT::compile(Node& node)
+{
+ NodeType op = node.op;
+
+ switch (op) {
+ case JSConstant:
+ initConstantInfo(m_compileIndex);
+ break;
+
+ case WeakJSConstant:
+ m_jit.addWeakReference(node.weakConstant());
+ initConstantInfo(m_compileIndex);
+ break;
+
+ case GetLocal: {
+ PredictedType prediction = node.variableAccessData()->prediction();
+ AbstractValue& value = block()->valuesAtHead.operand(node.local());
+
+ // If we have no prediction for this local, then don't attempt to compile.
+ if (prediction == PredictNone) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ if (node.variableAccessData()->shouldUseDoubleFormat()) {
+ FPRTemporary result(this);
+ m_jit.loadDouble(JITCompiler::addressFor(node.local()), result.fpr());
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble);
+ m_generationInfo[virtualRegister].initDouble(m_compileIndex, node.refCount(), result.fpr());
+ break;
+ }
+
+ GPRTemporary result(this);
+ if (isInt32Prediction(prediction)) {
+ m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr());
+
+ // Like integerResult, but don't useChildren - our children are phi nodes,
+ // and don't represent values within this dataflow with virtual registers.
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger);
+ m_generationInfo[virtualRegister].initInteger(m_compileIndex, node.refCount(), result.gpr());
+ break;
+ }
+
+ if (isArrayPrediction(prediction) || isByteArrayPrediction(prediction)) {
+ m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr());
+
+ // Like cellResult, but don't useChildren - our children are phi nodes,
+ // and don't represent values within this dataflow with virtual registers.
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_gprs.retain(result.gpr(), virtualRegister, SpillOrderCell);
+ m_generationInfo[virtualRegister].initCell(m_compileIndex, node.refCount(), result.gpr());
+ break;
+ }
+
+ if (isBooleanPrediction(prediction)) {
+ m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr());
+
+ // Like booleanResult, but don't useChildren - our children are phi nodes,
+ // and don't represent values within this dataflow with virtual registers.
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_gprs.retain(result.gpr(), virtualRegister, SpillOrderBoolean);
+ m_generationInfo[virtualRegister].initBoolean(m_compileIndex, node.refCount(), result.gpr());
+ break;
+ }
+
+ GPRTemporary tag(this);
+ m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr());
+ m_jit.load32(JITCompiler::tagFor(node.local()), tag.gpr());
+
+ // Like jsValueResult, but don't useChildren - our children are phi nodes,
+ // and don't represent values within this dataflow with virtual registers.
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
+ m_gprs.retain(tag.gpr(), virtualRegister, SpillOrderJS);
+
+ DataFormat format = isCellPrediction(value.m_type) ? DataFormatJSCell : DataFormatJS;
+ m_generationInfo[virtualRegister].initJSValue(m_compileIndex, node.refCount(), tag.gpr(), result.gpr(), format);
+ break;
+ }
+
+ case SetLocal: {
+ // SetLocal doubles as a hint as to where a node will be stored and
+ // as a speculation point. So before we speculate make sure that we
+ // know where the child of this node needs to go in the virtual
+ // register file.
+ compileMovHint(node);
+
+ // As far as OSR is concerned, we're on the bytecode index corresponding
+ // to the *next* instruction, since we've already "executed" the
+ // SetLocal and whatever other DFG Nodes are associated with the same
+ // bytecode index as the SetLocal.
+ ASSERT(m_codeOriginForOSR == node.codeOrigin);
+ Node& nextNode = at(m_compileIndex + 1);
+
+ m_codeOriginForOSR = nextNode.codeOrigin;
+
+ if (node.variableAccessData()->shouldUseDoubleFormat()) {
+ SpeculateDoubleOperand value(this, node.child1());
+ m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local()));
+ noResult(m_compileIndex);
+ // Indicate that it's no longer necessary to retrieve the value of
+ // this bytecode variable from registers or other locations in the register file,
+ // but that it is stored as a double.
+ valueSourceReferenceForOperand(node.local()) = ValueSource(DoubleInRegisterFile);
+ } else {
+ PredictedType predictedType = node.variableAccessData()->prediction();
+ if (m_generationInfo[at(node.child1()).virtualRegister()].registerFormat() == DataFormatDouble) {
+ DoubleOperand value(this, node.child1());
+ m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local()));
+ noResult(m_compileIndex);
+ } else if (isInt32Prediction(predictedType)) {
+ SpeculateIntegerOperand value(this, node.child1());
+ m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local()));
+ noResult(m_compileIndex);
+ } else if (isArrayPrediction(predictedType)) {
+ SpeculateCellOperand cell(this, node.child1());
+ GPRReg cellGPR = cell.gpr();
+ if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(cellGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(cellGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node.local()));
+ noResult(m_compileIndex);
+ } else if (isByteArrayPrediction(predictedType)) {
+ SpeculateCellOperand cell(this, node.child1());
+ GPRReg cellGPR = cell.gpr();
+ if (!isByteArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(cellGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(cellGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
+ m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node.local()));
+ noResult(m_compileIndex);
+ } else if (isBooleanPrediction(predictedType)) {
+ SpeculateBooleanOperand value(this, node.child1());
+ m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local()));
+ noResult(m_compileIndex);
+ } else {
+ JSValueOperand value(this, node.child1());
+ m_jit.store32(value.payloadGPR(), JITCompiler::payloadFor(node.local()));
+ m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node.local()));
+ noResult(m_compileIndex);
+ }
+
+ // Indicate that it's no longer necessary to retrieve the value of
+ // this bytecode variable from registers or other locations in the register file.
+ valueSourceReferenceForOperand(node.local()) = ValueSource::forPrediction(predictedType);
+ }
+ break;
+ }
+
+ case SetArgument:
+ // This is a no-op; it just marks the fact that the argument is being used.
+ // But it may be profitable to use this as a hook to run speculation checks
+ // on arguments, thereby allowing us to trivially eliminate such checks if
+ // the argument is not used.
+ break;
+
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ if (isInt32Constant(node.child1())) {
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op2);
+
+ bitOp(op, valueOfInt32Constant(node.child1()), op2.gpr(), result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ } else if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+
+ bitOp(op, valueOfInt32Constant(node.child2()), op1.gpr(), result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ } else {
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op1, op2);
+
+ GPRReg reg1 = op1.gpr();
+ GPRReg reg2 = op2.gpr();
+ bitOp(op, reg1, reg2, result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ }
+ break;
+
+ case BitRShift:
+ case BitLShift:
+ case BitURShift:
+ if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+
+ shiftOp(op, op1.gpr(), valueOfInt32Constant(node.child2()) & 0x1f, result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ } else {
+            // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op1);
+
+ GPRReg reg1 = op1.gpr();
+ GPRReg reg2 = op2.gpr();
+ shiftOp(op, reg1, reg2, result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ }
+ break;
+
+ case UInt32ToNumber: {
+ compileUInt32ToNumber(node);
+ break;
+ }
+
+ case ValueToInt32: {
+ compileValueToInt32(node);
+ break;
+ }
+
+ case ValueToNumber: {
+ if (at(node.child1()).shouldNotSpeculateInteger()) {
+ SpeculateDoubleOperand op1(this, node.child1());
+ FPRTemporary result(this, op1);
+ m_jit.moveDouble(op1.fpr(), result.fpr());
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ m_jit.move(op1.gpr(), result.gpr());
+ integerResult(result.gpr(), m_compileIndex, op1.format());
+ break;
+ }
+
+ case ValueToDouble: {
+ SpeculateDoubleOperand op1(this, node.child1());
+ FPRTemporary result(this, op1);
+ m_jit.moveDouble(op1.fpr(), result.fpr());
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ValueAdd:
+ case ArithAdd: {
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
+ if (isInt32Constant(node.child1())) {
+ int32_t imm1 = valueOfInt32Constant(node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op2.gpr(), result.gpr());
+ m_jit.add32(Imm32(imm1), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ int32_t imm2 = valueOfInt32Constant(node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.add32(Imm32(imm2), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op1, op2);
+
+ GPRReg gpr1 = op1.gpr();
+ GPRReg gpr2 = op2.gpr();
+ GPRReg gprResult = result.gpr();
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ if (gpr1 == gprResult)
+ m_jit.add32(gpr2, gprResult);
+ else {
+ m_jit.move(gpr2, gprResult);
+ m_jit.add32(gpr1, gprResult);
+ }
+ } else {
+ MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
+
+ if (gpr1 == gprResult)
+ speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
+ else if (gpr2 == gprResult)
+ speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
+ else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, check);
+ }
+
+ integerResult(gprResult, m_compileIndex);
+ break;
+ }
+
+ if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2()))) {
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1, op2);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+ m_jit.addDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ ASSERT(op == ValueAdd);
+ compileValueAdd(node);
+ break;
+ }
+
+ case ArithSub: {
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
+ if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ int32_t imm2 = valueOfInt32Constant(node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.sub32(Imm32(imm2), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.sub32(op2.gpr(), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+ m_jit.subDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithMul: {
+ compileArithMul(node);
+ break;
+ }
+
+ case ArithDiv: {
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
+#if CPU(X86)
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
+
+ // If the user cares about negative zero, then speculate that we're not about
+ // to produce negative zero.
+ if (!nodeCanIgnoreNegativeZero(node.arithNodeFlags())) {
+ MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
+ speculationCheck(NegativeZero, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
+ numeratorNonZero.link(&m_jit);
+ }
+
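+            // x86 idiv takes its dividend in edx:eax and leaves the quotient in eax and the
+            // remainder in edx, so pin temporaries to those registers and sign-extend the
+            // dividend with cdq before dividing.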
+ GPRTemporary eax(this, X86Registers::eax);
+ GPRTemporary edx(this, X86Registers::edx);
+
+ GPRReg temp2 = InvalidGPRReg;
+ if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
+ temp2 = allocate();
+ m_jit.move(op2GPR, temp2);
+ op2GPR = temp2;
+ }
+
+ m_jit.move(op1GPR, eax.gpr());
+ m_jit.assembler().cdq();
+ m_jit.assembler().idivl_r(op2GPR);
+
+ if (temp2 != InvalidGPRReg)
+ unlock(temp2);
+
+ // Check that there was no remainder. If there had been, then we'd be obligated to
+ // produce a double result instead.
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
+
+ integerResult(eax.gpr(), m_compileIndex);
+#else // CPU(X86) -> so non-X86 code follows
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this);
+ FPRTemporary scratch(this);
+ GPRTemporary intResult(this);
+
+ FPRReg op1FPR = op1.fpr();
+ FPRReg op2FPR = op2.fpr();
+ FPRReg resultFPR = result.fpr();
+ FPRReg scratchFPR = scratch.fpr();
+ GPRReg resultGPR = intResult.gpr();
+
+ m_jit.divDouble(op1FPR, op2FPR, resultFPR);
+
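+            // Convert the double quotient back to an int32; if it is not exactly
+            // representable as an int32, fail speculation.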
+ JITCompiler::JumpList failureCases;
+ m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR);
+ speculationCheck(Overflow, JSValueRegs(), NoNode, failureCases);
+
+ integerResult(resultGPR, m_compileIndex);
+#endif // CPU(X86)
+ break;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+ m_jit.divDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithMod: {
+ compileArithMod(node);
+ break;
+ }
+
+ case ArithAbs: {
+ if (at(node.child1()).shouldSpeculateInteger() && node.canSpeculateInteger()) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this);
+ GPRTemporary scratch(this);
+
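+            // Branchless abs: with mask = x >> 31 (all ones when x is negative),
+            // |x| = (x + mask) ^ mask. The only overflowing input is INT_MIN, caught below.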
+ m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
+ m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr());
+ m_jit.add32(scratch.gpr(), result.gpr());
+ m_jit.xor32(scratch.gpr(), result.gpr());
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ FPRTemporary result(this);
+
+ m_jit.absDouble(op1.fpr(), result.fpr());
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithMin:
+ case ArithMax: {
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
+ SpeculateStrictInt32Operand op1(this, node.child1());
+ SpeculateStrictInt32Operand op2(this, node.child2());
+ GPRTemporary result(this, op1);
+
+ MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1.gpr(), op2.gpr());
+ m_jit.move(op2.gpr(), result.gpr());
+ if (op1.gpr() != result.gpr()) {
+ MacroAssembler::Jump done = m_jit.jump();
+ op1Less.link(&m_jit);
+ m_jit.move(op1.gpr(), result.gpr());
+ done.link(&m_jit);
+ } else
+ op1Less.link(&m_jit);
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1);
+
+ MacroAssembler::JumpList done;
+
+ MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1.fpr(), op2.fpr());
+
+        // Either op2 is the lesser one, or one of them is NaN.
+ MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1.fpr(), op2.fpr());
+
+ // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
+ // op1 + op2 and putting it into result.
+ m_jit.addDouble(op1.fpr(), op2.fpr(), result.fpr());
+ done.append(m_jit.jump());
+
+ op2Less.link(&m_jit);
+ m_jit.moveDouble(op2.fpr(), result.fpr());
+
+ if (op1.fpr() != result.fpr()) {
+ done.append(m_jit.jump());
+
+ op1Less.link(&m_jit);
+ m_jit.moveDouble(op1.fpr(), result.fpr());
+ } else
+ op1Less.link(&m_jit);
+
+ done.link(&m_jit);
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithSqrt: {
+ SpeculateDoubleOperand op1(this, node.child1());
+ FPRTemporary result(this, op1);
+
+ m_jit.sqrtDouble(op1.fpr(), result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case LogicalNot:
+ compileLogicalNot(node);
+ break;
+
+ case CompareLess:
+ if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess))
+ return;
+ break;
+
+ case CompareLessEq:
+ if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq))
+ return;
+ break;
+
+ case CompareGreater:
+ if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater))
+ return;
+ break;
+
+ case CompareGreaterEq:
+ if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq))
+ return;
+ break;
+
+ case CompareEq:
+ if (isNullConstant(node.child1())) {
+ if (nonSpeculativeCompareNull(node, node.child2()))
+ return;
+ break;
+ }
+ if (isNullConstant(node.child2())) {
+ if (nonSpeculativeCompareNull(node, node.child1()))
+ return;
+ break;
+ }
+ if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq))
+ return;
+ break;
+
+ case CompareStrictEq:
+ if (compileStrictEq(node))
+ return;
+ break;
+
+ case StringCharCodeAt: {
+ compileGetCharCodeAt(node);
+ break;
+ }
+
+ case StringCharAt: {
+ // Relies on StringCharAt node having same basic layout as GetByVal
+ compileGetByValOnString(node);
+ break;
+ }
+
+ case GetByVal: {
+ if (!node.prediction() || !at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ if (!at(node.child2()).shouldSpeculateInteger() || !isActionableArrayPrediction(at(node.child1()).prediction())) {
+ SpeculateCellOperand base(this, node.child1()); // Save a register, speculate cell. We'll probably be right.
+ JSValueOperand property(this, node.child2());
+ GPRReg baseGPR = base.gpr();
+ GPRReg propertyTagGPR = property.tagGPR();
+ GPRReg propertyPayloadGPR = property.payloadGPR();
+
+ flushRegisters();
+ GPRResult2 resultTag(this);
+ GPRResult resultPayload(this);
+ callOperation(operationGetByValCell, resultTag.gpr(), resultPayload.gpr(), baseGPR, propertyTagGPR, propertyPayloadGPR);
+
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ break;
+ }
+
+ if (at(node.child1()).prediction() == PredictString) {
+ compileGetByValOnString(node);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateByteArray()) {
+ compileGetByValOnByteArray(node);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt8Array()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt16Array()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), node, sizeof(int16_t), isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt32Array()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), node, sizeof(int32_t), isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint8Array()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), node, sizeof(uint8_t), isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint16Array()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint32Array()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), node, sizeof(uint32_t), isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateFloat32Array()) {
+ compileGetByValOnFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), node, sizeof(float), isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateFloat64Array()) {
+ compileGetByValOnFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), node, sizeof(double), isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ ASSERT(at(node.child1()).shouldSpeculateArray());
+
+ SpeculateStrictInt32Operand property(this, node.child2());
+ StorageOperand storage(this, node.child3());
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ // Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
+ // If we have predicted the base to be type array, we can skip the check.
+ {
+ SpeculateCellOperand base(this, node.child1());
+ GPRReg baseReg = base.gpr();
+ if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset())));
+ }
+
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+
+ // FIXME: In cases where there are subsequent by_val accesses to the same base it might help to cache
+ // the storage pointer - especially if there happens to be another register free right now. If we do so,
+ // then we'll need to allocate a new temporary for result.
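+        // On 32-bit, each array slot is a tag/payload pair; load the tag first so a hole
+        // (EmptyValueTag) can be detected before loading the payload.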
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag)));
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr());
+
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ break;
+ }
+
+ case PutByVal: {
+ if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ if (!at(node.child2()).shouldSpeculateInteger() || !isActionableMutableArrayPrediction(at(node.child1()).prediction())) {
+ SpeculateCellOperand base(this, node.child1()); // Save a register, speculate cell. We'll probably be right.
+ JSValueOperand property(this, node.child2());
+ JSValueOperand value(this, node.child3());
+ GPRReg baseGPR = base.gpr();
+ GPRReg propertyTagGPR = property.tagGPR();
+ GPRReg propertyPayloadGPR = property.payloadGPR();
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+
+ flushRegisters();
+ callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict, baseGPR, propertyTagGPR, propertyPayloadGPR, valueTagGPR, valuePayloadGPR);
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+ if (at(node.child1()).shouldSpeculateByteArray()) {
+ compilePutByValForByteArray(base.gpr(), property.gpr(), node);
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateFloat32Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateFloat64Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ ASSERT(at(node.child1()).shouldSpeculateArray());
+
+ JSValueOperand value(this, node.child3());
+ GPRTemporary scratch(this);
+
+ // Map base, property & value into registers, allocate a scratch register.
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg valueTagReg = value.tagGPR();
+ GPRReg valuePayloadReg = value.payloadGPR();
+ GPRReg scratchReg = scratch.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ writeBarrier(baseReg, valueTagReg, node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+
+ // Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
+ // If we have predicted the base to be type array, we can skip the check.
+ if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+
+ base.use();
+ property.use();
+ value.use();
+
+ MacroAssembler::Jump withinArrayBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset()));
+
+ // Code to handle put beyond array bounds.
+ silentSpillAllRegisters(scratchReg);
+ callOperation(operationPutByValBeyondArrayBounds, baseReg, propertyReg, valueTagReg, valuePayloadReg);
+ silentFillAllRegisters(scratchReg);
+ JITCompiler::Jump wasBeyondArrayBounds = m_jit.jump();
+
+ withinArrayBounds.link(&m_jit);
+
+ // Get the array storage.
+ GPRReg storageReg = scratchReg;
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg);
+
+ // Check if we're writing to a hole; if so increment m_numValuesInVector.
+ MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+ m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+
+        // If we're writing to a hole we might be growing the array; if so, bump m_length to index + 1.
+ MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ m_jit.add32(TrustedImm32(1), propertyReg);
+ m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ m_jit.sub32(TrustedImm32(1), propertyReg);
+
+ lengthDoesNotNeedUpdate.link(&m_jit);
+ notHoleValue.link(&m_jit);
+
+ // Store the value to the array.
+ m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+
+ wasBeyondArrayBounds.link(&m_jit);
+
+ noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case PutByValAlias: {
+ if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ ASSERT(isActionableMutableArrayPrediction(at(node.child1()).prediction()));
+ ASSERT(at(node.child2()).shouldSpeculateInteger());
+
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+
+ if (at(node.child1()).shouldSpeculateByteArray()) {
+ compilePutByValForByteArray(base.gpr(), property.gpr(), node);
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), NoTypedArraySpecCheck, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), NoTypedArraySpecCheck, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), NoTypedArraySpecCheck, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), NoTypedArraySpecCheck, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), NoTypedArraySpecCheck, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateFloat32Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), NoTypedArraySpecCheck);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateFloat64Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), NoTypedArraySpecCheck);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ ASSERT(at(node.child1()).shouldSpeculateArray());
+
+ JSValueOperand value(this, node.child3());
+ GPRTemporary scratch(this, base);
+
+ GPRReg baseReg = base.gpr();
+ GPRReg scratchReg = scratch.gpr();
+
+ writeBarrier(baseReg, value.tagGPR(), node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+
+ // Get the array storage.
+ GPRReg storageReg = scratchReg;
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg);
+
+ // Store the value to the array.
+ GPRReg propertyReg = property.gpr();
+ m_jit.store32(value.tagGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(value.payloadGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case ArrayPush: {
+ SpeculateCellOperand base(this, node.child1());
+ JSValueOperand value(this, node.child2());
+ GPRTemporary storage(this);
+ GPRTemporary storageLength(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRReg storageGPR = storage.gpr();
+ GPRReg storageLengthGPR = storageLength.gpr();
+
+ writeBarrier(baseGPR, valueTagGPR, node.child2(), WriteBarrierForPropertyAccess, storageGPR, storageLengthGPR);
+
+ if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+
+ m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR);
+ m_jit.load32(MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), storageLengthGPR);
+
+ // Refuse to handle bizarre lengths.
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe)));
+
+ MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset()));
+
+ m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+
+ m_jit.add32(Imm32(1), storageLengthGPR);
+ m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ m_jit.add32(Imm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
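+        // The result of ArrayPush is the new length as an int32: tag in storageGPR,
+        // payload (the length) already in storageLengthGPR.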
+ m_jit.move(Imm32(JSValue::Int32Tag), storageGPR);
+
+ MacroAssembler::Jump done = m_jit.jump();
+
+ slowPath.link(&m_jit);
+
+ silentSpillAllRegisters(storageGPR, storageLengthGPR);
+ callOperation(operationArrayPush, storageGPR, storageLengthGPR, valueTagGPR, valuePayloadGPR, baseGPR);
+ silentFillAllRegisters(storageGPR, storageLengthGPR);
+
+ done.link(&m_jit);
+
+ jsValueResult(storageGPR, storageLengthGPR, m_compileIndex);
+ break;
+ }
+
+ case ArrayPop: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary valueTag(this);
+ GPRTemporary valuePayload(this);
+ GPRTemporary storage(this);
+ GPRTemporary storageLength(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg valueTagGPR = valueTag.gpr();
+ GPRReg valuePayloadGPR = valuePayload.gpr();
+ GPRReg storageGPR = storage.gpr();
+ GPRReg storageLengthGPR = storageLength.gpr();
+
+ if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+
+ m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR);
+ m_jit.load32(MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), storageLengthGPR);
+
+ MacroAssembler::Jump emptyArrayCase = m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
+
+ m_jit.sub32(Imm32(1), storageLengthGPR);
+
+ MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset()));
+
+ m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTagGPR);
+ m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), valuePayloadGPR);
+
+ m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+
+ MacroAssembler::Jump holeCase = m_jit.branch32(MacroAssembler::Equal, Imm32(JSValue::EmptyValueTag), valueTagGPR);
+
+ m_jit.store32(TrustedImm32(JSValue::EmptyValueTag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+
+ m_jit.sub32(MacroAssembler::Imm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+
+ MacroAssembler::JumpList done;
+
+ done.append(m_jit.jump());
+
+ holeCase.link(&m_jit);
+ emptyArrayCase.link(&m_jit);
+ m_jit.move(MacroAssembler::Imm32(jsUndefined().tag()), valueTagGPR);
+ m_jit.move(MacroAssembler::Imm32(jsUndefined().payload()), valuePayloadGPR);
+ done.append(m_jit.jump());
+
+ slowCase.link(&m_jit);
+
+ silentSpillAllRegisters(valueTagGPR, valuePayloadGPR);
+ callOperation(operationArrayPop, valueTagGPR, valuePayloadGPR, baseGPR);
+ silentFillAllRegisters(valueTagGPR, valuePayloadGPR);
+
+ done.link(&m_jit);
+
+ jsValueResult(valueTagGPR, valuePayloadGPR, m_compileIndex);
+ break;
+ }
+
+ case DFG::Jump: {
+ BlockIndex taken = node.takenBlockIndex();
+ if (taken != (m_block + 1))
+ addBranch(m_jit.jump(), taken);
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case Branch:
+ if (isStrictInt32(node.child1()) || at(node.child1()).shouldSpeculateInteger()) {
+ SpeculateIntegerOperand op(this, node.child1());
+
+ BlockIndex taken = node.takenBlockIndex();
+ BlockIndex notTaken = node.notTakenBlockIndex();
+
+ MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
+
+ if (taken == (m_block + 1)) {
+ condition = MacroAssembler::Zero;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ addBranch(m_jit.branchTest32(condition, op.gpr()), taken);
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+
+ noResult(m_compileIndex);
+ break;
+ }
+ emitBranch(node);
+ break;
+
+ case Return: {
+ ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT2);
+ ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR);
+ ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister);
+
+#if DFG_ENABLE(SUCCESS_STATS)
+ static SamplingCounter counter("SpeculativeJIT");
+ m_jit.emitCount(counter);
+#endif
+
+ // Return the result in returnValueGPR.
+ JSValueOperand op1(this, node.child1());
+ op1.fill();
+ if (op1.isDouble())
+ boxDouble(op1.fpr(), GPRInfo::returnValueGPR2, GPRInfo::returnValueGPR);
+ else {
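+            // Shuffle the tag/payload pair into the return registers without clobbering
+            // either half: swap when they are exactly crossed, otherwise order the moves
+            // so that neither source is overwritten before it is read.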
+ if (op1.payloadGPR() == GPRInfo::returnValueGPR2 && op1.tagGPR() == GPRInfo::returnValueGPR)
+ m_jit.swap(GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
+ else if (op1.payloadGPR() == GPRInfo::returnValueGPR2) {
+ m_jit.move(op1.payloadGPR(), GPRInfo::returnValueGPR);
+ m_jit.move(op1.tagGPR(), GPRInfo::returnValueGPR2);
+ } else {
+ m_jit.move(op1.tagGPR(), GPRInfo::returnValueGPR2);
+ m_jit.move(op1.payloadGPR(), GPRInfo::returnValueGPR);
+ }
+ }
+
+ // Grab the return address.
+ m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, GPRInfo::regT2);
+ // Restore our caller's "r".
+ m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, GPRInfo::callFrameRegister);
+ // Return.
+ m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT2);
+ m_jit.ret();
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case Throw:
+ case ThrowReferenceError: {
+ // We expect that throw statements are rare and are intended to exit the code block
+ // anyway, so we just OSR back to the old JIT for now.
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ case ToPrimitive: {
+ if (at(node.child1()).shouldSpeculateInteger()) {
+            // It's really profitable to speculate integer, since it's really cheap:
+            // it means we don't have to do any real work, and we emit a lot less code.
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+
+ ASSERT(op1.format() == DataFormatInteger);
+ m_jit.move(op1.gpr(), result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ // FIXME: Add string speculation here.
+
+ bool wasPrimitive = isKnownNumeric(node.child1()) || isKnownBoolean(node.child1());
+
+ JSValueOperand op1(this, node.child1());
+ GPRTemporary resultTag(this, op1);
+ GPRTemporary resultPayload(this, op1, false);
+
+ GPRReg op1TagGPR = op1.tagGPR();
+ GPRReg op1PayloadGPR = op1.payloadGPR();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ op1.use();
+
+ if (wasPrimitive) {
+ m_jit.move(op1TagGPR, resultTagGPR);
+ m_jit.move(op1PayloadGPR, resultPayloadGPR);
+ } else {
+ MacroAssembler::JumpList alreadyPrimitive;
+
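+            // A value is already primitive if it is not a cell, or if the cell is a string.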
+ alreadyPrimitive.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, TrustedImm32(JSValue::CellTag)));
+ alreadyPrimitive.append(m_jit.branchPtr(MacroAssembler::Equal, MacroAssembler::Address(op1PayloadGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info)));
+
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ callOperation(operationToPrimitive, resultTagGPR, resultPayloadGPR, op1TagGPR, op1PayloadGPR);
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+
+ MacroAssembler::Jump done = m_jit.jump();
+
+ alreadyPrimitive.link(&m_jit);
+ m_jit.move(op1TagGPR, resultTagGPR);
+ m_jit.move(op1PayloadGPR, resultPayloadGPR);
+
+ done.link(&m_jit);
+ }
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case StrCat:
+ case NewArray: {
+ // We really don't want to grow the register file just to do a StrCat or NewArray.
+ // Say we have 50 functions on the stack that all have a StrCat in them that has
+ // upwards of 10 operands. In the DFG this would mean that each one gets
+ // some random virtual register, and then to do the StrCat we'd need a second
+ // span of 10 operands just to have somewhere to copy the 10 operands to, where
+ // they'd be contiguous and we could easily tell the C code how to find them.
+ // Ugly! So instead we use the scratchBuffer infrastructure in JSGlobalData. That
+ // way, those 50 functions will share the same scratchBuffer for offloading their
+ // StrCat operands. It's about as good as we can do, unless we start doing
+ // virtual register coalescing to ensure that operands to StrCat get spilled
+ // in exactly the place where StrCat wants them, or else have the StrCat
+ // refer to those operands' SetLocal instructions to force them to spill in
+ // the right place. Basically, any way you cut it, the current approach
+ // probably has the best balance of performance and sensibility in the sense
+ // that it does not increase the complexity of the DFG JIT just to make StrCat
+ // fast and pretty.
+
+ EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * node.numChildren()));
+
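+        // Spill each operand into the scratch buffer as a full tag/payload JSValue so the
+        // C call sees a contiguous array of EncodedJSValues.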
+ for (unsigned operandIdx = 0; operandIdx < node.numChildren(); ++operandIdx) {
+ JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node.firstChild() + operandIdx]);
+ GPRReg opTagGPR = operand.tagGPR();
+ GPRReg opPayloadGPR = operand.payloadGPR();
+ operand.use();
+
+ m_jit.store32(opTagGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ m_jit.store32(opPayloadGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+
+ flushRegisters();
+
+ GPRResult resultPayload(this);
+ GPRResult2 resultTag(this);
+
+ callOperation(op == StrCat ? operationStrCat : operationNewArray, resultTag.gpr(), resultPayload.gpr(), buffer, node.numChildren());
+
+ // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag.
+ cellResult(resultPayload.gpr(), m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case NewArrayBuffer: {
+ flushRegisters();
+ GPRResult resultPayload(this);
+ GPRResult2 resultTag(this);
+
+ callOperation(operationNewArrayBuffer, resultTag.gpr(), resultPayload.gpr(), node.startConstant(), node.numConstants());
+
+ // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag.
+ cellResult(resultPayload.gpr(), m_compileIndex);
+ break;
+ }
+
+ case NewRegexp: {
+ flushRegisters();
+ GPRResult resultPayload(this);
+ GPRResult2 resultTag(this);
+
+ callOperation(operationNewRegexp, resultTag.gpr(), resultPayload.gpr(), m_jit.codeBlock()->regexp(node.regexpIndex()));
+
+ // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag.
+ cellResult(resultPayload.gpr(), m_compileIndex);
+ break;
+ }
+
+ case ConvertThis: {
+ if (isObjectPrediction(m_state.forNode(node.child1()).m_type)) {
+ SpeculateCellOperand thisValue(this, node.child1());
+ GPRTemporary result(this, thisValue);
+ m_jit.move(thisValue.gpr(), result.gpr());
+ cellResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ if (isOtherPrediction(at(node.child1()).prediction())) {
+ JSValueOperand thisValue(this, node.child1());
+ GPRTemporary scratch(this);
+
+ GPRReg thisValueTagGPR = thisValue.tagGPR();
+ GPRReg scratchGPR = scratch.gpr();
+
+ COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
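+            // OR-ing the tag with 1 collapses UndefinedTag and NullTag onto NullTag, so a
+            // single compare covers both undefined and null.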
+ m_jit.move(thisValueTagGPR, scratchGPR);
+ m_jit.or32(TrustedImm32(1), scratchGPR);
+ // This is hard. It would be better to save the value, but we can't quite do it,
+ // since this operation does not otherwise get the payload.
+ speculationCheck(BadType, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag)));
+
+ m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalThisObjectFor(node.codeOrigin)), scratchGPR);
+ cellResult(scratchGPR, m_compileIndex);
+ break;
+ }
+
+ if (isObjectPrediction(at(node.child1()).prediction())) {
+ SpeculateCellOperand thisValue(this, node.child1());
+ GPRReg thisValueGPR = thisValue.gpr();
+
+ if (!isObjectPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(thisValueGPR), node.child1(), m_jit.branchPtr(JITCompiler::Equal, JITCompiler::Address(thisValueGPR, JSCell::classInfoOffset()), JITCompiler::TrustedImmPtr(&JSString::s_info)));
+
+ GPRTemporary result(this, thisValue);
+ GPRReg resultGPR = result.gpr();
+ m_jit.move(thisValueGPR, resultGPR);
+ cellResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ JSValueOperand thisValue(this, node.child1());
+ GPRReg thisValueTagGPR = thisValue.tagGPR();
+ GPRReg thisValuePayloadGPR = thisValue.payloadGPR();
+
+ flushRegisters();
+
+ GPRResult2 resultTag(this);
+ GPRResult resultPayload(this);
+ callOperation(operationConvertThis, resultTag.gpr(), resultPayload.gpr(), thisValueTagGPR, thisValuePayloadGPR);
+
+ cellResult(resultPayload.gpr(), m_compileIndex);
+ break;
+ }
+
+ case CreateThis: {
+ // Note that there is not so much profit to speculate here. The only things we
+ // speculate on are (1) that it's a cell, since that eliminates cell checks
+ // later if the proto is reused, and (2) if we have a FinalObject prediction
+ // then we speculate because we want to get recompiled if it isn't (since
+ // otherwise we'd start taking slow path a lot).
+
+ SpeculateCellOperand proto(this, node.child1());
+ GPRTemporary result(this);
+ GPRTemporary scratch(this);
+
+ GPRReg protoGPR = proto.gpr();
+ GPRReg resultGPR = result.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ proto.use();
+
+ MacroAssembler::JumpList slowPath;
+
+ // Need to verify that the prototype is an object. If we have reason to believe
+ // that it's a FinalObject then we speculate on that directly. Otherwise we
+ // do the slow (structure-based) check.
+ if (at(node.child1()).shouldSpeculateFinalObject()) {
+ if (!isFinalObjectPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(protoGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(protoGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSFinalObject::s_info)));
+ } else {
+ m_jit.loadPtr(MacroAssembler::Address(protoGPR, JSCell::structureOffset()), scratchGPR);
+ slowPath.append(m_jit.branch8(MacroAssembler::Below, MacroAssembler::Address(scratchGPR, Structure::typeInfoTypeOffset()), MacroAssembler::TrustedImm32(ObjectType)));
+ }
+
+ // Load the inheritorID (the Structure that objects who have protoGPR as the prototype
+ // use to refer to that prototype). If the inheritorID is not set, go to slow path.
+ m_jit.loadPtr(MacroAssembler::Address(protoGPR, JSObject::offsetOfInheritorID()), scratchGPR);
+ slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, scratchGPR));
+
+ emitAllocateJSFinalObject(scratchGPR, resultGPR, scratchGPR, slowPath);
+
+ MacroAssembler::Jump done = m_jit.jump();
+
+ slowPath.link(&m_jit);
+
+ silentSpillAllRegisters(resultGPR);
+ if (node.codeOrigin.inlineCallFrame)
+ callOperation(operationCreateThisInlined, resultGPR, protoGPR, node.codeOrigin.inlineCallFrame->callee.get());
+ else
+ callOperation(operationCreateThis, resultGPR, protoGPR);
+ silentFillAllRegisters(resultGPR);
+
+ done.link(&m_jit);
+
+ cellResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case NewObject: {
+ GPRTemporary result(this);
+ GPRTemporary scratch(this);
+
+ GPRReg resultGPR = result.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ MacroAssembler::JumpList slowPath;
+
+ emitAllocateJSFinalObject(MacroAssembler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)->emptyObjectStructure()), resultGPR, scratchGPR, slowPath);
+
+ MacroAssembler::Jump done = m_jit.jump();
+
+ slowPath.link(&m_jit);
+
+ silentSpillAllRegisters(resultGPR);
+ callOperation(operationNewObject, resultGPR);
+ silentFillAllRegisters(resultGPR);
+
+ done.link(&m_jit);
+
+ cellResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case GetCallee: {
+ GPRTemporary result(this);
+ m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::Callee)), result.gpr());
+ cellResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case GetScopeChain: {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::ScopeChain)), resultGPR);
+ bool checkTopLevel = m_jit.codeBlock()->codeType() == FunctionCode && m_jit.codeBlock()->needsFullScopeChain();
+ int skip = node.scopeChainDepth();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ JITCompiler::Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = m_jit.branchTestPtr(JITCompiler::Zero, JITCompiler::addressFor(static_cast<VirtualRegister>(m_jit.codeBlock()->activationRegister())));
+ m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, next)), resultGPR);
+ activationNotCreated.link(&m_jit);
+ }
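+        // Walk the remaining links of the scope chain.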
+ while (skip--)
+ m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, next)), resultGPR);
+
+ m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, object)), resultGPR);
+
+ cellResult(resultGPR, m_compileIndex);
+ break;
+ }
+ case GetScopedVar: {
+ SpeculateCellOperand scopeChain(this, node.child1());
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ m_jit.loadPtr(JITCompiler::Address(scopeChain.gpr(), JSVariableObject::offsetOfRegisters()), resultPayloadGPR);
+ m_jit.load32(JITCompiler::Address(resultPayloadGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ m_jit.load32(JITCompiler::Address(resultPayloadGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
+ break;
+ }
+ case PutScopedVar: {
+ SpeculateCellOperand scopeChain(this, node.child1());
+ GPRTemporary scratchRegister(this);
+ GPRReg scratchGPR = scratchRegister.gpr();
+ m_jit.loadPtr(JITCompiler::Address(scopeChain.gpr(), JSVariableObject::offsetOfRegisters()), scratchGPR);
+ JSValueOperand value(this, node.child2());
+ m_jit.store32(value.tagGPR(), JITCompiler::Address(scratchGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ m_jit.store32(value.payloadGPR(), JITCompiler::Address(scratchGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ writeBarrier(scopeChain.gpr(), value.tagGPR(), node.child2(), WriteBarrierForVariableAccess, scratchGPR);
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case GetById: {
+ if (!node.prediction()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
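+        // A cell-predicted base can feed the property access cache directly;
+        // otherwise check the tag and send non-cells to the cache's slow path.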
+ if (isCellPrediction(at(node.child1()).prediction())) {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary resultTag(this, base);
+ GPRTemporary resultPayload(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg scratchGPR;
+
+ if (resultTagGPR == baseGPR)
+ scratchGPR = resultPayloadGPR;
+ else
+ scratchGPR = resultTagGPR;
+
+ base.use();
+
+ cachedGetById(InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber());
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ JSValueOperand base(this, node.child1());
+ GPRTemporary resultTag(this, base);
+ GPRTemporary resultPayload(this);
+
+ GPRReg baseTagGPR = base.tagGPR();
+ GPRReg basePayloadGPR = base.payloadGPR();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg scratchGPR;
+
+ if (resultTagGPR == basePayloadGPR)
+ scratchGPR = resultPayloadGPR;
+ else
+ scratchGPR = resultTagGPR;
+
+ base.use();
+
+ JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag));
+
+ cachedGetById(baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), notCell);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case GetArrayLength: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRReg baseGPR = base.gpr();
+
+ if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), resultGPR);
+ m_jit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), resultGPR);
+
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, resultGPR, MacroAssembler::TrustedImm32(0)));
+
+ integerResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case GetStringLength: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary result(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ if (!isStringPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info)));
+
+ m_jit.load32(MacroAssembler::Address(baseGPR, JSString::offsetOfLength()), resultGPR);
+
+ integerResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case GetByteArrayLength: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRReg baseGPR = base.gpr();
+
+ if (!isByteArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
+
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSByteArray::offsetOfStorage()), resultGPR);
+        m_jit.load32(MacroAssembler::Address(resultGPR, ByteArray::offsetOfSize()), resultGPR);
+
+ integerResult(resultGPR, m_compileIndex);
+ break;
+ }
+ case GetInt8ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->int8ArrayDescriptor(), node, !isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetInt16ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->int16ArrayDescriptor(), node, !isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetInt32ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->int32ArrayDescriptor(), node, !isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetUint8ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->uint8ArrayDescriptor(), node, !isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetUint16ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->uint16ArrayDescriptor(), node, !isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetUint32ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->uint32ArrayDescriptor(), node, !isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetFloat32ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->float32ArrayDescriptor(), node, !isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetFloat64ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->float64ArrayDescriptor(), node, !isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+
+ case CheckFunction: {
+ SpeculateCellOperand function(this, node.child1());
+ speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, function.gpr(), node.function()));
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case CheckStructure: {
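+        // If the abstract state already proves the structure is in the checked
+        // set, no code is needed. Otherwise compare the cell's structure
+        // against each member of the set and OSR exit if none match.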
+ if (m_state.forNode(node.child1()).m_structure.isSubsetOf(node.structureSet())) {
+ noResult(m_compileIndex);
+ break;
+ }
+
+ SpeculateCellOperand base(this, node.child1());
+
+ ASSERT(node.structureSet().size());
+
+ if (node.structureSet().size() == 1)
+ speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, JITCompiler::Address(base.gpr(), JSCell::structureOffset()), node.structureSet()[0]));
+ else {
+ GPRTemporary structure(this);
+
+ m_jit.loadPtr(JITCompiler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr());
+
+ JITCompiler::JumpList done;
+
+ for (size_t i = 0; i < node.structureSet().size() - 1; ++i)
+ done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node.structureSet()[i]));
+
+ speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, structure.gpr(), node.structureSet().last()));
+
+ done.link(&m_jit);
+ }
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case PutStructure: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRReg baseGPR = base.gpr();
+
+ m_jit.addWeakReferenceTransition(
+ node.codeOrigin.codeOriginOwner(),
+ node.structureTransitionData().previousStructure,
+ node.structureTransitionData().newStructure);
+
+#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ // Must always emit this write barrier as the structure transition itself requires it
+ writeBarrier(baseGPR, node.structureTransitionData().newStructure, WriteBarrierForGenericAccess);
+#endif
+
+ m_jit.storePtr(MacroAssembler::TrustedImmPtr(node.structureTransitionData().newStructure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case GetPropertyStorage: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary result(this, base);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
+
+ storageResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case GetIndexedPropertyStorage: {
+ compileGetIndexedPropertyStorage(node);
+ break;
+ }
+
+ case GetByOffset: {
+ StorageOperand storage(this, node.child1());
+ GPRTemporary resultTag(this, storage);
+ GPRTemporary resultPayload(this);
+
+ GPRReg storageGPR = storage.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()];
+
+ m_jit.load32(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
+ m_jit.load32(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
+ break;
+ }
+
+ case PutByOffset: {
+#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ SpeculateCellOperand base(this, node.child1());
+#endif
+ StorageOperand storage(this, node.child2());
+ JSValueOperand value(this, node.child3());
+
+ GPRReg storageGPR = storage.gpr();
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+
+#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ writeBarrier(base.gpr(), valueTagGPR, node.child3(), WriteBarrierForPropertyAccess);
+#endif
+
+ StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()];
+
+ m_jit.storePtr(valueTagGPR, JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ m_jit.storePtr(valuePayloadGPR, JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case PutById: {
+ SpeculateCellOperand base(this, node.child1());
+ JSValueOperand value(this, node.child2());
+ GPRTemporary scratch(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRReg scratchGPR = scratch.gpr();
+
+ base.use();
+ value.use();
+
+ cachedPutById(baseGPR, valueTagGPR, valuePayloadGPR, node.child2(), scratchGPR, node.identifierNumber(), NotDirect);
+
+ noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case PutByIdDirect: {
+ SpeculateCellOperand base(this, node.child1());
+ JSValueOperand value(this, node.child2());
+ GPRTemporary scratch(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRReg scratchGPR = scratch.gpr();
+
+ base.use();
+ value.use();
+
+ cachedPutById(baseGPR, valueTagGPR, valuePayloadGPR, node.child2(), scratchGPR, node.identifierNumber(), Direct);
+
+ noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case GetGlobalVar: {
+ GPRTemporary result(this);
+ GPRTemporary scratch(this);
+
+ JSVariableObject* globalObject = m_jit.globalObjectFor(node.codeOrigin);
+ m_jit.loadPtr(const_cast<WriteBarrier<Unknown>**>(globalObject->addressOfRegisters()), result.gpr());
+ m_jit.load32(JITCompiler::tagForGlobalVar(result.gpr(), node.varNumber()), scratch.gpr());
+ m_jit.load32(JITCompiler::payloadForGlobalVar(result.gpr(), node.varNumber()), result.gpr());
+
+ jsValueResult(scratch.gpr(), result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case PutGlobalVar: {
+ JSValueOperand value(this, node.child1());
+ GPRTemporary globalObject(this);
+ GPRTemporary scratch(this);
+
+ GPRReg globalObjectReg = globalObject.gpr();
+ GPRReg scratchReg = scratch.gpr();
+
+ m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectReg);
+
+ writeBarrier(m_jit.globalObjectFor(node.codeOrigin), value.tagGPR(), node.child1(), WriteBarrierForVariableAccess, scratchReg);
+
+ m_jit.loadPtr(MacroAssembler::Address(globalObjectReg, JSVariableObject::offsetOfRegisters()), scratchReg);
+ m_jit.store32(value.tagGPR(), JITCompiler::tagForGlobalVar(scratchReg, node.varNumber()));
+ m_jit.store32(value.payloadGPR(), JITCompiler::payloadForGlobalVar(scratchReg, node.varNumber()));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case CheckHasInstance: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary structure(this);
+
+        // Speculate that the base object's type info has the ImplementsDefaultHasInstance flag set.
+ m_jit.loadPtr(MacroAssembler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(structure.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance)));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case InstanceOf: {
+ compileInstanceOf(node);
+ break;
+ }
+
+ case Phi:
+ case Flush:
+ ASSERT_NOT_REACHED();
+
+ case Breakpoint:
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ m_jit.breakpoint();
+#else
+ ASSERT_NOT_REACHED();
+#endif
+ break;
+
+ case Call:
+ case Construct:
+ emitCall(node);
+ break;
+
+ case Resolve: {
+ flushRegisters();
+ GPRResult resultPayload(this);
+ GPRResult2 resultTag(this);
+ callOperation(operationResolve, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ break;
+ }
+
+ case ResolveBase: {
+ flushRegisters();
+ GPRResult resultPayload(this);
+ GPRResult2 resultTag(this);
+ callOperation(operationResolveBase, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ break;
+ }
+
+ case ResolveBaseStrictPut: {
+ flushRegisters();
+ GPRResult resultPayload(this);
+ GPRResult2 resultTag(this);
+ callOperation(operationResolveBaseStrictPut, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ break;
+ }
+
+ case ResolveGlobal: {
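+        // Resolve a global through the GlobalResolveInfo cache: if the global
+        // object's structure matches the cached structure, load the value
+        // straight out of property storage at the cached offset; otherwise
+        // fall back to operationResolveGlobal.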
+ GPRTemporary globalObject(this);
+ GPRTemporary resolveInfo(this);
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+
+ GPRReg globalObjectGPR = globalObject.gpr();
+ GPRReg resolveInfoGPR = resolveInfo.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ ResolveGlobalData& data = m_jit.graph().m_resolveGlobalData[node.resolveGlobalDataIndex()];
+ GlobalResolveInfo* resolveInfoAddress = &(m_jit.codeBlock()->globalResolveInfo(data.resolveInfoIndex));
+
+ // Check Structure of global object
+ m_jit.move(JITCompiler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectGPR);
+ m_jit.move(JITCompiler::TrustedImmPtr(resolveInfoAddress), resolveInfoGPR);
+ m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), resultPayloadGPR);
+
+ JITCompiler::Jump structuresNotMatch = m_jit.branchPtr(JITCompiler::NotEqual, resultPayloadGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset()));
+
+ // Fast case
+ m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::offsetOfPropertyStorage()), resultPayloadGPR);
+ m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR);
+ m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
+
+ JITCompiler::Jump wasFast = m_jit.jump();
+
+ structuresNotMatch.link(&m_jit);
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ callOperation(operationResolveGlobal, resultTagGPR, resultPayloadGPR, resolveInfoGPR, &m_jit.codeBlock()->identifier(data.identifierNumber));
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+
+ wasFast.link(&m_jit);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
+ break;
+ }
+
+ case ForceOSRExit: {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ case Phantom:
+ // This is a no-op.
+ noResult(m_compileIndex);
+ break;
+
+ case InlineStart:
+ case Nop:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+
+ if (!m_compileOkay)
+ return;
+
+ if (node.hasResult() && node.mustGenerate())
+ use(m_compileIndex);
+}
+
+#endif
+
+} } // namespace JSC::DFG
+
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
new file mode 100644
index 000000000..c6586a679
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
@@ -0,0 +1,3537 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGSpeculativeJIT.h"
+
+#include "JSByteArray.h"
+
+#if ENABLE(DFG_JIT)
+
+namespace JSC { namespace DFG {
+
+#if USE(JSVALUE64)
+
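+// This file relies on the JSVALUE64 encoding, in which a whole JSValue fits in
+// one GPR: cells have none of the bits in tagMaskRegister set, numbers have at
+// least one bit of tagTypeNumberRegister set, and boxed int32s compare
+// AboveOrEqual to tagTypeNumberRegister. Int32s are boxed by or-ing in
+// tagTypeNumberRegister; doubles are boxed by subtracting it. The fill
+// functions below use these invariants to convert between register formats.
+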
+GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat)
+{
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ if (info.registerFormat() == DataFormatNone) {
+ GPRReg gpr = allocate();
+
+ if (node.hasConstant()) {
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ if (isInt32Constant(nodeIndex)) {
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ info.fillInteger(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+ if (isNumberConstant(nodeIndex)) {
+ JSValue jsValue = jsNumber(valueOfNumberConstant(nodeIndex));
+ m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
+ } else {
+ ASSERT(isJSConstant(nodeIndex));
+ JSValue jsValue = valueOfJSConstant(nodeIndex);
+ m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
+ }
+ } else if (info.spillFormat() == DataFormatInteger) {
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
+ // Tag it, since fillInteger() is used when we want a boxed integer.
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ } else {
+ ASSERT(info.spillFormat() == DataFormatJS || info.spillFormat() == DataFormatJSInteger);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ }
+
+ // Since we statically know that we're filling an integer, and values
+ // in the RegisterFile are boxed, this must be DataFormatJSInteger.
+ // We will check this with a jitAssert below.
+ info.fillJSValue(gpr, DataFormatJSInteger);
+ unlock(gpr);
+ }
+
+ switch (info.registerFormat()) {
+ case DataFormatNone:
+ // Should have filled, above.
+ case DataFormatJSDouble:
+ case DataFormatDouble:
+ case DataFormatJS:
+ case DataFormatCell:
+ case DataFormatJSCell:
+ case DataFormatBoolean:
+ case DataFormatJSBoolean:
+ case DataFormatStorage:
+ // Should only be calling this function if we know this operand to be integer.
+ ASSERT_NOT_REACHED();
+
+ case DataFormatJSInteger: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.jitAssertIsJSInt32(gpr);
+ returnFormat = DataFormatJSInteger;
+ return gpr;
+ }
+
+ case DataFormatInteger: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.jitAssertIsInt32(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+}
+
+FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex)
+{
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ if (info.registerFormat() == DataFormatNone) {
+ if (node.hasConstant()) {
+ GPRReg gpr = allocate();
+
+ if (isInt32Constant(nodeIndex)) {
+ // FIXME: should not be reachable?
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ info.fillInteger(gpr);
+ unlock(gpr);
+ } else if (isNumberConstant(nodeIndex)) {
+ FPRReg fpr = fprAllocate();
+ m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(nodeIndex)))), gpr);
+ m_jit.movePtrToDouble(gpr, fpr);
+ unlock(gpr);
+
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ return fpr;
+ } else {
+ // FIXME: should not be reachable?
+ ASSERT(isJSConstant(nodeIndex));
+ JSValue jsValue = valueOfJSConstant(nodeIndex);
+ m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ info.fillJSValue(gpr, DataFormatJS);
+ unlock(gpr);
+ }
+ } else {
+ DataFormat spillFormat = info.spillFormat();
+ switch (spillFormat) {
+ case DataFormatDouble: {
+ FPRReg fpr = fprAllocate();
+ m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ return fpr;
+ }
+
+ case DataFormatInteger: {
+ GPRReg gpr = allocate();
+
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillInteger(gpr);
+ unlock(gpr);
+ break;
+ }
+
+ default:
+ GPRReg gpr = allocate();
+
+ ASSERT(spillFormat & DataFormatJS);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillJSValue(gpr, spillFormat);
+ unlock(gpr);
+ break;
+ }
+ }
+ }
+
+ switch (info.registerFormat()) {
+ case DataFormatNone:
+ // Should have filled, above.
+ case DataFormatCell:
+ case DataFormatJSCell:
+ case DataFormatBoolean:
+ case DataFormatJSBoolean:
+ case DataFormatStorage:
+ // Should only be calling this function if we know this operand to be numeric.
+ ASSERT_NOT_REACHED();
+
+ case DataFormatJS: {
+ GPRReg jsValueGpr = info.gpr();
+ m_gprs.lock(jsValueGpr);
+ FPRReg fpr = fprAllocate();
+ GPRReg tempGpr = allocate(); // FIXME: can we skip this allocation on the last use of the virtual register?
+
+ JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+
+ m_jit.jitAssertIsJSDouble(jsValueGpr);
+
+ // First, if we get here we have a double encoded as a JSValue
+ m_jit.move(jsValueGpr, tempGpr);
+ unboxDouble(tempGpr, fpr);
+ JITCompiler::Jump hasUnboxedDouble = m_jit.jump();
+
+ // Finally, handle integers.
+ isInteger.link(&m_jit);
+ m_jit.convertInt32ToDouble(jsValueGpr, fpr);
+ hasUnboxedDouble.link(&m_jit);
+
+ m_gprs.release(jsValueGpr);
+ m_gprs.unlock(jsValueGpr);
+ m_gprs.unlock(tempGpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ info.killSpilled();
+ return fpr;
+ }
+
+ case DataFormatJSInteger:
+ case DataFormatInteger: {
+ FPRReg fpr = fprAllocate();
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.convertInt32ToDouble(gpr, fpr);
+ m_gprs.unlock(gpr);
+ return fpr;
+ }
+
+ // Unbox the double
+ case DataFormatJSDouble: {
+ GPRReg gpr = info.gpr();
+ FPRReg fpr = fprAllocate();
+ if (m_gprs.isLocked(gpr)) {
+ // Make sure we don't trample gpr if it is in use.
+ GPRReg temp = allocate();
+ m_jit.move(gpr, temp);
+ unboxDouble(temp, fpr);
+ unlock(temp);
+ } else
+ unboxDouble(gpr, fpr);
+
+ m_gprs.release(gpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+
+ info.fillDouble(fpr);
+ return fpr;
+ }
+
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ m_fprs.lock(fpr);
+ return fpr;
+ }
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidFPRReg;
+}
+
+GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex)
+{
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+ GPRReg gpr = allocate();
+
+ if (node.hasConstant()) {
+ if (isInt32Constant(nodeIndex)) {
+ info.fillJSValue(gpr, DataFormatJSInteger);
+ JSValue jsValue = jsNumber(valueOfInt32Constant(nodeIndex));
+ m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
+ } else if (isNumberConstant(nodeIndex)) {
+ info.fillJSValue(gpr, DataFormatJSDouble);
+ JSValue jsValue(JSValue::EncodeAsDouble, valueOfNumberConstant(nodeIndex));
+ m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
+ } else {
+ ASSERT(isJSConstant(nodeIndex));
+ JSValue jsValue = valueOfJSConstant(nodeIndex);
+ m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
+ info.fillJSValue(gpr, DataFormatJS);
+ }
+
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ } else {
+ DataFormat spillFormat = info.spillFormat();
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ if (spillFormat == DataFormatInteger) {
+ m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ spillFormat = DataFormatJSInteger;
+ } else {
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ if (spillFormat == DataFormatDouble) {
+ // Need to box the double, since we want a JSValue.
+ m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ spillFormat = DataFormatJSDouble;
+ } else
+ ASSERT(spillFormat & DataFormatJS);
+ }
+ info.fillJSValue(gpr, spillFormat);
+ }
+ return gpr;
+ }
+
+ case DataFormatInteger: {
+ GPRReg gpr = info.gpr();
+ // If the register has already been locked we need to take a copy.
+ // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger.
+ if (m_gprs.isLocked(gpr)) {
+ GPRReg result = allocate();
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr, result);
+ return result;
+ }
+ m_gprs.lock(gpr);
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ info.fillJSValue(gpr, DataFormatJSInteger);
+ return gpr;
+ }
+
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ GPRReg gpr = boxDouble(fpr);
+
+ // Update all info
+ info.fillJSValue(gpr, DataFormatJSDouble);
+ m_fprs.release(fpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderJS);
+
+ return gpr;
+ }
+
+ case DataFormatCell:
+ // No retag required on JSVALUE64!
+ case DataFormatJS:
+ case DataFormatJSInteger:
+ case DataFormatJSDouble:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ return gpr;
+ }
+
+ case DataFormatBoolean:
+ case DataFormatStorage:
+ // this type currently never occurs
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+}
+
+void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node)
+{
+ if (isKnownNumeric(node.child1())) {
+ JSValueOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ m_jit.move(op1.gpr(), result.gpr());
+ jsValueResult(result.gpr(), m_compileIndex);
+ return;
+ }
+
+ JSValueOperand op1(this, node.child1());
+ GPRTemporary result(this);
+
+ ASSERT(!isInt32Constant(node.child1()));
+ ASSERT(!isNumberConstant(node.child1()));
+
+ GPRReg jsValueGpr = op1.gpr();
+ GPRReg gpr = result.gpr();
+ op1.use();
+
+ JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump nonNumeric = m_jit.branchTestPtr(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+
+ // First, if we get here we have a double encoded as a JSValue
+ m_jit.move(jsValueGpr, gpr);
+ JITCompiler::Jump hasUnboxedDouble = m_jit.jump();
+
+ // Next handle cells (& other JS immediates)
+ nonNumeric.link(&m_jit);
+ silentSpillAllRegisters(gpr);
+ callOperation(dfgConvertJSValueToNumber, FPRInfo::returnValueFPR, jsValueGpr);
+ boxDouble(FPRInfo::returnValueFPR, gpr);
+ silentFillAllRegisters(gpr);
+ JITCompiler::Jump hasCalledToNumber = m_jit.jump();
+
+ // Finally, handle integers.
+ isInteger.link(&m_jit);
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, jsValueGpr, gpr);
+ hasUnboxedDouble.link(&m_jit);
+ hasCalledToNumber.link(&m_jit);
+
+ jsValueResult(result.gpr(), m_compileIndex, UseChildrenCalledExplicitly);
+}
+
+void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node)
+{
+ ASSERT(!isInt32Constant(node.child1()));
+
+ if (isKnownInteger(node.child1())) {
+ IntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ m_jit.move(op1.gpr(), result.gpr());
+ integerResult(result.gpr(), m_compileIndex);
+ return;
+ }
+
+ GenerationInfo& childInfo = m_generationInfo[at(node.child1()).virtualRegister()];
+ if (childInfo.isJSDouble()) {
+ DoubleOperand op1(this, node.child1());
+ GPRTemporary result(this);
+ FPRReg fpr = op1.fpr();
+ GPRReg gpr = result.gpr();
+ op1.use();
+ JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful);
+
+ silentSpillAllRegisters(gpr);
+ callOperation(toInt32, gpr, fpr);
+ silentFillAllRegisters(gpr);
+
+ truncatedToInteger.link(&m_jit);
+ integerResult(gpr, m_compileIndex, UseChildrenCalledExplicitly);
+ return;
+ }
+
+ JSValueOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ GPRReg jsValueGpr = op1.gpr();
+ GPRReg resultGPR = result.gpr();
+ op1.use();
+
+ JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+
+ // First handle non-integers
+ silentSpillAllRegisters(resultGPR);
+ callOperation(dfgConvertJSValueToInt32, resultGPR, jsValueGpr);
+ silentFillAllRegisters(resultGPR);
+ JITCompiler::Jump hasCalledToInt32 = m_jit.jump();
+
+ // Then handle integers.
+ isInteger.link(&m_jit);
+ m_jit.zeroExtend32ToPtr(jsValueGpr, resultGPR);
+ hasCalledToInt32.link(&m_jit);
+ integerResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
+}
+
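+// Box a value that is known to be a uint32 but is held in an int32 register.
+// Non-negative values are boxed as int32 immediates; values with the sign bit
+// set are converted to double and 2^32 is added to recover the unsigned value.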
+void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
+{
+ IntegerOperand op1(this, node.child1());
+ FPRTemporary boxer(this);
+ GPRTemporary result(this, op1);
+
+ JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, op1.gpr(), TrustedImm32(0));
+
+ m_jit.convertInt32ToDouble(op1.gpr(), boxer.fpr());
+ m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), boxer.fpr());
+
+ boxDouble(boxer.fpr(), result.gpr());
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ positive.link(&m_jit);
+
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, op1.gpr(), result.gpr());
+
+ done.link(&m_jit);
+
+ jsValueResult(result.gpr(), m_compileIndex);
+}
+
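+// Emit the fast path of a get_by_id property access cache: a patchable
+// structure compare followed by a compact patchable load from property
+// storage, with a slow path that calls operationGetByIdOptimize. All of the
+// labels involved are recorded in a PropertyAccessRecord so the access can be
+// repatched later.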
+JITCompiler::Call SpeculativeJIT::cachedGetById(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget)
+{
+ JITCompiler::DataLabelPtr structureToCompare;
+ JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
+
+ m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
+ JITCompiler::DataLabelCompact loadWithPatch = m_jit.loadPtrWithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR, 0), resultGPR);
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ structureCheck.link(&m_jit);
+
+ if (slowPathTarget.isSet())
+ slowPathTarget.link(&m_jit);
+
+ JITCompiler::Label slowCase = m_jit.label();
+
+ silentSpillAllRegisters(resultGPR);
+ JITCompiler::Call functionCall = callOperation(operationGetByIdOptimize, resultGPR, baseGPR, identifier(identifierNumber));
+ silentFillAllRegisters(resultGPR);
+
+ done.link(&m_jit);
+
+ JITCompiler::Label doneLabel = m_jit.label();
+
+ m_jit.addPropertyAccess(PropertyAccessRecord(structureToCompare, functionCall, structureCheck, loadWithPatch, slowCase, doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(resultGPR), safeCast<int8_t>(scratchGPR)));
+
+ if (scratchGPR != resultGPR && scratchGPR != InvalidGPRReg)
+ unlock(scratchGPR);
+
+ return functionCall;
+}
+
+void SpeculativeJIT::cachedPutById(GPRReg baseGPR, GPRReg valueGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
+{
+ JITCompiler::DataLabelPtr structureToCompare;
+ JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
+
+ writeBarrier(baseGPR, valueGPR, valueIndex, WriteBarrierForPropertyAccess, scratchGPR);
+
+ m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
+ JITCompiler::DataLabel32 storeWithPatch = m_jit.storePtrWithAddressOffsetPatch(valueGPR, JITCompiler::Address(scratchGPR, 0));
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ structureCheck.link(&m_jit);
+
+ if (slowPathTarget.isSet())
+ slowPathTarget.link(&m_jit);
+
+ JITCompiler::Label slowCase = m_jit.label();
+
+ silentSpillAllRegisters(InvalidGPRReg);
+ V_DFGOperation_EJCI optimizedCall;
+ if (m_jit.strictModeFor(at(m_compileIndex).codeOrigin)) {
+ if (putKind == Direct)
+ optimizedCall = operationPutByIdDirectStrictOptimize;
+ else
+ optimizedCall = operationPutByIdStrictOptimize;
+ } else {
+ if (putKind == Direct)
+ optimizedCall = operationPutByIdDirectNonStrictOptimize;
+ else
+ optimizedCall = operationPutByIdNonStrictOptimize;
+ }
+ JITCompiler::Call functionCall = callOperation(optimizedCall, valueGPR, baseGPR, identifier(identifierNumber));
+ silentFillAllRegisters(InvalidGPRReg);
+
+ done.link(&m_jit);
+ JITCompiler::Label doneLabel = m_jit.label();
+
+ m_jit.addPropertyAccess(PropertyAccessRecord(structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(storeWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), safeCast<int8_t>(scratchGPR)));
+}
+
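+// Compare a value against null/undefined without speculation. For cells the
+// answer depends on the structure's MasqueradesAsUndefined flag; for non-cells,
+// masking off TagBitUndefined lets null and undefined be recognised with a
+// single compare against ValueNull.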
+void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(NodeIndex operand, bool invert)
+{
+ JSValueOperand arg(this, operand);
+ GPRReg argGPR = arg.gpr();
+
+ GPRTemporary result(this, arg);
+ GPRReg resultGPR = result.gpr();
+
+ JITCompiler::Jump notCell;
+
+ if (!isKnownCell(operand))
+ notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
+
+ m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR);
+ m_jit.test8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), resultGPR);
+
+ if (!isKnownCell(operand)) {
+ JITCompiler::Jump done = m_jit.jump();
+
+ notCell.link(&m_jit);
+
+ m_jit.move(argGPR, resultGPR);
+ m_jit.andPtr(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
+ m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR);
+
+ done.link(&m_jit);
+ }
+
+ m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
+ jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean);
+}
+
+void SpeculativeJIT::nonSpeculativePeepholeBranchNull(NodeIndex operand, NodeIndex branchNodeIndex, bool invert)
+{
+ Node& branchNode = at(branchNodeIndex);
+ BlockIndex taken = branchNode.takenBlockIndex();
+ BlockIndex notTaken = branchNode.notTakenBlockIndex();
+
+ if (taken == (m_block + 1)) {
+ invert = !invert;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ JSValueOperand arg(this, operand);
+ GPRReg argGPR = arg.gpr();
+
+ GPRTemporary result(this, arg);
+ GPRReg resultGPR = result.gpr();
+
+ JITCompiler::Jump notCell;
+
+ if (!isKnownCell(operand))
+ notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
+
+ m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR);
+ addBranch(m_jit.branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined)), taken);
+
+ if (!isKnownCell(operand)) {
+ addBranch(m_jit.jump(), notTaken);
+
+ notCell.link(&m_jit);
+
+ m_jit.move(argGPR, resultGPR);
+ m_jit.andPtr(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
+ addBranch(m_jit.branchPtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))), taken);
+ }
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
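+// If the compare's only consumer is the Branch that immediately follows it,
+// fuse the two: emit the branch form and advance m_compileIndex past the
+// Branch node. Otherwise materialize a boolean result.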
+bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, NodeIndex operand, bool invert)
+{
+ NodeIndex branchNodeIndex = detectPeepHoleBranch();
+ if (branchNodeIndex != NoNode) {
+ ASSERT(node.adjustedRefCount() == 1);
+
+ nonSpeculativePeepholeBranchNull(operand, branchNodeIndex, invert);
+
+ use(node.child1());
+ use(node.child2());
+ m_compileIndex = branchNodeIndex;
+
+ return true;
+ }
+
+ nonSpeculativeNonPeepholeCompareNull(operand, invert);
+
+ return false;
+}
+
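+// A relational compare fused with the branch that follows it. If either
+// operand is known not to be an int32, call the helper directly; otherwise
+// compare boxed int32s inline and fall back to the helper only when an
+// operand turns out not to be an int32.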
+void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
+{
+ Node& branchNode = at(branchNodeIndex);
+ BlockIndex taken = branchNode.takenBlockIndex();
+ BlockIndex notTaken = branchNode.notTakenBlockIndex();
+
+ JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;
+
+ // The branch instruction will branch to the taken block.
+ // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+ if (taken == (m_block + 1)) {
+ cond = JITCompiler::invert(cond);
+ callResultCondition = JITCompiler::Zero;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ JSValueOperand arg1(this, node.child1());
+ JSValueOperand arg2(this, node.child2());
+ GPRReg arg1GPR = arg1.gpr();
+ GPRReg arg2GPR = arg2.gpr();
+
+ JITCompiler::JumpList slowPath;
+
+ if (isKnownNotInteger(node.child1()) || isKnownNotInteger(node.child2())) {
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ flushRegisters();
+ callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
+
+ addBranch(m_jit.branchTest32(callResultCondition, resultGPR), taken);
+ } else {
+ GPRTemporary result(this, arg2);
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ if (!isKnownInteger(node.child1()))
+ slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
+ if (!isKnownInteger(node.child2()))
+ slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
+
+ addBranch(m_jit.branch32(cond, arg1GPR, arg2GPR), taken);
+
+ if (!isKnownInteger(node.child1()) || !isKnownInteger(node.child2())) {
+ addBranch(m_jit.jump(), notTaken);
+
+ slowPath.link(&m_jit);
+
+ silentSpillAllRegisters(resultGPR);
+ callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
+ silentFillAllRegisters(resultGPR);
+
+ addBranch(m_jit.branchTest32(callResultCondition, resultGPR), taken);
+ }
+ }
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
+void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
+{
+ JSValueOperand arg1(this, node.child1());
+ JSValueOperand arg2(this, node.child2());
+ GPRReg arg1GPR = arg1.gpr();
+ GPRReg arg2GPR = arg2.gpr();
+
+ JITCompiler::JumpList slowPath;
+
+ if (isKnownNotInteger(node.child1()) || isKnownNotInteger(node.child2())) {
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ flushRegisters();
+ callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
+
+ m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
+ jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
+ } else {
+ GPRTemporary result(this, arg2);
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ if (!isKnownInteger(node.child1()))
+ slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
+ if (!isKnownInteger(node.child2()))
+ slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
+
+ m_jit.compare32(cond, arg1GPR, arg2GPR, resultGPR);
+
+ if (!isKnownInteger(node.child1()) || !isKnownInteger(node.child2())) {
+ JITCompiler::Jump haveResult = m_jit.jump();
+
+ slowPath.link(&m_jit);
+
+ silentSpillAllRegisters(resultGPR);
+ callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
+ silentFillAllRegisters(resultGPR);
+
+ m_jit.andPtr(TrustedImm32(1), resultGPR);
+
+ haveResult.link(&m_jit);
+ }
+
+ m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
+
+ jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
+ }
+}
+
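+// Strict equality fused with the branch that follows it. Equal cell pointers
+// are always strictly equal, but unequal pointers still need the runtime call
+// (two distinct string cells may hold equal strings). When the operands are
+// not known to be cells, or-ing them together classifies the pair in one step:
+// no tagMaskRegister bits set means both are cells, any tagTypeNumberRegister
+// bit means a number is involved and the helper must decide, and everything
+// else can be decided by comparing the raw values.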
+void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branchNodeIndex, bool invert)
+{
+ Node& branchNode = at(branchNodeIndex);
+ BlockIndex taken = branchNode.takenBlockIndex();
+ BlockIndex notTaken = branchNode.notTakenBlockIndex();
+
+ // The branch instruction will branch to the taken block.
+ // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+ if (taken == (m_block + 1)) {
+ invert = !invert;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ JSValueOperand arg1(this, node.child1());
+ JSValueOperand arg2(this, node.child2());
+ GPRReg arg1GPR = arg1.gpr();
+ GPRReg arg2GPR = arg2.gpr();
+
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ if (isKnownCell(node.child1()) && isKnownCell(node.child2())) {
+        // See if we get lucky: if the arguments are cells and they reference the same
+        // cell, then they must be strictly equal.
+ addBranch(m_jit.branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR), invert ? notTaken : taken);
+
+ silentSpillAllRegisters(resultGPR);
+ callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
+ silentFillAllRegisters(resultGPR);
+
+ addBranch(m_jit.branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR), taken);
+ } else {
+ m_jit.orPtr(arg1GPR, arg2GPR, resultGPR);
+
+ JITCompiler::Jump twoCellsCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
+
+ JITCompiler::Jump numberCase = m_jit.branchTestPtr(JITCompiler::NonZero, resultGPR, GPRInfo::tagTypeNumberRegister);
+
+ addBranch(m_jit.branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR), taken);
+ addBranch(m_jit.jump(), notTaken);
+
+ twoCellsCase.link(&m_jit);
+ addBranch(m_jit.branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR), invert ? notTaken : taken);
+
+ numberCase.link(&m_jit);
+
+ silentSpillAllRegisters(resultGPR);
+ callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR);
+ silentFillAllRegisters(resultGPR);
+
+ addBranch(m_jit.branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR), taken);
+ }
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
+void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
+{
+ JSValueOperand arg1(this, node.child1());
+ JSValueOperand arg2(this, node.child2());
+ GPRReg arg1GPR = arg1.gpr();
+ GPRReg arg2GPR = arg2.gpr();
+
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ if (isKnownCell(node.child1()) && isKnownCell(node.child2())) {
+        // See if we get lucky: if the arguments are cells and they reference the same
+        // cell, then they must be strictly equal.
+ JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1GPR, arg2GPR);
+
+ m_jit.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert))), resultGPR);
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ notEqualCase.link(&m_jit);
+
+ silentSpillAllRegisters(resultGPR);
+ callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
+ silentFillAllRegisters(resultGPR);
+
+ m_jit.andPtr(JITCompiler::TrustedImm32(1), resultGPR);
+ m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
+
+ done.link(&m_jit);
+ } else {
+ m_jit.orPtr(arg1GPR, arg2GPR, resultGPR);
+
+ JITCompiler::Jump twoCellsCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
+
+ JITCompiler::Jump numberCase = m_jit.branchTestPtr(JITCompiler::NonZero, resultGPR, GPRInfo::tagTypeNumberRegister);
+
+ m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR);
+
+ JITCompiler::Jump done1 = m_jit.jump();
+
+ twoCellsCase.link(&m_jit);
+ JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1GPR, arg2GPR);
+
+ m_jit.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert))), resultGPR);
+
+ JITCompiler::Jump done2 = m_jit.jump();
+
+ numberCase.link(&m_jit);
+ notEqualCase.link(&m_jit);
+
+ silentSpillAllRegisters(resultGPR);
+ callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR);
+ silentFillAllRegisters(resultGPR);
+
+ m_jit.andPtr(JITCompiler::TrustedImm32(1), resultGPR);
+
+ done1.link(&m_jit);
+
+ m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
+
+ done2.link(&m_jit);
+ }
+
+ jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
+}
+
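+// Emit a Call or Construct. The callee and arguments are stored into the
+// outgoing call frame, then a patchable pointer compare on the callee guards a
+// near call. If the callee does not match, the slow path goes through the
+// link-call/link-construct operation. Both call sites are registered with
+// addJSCall so they can be linked to the target once it is known.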
+void SpeculativeJIT::emitCall(Node& node)
+{
+ P_DFGOperation_E slowCallFunction;
+
+ if (node.op == Call)
+ slowCallFunction = operationLinkCall;
+ else {
+ ASSERT(node.op == Construct);
+ slowCallFunction = operationLinkConstruct;
+ }
+
+ // For constructors, the this argument is not passed but we have to make space
+ // for it.
+ int dummyThisArgument = node.op == Call ? 0 : 1;
+
+ CallLinkInfo::CallType callType = node.op == Call ? CallLinkInfo::Call : CallLinkInfo::Construct;
+
+ NodeIndex calleeNodeIndex = m_jit.graph().m_varArgChildren[node.firstChild()];
+ JSValueOperand callee(this, calleeNodeIndex);
+ GPRReg calleeGPR = callee.gpr();
+ use(calleeNodeIndex);
+
+ // The call instruction's first child is either the function (normal call) or the
+    // receiver (method call). Subsequent children are the arguments.
+ int numPassedArgs = node.numChildren() - 1;
+
+ m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(RegisterFile::ArgumentCount));
+ m_jit.storePtr(GPRInfo::callFrameRegister, callFrameSlot(RegisterFile::CallerFrame));
+ m_jit.storePtr(calleeGPR, callFrameSlot(RegisterFile::Callee));
+
+ for (int i = 0; i < numPassedArgs; i++) {
+ NodeIndex argNodeIndex = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i];
+ JSValueOperand arg(this, argNodeIndex);
+ GPRReg argGPR = arg.gpr();
+ use(argNodeIndex);
+
+ m_jit.storePtr(argGPR, argumentSlot(i + dummyThisArgument));
+ }
+
+ flushRegisters();
+
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ JITCompiler::DataLabelPtr targetToCheck;
+ JITCompiler::Jump slowPath;
+
+ slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(JSValue::encode(JSValue())));
+ m_jit.loadPtr(MacroAssembler::Address(calleeGPR, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), resultGPR);
+ m_jit.storePtr(resultGPR, callFrameSlot(RegisterFile::ScopeChain));
+
+ m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
+
+ JITCompiler::Call fastCall = m_jit.nearCall();
+ m_jit.notifyCall(fastCall, at(m_compileIndex).codeOrigin);
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ slowPath.link(&m_jit);
+
+ m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ JITCompiler::Call slowCall = m_jit.addFastExceptionCheck(m_jit.appendCall(slowCallFunction), at(m_compileIndex).codeOrigin);
+ m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
+ m_jit.notifyCall(m_jit.call(GPRInfo::returnValueGPR), at(m_compileIndex).codeOrigin);
+
+ done.link(&m_jit);
+
+ m_jit.move(GPRInfo::returnValueGPR, resultGPR);
+
+ jsValueResult(resultGPR, m_compileIndex, DataFormatJS, UseChildrenCalledExplicitly);
+
+ m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, at(m_compileIndex).codeOrigin);
+}
+
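+// Fill a node's value as an int32, speculating that it is one. Values that
+// cannot be int32 (doubles, cells, booleans, non-int32 constants) terminate
+// speculative execution. A strict fill returns an untagged int32; a non-strict
+// fill may return a boxed DataFormatJSInteger instead.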
+template<bool strict>
+GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& returnFormat)
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "SpecInt@%d ", nodeIndex);
+#endif
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+ if ((node.hasConstant() && !isInt32Constant(nodeIndex)) || info.spillFormat() == DataFormatDouble) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ returnFormat = DataFormatInteger;
+ return allocate();
+ }
+
+ GPRReg gpr = allocate();
+
+ if (node.hasConstant()) {
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ ASSERT(isInt32Constant(nodeIndex));
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ info.fillInteger(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+
+ DataFormat spillFormat = info.spillFormat();
+
+ ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);
+
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+
+ if (spillFormat == DataFormatJSInteger || spillFormat == DataFormatInteger) {
+ // If we know this was spilled as an integer we can fill without checking.
+ if (strict) {
+ m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillInteger(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+ if (spillFormat == DataFormatInteger) {
+ m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ } else
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillJSValue(gpr, DataFormatJSInteger);
+ returnFormat = DataFormatJSInteger;
+ return gpr;
+ }
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+
+ // Fill as JSValue, and fall through.
+ info.fillJSValue(gpr, DataFormatJSInteger);
+ m_gprs.unlock(gpr);
+ }
+
+ case DataFormatJS: {
+ // Check the value is an integer.
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchPtr(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
+ info.fillJSValue(gpr, DataFormatJSInteger);
+ // If !strict we're done, return.
+ if (!strict) {
+ returnFormat = DataFormatJSInteger;
+ return gpr;
+ }
+ // else fall through & handle as DataFormatJSInteger.
+ m_gprs.unlock(gpr);
+ }
+
+ case DataFormatJSInteger: {
+ // In a strict fill we need to strip off the value tag.
+ if (strict) {
+ GPRReg gpr = info.gpr();
+ GPRReg result;
+ // If the register has already been locked we need to take a copy.
+ // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger.
+ if (m_gprs.isLocked(gpr))
+ result = allocate();
+ else {
+ m_gprs.lock(gpr);
+ info.fillInteger(gpr);
+ result = gpr;
+ }
+ m_jit.zeroExtend32ToPtr(gpr, result);
+ returnFormat = DataFormatInteger;
+ return result;
+ }
+
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ returnFormat = DataFormatJSInteger;
+ return gpr;
+ }
+
+ case DataFormatInteger: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+
+ case DataFormatDouble:
+ case DataFormatJSDouble: {
+ if (node.hasConstant() && isInt32Constant(nodeIndex)) {
+ GPRReg gpr = allocate();
+ ASSERT(isInt32Constant(nodeIndex));
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+ }
+ case DataFormatCell:
+ case DataFormatBoolean:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean: {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ returnFormat = DataFormatInteger;
+ return allocate();
+ }
+
+ case DataFormatStorage:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+}
+
+GPRReg SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex, DataFormat& returnFormat)
+{
+ return fillSpeculateIntInternal<false>(nodeIndex, returnFormat);
+}
+
+GPRReg SpeculativeJIT::fillSpeculateIntStrict(NodeIndex nodeIndex)
+{
+ DataFormat mustBeDataFormatInteger;
+ GPRReg result = fillSpeculateIntInternal<true>(nodeIndex, mustBeDataFormatInteger);
+ ASSERT(mustBeDataFormatInteger == DataFormatInteger);
+ return result;
+}
+
+FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "SpecDouble@%d ", nodeIndex);
+#endif
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ if (info.registerFormat() == DataFormatNone) {
+ if (node.hasConstant()) {
+ GPRReg gpr = allocate();
+
+ if (isInt32Constant(nodeIndex)) {
+ FPRReg fpr = fprAllocate();
+ m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(static_cast<double>(valueOfInt32Constant(nodeIndex))))), gpr);
+ m_jit.movePtrToDouble(gpr, fpr);
+ unlock(gpr);
+
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ return fpr;
+ }
+ if (isNumberConstant(nodeIndex)) {
+ FPRReg fpr = fprAllocate();
+ m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(nodeIndex)))), gpr);
+ m_jit.movePtrToDouble(gpr, fpr);
+ unlock(gpr);
+
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ return fpr;
+ }
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return fprAllocate();
+ }
+
+ DataFormat spillFormat = info.spillFormat();
+ switch (spillFormat) {
+ case DataFormatDouble: {
+ FPRReg fpr = fprAllocate();
+ m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ return fpr;
+ }
+
+ case DataFormatInteger: {
+ GPRReg gpr = allocate();
+
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillInteger(gpr);
+ unlock(gpr);
+ break;
+ }
+
+ default:
+ GPRReg gpr = allocate();
+
+ ASSERT(spillFormat & DataFormatJS);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillJSValue(gpr, spillFormat);
+ unlock(gpr);
+ break;
+ }
+ }
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: // Should have filled, above.
+ case DataFormatBoolean: // This type never occurs.
+ case DataFormatStorage:
+ ASSERT_NOT_REACHED();
+
+ case DataFormatCell:
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return fprAllocate();
+
+ case DataFormatJSCell:
+ case DataFormatJS:
+ case DataFormatJSBoolean: {
+ GPRReg jsValueGpr = info.gpr();
+ m_gprs.lock(jsValueGpr);
+ FPRReg fpr = fprAllocate();
+ GPRReg tempGpr = allocate();
+
+ JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+
+ speculationCheck(BadType, JSValueRegs(jsValueGpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister));
+
+ // First, if we get here we have a double encoded as a JSValue
+ m_jit.move(jsValueGpr, tempGpr);
+ unboxDouble(tempGpr, fpr);
+ JITCompiler::Jump hasUnboxedDouble = m_jit.jump();
+
+ // Finally, handle integers.
+ isInteger.link(&m_jit);
+ m_jit.convertInt32ToDouble(jsValueGpr, fpr);
+ hasUnboxedDouble.link(&m_jit);
+
+ m_gprs.release(jsValueGpr);
+ m_gprs.unlock(jsValueGpr);
+ m_gprs.unlock(tempGpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ info.killSpilled();
+ return fpr;
+ }
+
+ case DataFormatJSInteger:
+ case DataFormatInteger: {
+ FPRReg fpr = fprAllocate();
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.convertInt32ToDouble(gpr, fpr);
+ m_gprs.unlock(gpr);
+ return fpr;
+ }
+
+ // Unbox the double
+ case DataFormatJSDouble: {
+ GPRReg gpr = info.gpr();
+ FPRReg fpr = fprAllocate();
+ if (m_gprs.isLocked(gpr)) {
+ // Make sure we don't trample gpr if it is in use.
+ GPRReg temp = allocate();
+ m_jit.move(gpr, temp);
+ unboxDouble(temp, fpr);
+ unlock(temp);
+ } else
+ unboxDouble(gpr, fpr);
+
+ m_gprs.release(gpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+
+ info.fillDouble(fpr);
+ return fpr;
+ }
+
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ m_fprs.lock(fpr);
+ return fpr;
+ }
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidFPRReg;
+}
+
+GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "SpecCell@%d ", nodeIndex);
+#endif
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+ if (info.spillFormat() == DataFormatInteger || info.spillFormat() == DataFormatDouble) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return allocate();
+ }
+
+ GPRReg gpr = allocate();
+
+ if (node.hasConstant()) {
+ JSValue jsValue = valueOfJSConstant(nodeIndex);
+ if (jsValue.isCell()) {
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr);
+ info.fillJSValue(gpr, DataFormatJSCell);
+ return gpr;
+ }
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return gpr;
+ }
+ ASSERT(info.spillFormat() & DataFormatJS);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+
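+        // A cell pointer has none of the tag bits set, so a non-zero result under the tag mask means
+        // the value is not a cell and the speculation must fail.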
+ info.fillJSValue(gpr, DataFormatJS);
+ if (info.spillFormat() != DataFormatJSCell)
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
+ info.fillJSValue(gpr, DataFormatJSCell);
+ return gpr;
+ }
+
+ case DataFormatCell:
+ case DataFormatJSCell: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ return gpr;
+ }
+
+ case DataFormatJS: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
+ info.fillJSValue(gpr, DataFormatJSCell);
+ return gpr;
+ }
+
+ case DataFormatJSInteger:
+ case DataFormatInteger:
+ case DataFormatJSDouble:
+ case DataFormatDouble:
+ case DataFormatJSBoolean:
+ case DataFormatBoolean: {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return allocate();
+ }
+
+ case DataFormatStorage:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+}
+
+GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "SpecBool@%d ", nodeIndex);
+#endif
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+ if (info.spillFormat() == DataFormatInteger || info.spillFormat() == DataFormatDouble) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return allocate();
+ }
+
+ GPRReg gpr = allocate();
+
+ if (node.hasConstant()) {
+ JSValue jsValue = valueOfJSConstant(nodeIndex);
+ if (jsValue.isBoolean()) {
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr);
+ info.fillJSValue(gpr, DataFormatJSBoolean);
+ return gpr;
+ }
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return gpr;
+ }
+ ASSERT(info.spillFormat() & DataFormatJS);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+
+ info.fillJSValue(gpr, DataFormatJS);
+ if (info.spillFormat() != DataFormatJSBoolean) {
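+            // XORing with ValueFalse maps the boxed false to 0 and the boxed true to 1; any other
+            // JSValue leaves bits outside the low bit set, which the branchTestPtr against ~1 catches.
+            // The second XOR restores the original boxed value.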
+ m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
+ m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ }
+ info.fillJSValue(gpr, DataFormatJSBoolean);
+ return gpr;
+ }
+
+ case DataFormatBoolean:
+ case DataFormatJSBoolean: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ return gpr;
+ }
+
+ case DataFormatJS: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
+ m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ info.fillJSValue(gpr, DataFormatJSBoolean);
+ return gpr;
+ }
+
+ case DataFormatJSInteger:
+ case DataFormatInteger:
+ case DataFormatJSDouble:
+ case DataFormatDouble:
+ case DataFormatJSCell:
+ case DataFormatCell: {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return allocate();
+ }
+
+ case DataFormatStorage:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+}
+
+JITCompiler::Jump SpeculativeJIT::convertToDouble(GPRReg value, FPRReg result, GPRReg tmp)
+{
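+    // Converts the JSValue in 'value' to a double in 'result', using 'tmp' as scratch. The returned
+    // jump is taken when the value is not a number, leaving the caller to handle that case.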
+ JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, value, GPRInfo::tagTypeNumberRegister);
+
+ JITCompiler::Jump notNumber = m_jit.branchTestPtr(MacroAssembler::Zero, value, GPRInfo::tagTypeNumberRegister);
+
+ m_jit.move(value, tmp);
+ unboxDouble(tmp, result);
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ isInteger.link(&m_jit);
+
+ m_jit.convertInt32ToDouble(value, result);
+
+ done.link(&m_jit);
+
+ return notNumber;
+}
+
+void SpeculativeJIT::compileObjectEquality(Node& node, const ClassInfo* classInfo, PredictionChecker predictionCheck)
+{
+ SpeculateCellOperand op1(this, node.child1());
+ SpeculateCellOperand op2(this, node.child2());
+ GPRTemporary result(this, op1);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ if (!predictionCheck(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(op1GPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
+ if (!predictionCheck(m_state.forNode(node.child2()).m_type))
+ speculationCheck(BadType, JSValueRegs(op2GPR), node.child2(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op2GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
+
+ MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR);
+ m_jit.move(Imm32(ValueTrue), resultGPR);
+ MacroAssembler::Jump done = m_jit.jump();
+ falseCase.link(&m_jit);
+ m_jit.move(Imm32(ValueFalse), resultGPR);
+ done.link(&m_jit);
+
+ jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean);
+}
+
+void SpeculativeJIT::compileIntegerCompare(Node& node, MacroAssembler::RelationalCondition condition)
+{
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op1, op2);
+
+ m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
+
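+    // compare32 leaves 0 or 1 in the result register; OR'ing in ValueFalse turns that into the
+    // boxed JSValue false or true.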
+ // If we add a DataFormatBool, we should use it here.
+ m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+}
+
+void SpeculativeJIT::compileDoubleCompare(Node& node, MacroAssembler::DoubleCondition condition)
+{
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ GPRTemporary result(this);
+
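+    // Start with the boxed true and flip its low bit to false only when the condition fails; the
+    // branch skips the flip on the true case.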
+ m_jit.move(TrustedImm32(ValueTrue), result.gpr());
+ MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr());
+ m_jit.xorPtr(Imm32(true), result.gpr());
+ trueCase.link(&m_jit);
+
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+}
+
+void SpeculativeJIT::compileValueAdd(Node& node)
+{
+ JSValueOperand op1(this, node.child1());
+ JSValueOperand op2(this, node.child2());
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+
+ flushRegisters();
+
+ GPRResult result(this);
+ if (isKnownNotNumber(node.child1()) || isKnownNotNumber(node.child2()))
+ callOperation(operationValueAddNotNumber, result.gpr(), op1GPR, op2GPR);
+ else
+ callOperation(operationValueAdd, result.gpr(), op1GPR, op2GPR);
+
+ jsValueResult(result.gpr(), m_compileIndex);
+}
+
+void SpeculativeJIT::compileObjectOrOtherLogicalNot(NodeIndex nodeIndex, const ClassInfo* classInfo, bool needSpeculationCheck)
+{
+ JSValueOperand value(this, nodeIndex);
+ GPRTemporary result(this);
+ GPRReg valueGPR = value.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ MacroAssembler::Jump notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
+ if (needSpeculationCheck)
+ speculationCheck(BadType, JSValueRegs(valueGPR), nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(valueGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
+ m_jit.move(TrustedImm32(static_cast<int32_t>(ValueFalse)), resultGPR);
+ MacroAssembler::Jump done = m_jit.jump();
+
+ notCell.link(&m_jit);
+
+ if (needSpeculationCheck) {
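+        // Masking off TagBitUndefined collapses undefined onto null, so a single compare against
+        // ValueNull accepts both; anything else fails the speculation.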
+ m_jit.move(valueGPR, resultGPR);
+ m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
+ speculationCheck(BadType, JSValueRegs(valueGPR), nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, resultGPR, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ }
+ m_jit.move(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR);
+
+ done.link(&m_jit);
+
+ jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean);
+}
+
+void SpeculativeJIT::compileLogicalNot(Node& node)
+{
+ if (isKnownBoolean(node.child1())) {
+ SpeculateBooleanOperand value(this, node.child1());
+ GPRTemporary result(this, value);
+
+ m_jit.move(value.gpr(), result.gpr());
+ m_jit.xorPtr(TrustedImm32(true), result.gpr());
+
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+ return;
+ }
+ if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) {
+ compileObjectOrOtherLogicalNot(node.child1(), &JSFinalObject::s_info, !isFinalObjectOrOtherPrediction(m_state.forNode(node.child1()).m_type));
+ return;
+ }
+ if (at(node.child1()).shouldSpeculateArrayOrOther()) {
+ compileObjectOrOtherLogicalNot(node.child1(), &JSArray::s_info, !isArrayOrOtherPrediction(m_state.forNode(node.child1()).m_type));
+ return;
+ }
+ if (at(node.child1()).shouldSpeculateInteger()) {
+ SpeculateIntegerOperand value(this, node.child1());
+ GPRTemporary result(this, value);
+ m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), result.gpr());
+ m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+ return;
+ }
+ if (at(node.child1()).shouldSpeculateNumber()) {
+ SpeculateDoubleOperand value(this, node.child1());
+ FPRTemporary scratch(this);
+ GPRTemporary result(this);
+ m_jit.move(TrustedImm32(ValueFalse), result.gpr());
+ MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr());
+ m_jit.xor32(Imm32(true), result.gpr());
+ nonZero.link(&m_jit);
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+ return;
+ }
+
+ PredictedType prediction = m_jit.getPrediction(node.child1());
+ if (isBooleanPrediction(prediction) || !prediction) {
+ JSValueOperand value(this, node.child1());
+ GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
+
+ m_jit.move(value.gpr(), result.gpr());
+ m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
+ speculationCheck(BadType, JSValueRegs(value.gpr()), node.child1(), m_jit.branchTestPtr(JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
+ m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.gpr());
+
+ // If we add a DataFormatBool, we should use it here.
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+ return;
+ }
+
+ JSValueOperand arg1(this, node.child1());
+ GPRTemporary result(this);
+
+ GPRReg arg1GPR = arg1.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+
+ m_jit.move(arg1GPR, resultGPR);
+ m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), resultGPR);
+ JITCompiler::Jump fastCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, TrustedImm32(static_cast<int32_t>(~1)));
+
+ silentSpillAllRegisters(resultGPR);
+ callOperation(dfgConvertJSValueToBoolean, resultGPR, arg1GPR);
+ silentFillAllRegisters(resultGPR);
+
+ fastCase.link(&m_jit);
+
+ m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR);
+ jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
+}
+
+void SpeculativeJIT::emitObjectOrOtherBranch(NodeIndex nodeIndex, BlockIndex taken, BlockIndex notTaken, const ClassInfo* classInfo, bool needSpeculationCheck)
+{
+ JSValueOperand value(this, nodeIndex);
+ GPRTemporary scratch(this);
+ GPRReg valueGPR = value.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ MacroAssembler::Jump notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
+ if (needSpeculationCheck)
+ speculationCheck(BadType, JSValueRegs(valueGPR), nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(valueGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
+ addBranch(m_jit.jump(), taken);
+
+ notCell.link(&m_jit);
+
+ if (needSpeculationCheck) {
+ m_jit.move(valueGPR, scratchGPR);
+ m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
+ speculationCheck(BadType, JSValueRegs(valueGPR), nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ }
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+
+ noResult(m_compileIndex);
+}
+
+void SpeculativeJIT::emitBranch(Node& node)
+{
+ JSValueOperand value(this, node.child1());
+ GPRReg valueGPR = value.gpr();
+
+ BlockIndex taken = node.takenBlockIndex();
+ BlockIndex notTaken = node.notTakenBlockIndex();
+
+ if (isKnownBoolean(node.child1())) {
+ MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
+
+ if (taken == (m_block + 1)) {
+ condition = MacroAssembler::Zero;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ addBranch(m_jit.branchTest32(condition, valueGPR, TrustedImm32(true)), taken);
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+
+ noResult(m_compileIndex);
+ } else if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) {
+ emitObjectOrOtherBranch(node.child1(), taken, notTaken, &JSFinalObject::s_info, !isFinalObjectOrOtherPrediction(m_state.forNode(node.child1()).m_type));
+ } else if (at(node.child1()).shouldSpeculateArrayOrOther()) {
+ emitObjectOrOtherBranch(node.child1(), taken, notTaken, &JSArray::s_info, !isArrayOrOtherPrediction(m_state.forNode(node.child1()).m_type));
+ } else if (at(node.child1()).shouldSpeculateNumber()) {
+ if (at(node.child1()).shouldSpeculateInteger()) {
+ bool invert = false;
+
+ if (taken == (m_block + 1)) {
+ invert = true;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ SpeculateIntegerOperand value(this, node.child1());
+ addBranch(m_jit.branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr()), taken);
+ } else {
+ SpeculateDoubleOperand value(this, node.child1());
+ FPRTemporary scratch(this);
+ addBranch(m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr()), taken);
+ }
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+
+ noResult(m_compileIndex);
+ } else {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ bool predictBoolean = isBooleanPrediction(m_jit.getPrediction(node.child1()));
+
+ if (predictBoolean) {
+ addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(false)))), notTaken);
+ addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(true)))), taken);
+
+ speculationCheck(BadType, JSValueRegs(valueGPR), node.child1(), m_jit.jump());
+ value.use();
+ } else {
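+            // Fast path checks: the boxed int32 zero is falsy, any other boxed int32 is truthy;
+            // doubles, cells and the remaining immediates fall through to the explicit boolean
+            // compares and then to the slow conversion call.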
+ addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsNumber(0)))), notTaken);
+ addBranch(m_jit.branchPtr(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister), taken);
+
+ if (!predictBoolean) {
+ addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(false)))), notTaken);
+ addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(true)))), taken);
+ }
+
+ value.use();
+
+ silentSpillAllRegisters(resultGPR);
+ callOperation(dfgConvertJSValueToBoolean, resultGPR, valueGPR);
+ silentFillAllRegisters(resultGPR);
+
+ addBranch(m_jit.branchTest32(MacroAssembler::NonZero, resultGPR), taken);
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+ }
+
+ noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ }
+}
+
+void SpeculativeJIT::compile(Node& node)
+{
+ NodeType op = node.op;
+
+ switch (op) {
+ case JSConstant:
+ initConstantInfo(m_compileIndex);
+ break;
+
+ case WeakJSConstant:
+ m_jit.addWeakReference(node.weakConstant());
+ initConstantInfo(m_compileIndex);
+ break;
+
+ case GetLocal: {
+ PredictedType prediction = node.variableAccessData()->prediction();
+ AbstractValue& value = block()->valuesAtHead.operand(node.local());
+
+ // If we have no prediction for this local, then don't attempt to compile.
+ if (prediction == PredictNone || value.isClear()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ if (node.variableAccessData()->shouldUseDoubleFormat()) {
+ FPRTemporary result(this);
+ m_jit.loadDouble(JITCompiler::addressFor(node.local()), result.fpr());
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble);
+ m_generationInfo[virtualRegister].initDouble(m_compileIndex, node.refCount(), result.fpr());
+ break;
+ }
+
+ GPRTemporary result(this);
+ if (isInt32Prediction(value.m_type)) {
+ m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr());
+
+ // Like integerResult, but don't useChildren - our children are phi nodes,
+ // and don't represent values within this dataflow with virtual registers.
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger);
+ m_generationInfo[virtualRegister].initInteger(m_compileIndex, node.refCount(), result.gpr());
+ break;
+ }
+
+ m_jit.loadPtr(JITCompiler::addressFor(node.local()), result.gpr());
+
+ // Like jsValueResult, but don't useChildren - our children are phi nodes,
+ // and don't represent values within this dataflow with virtual registers.
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
+
+ DataFormat format;
+ if (isCellPrediction(value.m_type))
+ format = DataFormatJSCell;
+ else if (isBooleanPrediction(value.m_type))
+ format = DataFormatJSBoolean;
+ else
+ format = DataFormatJS;
+
+ m_generationInfo[virtualRegister].initJSValue(m_compileIndex, node.refCount(), result.gpr(), format);
+ break;
+ }
+
+ case SetLocal: {
+ // SetLocal doubles as a hint as to where a node will be stored and
+ // as a speculation point. So before we speculate make sure that we
+ // know where the child of this node needs to go in the virtual
+ // register file.
+ compileMovHint(node);
+
+ // As far as OSR is concerned, we're on the bytecode index corresponding
+ // to the *next* instruction, since we've already "executed" the
+ // SetLocal and whatever other DFG Nodes are associated with the same
+ // bytecode index as the SetLocal.
+ ASSERT(m_codeOriginForOSR == node.codeOrigin);
+ Node& nextNode = at(m_compileIndex + 1);
+
+ // Oddly, it's possible for the bytecode index for the next node to be
+ // equal to ours. This will happen for op_post_inc. And, even more oddly,
+ // this is just fine. Ordinarily, this wouldn't be fine, since if the
+ // next node failed OSR then we'd be OSR-ing with this SetLocal's local
+ // variable already set even though from the standpoint of the old JIT,
+ // this SetLocal should not have executed. But for op_post_inc, it's just
+ // fine, because this SetLocal's local (i.e. the LHS in a x = y++
+ // statement) would be dead anyway - so the fact that DFG would have
+ // already made the assignment, and baked it into the register file during
+ // OSR exit, would not be visible to the old JIT in any way.
+ m_codeOriginForOSR = nextNode.codeOrigin;
+
+ if (node.variableAccessData()->shouldUseDoubleFormat()) {
+ SpeculateDoubleOperand value(this, node.child1());
+ m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local()));
+ noResult(m_compileIndex);
+ // Indicate that it's no longer necessary to retrieve the value of
+ // this bytecode variable from registers or other locations in the register file,
+ // but that it is stored as a double.
+ valueSourceReferenceForOperand(node.local()) = ValueSource(DoubleInRegisterFile);
+ } else {
+ PredictedType predictedType = node.variableAccessData()->prediction();
+ if (isInt32Prediction(predictedType)) {
+ SpeculateIntegerOperand value(this, node.child1());
+ m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local()));
+ noResult(m_compileIndex);
+ } else if (isArrayPrediction(predictedType)) {
+ SpeculateCellOperand cell(this, node.child1());
+ GPRReg cellGPR = cell.gpr();
+ if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(cellGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(cellGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ m_jit.storePtr(cellGPR, JITCompiler::addressFor(node.local()));
+ noResult(m_compileIndex);
+ } else if (isByteArrayPrediction(predictedType)) {
+ SpeculateCellOperand cell(this, node.child1());
+ GPRReg cellGPR = cell.gpr();
+ if (!isByteArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(cellGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(cellGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
+ m_jit.storePtr(cellGPR, JITCompiler::addressFor(node.local()));
+ noResult(m_compileIndex);
+ } else if (isBooleanPrediction(predictedType)) {
+ SpeculateBooleanOperand boolean(this, node.child1());
+ m_jit.storePtr(boolean.gpr(), JITCompiler::addressFor(node.local()));
+ noResult(m_compileIndex);
+ } else {
+ JSValueOperand value(this, node.child1());
+ m_jit.storePtr(value.gpr(), JITCompiler::addressFor(node.local()));
+ noResult(m_compileIndex);
+ }
+
+ // Indicate that it's no longer necessary to retrieve the value of
+ // this bytecode variable from registers or other locations in the register file.
+ valueSourceReferenceForOperand(node.local()) = ValueSource::forPrediction(predictedType);
+ }
+ break;
+ }
+
+ case SetArgument:
+ // This is a no-op; it just marks the fact that the argument is being used.
+ // But it may be profitable to use this as a hook to run speculation checks
+ // on arguments, thereby allowing us to trivially eliminate such checks if
+ // the argument is not used.
+ break;
+
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ if (isInt32Constant(node.child1())) {
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op2);
+
+ bitOp(op, valueOfInt32Constant(node.child1()), op2.gpr(), result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ } else if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+
+ bitOp(op, valueOfInt32Constant(node.child2()), op1.gpr(), result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ } else {
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op1, op2);
+
+ GPRReg reg1 = op1.gpr();
+ GPRReg reg2 = op2.gpr();
+ bitOp(op, reg1, reg2, result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ }
+ break;
+
+ case BitRShift:
+ case BitLShift:
+ case BitURShift:
+ if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+
+ shiftOp(op, op1.gpr(), valueOfInt32Constant(node.child2()) & 0x1f, result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ } else {
+            // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op1);
+
+ GPRReg reg1 = op1.gpr();
+ GPRReg reg2 = op2.gpr();
+ shiftOp(op, reg1, reg2, result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ }
+ break;
+
+ case UInt32ToNumber: {
+ compileUInt32ToNumber(node);
+ break;
+ }
+
+ case ValueToInt32: {
+ compileValueToInt32(node);
+ break;
+ }
+
+ case ValueToNumber: {
+ if (at(node.child1()).shouldNotSpeculateInteger()) {
+ SpeculateDoubleOperand op1(this, node.child1());
+ FPRTemporary result(this, op1);
+ m_jit.moveDouble(op1.fpr(), result.fpr());
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ m_jit.move(op1.gpr(), result.gpr());
+ integerResult(result.gpr(), m_compileIndex, op1.format());
+ break;
+ }
+
+ case ValueToDouble: {
+ SpeculateDoubleOperand op1(this, node.child1());
+ FPRTemporary result(this, op1);
+ m_jit.moveDouble(op1.fpr(), result.fpr());
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ValueAdd:
+ case ArithAdd: {
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
+ if (isInt32Constant(node.child1())) {
+ int32_t imm1 = valueOfInt32Constant(node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op2.gpr(), result.gpr());
+ m_jit.add32(Imm32(imm1), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ int32_t imm2 = valueOfInt32Constant(node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.add32(Imm32(imm2), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op1, op2);
+
+ GPRReg gpr1 = op1.gpr();
+ GPRReg gpr2 = op2.gpr();
+ GPRReg gprResult = result.gpr();
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ if (gpr1 == gprResult)
+ m_jit.add32(gpr2, gprResult);
+ else {
+ m_jit.move(gpr2, gprResult);
+ m_jit.add32(gpr1, gprResult);
+ }
+ } else {
+ MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
+
+ if (gpr1 == gprResult)
+ speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
+ else if (gpr2 == gprResult)
+ speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
+ else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, check);
+ }
+
+ integerResult(gprResult, m_compileIndex);
+ break;
+ }
+
+ if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2()))) {
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1, op2);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+ m_jit.addDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ ASSERT(op == ValueAdd);
+ compileValueAdd(node);
+ break;
+ }
+
+ case ArithSub: {
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
+ if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ int32_t imm2 = valueOfInt32Constant(node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.sub32(Imm32(imm2), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.sub32(op2.gpr(), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+ m_jit.subDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithMul: {
+ compileArithMul(node);
+ break;
+ }
+
+ case ArithDiv: {
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary eax(this, X86Registers::eax);
+ GPRTemporary edx(this, X86Registers::edx);
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
+
+ // If the user cares about negative zero, then speculate that we're not about
+ // to produce negative zero.
+ if (!nodeCanIgnoreNegativeZero(node.arithNodeFlags())) {
+ MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
+ speculationCheck(NegativeZero, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
+ numeratorNonZero.link(&m_jit);
+ }
+
+ GPRReg temp2 = InvalidGPRReg;
+ if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
+ temp2 = allocate();
+ m_jit.move(op2GPR, temp2);
+ op2GPR = temp2;
+ }
+
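+            // x86 idiv takes its dividend in edx:eax: cdq sign-extends eax into edx, and idivl then
+            // leaves the quotient in eax and the remainder in edx.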
+ m_jit.move(op1GPR, eax.gpr());
+ m_jit.assembler().cdq();
+ m_jit.assembler().idivl_r(op2GPR);
+
+ if (temp2 != InvalidGPRReg)
+ unlock(temp2);
+
+ // Check that there was no remainder. If there had been, then we'd be obligated to
+ // produce a double result instead.
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
+
+ integerResult(eax.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+ m_jit.divDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithMod: {
+ compileArithMod(node);
+ break;
+ }
+
+ case ArithAbs: {
+ if (at(node.child1()).shouldSpeculateInteger() && node.canSpeculateInteger()) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ GPRTemporary scratch(this);
+
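+            // Branchless absolute value: an arithmetic shift right by 31 yields 0 for non-negative
+            // inputs and -1 for negative ones; adding that mask and then XORing with it negates
+            // negative inputs. The only value whose absolute value overflows is INT_MIN, which the
+            // check below catches as 1 << 31.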
+ m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
+ m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr());
+ m_jit.add32(scratch.gpr(), result.gpr());
+ m_jit.xor32(scratch.gpr(), result.gpr());
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ FPRTemporary result(this);
+
+ m_jit.absDouble(op1.fpr(), result.fpr());
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithMin:
+ case ArithMax: {
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
+ SpeculateStrictInt32Operand op1(this, node.child1());
+ SpeculateStrictInt32Operand op2(this, node.child2());
+ GPRTemporary result(this, op1);
+
+ MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1.gpr(), op2.gpr());
+ m_jit.move(op2.gpr(), result.gpr());
+ if (op1.gpr() != result.gpr()) {
+ MacroAssembler::Jump done = m_jit.jump();
+ op1Less.link(&m_jit);
+ m_jit.move(op1.gpr(), result.gpr());
+ done.link(&m_jit);
+ } else
+ op1Less.link(&m_jit);
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1);
+
+ MacroAssembler::JumpList done;
+
+ MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1.fpr(), op2.fpr());
+
+        // op2 is either the lesser one, or one of them is NaN
+ MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1.fpr(), op2.fpr());
+
+ // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
+ // op1 + op2 and putting it into result.
+ m_jit.addDouble(op1.fpr(), op2.fpr(), result.fpr());
+ done.append(m_jit.jump());
+
+ op2Less.link(&m_jit);
+ m_jit.moveDouble(op2.fpr(), result.fpr());
+
+ if (op1.fpr() != result.fpr()) {
+ done.append(m_jit.jump());
+
+ op1Less.link(&m_jit);
+ m_jit.moveDouble(op1.fpr(), result.fpr());
+ } else
+ op1Less.link(&m_jit);
+
+ done.link(&m_jit);
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithSqrt: {
+ SpeculateDoubleOperand op1(this, node.child1());
+ FPRTemporary result(this, op1);
+
+ m_jit.sqrtDouble(op1.fpr(), result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case LogicalNot:
+ compileLogicalNot(node);
+ break;
+
+ case CompareLess:
+ if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess))
+ return;
+ break;
+
+ case CompareLessEq:
+ if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq))
+ return;
+ break;
+
+ case CompareGreater:
+ if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater))
+ return;
+ break;
+
+ case CompareGreaterEq:
+ if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq))
+ return;
+ break;
+
+ case CompareEq:
+ if (isNullConstant(node.child1())) {
+ if (nonSpeculativeCompareNull(node, node.child2()))
+ return;
+ break;
+ }
+ if (isNullConstant(node.child2())) {
+ if (nonSpeculativeCompareNull(node, node.child1()))
+ return;
+ break;
+ }
+ if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq))
+ return;
+ break;
+
+ case CompareStrictEq:
+ if (compileStrictEq(node))
+ return;
+ break;
+
+ case StringCharCodeAt: {
+ compileGetCharCodeAt(node);
+ break;
+ }
+
+ case StringCharAt: {
+        // Relies on the StringCharAt node having the same basic layout as GetByVal
+ compileGetByValOnString(node);
+ break;
+ }
+
+ case GetByVal: {
+ if (!node.prediction() || !at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ if (!at(node.child2()).shouldSpeculateInteger() || !isActionableArrayPrediction(at(node.child1()).prediction())) {
+ JSValueOperand base(this, node.child1());
+ JSValueOperand property(this, node.child2());
+ GPRReg baseGPR = base.gpr();
+ GPRReg propertyGPR = property.gpr();
+
+ flushRegisters();
+ GPRResult result(this);
+ callOperation(operationGetByVal, result.gpr(), baseGPR, propertyGPR);
+
+ jsValueResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ if (at(node.child1()).prediction() == PredictString) {
+ compileGetByValOnString(node);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateByteArray()) {
+ compileGetByValOnByteArray(node);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt8Array()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt16Array()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), node, sizeof(int16_t), isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt32Array()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), node, sizeof(int32_t), isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint8Array()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), node, sizeof(uint8_t), isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint16Array()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint32Array()) {
+ compileGetByValOnIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), node, sizeof(uint32_t), isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateFloat32Array()) {
+ compileGetByValOnFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), node, sizeof(float), isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateFloat64Array()) {
+ compileGetByValOnFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), node, sizeof(double), isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ ASSERT(at(node.child1()).shouldSpeculateArray());
+
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+ StorageOperand storage(this, node.child3());
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset())));
+
+ // FIXME: In cases where there are subsequent by_val accesses to the same base it might help to cache
+ // the storage pointer - especially if there happens to be another register free right now. If we do so,
+ // then we'll need to allocate a new temporary for result.
+ GPRTemporary result(this);
+ m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr());
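+        // An empty (zero) slot marks a hole in the array storage; reading one forces an OSR exit
+        // rather than materializing undefined here.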
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::Zero, result.gpr()));
+
+ jsValueResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case PutByVal: {
+ if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ if (!at(node.child2()).shouldSpeculateInteger() || !isActionableMutableArrayPrediction(at(node.child1()).prediction())) {
+ JSValueOperand arg1(this, node.child1());
+ JSValueOperand arg2(this, node.child2());
+ JSValueOperand arg3(this, node.child3());
+ GPRReg arg1GPR = arg1.gpr();
+ GPRReg arg2GPR = arg2.gpr();
+ GPRReg arg3GPR = arg3.gpr();
+ flushRegisters();
+
+ callOperation(m_jit.strictModeFor(node.codeOrigin) ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR);
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+ if (at(node.child1()).shouldSpeculateByteArray()) {
+ compilePutByValForByteArray(base.gpr(), property.gpr(), node);
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateFloat32Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateFloat64Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ ASSERT(at(node.child1()).shouldSpeculateArray());
+
+ JSValueOperand value(this, node.child3());
+ GPRTemporary scratch(this);
+
+ // Map base, property & value into registers, allocate a scratch register.
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg valueReg = value.gpr();
+ GPRReg scratchReg = scratch.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ writeBarrier(baseReg, value.gpr(), node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+
+ // Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
+ // If we have predicted the base to be type array, we can skip the check.
+ if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+
+ base.use();
+ property.use();
+ value.use();
+
+ MacroAssembler::Jump withinArrayBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset()));
+
+ // Code to handle put beyond array bounds.
+ silentSpillAllRegisters(scratchReg);
+ callOperation(operationPutByValBeyondArrayBounds, baseReg, propertyReg, valueReg);
+ silentFillAllRegisters(scratchReg);
+ JITCompiler::Jump wasBeyondArrayBounds = m_jit.jump();
+
+ withinArrayBounds.link(&m_jit);
+
+ // Get the array storage.
+ GPRReg storageReg = scratchReg;
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg);
+
+ // Check if we're writing to a hole; if so increment m_numValuesInVector.
+ MacroAssembler::Jump notHoleValue = m_jit.branchTestPtr(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+
+        // If we're writing to a hole we might be growing the array, so bump m_length to property + 1
+        // if the index is at or past the current length.
+ MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ m_jit.add32(TrustedImm32(1), propertyReg);
+ m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ m_jit.sub32(TrustedImm32(1), propertyReg);
+
+ lengthDoesNotNeedUpdate.link(&m_jit);
+ notHoleValue.link(&m_jit);
+
+ // Store the value to the array.
+ m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+
+ wasBeyondArrayBounds.link(&m_jit);
+
+ noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case PutByValAlias: {
+ if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ ASSERT(isActionableMutableArrayPrediction(at(node.child1()).prediction()));
+ ASSERT(at(node.child2()).shouldSpeculateInteger());
+
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+ if (at(node.child1()).shouldSpeculateByteArray()) {
+ compilePutByValForByteArray(base.gpr(), property.gpr(), node);
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), NoTypedArraySpecCheck, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), NoTypedArraySpecCheck, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateInt32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), NoTypedArraySpecCheck, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), NoTypedArraySpecCheck, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateUint32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), NoTypedArraySpecCheck, UnsignedTypedArray);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateFloat32Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), NoTypedArraySpecCheck);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ if (at(node.child1()).shouldSpeculateFloat64Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), NoTypedArraySpecCheck);
+ if (!m_compileOkay)
+ return;
+ break;
+ }
+
+ ASSERT(at(node.child1()).shouldSpeculateArray());
+
+ JSValueOperand value(this, node.child3());
+ GPRTemporary scratch(this);
+
+ GPRReg baseReg = base.gpr();
+ GPRReg scratchReg = scratch.gpr();
+
+ writeBarrier(base.gpr(), value.gpr(), node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+
+ // Get the array storage.
+ GPRReg storageReg = scratchReg;
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg);
+
+ // Store the value to the array.
+ GPRReg propertyReg = property.gpr();
+ GPRReg valueReg = value.gpr();
+ m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case ArrayPush: {
+ SpeculateCellOperand base(this, node.child1());
+ JSValueOperand value(this, node.child2());
+ GPRTemporary storage(this);
+ GPRTemporary storageLength(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg valueGPR = value.gpr();
+ GPRReg storageGPR = storage.gpr();
+ GPRReg storageLengthGPR = storageLength.gpr();
+
+ writeBarrier(baseGPR, valueGPR, node.child2(), WriteBarrierForPropertyAccess, storageGPR, storageLengthGPR);
+
+ if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+
+ m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR);
+ m_jit.load32(MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), storageLengthGPR);
+
+ // Refuse to handle bizarre lengths.
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe)));
+
+ MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset()));
+
+ m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+
+ m_jit.add32(Imm32(1), storageLengthGPR);
+ m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ m_jit.add32(Imm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
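+        // Re-box the new length as an immediate integer JSValue so it can be returned as the result
+        // of the push.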
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
+
+ MacroAssembler::Jump done = m_jit.jump();
+
+ slowPath.link(&m_jit);
+
+ silentSpillAllRegisters(storageLengthGPR);
+ callOperation(operationArrayPush, storageLengthGPR, valueGPR, baseGPR);
+ silentFillAllRegisters(storageLengthGPR);
+
+ done.link(&m_jit);
+
+ jsValueResult(storageLengthGPR, m_compileIndex);
+ break;
+ }
+
+ case ArrayPop: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary value(this);
+ GPRTemporary storage(this);
+ GPRTemporary storageLength(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg valueGPR = value.gpr();
+ GPRReg storageGPR = storage.gpr();
+ GPRReg storageLengthGPR = storageLength.gpr();
+
+ if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+
+ m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR);
+ m_jit.load32(MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), storageLengthGPR);
+
+ MacroAssembler::Jump emptyArrayCase = m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
+
+ m_jit.sub32(Imm32(1), storageLengthGPR);
+
+ MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset()));
+
+ m_jit.loadPtr(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR);
+
+ m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+
+ MacroAssembler::Jump holeCase = m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR);
+
+ m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ m_jit.sub32(MacroAssembler::Imm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+
+ MacroAssembler::JumpList done;
+
+ done.append(m_jit.jump());
+
+ holeCase.link(&m_jit);
+ emptyArrayCase.link(&m_jit);
+ m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR);
+ done.append(m_jit.jump());
+
+ slowCase.link(&m_jit);
+
+ silentSpillAllRegisters(valueGPR);
+ callOperation(operationArrayPop, valueGPR, baseGPR);
+ silentFillAllRegisters(valueGPR);
+
+ done.link(&m_jit);
+
+ jsValueResult(valueGPR, m_compileIndex);
+ break;
+ }
+
+ case DFG::Jump: {
+ BlockIndex taken = node.takenBlockIndex();
+ if (taken != (m_block + 1))
+ addBranch(m_jit.jump(), taken);
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case Branch:
+ if (isStrictInt32(node.child1()) || at(node.child1()).shouldSpeculateInteger()) {
+ SpeculateIntegerOperand op(this, node.child1());
+
+ BlockIndex taken = node.takenBlockIndex();
+ BlockIndex notTaken = node.notTakenBlockIndex();
+
+ MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
+
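+ // If the taken block is the immediate fall-through, invert the condition and swap the
+ // targets so only one branch needs to be emitted.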
+ if (taken == (m_block + 1)) {
+ condition = MacroAssembler::Zero;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ addBranch(m_jit.branchTest32(condition, op.gpr()), taken);
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+
+ noResult(m_compileIndex);
+ break;
+ }
+ emitBranch(node);
+ break;
+
+ case Return: {
+ ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT1);
+ ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR);
+ ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister);
+
+#if DFG_ENABLE(SUCCESS_STATS)
+ static SamplingCounter counter("SpeculativeJIT");
+ m_jit.emitCount(counter);
+#endif
+
+ // Return the result in returnValueGPR.
+ JSValueOperand op1(this, node.child1());
+ m_jit.move(op1.gpr(), GPRInfo::returnValueGPR);
+
+ // Grab the return address.
+ m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, GPRInfo::regT1);
+ // Restore our caller's "r".
+ m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, GPRInfo::callFrameRegister);
+ // Return.
+ m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT1);
+ m_jit.ret();
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case Throw:
+ case ThrowReferenceError: {
+ // We expect that throw statements are rare and are intended to exit the code block
+ // anyway, so we just OSR back to the old JIT for now.
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ case ToPrimitive: {
+ if (at(node.child1()).shouldSpeculateInteger()) {
+ // Speculating integer is very profitable: the check is cheap, we don't have to do
+ // any real conversion work, and we emit a lot less code.
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+
+ m_jit.move(op1.gpr(), result.gpr());
+ if (op1.format() == DataFormatInteger)
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, result.gpr());
+
+ jsValueResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ // FIXME: Add string speculation here.
+
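+ // If the operand is already known to be numeric or boolean, ToPrimitive is the identity;
+ // otherwise check for non-cells and strings before calling out to operationToPrimitive.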
+ bool wasPrimitive = isKnownNumeric(node.child1()) || isKnownBoolean(node.child1());
+
+ JSValueOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ op1.use();
+
+ if (wasPrimitive)
+ m_jit.move(op1GPR, resultGPR);
+ else {
+ MacroAssembler::JumpList alreadyPrimitive;
+
+ alreadyPrimitive.append(m_jit.branchTestPtr(MacroAssembler::NonZero, op1GPR, GPRInfo::tagMaskRegister));
+ alreadyPrimitive.append(m_jit.branchPtr(MacroAssembler::Equal, MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info)));
+
+ silentSpillAllRegisters(resultGPR);
+ callOperation(operationToPrimitive, resultGPR, op1GPR);
+ silentFillAllRegisters(resultGPR);
+
+ MacroAssembler::Jump done = m_jit.jump();
+
+ alreadyPrimitive.link(&m_jit);
+ m_jit.move(op1GPR, resultGPR);
+
+ done.link(&m_jit);
+ }
+
+ jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case StrCat:
+ case NewArray: {
+ // We really don't want to grow the register file just to do a StrCat or NewArray.
+ // Say we have 50 functions on the stack that all have a StrCat in them that has
+ // upwards of 10 operands. In the DFG this would mean that each one gets
+ // some random virtual register, and then to do the StrCat we'd need a second
+ // span of 10 operands just to have somewhere to copy the 10 operands to, where
+ // they'd be contiguous and we could easily tell the C code how to find them.
+ // Ugly! So instead we use the scratchBuffer infrastructure in JSGlobalData. That
+ // way, those 50 functions will share the same scratchBuffer for offloading their
+ // StrCat operands. It's about as good as we can do, unless we start doing
+ // virtual register coalescing to ensure that operands to StrCat get spilled
+ // in exactly the place where StrCat wants them, or else have the StrCat
+ // refer to those operands' SetLocal instructions to force them to spill in
+ // the right place. Basically, any way you cut it, the current approach
+ // probably has the best balance of performance and sensibility in the sense
+ // that it does not increase the complexity of the DFG JIT just to make StrCat
+ // fast and pretty.
+
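+ // Spill each operand into the shared scratch buffer, flush all registers, and hand the
+ // buffer plus the operand count to the C++ operation.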
+ EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * node.numChildren()));
+
+ for (unsigned operandIdx = 0; operandIdx < node.numChildren(); ++operandIdx) {
+ JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node.firstChild() + operandIdx]);
+ GPRReg opGPR = operand.gpr();
+ operand.use();
+
+ m_jit.storePtr(opGPR, buffer + operandIdx);
+ }
+
+ flushRegisters();
+
+ GPRResult result(this);
+
+ callOperation(op == StrCat ? operationStrCat : operationNewArray, result.gpr(), buffer, node.numChildren());
+
+ cellResult(result.gpr(), m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case NewArrayBuffer: {
+ flushRegisters();
+ GPRResult result(this);
+
+ callOperation(operationNewArrayBuffer, result.gpr(), node.startConstant(), node.numConstants());
+
+ cellResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case NewRegexp: {
+ flushRegisters();
+ GPRResult result(this);
+
+ callOperation(operationNewRegexp, result.gpr(), m_jit.codeBlock()->regexp(node.regexpIndex()));
+
+ cellResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case ConvertThis: {
+ if (isObjectPrediction(m_state.forNode(node.child1()).m_type)) {
+ SpeculateCellOperand thisValue(this, node.child1());
+ GPRTemporary result(this, thisValue);
+ m_jit.move(thisValue.gpr(), result.gpr());
+ cellResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
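+ // An 'other' (null or undefined) this value converts to the global this object; if the
+ // abstract state hasn't already proven the prediction, check that the value really is
+ // null or undefined.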
+ if (isOtherPrediction(at(node.child1()).prediction())) {
+ JSValueOperand thisValue(this, node.child1());
+ GPRTemporary scratch(this, thisValue);
+ GPRReg thisValueGPR = thisValue.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ if (!isOtherPrediction(m_state.forNode(node.child1()).m_type)) {
+ m_jit.move(thisValueGPR, scratchGPR);
+ m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
+ speculationCheck(BadType, JSValueRegs(thisValueGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ }
+
+ m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalThisObjectFor(node.codeOrigin)), scratchGPR);
+ cellResult(scratchGPR, m_compileIndex);
+ break;
+ }
+
+ if (isObjectPrediction(at(node.child1()).prediction())) {
+ SpeculateCellOperand thisValue(this, node.child1());
+ GPRTemporary result(this, thisValue);
+ GPRReg thisValueGPR = thisValue.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ if (!isObjectPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(thisValueGPR), node.child1(), m_jit.branchPtr(JITCompiler::Equal, JITCompiler::Address(thisValueGPR, JSCell::classInfoOffset()), JITCompiler::TrustedImmPtr(&JSString::s_info)));
+
+ m_jit.move(thisValueGPR, resultGPR);
+
+ cellResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ JSValueOperand thisValue(this, node.child1());
+ GPRReg thisValueGPR = thisValue.gpr();
+
+ flushRegisters();
+
+ GPRResult result(this);
+ callOperation(operationConvertThis, result.gpr(), thisValueGPR);
+
+ cellResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case CreateThis: {
+ // Note that there is not much profit in speculating here. The only things we
+ // speculate on are (1) that the prototype is a cell, since that eliminates cell
+ // checks later if the proto is reused, and (2) that it is a FinalObject when we
+ // have a FinalObject prediction, because we want to get recompiled if that turns
+ // out to be wrong (otherwise we'd keep taking the slow path).
+
+ SpeculateCellOperand proto(this, node.child1());
+ GPRTemporary result(this);
+ GPRTemporary scratch(this);
+
+ GPRReg protoGPR = proto.gpr();
+ GPRReg resultGPR = result.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ proto.use();
+
+ MacroAssembler::JumpList slowPath;
+
+ // Need to verify that the prototype is an object. If we have reason to believe
+ // that it's a FinalObject then we speculate on that directly. Otherwise we
+ // do the slow (structure-based) check.
+ if (at(node.child1()).shouldSpeculateFinalObject()) {
+ if (!isFinalObjectPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(protoGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(protoGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSFinalObject::s_info)));
+ } else {
+ m_jit.loadPtr(MacroAssembler::Address(protoGPR, JSCell::structureOffset()), scratchGPR);
+ slowPath.append(m_jit.branch8(MacroAssembler::Below, MacroAssembler::Address(scratchGPR, Structure::typeInfoTypeOffset()), MacroAssembler::TrustedImm32(ObjectType)));
+ }
+
+ // Load the inheritorID (the Structure that objects who have protoGPR as the prototype
+ // use to refer to that prototype). If the inheritorID is not set, go to slow path.
+ m_jit.loadPtr(MacroAssembler::Address(protoGPR, JSObject::offsetOfInheritorID()), scratchGPR);
+ slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, scratchGPR));
+
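+ // scratchGPR now holds the prototype's inheritorID Structure; use it to allocate the
+ // new object inline.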
+ emitAllocateJSFinalObject(scratchGPR, resultGPR, scratchGPR, slowPath);
+
+ MacroAssembler::Jump done = m_jit.jump();
+
+ slowPath.link(&m_jit);
+
+ silentSpillAllRegisters(resultGPR);
+ if (node.codeOrigin.inlineCallFrame)
+ callOperation(operationCreateThisInlined, resultGPR, protoGPR, node.codeOrigin.inlineCallFrame->callee.get());
+ else
+ callOperation(operationCreateThis, resultGPR, protoGPR);
+ silentFillAllRegisters(resultGPR);
+
+ done.link(&m_jit);
+
+ cellResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case NewObject: {
+ GPRTemporary result(this);
+ GPRTemporary scratch(this);
+
+ GPRReg resultGPR = result.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ MacroAssembler::JumpList slowPath;
+
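+ // Try to allocate the empty object inline using the global object's empty object
+ // structure, falling back to operationNewObject if the inline allocation fails.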
+ emitAllocateJSFinalObject(MacroAssembler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)->emptyObjectStructure()), resultGPR, scratchGPR, slowPath);
+
+ MacroAssembler::Jump done = m_jit.jump();
+
+ slowPath.link(&m_jit);
+
+ silentSpillAllRegisters(resultGPR);
+ callOperation(operationNewObject, resultGPR);
+ silentFillAllRegisters(resultGPR);
+
+ done.link(&m_jit);
+
+ cellResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case GetCallee: {
+ GPRTemporary result(this);
+ m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::Callee)), result.gpr());
+ cellResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case GetScopeChain: {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
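+ // Start from the current scope chain and follow 'next' links 'skip' times, skipping the
+ // top-level activation link if the activation has not been created yet.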
+ m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::ScopeChain)), resultGPR);
+ bool checkTopLevel = m_jit.codeBlock()->codeType() == FunctionCode && m_jit.codeBlock()->needsFullScopeChain();
+ int skip = node.scopeChainDepth();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ JITCompiler::Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = m_jit.branchTestPtr(JITCompiler::Zero, JITCompiler::addressFor(static_cast<VirtualRegister>(m_jit.codeBlock()->activationRegister())));
+ m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, next)), resultGPR);
+ activationNotCreated.link(&m_jit);
+ }
+ while (skip--)
+ m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, next)), resultGPR);
+
+ m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, object)), resultGPR);
+
+ cellResult(resultGPR, m_compileIndex);
+ break;
+ }
+ case GetScopedVar: {
+ SpeculateCellOperand scopeChain(this, node.child1());
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+ m_jit.loadPtr(JITCompiler::Address(scopeChain.gpr(), JSVariableObject::offsetOfRegisters()), resultGPR);
+ m_jit.loadPtr(JITCompiler::Address(resultGPR, node.varNumber() * sizeof(Register)), resultGPR);
+ jsValueResult(resultGPR, m_compileIndex);
+ break;
+ }
+ case PutScopedVar: {
+ SpeculateCellOperand scopeChain(this, node.child1());
+ GPRTemporary scratchRegister(this);
+ GPRReg scratchGPR = scratchRegister.gpr();
+ m_jit.loadPtr(JITCompiler::Address(scopeChain.gpr(), JSVariableObject::offsetOfRegisters()), scratchGPR);
+ JSValueOperand value(this, node.child2());
+ m_jit.storePtr(value.gpr(), JITCompiler::Address(scratchGPR, node.varNumber() * sizeof(Register)));
+ writeBarrier(scopeChain.gpr(), value.gpr(), node.child2(), WriteBarrierForVariableAccess, scratchGPR);
+ noResult(m_compileIndex);
+ break;
+ }
+ case GetById: {
+ if (!node.prediction()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
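+ // If the base is predicted to be a cell we can go straight to the patchable get-by-id;
+ // otherwise emit a not-cell check that is handled by the cache's slow path.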
+ if (isCellPrediction(at(node.child1()).prediction())) {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary result(this, base);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+ GPRReg scratchGPR;
+
+ if (resultGPR == baseGPR)
+ scratchGPR = tryAllocate();
+ else
+ scratchGPR = resultGPR;
+
+ base.use();
+
+ cachedGetById(baseGPR, resultGPR, scratchGPR, node.identifierNumber());
+
+ jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ JSValueOperand base(this, node.child1());
+ GPRTemporary result(this, base);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+ GPRReg scratchGPR;
+
+ if (resultGPR == baseGPR)
+ scratchGPR = tryAllocate();
+ else
+ scratchGPR = resultGPR;
+
+ base.use();
+
+ JITCompiler::Jump notCell = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
+
+ cachedGetById(baseGPR, resultGPR, scratchGPR, node.identifierNumber(), notCell);
+
+ jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
+
+ break;
+ }
+
+ case GetArrayLength: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary result(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+
+ m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), resultGPR);
+ m_jit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), resultGPR);
+
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, resultGPR, MacroAssembler::TrustedImm32(0)));
+
+ integerResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case GetStringLength: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary result(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ if (!isStringPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info)));
+
+ m_jit.load32(MacroAssembler::Address(baseGPR, JSString::offsetOfLength()), resultGPR);
+
+ integerResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case GetByteArrayLength: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary result(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ if (!isByteArrayPrediction(m_state.forNode(node.child1()).m_type))
+ speculationCheck(BadType, JSValueRegs(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
+
+ m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSByteArray::offsetOfStorage()), resultGPR);
+ m_jit.load32(MacroAssembler::Address(resultGPR, ByteArray::offsetOfSize()), resultGPR);
+
+ integerResult(resultGPR, m_compileIndex);
+ break;
+ }
+ case GetInt8ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->int8ArrayDescriptor(), node, !isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetInt16ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->int16ArrayDescriptor(), node, !isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetInt32ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->int32ArrayDescriptor(), node, !isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetUint8ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->uint8ArrayDescriptor(), node, !isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetUint16ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->uint16ArrayDescriptor(), node, !isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetUint32ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->uint32ArrayDescriptor(), node, !isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetFloat32ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->float32ArrayDescriptor(), node, !isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case GetFloat64ArrayLength: {
+ compileGetTypedArrayLength(m_jit.globalData()->float64ArrayDescriptor(), node, !isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ break;
+ }
+ case CheckFunction: {
+ SpeculateCellOperand function(this, node.child1());
+ speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, function.gpr(), node.function()));
+ noResult(m_compileIndex);
+ break;
+ }
+ case CheckStructure: {
+ if (m_state.forNode(node.child1()).m_structure.isSubsetOf(node.structureSet())) {
+ noResult(m_compileIndex);
+ break;
+ }
+
+ SpeculateCellOperand base(this, node.child1());
+
+ ASSERT(node.structureSet().size());
+
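+ // For a single expected structure, compare directly against memory; for multiple
+ // structures, load the structure once and test it against each member of the set.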
+ if (node.structureSet().size() == 1)
+ speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, JITCompiler::Address(base.gpr(), JSCell::structureOffset()), node.structureSet()[0]));
+ else {
+ GPRTemporary structure(this);
+
+ m_jit.loadPtr(JITCompiler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr());
+
+ JITCompiler::JumpList done;
+
+ for (size_t i = 0; i < node.structureSet().size() - 1; ++i)
+ done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node.structureSet()[i]));
+
+ speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, structure.gpr(), node.structureSet().last()));
+
+ done.link(&m_jit);
+ }
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case PutStructure: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRReg baseGPR = base.gpr();
+
+ m_jit.addWeakReferenceTransition(
+ node.codeOrigin.codeOriginOwner(),
+ node.structureTransitionData().previousStructure,
+ node.structureTransitionData().newStructure);
+
+#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ // Must always emit this write barrier as the structure transition itself requires it
+ writeBarrier(baseGPR, node.structureTransitionData().newStructure, WriteBarrierForGenericAccess);
+#endif
+
+ m_jit.storePtr(MacroAssembler::TrustedImmPtr(node.structureTransitionData().newStructure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case GetPropertyStorage: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary result(this, base);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
+
+ storageResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case GetIndexedPropertyStorage: {
+ compileGetIndexedPropertyStorage(node);
+ break;
+ }
+
+ case GetByOffset: {
+ StorageOperand storage(this, node.child1());
+ GPRTemporary result(this, storage);
+
+ GPRReg storageGPR = storage.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()];
+
+ m_jit.loadPtr(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue)), resultGPR);
+
+ jsValueResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case PutByOffset: {
+#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ SpeculateCellOperand base(this, node.child1());
+#endif
+ StorageOperand storage(this, node.child2());
+ JSValueOperand value(this, node.child3());
+
+ GPRReg storageGPR = storage.gpr();
+ GPRReg valueGPR = value.gpr();
+
+#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ writeBarrier(base.gpr(), value.gpr(), node.child3(), WriteBarrierForPropertyAccess);
+#endif
+
+ StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()];
+
+ m_jit.storePtr(valueGPR, JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue)));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case PutById: {
+ SpeculateCellOperand base(this, node.child1());
+ JSValueOperand value(this, node.child2());
+ GPRTemporary scratch(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg valueGPR = value.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ base.use();
+ value.use();
+
+ cachedPutById(baseGPR, valueGPR, node.child2(), scratchGPR, node.identifierNumber(), NotDirect);
+
+ noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case PutByIdDirect: {
+ SpeculateCellOperand base(this, node.child1());
+ JSValueOperand value(this, node.child2());
+ GPRTemporary scratch(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg valueGPR = value.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ base.use();
+ value.use();
+
+ cachedPutById(baseGPR, valueGPR, node.child2(), scratchGPR, node.identifierNumber(), Direct);
+
+ noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case GetGlobalVar: {
+ GPRTemporary result(this);
+
+ JSVariableObject* globalObject = m_jit.globalObjectFor(node.codeOrigin);
+ m_jit.loadPtr(globalObject->addressOfRegisters(), result.gpr());
+ m_jit.loadPtr(JITCompiler::addressForGlobalVar(result.gpr(), node.varNumber()), result.gpr());
+
+ jsValueResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case PutGlobalVar: {
+ JSValueOperand value(this, node.child1());
+ GPRTemporary globalObject(this);
+ GPRTemporary scratch(this);
+
+ GPRReg globalObjectReg = globalObject.gpr();
+ GPRReg scratchReg = scratch.gpr();
+
+ m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectReg);
+
+ writeBarrier(m_jit.globalObjectFor(node.codeOrigin), value.gpr(), node.child1(), WriteBarrierForVariableAccess, scratchReg);
+
+ m_jit.loadPtr(MacroAssembler::Address(globalObjectReg, JSVariableObject::offsetOfRegisters()), scratchReg);
+ m_jit.storePtr(value.gpr(), JITCompiler::addressForGlobalVar(scratchReg, node.varNumber()));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case CheckHasInstance: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary structure(this);
+
+ // Speculate that the base object's structure has the ImplementsDefaultHasInstance flag set.
+ m_jit.loadPtr(MacroAssembler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr());
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(structure.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance)));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case InstanceOf: {
+ compileInstanceOf(node);
+ break;
+ }
+
+ case Phi:
+ case Flush:
+ ASSERT_NOT_REACHED();
+
+ case Breakpoint:
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ m_jit.breakpoint();
+#else
+ ASSERT_NOT_REACHED();
+#endif
+ break;
+
+ case Call:
+ case Construct:
+ emitCall(node);
+ break;
+
+ case Resolve: {
+ flushRegisters();
+ GPRResult result(this);
+ callOperation(operationResolve, result.gpr(), identifier(node.identifierNumber()));
+ jsValueResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case ResolveBase: {
+ flushRegisters();
+ GPRResult result(this);
+ callOperation(operationResolveBase, result.gpr(), identifier(node.identifierNumber()));
+ jsValueResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case ResolveBaseStrictPut: {
+ flushRegisters();
+ GPRResult result(this);
+ callOperation(operationResolveBaseStrictPut, result.gpr(), identifier(node.identifierNumber()));
+ jsValueResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case ResolveGlobal: {
+ GPRTemporary globalObject(this);
+ GPRTemporary resolveInfo(this);
+ GPRTemporary result(this);
+
+ GPRReg globalObjectGPR = globalObject.gpr();
+ GPRReg resolveInfoGPR = resolveInfo.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ ResolveGlobalData& data = m_jit.graph().m_resolveGlobalData[node.resolveGlobalDataIndex()];
+ GlobalResolveInfo* resolveInfoAddress = &(m_jit.codeBlock()->globalResolveInfo(data.resolveInfoIndex));
+
+ // Check Structure of global object
+ m_jit.move(JITCompiler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectGPR);
+ m_jit.move(JITCompiler::TrustedImmPtr(resolveInfoAddress), resolveInfoGPR);
+ m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), resultGPR);
+ JITCompiler::Jump structuresMatch = m_jit.branchPtr(JITCompiler::Equal, resultGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset()));
+
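+ // Slow path: the cached structure did not match, so resolve through operationResolveGlobal.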
+ silentSpillAllRegisters(resultGPR);
+ callOperation(operationResolveGlobal, resultGPR, resolveInfoGPR, &m_jit.codeBlock()->identifier(data.identifierNumber));
+ silentFillAllRegisters(resultGPR);
+
+ JITCompiler::Jump wasSlow = m_jit.jump();
+
+ // Fast case
+ structuresMatch.link(&m_jit);
+ m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
+ m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR);
+ m_jit.loadPtr(JITCompiler::BaseIndex(resultGPR, resolveInfoGPR, JITCompiler::ScalePtr), resultGPR);
+
+ wasSlow.link(&m_jit);
+
+ jsValueResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case ForceOSRExit: {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ break;
+ }
+
+ case Phantom:
+ // This is a no-op.
+ noResult(m_compileIndex);
+ break;
+
+ case InlineStart:
+ case Nop:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+
+ if (!m_compileOkay)
+ return;
+
+ if (node.hasResult() && node.mustGenerate())
+ use(m_compileIndex);
+}
+
+#endif
+
+} } // namespace JSC::DFG
+
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGStructureSet.h b/Source/JavaScriptCore/dfg/DFGStructureSet.h
new file mode 100644
index 000000000..181c32910
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGStructureSet.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGStructureSet_h
+#define DFGStructureSet_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "PredictedType.h"
+#include <stdio.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class Structure;
+
+namespace DFG {
+
+class StructureAbstractValue;
+
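+// A small set of Structure pointers, as used by CheckStructure and StructureAbstractValue.
+// Backed by a Vector with inline capacity for two entries; all membership operations are
+// simple linear scans.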
+class StructureSet {
+public:
+ StructureSet() { }
+
+ StructureSet(Structure* structure)
+ {
+ m_structures.append(structure);
+ }
+
+ void clear()
+ {
+ m_structures.clear();
+ }
+
+ void add(Structure* structure)
+ {
+ ASSERT(!contains(structure));
+ m_structures.append(structure);
+ }
+
+ bool addAll(const StructureSet& other)
+ {
+ bool changed = false;
+ for (size_t i = 0; i < other.size(); ++i) {
+ if (contains(other[i]))
+ continue;
+ add(other[i]);
+ changed = true;
+ }
+ return changed;
+ }
+
+ void remove(Structure* structure)
+ {
+ for (size_t i = 0; i < m_structures.size(); ++i) {
+ if (m_structures[i] != structure)
+ continue;
+
+ m_structures[i] = m_structures.last();
+ m_structures.removeLast();
+ return;
+ }
+ }
+
+ bool contains(Structure* structure) const
+ {
+ for (size_t i = 0; i < m_structures.size(); ++i) {
+ if (m_structures[i] == structure)
+ return true;
+ }
+ return false;
+ }
+
+ bool isSubsetOf(const StructureSet& other) const
+ {
+ for (size_t i = 0; i < m_structures.size(); ++i) {
+ if (!other.contains(m_structures[i]))
+ return false;
+ }
+ return true;
+ }
+
+ bool isSupersetOf(const StructureSet& other) const
+ {
+ return other.isSubsetOf(*this);
+ }
+
+ size_t size() const { return m_structures.size(); }
+
+ Structure* at(size_t i) const { return m_structures.at(i); }
+
+ Structure* operator[](size_t i) const { return at(i); }
+
+ Structure* last() const { return m_structures.last(); }
+
+ PredictedType predictionFromStructures() const
+ {
+ PredictedType result = PredictNone;
+
+ for (size_t i = 0; i < m_structures.size(); ++i)
+ mergePrediction(result, predictionFromStructure(m_structures[i]));
+
+ return result;
+ }
+
+ bool operator==(const StructureSet& other) const
+ {
+ if (m_structures.size() != other.m_structures.size())
+ return false;
+
+ for (size_t i = 0; i < m_structures.size(); ++i) {
+ if (!other.contains(m_structures[i]))
+ return false;
+ }
+
+ return true;
+ }
+
+#ifndef NDEBUG
+ void dump(FILE* out)
+ {
+ fprintf(out, "[");
+ for (size_t i = 0; i < m_structures.size(); ++i) {
+ if (i)
+ fprintf(out, ", ");
+ fprintf(out, "%p", m_structures[i]);
+ }
+ fprintf(out, "]");
+ }
+#endif
+
+private:
+ friend class StructureAbstractValue;
+
+ Vector<Structure*, 2> m_structures;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGStructureSet_h
diff --git a/Source/JavaScriptCore/dfg/DFGThunks.cpp b/Source/JavaScriptCore/dfg/DFGThunks.cpp
new file mode 100644
index 000000000..fddb656cc
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGThunks.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGThunks.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGFPRInfo.h"
+#include "DFGGPRInfo.h"
+#include "DFGOSRExitCompiler.h"
+#include "MacroAssembler.h"
+
+namespace JSC { namespace DFG {
+
+MacroAssemblerCodeRef osrExitGenerationThunkGenerator(JSGlobalData* globalData)
+{
+ MacroAssembler jit;
+
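+ // Save every GPR and FPR into a scratch buffer, call compileOSRExit to build the exit
+ // off-line, restore all registers, and then jump to the address that the exit compiler
+ // left in osrExitJumpDestination.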
+ EncodedJSValue* buffer = static_cast<EncodedJSValue*>(globalData->scratchBufferForSize(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters)));
+
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
+ jit.storePtr(GPRInfo::toRegister(i), buffer + i);
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ jit.move(MacroAssembler::TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
+ jit.storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
+ }
+
+ // Set up one argument.
+#if CPU(X86)
+ jit.poke(GPRInfo::callFrameRegister, 0);
+#else
+ jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+#endif
+
+ MacroAssembler::Call functionCall = jit.call();
+
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ jit.move(MacroAssembler::TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
+ jit.loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
+ }
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
+ jit.loadPtr(buffer + i, GPRInfo::toRegister(i));
+
+ jit.jump(MacroAssembler::AbsoluteAddress(&globalData->osrExitJumpDestination));
+
+ LinkBuffer patchBuffer(*globalData, &jit);
+
+ patchBuffer.link(functionCall, compileOSRExit);
+
+ return patchBuffer.finalizeCode();
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGThunks.h b/Source/JavaScriptCore/dfg/DFGThunks.h
new file mode 100644
index 000000000..3db62442a
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGThunks.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGThunks_h
+#define DFGThunks_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "MacroAssemblerCodeRef.h"
+
+namespace JSC {
+
+class JSGlobalData;
+
+namespace DFG {
+
+MacroAssemblerCodeRef osrExitGenerationThunkGenerator(JSGlobalData*);
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGThunks_h
diff --git a/Source/JavaScriptCore/dfg/DFGVariableAccessData.h b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
new file mode 100644
index 000000000..3cc53748a
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGVariableAccessData_h
+#define DFGVariableAccessData_h
+
+#include "DFGOperands.h"
+#include "PredictedType.h"
+#include "VirtualRegister.h"
+#include <wtf/Platform.h>
+#include <wtf/UnionFind.h>
+
+namespace JSC { namespace DFG {
+
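+// Summarizes everything the DFG knows about one local variable: its predicted type and the
+// value-versus-double votes used to decide whether the variable should be kept in double
+// format. Access data for the same variable is merged through UnionFind.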
+class VariableAccessData : public UnionFind<VariableAccessData> {
+public:
+ enum Ballot { VoteValue, VoteDouble };
+
+ VariableAccessData()
+ : m_local(static_cast<VirtualRegister>(std::numeric_limits<int>::min()))
+ , m_prediction(PredictNone)
+ , m_shouldUseDoubleFormat(false)
+ {
+ clearVotes();
+ }
+
+ VariableAccessData(VirtualRegister local)
+ : m_local(local)
+ , m_prediction(PredictNone)
+ , m_shouldUseDoubleFormat(false)
+ {
+ clearVotes();
+ }
+
+ VirtualRegister local()
+ {
+ ASSERT(m_local == find()->m_local);
+ return m_local;
+ }
+
+ int operand()
+ {
+ return static_cast<int>(local());
+ }
+
+ bool predict(PredictedType prediction)
+ {
+ return mergePrediction(find()->m_prediction, prediction);
+ }
+
+ PredictedType prediction()
+ {
+ return find()->m_prediction;
+ }
+
+ void clearVotes()
+ {
+ ASSERT(find() == this);
+ m_votes[VoteValue] = 0;
+ m_votes[VoteDouble] = 0;
+ }
+
+ void vote(Ballot ballot)
+ {
+ ASSERT(static_cast<unsigned>(ballot) < 2);
+ m_votes[ballot]++;
+ }
+
+ double doubleVoteRatio()
+ {
+ ASSERT(find() == this);
+ return static_cast<double>(m_votes[VoteDouble]) / m_votes[VoteValue];
+ }
+
+ bool shouldUseDoubleFormatAccordingToVote()
+ {
+ // FIXME: make this work for arguments.
+ return !operandIsArgument(operand()) && ((isNumberPrediction(prediction()) && doubleVoteRatio() >= Options::doubleVoteRatioForDoubleFormat) || isDoublePrediction(prediction()));
+ }
+
+ bool shouldUseDoubleFormat()
+ {
+ ASSERT(find() == this);
+ return m_shouldUseDoubleFormat;
+ }
+
+ bool tallyVotesForShouldUseDoubleFormat()
+ {
+ ASSERT(find() == this);
+
+ bool newValueOfShouldUseDoubleFormat = shouldUseDoubleFormatAccordingToVote();
+ if (!newValueOfShouldUseDoubleFormat) {
+ // We monotonically convert to double. Hence, if the fixpoint leads us to conclude that we should
+ // switch back to int, we instead ignore this and stick with double.
+ return false;
+ }
+
+ if (m_shouldUseDoubleFormat)
+ return false;
+
+ m_shouldUseDoubleFormat = true;
+ mergePrediction(m_prediction, PredictDouble);
+ return true;
+ }
+
+private:
+ // This is slightly space-inefficient, since anything we're unified with
+ // will have the same operand and should have the same prediction. But
+ // putting them here simplifies the code, and we don't expect DFG space
+ // usage for variable access nodes to be significant.
+
+ VirtualRegister m_local;
+ PredictedType m_prediction;
+
+ float m_votes[2];
+ bool m_shouldUseDoubleFormat;
+};
+
+} } // namespace JSC::DFG
+
+#endif // DFGVariableAccessData_h