Diffstat (limited to 'Source/JavaScriptCore/dfg')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAbstractState.cpp              | 123
-rw-r--r--  Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp             |   5
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCCallHelpers.h                 |  52
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCSEPhase.cpp                   |  23
-rw-r--r--  Source/JavaScriptCore/dfg/DFGFixupPhase.cpp                 |  17
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGPRInfo.h                      |   6
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGraph.h                        |  39
-rw-r--r--  Source/JavaScriptCore/dfg/DFGJITCompiler.cpp                |   2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGJITCompiler.h                  |   9
-rw-r--r--  Source/JavaScriptCore/dfg/DFGNodeType.h                     |   7
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOperations.cpp                 |   9
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOperations.h                   |   1
-rw-r--r--  Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp |   6
-rw-r--r--  Source/JavaScriptCore/dfg/DFGRegisterBank.h                 |   5
-rw-r--r--  Source/JavaScriptCore/dfg/DFGRegisterSet.h                  | 217
-rw-r--r--  Source/JavaScriptCore/dfg/DFGRepatch.cpp                    | 218
-rw-r--r--  Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h     | 192
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp             |   8
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h               |  20
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp        | 143
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp           | 152
21 files changed, 956 insertions(+), 298 deletions(-)
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
index 95f44c092..e4561da06 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
@@ -946,13 +946,18 @@ bool AbstractState::execute(unsigned indexInBlock)
case PutByVal:
case PutByValAlias: {
node.setCanExit(true);
- if (!m_graph[node.child1()].prediction() || !m_graph[node.child2()].prediction()) {
+
+ Edge child1 = m_graph.varArgChild(node, 0);
+ Edge child2 = m_graph.varArgChild(node, 1);
+ Edge child3 = m_graph.varArgChild(node, 2);
+
+ if (!m_graph[child1].prediction() || !m_graph[child2].prediction()) {
m_isValid = false;
break;
}
- if (!m_graph[node.child2()].shouldSpeculateInteger() || !isActionableMutableArraySpeculation(m_graph[node.child1()].prediction())
+ if (!m_graph[child2].shouldSpeculateInteger() || !isActionableMutableArraySpeculation(m_graph[child1].prediction())
#if USE(JSVALUE32_64)
- || m_graph[node.child1()].shouldSpeculateArguments()
+ || m_graph[child1].shouldSpeculateArguments()
#endif
) {
ASSERT(node.op() == PutByVal);
@@ -961,89 +966,89 @@ bool AbstractState::execute(unsigned indexInBlock)
break;
}
- if (m_graph[node.child1()].shouldSpeculateArguments()) {
- forNode(node.child1()).filter(SpecArguments);
- forNode(node.child2()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateArguments()) {
+ forNode(child1).filter(SpecArguments);
+ forNode(child2).filter(SpecInt32);
break;
}
- if (m_graph[node.child1()].shouldSpeculateInt8Array()) {
- forNode(node.child1()).filter(SpecInt8Array);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateInt8Array()) {
+ forNode(child1).filter(SpecInt8Array);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateInt16Array()) {
- forNode(node.child1()).filter(SpecInt16Array);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateInt16Array()) {
+ forNode(child1).filter(SpecInt16Array);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateInt32Array()) {
- forNode(node.child1()).filter(SpecInt32Array);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateInt32Array()) {
+ forNode(child1).filter(SpecInt32Array);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateUint8Array()) {
- forNode(node.child1()).filter(SpecUint8Array);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateUint8Array()) {
+ forNode(child1).filter(SpecUint8Array);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateUint8ClampedArray()) {
- forNode(node.child1()).filter(SpecUint8ClampedArray);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateUint8ClampedArray()) {
+ forNode(child1).filter(SpecUint8ClampedArray);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateUint16Array()) {
- forNode(node.child1()).filter(SpecUint16Array);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateUint16Array()) {
+ forNode(child1).filter(SpecUint16Array);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateUint32Array()) {
- forNode(node.child1()).filter(SpecUint32Array);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateUint32Array()) {
+ forNode(child1).filter(SpecUint32Array);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateFloat32Array()) {
- forNode(node.child1()).filter(SpecFloat32Array);
- forNode(node.child2()).filter(SpecInt32);
- forNode(node.child3()).filter(SpecNumber);
+ if (m_graph[child1].shouldSpeculateFloat32Array()) {
+ forNode(child1).filter(SpecFloat32Array);
+ forNode(child2).filter(SpecInt32);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateFloat64Array()) {
- forNode(node.child1()).filter(SpecFloat64Array);
- forNode(node.child2()).filter(SpecInt32);
- forNode(node.child3()).filter(SpecNumber);
+ if (m_graph[child1].shouldSpeculateFloat64Array()) {
+ forNode(child1).filter(SpecFloat64Array);
+ forNode(child2).filter(SpecInt32);
+ forNode(child3).filter(SpecNumber);
break;
}
- ASSERT(m_graph[node.child1()].shouldSpeculateArray());
- forNode(node.child1()).filter(SpecArray);
- forNode(node.child2()).filter(SpecInt32);
+ ASSERT(m_graph[child1].shouldSpeculateArray());
+ forNode(child1).filter(SpecArray);
+ forNode(child2).filter(SpecInt32);
if (node.op() == PutByVal)
clobberWorld(node.codeOrigin, indexInBlock);
break;
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 91b882399..1b1395934 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -2152,7 +2152,10 @@ bool ByteCodeParser::parseBlock(unsigned limit)
NodeIndex property = get(currentInstruction[2].u.operand);
NodeIndex value = get(currentInstruction[3].u.operand);
- addToGraph(PutByVal, base, property, value);
+ addVarArgChild(base);
+ addVarArgChild(property);
+ addVarArgChild(value);
+ addToGraph(Node::VarArg, PutByVal, OpInfo(0), OpInfo(0));
NEXT_OPCODE(op_put_by_val);
}
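
Note: PutByVal nodes are now built as var-args nodes; the parser pushes each child onto the graph's shared var-args child list and later phases read the children back positionally instead of through child1()/child2()/child3(). A minimal sketch of the consumer-side pattern used throughout this patch (illustrative only, not part of the diff; nodeIndex is a placeholder):

    Node& node = m_graph[nodeIndex];
    ASSERT(node.flags() & NodeHasVarArgs);          // set for PutByVal/PutByValAlias below
    Edge base     = m_graph.varArgChild(node, 0);   // object being stored into
    Edge property = m_graph.varArgChild(node, 1);   // index, speculated integer
    Edge value    = m_graph.varArgChild(node, 2);   // value being stored
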
diff --git a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
index 5985b251e..fd4e1cae0 100644
--- a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
@@ -229,6 +229,27 @@ public:
addCallArgument(arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
{
resetCallArguments();
@@ -570,6 +591,14 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImmPtr arg3)
{
move(arg1, GPRInfo::argumentGPR1);
@@ -642,6 +671,19 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, 1);
+ poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
{
poke(arg4);
@@ -722,6 +764,16 @@ public:
#endif // NUMBER_OF_ARGUMENT_REGISTERS == 4
+#if NUMBER_OF_ARGUMENT_REGISTERS >= 5
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR4>(arg1, arg4);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+#endif
+
void setupResults(GPRReg destA, GPRReg destB)
{
GPRReg srcA = GPRInfo::returnValueGPR;
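
Note: the new setupArgumentsWithExecState() overloads marshal the arguments of the slow-path operation added later in this patch; on JSVALUE32_64 the EncodedJSValue occupies two GPRs (payload and tag), hence the extra five-GPR variant. For orientation only, the intended pairing as used in the DFGRepatch.cpp hunk below:

    // C prototype being targeted (declared in the DFGOperations.h hunk):
    //   void operationReallocateStorageAndFinishPut(
    //       ExecState*, JSObject*, Structure*, PropertyOffset, EncodedJSValue);
    stubJit.setupArgumentsWithExecState(
        baseGPR,                                            // JSObject* base
        MacroAssembler::TrustedImmPtr(structure),           // Structure* (new structure)
        MacroAssembler::TrustedImm32(slot.cachedOffset()),  // PropertyOffset
        valueGPR);                                          // EncodedJSValue (64-bit build)
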
diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
index 108cf1965..04c3ebc66 100644
--- a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
@@ -284,15 +284,16 @@ private:
return index;
break;
case PutByVal:
- case PutByValAlias:
+ case PutByValAlias: {
if (!m_graph.byValIsPure(node))
return NoNode;
- if (node.child1() == child1 && canonicalize(node.child2()) == canonicalize(child2))
- return node.child3().index();
+ if (m_graph.varArgChild(node, 0) == child1 && canonicalize(m_graph.varArgChild(node, 1)) == canonicalize(child2))
+ return m_graph.varArgChild(node, 2).index();
// We must assume that the PutByVal will clobber the location we're getting from.
// FIXME: We can do better; if we know that the PutByVal is accessing an array of a
// different type than the GetByVal, then we know that they won't clobber each other.
return NoNode;
+ }
case PutStructure:
case PutByOffset:
// GetByVal currently always speculates that it's accessing an
@@ -634,7 +635,7 @@ private:
break;
case PutByVal:
- if (isFixedIndexedStorageObjectSpeculation(m_graph[node.child1()].prediction()) && m_graph.byValIsPure(node))
+ if (isFixedIndexedStorageObjectSpeculation(m_graph[m_graph.varArgChild(node, 0)].prediction()) && m_graph.byValIsPure(node))
break;
return NoNode;
@@ -1079,17 +1080,19 @@ private:
setReplacement(getByValLoadElimination(node.child1().index(), node.child2().index()));
break;
- case PutByVal:
- if (isActionableMutableArraySpeculation(m_graph[node.child1()].prediction())
- && m_graph[node.child2()].shouldSpeculateInteger()
- && !m_graph[node.child1()].shouldSpeculateArguments()) {
- NodeIndex nodeIndex = getByValLoadElimination(
- node.child1().index(), node.child2().index());
+ case PutByVal: {
+ Edge child1 = m_graph.varArgChild(node, 0);
+ Edge child2 = m_graph.varArgChild(node, 1);
+ if (isActionableMutableArraySpeculation(m_graph[child1].prediction())
+ && m_graph[child2].shouldSpeculateInteger()
+ && !m_graph[child1].shouldSpeculateArguments()) {
+ NodeIndex nodeIndex = getByValLoadElimination(child1.index(), child2.index());
if (nodeIndex == NoNode)
break;
node.setOp(PutByValAlias);
}
break;
+ }
case CheckStructure:
if (checkStructureLoadElimination(node.structureSet(), node.child1().index()))
diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
index 2e7389f21..a1954d7e0 100644
--- a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
@@ -315,19 +315,22 @@ private:
}
case PutByVal: {
- if (!m_graph[node.child1()].prediction() || !m_graph[node.child2()].prediction())
+ Edge child1 = m_graph.varArgChild(node, 0);
+ Edge child2 = m_graph.varArgChild(node, 1);
+ Edge child3 = m_graph.varArgChild(node, 2);
+ if (!m_graph[child1].prediction() || !m_graph[child2].prediction())
break;
- if (!m_graph[node.child2()].shouldSpeculateInteger())
+ if (!m_graph[child2].shouldSpeculateInteger())
break;
- if (isActionableIntMutableArraySpeculation(m_graph[node.child1()].prediction())) {
- if (m_graph[node.child3()].isConstant())
+ if (isActionableIntMutableArraySpeculation(m_graph[child1].prediction())) {
+ if (m_graph[child3].isConstant())
break;
- if (m_graph[node.child3()].shouldSpeculateInteger())
+ if (m_graph[child3].shouldSpeculateInteger())
break;
fixDoubleEdge(2);
break;
}
- if (isActionableFloatMutableArraySpeculation(m_graph[node.child1()].prediction())) {
+ if (isActionableFloatMutableArraySpeculation(m_graph[child1].prediction())) {
fixDoubleEdge(2);
break;
}
@@ -368,7 +371,7 @@ private:
void fixDoubleEdge(unsigned childIndex)
{
Node& source = m_graph[m_compileIndex];
- Edge& edge = source.children.child(childIndex);
+ Edge& edge = m_graph.child(source, childIndex);
if (!m_graph[edge].shouldSpeculateInteger()) {
edge.setUseKind(DoubleUse);
diff --git a/Source/JavaScriptCore/dfg/DFGGPRInfo.h b/Source/JavaScriptCore/dfg/DFGGPRInfo.h
index 23f1697a6..498b116ec 100644
--- a/Source/JavaScriptCore/dfg/DFGGPRInfo.h
+++ b/Source/JavaScriptCore/dfg/DFGGPRInfo.h
@@ -26,10 +26,12 @@
#ifndef DFGGPRInfo_h
#define DFGGPRInfo_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
-#include <assembler/MacroAssembler.h>
-#include <dfg/DFGRegisterBank.h>
+#include "DFGRegisterBank.h"
+#include "MacroAssembler.h"
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
index a9080d117..4091c48f7 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -463,26 +463,35 @@ public:
bool byValIsPure(Node& node)
{
- if (!at(node.child2()).shouldSpeculateInteger())
- return false;
- SpeculatedType prediction = at(node.child1()).prediction();
switch (node.op()) {
- case PutByVal:
+ case PutByVal: {
+ if (!at(varArgChild(node, 1)).shouldSpeculateInteger())
+ return false;
+ SpeculatedType prediction = at(varArgChild(node, 0)).prediction();
if (!isActionableMutableArraySpeculation(prediction))
return false;
if (isArraySpeculation(prediction))
return false;
return true;
+ }
- case PutByValAlias:
+ case PutByValAlias: {
+ if (!at(varArgChild(node, 1)).shouldSpeculateInteger())
+ return false;
+ SpeculatedType prediction = at(varArgChild(node, 0)).prediction();
if (!isActionableMutableArraySpeculation(prediction))
return false;
return true;
+ }
- case GetByVal:
+ case GetByVal: {
+ if (!at(node.child2()).shouldSpeculateInteger())
+ return false;
+ SpeculatedType prediction = at(node.child1()).prediction();
if (!isActionableArraySpeculation(prediction))
return false;
return true;
+ }
default:
ASSERT_NOT_REACHED();
@@ -524,17 +533,29 @@ public:
void resetExitStates();
+ unsigned varArgNumChildren(Node& node)
+ {
+ ASSERT(node.flags() & NodeHasVarArgs);
+ return node.numChildren();
+ }
+
unsigned numChildren(Node& node)
{
if (node.flags() & NodeHasVarArgs)
- return node.numChildren();
+ return varArgNumChildren(node);
return AdjacencyList::Size;
}
- Edge child(Node& node, unsigned index)
+ Edge& varArgChild(Node& node, unsigned index)
+ {
+ ASSERT(node.flags() & NodeHasVarArgs);
+ return m_varArgChildren[node.firstChild() + index];
+ }
+
+ Edge& child(Node& node, unsigned index)
{
if (node.flags() & NodeHasVarArgs)
- return m_varArgChildren[node.firstChild() + index];
+ return varArgChild(node, index);
return node.children.child(index);
}
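
Note: child() and the new varArgChild() return Edge& rather than Edge by value, so callers can mutate an edge in place; the DFGFixupPhase.cpp hunk above relies on this when retagging a child as a double use. A condensed sketch of that pattern (illustrative only):

    Edge& edge = m_graph.child(source, childIndex);    // alias into the node's child list
    if (!m_graph[edge].shouldSpeculateInteger())
        edge.setUseKind(DoubleUse);                     // mutation is visible to the graph
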
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index 2ebee13c1..5a9f972b8 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -179,7 +179,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
info.patch.dfg.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#endif
- info.patch.dfg.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
+ m_propertyAccesses[i].m_usedRegisters.copyInfo(info.patch.dfg.usedRegisters);
info.patch.dfg.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
}
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
index d6374b790..7ff399f78 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.h
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
@@ -35,6 +35,7 @@
#include "DFGGPRInfo.h"
#include "DFGGraph.h"
#include "DFGRegisterBank.h"
+#include "DFGRegisterSet.h"
#include "JITCode.h"
#include "LinkBuffer.h"
#include "MacroAssembler.h"
@@ -169,7 +170,7 @@ struct PropertyAccessRecord {
MacroAssembler::Label done,
int8_t baseGPR,
int8_t valueGPR,
- int8_t scratchGPR,
+ const RegisterSet& usedRegisters,
RegisterMode registerMode = RegistersInUse)
#elif USE(JSVALUE32_64)
PropertyAccessRecord(
@@ -184,7 +185,7 @@ struct PropertyAccessRecord {
int8_t baseGPR,
int8_t valueTagGPR,
int8_t valueGPR,
- int8_t scratchGPR,
+ const RegisterSet& usedRegisters,
RegisterMode registerMode = RegistersInUse)
#endif
: m_codeOrigin(codeOrigin)
@@ -204,7 +205,7 @@ struct PropertyAccessRecord {
, m_valueTagGPR(valueTagGPR)
#endif
, m_valueGPR(valueGPR)
- , m_scratchGPR(scratchGPR)
+ , m_usedRegisters(usedRegisters)
, m_registerMode(registerMode)
{
}
@@ -226,7 +227,7 @@ struct PropertyAccessRecord {
int8_t m_valueTagGPR;
#endif
int8_t m_valueGPR;
- int8_t m_scratchGPR;
+ RegisterSet m_usedRegisters;
RegisterMode m_registerMode;
};
diff --git a/Source/JavaScriptCore/dfg/DFGNodeType.h b/Source/JavaScriptCore/dfg/DFGNodeType.h
index 8c2f96222..7fcd2ec14 100644
--- a/Source/JavaScriptCore/dfg/DFGNodeType.h
+++ b/Source/JavaScriptCore/dfg/DFGNodeType.h
@@ -109,10 +109,11 @@ namespace JSC { namespace DFG {
/* Property access. */\
/* PutByValAlias indicates a 'put' aliases a prior write to the same property. */\
/* Since a put to 'length' may invalidate optimizations here, */\
- /* this must be the directly subsequent property put. */\
+ /* this must be the directly subsequent property put. Note that PutByVal */\
+ /* opcodes use VarArgs because they may have up to 4 children. */\
macro(GetByVal, NodeResultJS | NodeMustGenerate | NodeMightClobber) \
- macro(PutByVal, NodeMustGenerate | NodeMightClobber) \
- macro(PutByValAlias, NodeMustGenerate | NodeMightClobber) \
+ macro(PutByVal, NodeMustGenerate | NodeHasVarArgs | NodeMightClobber) \
+ macro(PutByValAlias, NodeMustGenerate | NodeHasVarArgs | NodeMightClobber) \
macro(GetById, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
macro(GetByIdFlush, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
macro(PutById, NodeMustGenerate | NodeClobbersWorld) \
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp
index bbe55d351..03c0666b7 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp
@@ -1232,6 +1232,15 @@ size_t DFG_OPERATION operationIsFunction(EncodedJSValue value)
return jsIsFunctionType(JSValue::decode(value));
}
+void DFG_OPERATION operationReallocateStorageAndFinishPut(ExecState* exec, JSObject* base, Structure* structure, PropertyOffset offset, EncodedJSValue value)
+{
+ JSGlobalData& globalData = exec->globalData();
+ ASSERT(structure->outOfLineCapacity() > base->structure()->outOfLineCapacity());
+ ASSERT(!globalData.heap.storageAllocator().fastPathShouldSucceed(structure->outOfLineCapacity() * sizeof(JSValue)));
+ base->setStructureAndReallocateStorageIfNecessary(globalData, structure);
+ base->putDirectOffset(globalData, offset, JSValue::decode(value));
+}
+
double DFG_OPERATION operationFModOnInts(int32_t a, int32_t b)
{
return fmod(a, b);
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h
index 3c85ee761..109dcb2eb 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.h
+++ b/Source/JavaScriptCore/dfg/DFGOperations.h
@@ -180,6 +180,7 @@ JSCell* DFG_OPERATION operationNewFunctionExpression(ExecState*, JSCell*) WTF_IN
double DFG_OPERATION operationFModOnInts(int32_t, int32_t) WTF_INTERNAL;
size_t DFG_OPERATION operationIsObject(EncodedJSValue) WTF_INTERNAL;
size_t DFG_OPERATION operationIsFunction(EncodedJSValue) WTF_INTERNAL;
+void DFG_OPERATION operationReallocateStorageAndFinishPut(ExecState*, JSObject*, Structure*, PropertyOffset, EncodedJSValue) WTF_INTERNAL;
// This method is used to lookup an exception hander, keyed by faultLocation, which is
// the return location from one of the calls out to one of the helper operations above.
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
index 320eb6cb6..d23cd8265 100644
--- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
@@ -635,9 +635,9 @@ private:
}
case PutByVal:
- changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
- changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
- changed |= m_graph[node.child3()].mergeFlags(NodeUsedAsValue);
+ changed |= m_graph[m_graph.varArgChild(node, 0)].mergeFlags(NodeUsedAsValue);
+ changed |= m_graph[m_graph.varArgChild(node, 1)].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
+ changed |= m_graph[m_graph.varArgChild(node, 2)].mergeFlags(NodeUsedAsValue);
break;
case PutScopedVar:
diff --git a/Source/JavaScriptCore/dfg/DFGRegisterBank.h b/Source/JavaScriptCore/dfg/DFGRegisterBank.h
index 85dc246f2..1d1d6fa52 100644
--- a/Source/JavaScriptCore/dfg/DFGRegisterBank.h
+++ b/Source/JavaScriptCore/dfg/DFGRegisterBank.h
@@ -226,6 +226,11 @@ public:
return nameAtIndex(BankInfo::toIndex(reg));
}
+ bool isInUse(RegID reg) const
+ {
+ return isLocked(reg) || name(reg) != InvalidVirtualRegister;
+ }
+
#ifndef NDEBUG
void dump()
{
diff --git a/Source/JavaScriptCore/dfg/DFGRegisterSet.h b/Source/JavaScriptCore/dfg/DFGRegisterSet.h
new file mode 100644
index 000000000..bb36359f0
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGRegisterSet.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGRegisterSet_h
+#define DFGRegisterSet_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGFPRInfo.h"
+#include "DFGGPRInfo.h"
+#include <wtf/Bitmap.h>
+
+namespace JSC { namespace DFG {
+
+static const unsigned totalNumberOfRegisters =
+ GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters;
+
+static const unsigned numberOfBytesInRegisterSet =
+ (totalNumberOfRegisters + 7) >> 3;
+
+typedef uint8_t RegisterSetPOD[numberOfBytesInRegisterSet];
+
+class RegisterSet {
+public:
+ RegisterSet()
+ {
+ for (unsigned i = numberOfBytesInRegisterSet; i--;)
+ m_set[i] = 0;
+ }
+
+ RegisterSet(const RegisterSetPOD& other)
+ {
+ for (unsigned i = numberOfBytesInRegisterSet; i--;)
+ m_set[i] = other[i];
+ }
+
+ const RegisterSetPOD& asPOD() const { return m_set; }
+
+ void copyInfo(RegisterSetPOD& other) const
+ {
+ for (unsigned i = numberOfBytesInRegisterSet; i--;)
+ other[i] = m_set[i];
+ }
+
+ void set(GPRReg reg)
+ {
+ setBit(GPRInfo::toIndex(reg));
+ }
+
+ void setGPRByIndex(unsigned index)
+ {
+ ASSERT(index < GPRInfo::numberOfRegisters);
+ setBit(index);
+ }
+
+ void clear(GPRReg reg)
+ {
+ clearBit(GPRInfo::toIndex(reg));
+ }
+
+ bool get(GPRReg reg) const
+ {
+ return getBit(GPRInfo::toIndex(reg));
+ }
+
+ bool getGPRByIndex(unsigned index) const
+ {
+ ASSERT(index < GPRInfo::numberOfRegisters);
+ return getBit(index);
+ }
+
+ // Return the index'th free GPR.
+ GPRReg getFreeGPR(unsigned index = 0) const
+ {
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (!getGPRByIndex(i) && !index--)
+ return GPRInfo::toRegister(i);
+ }
+ return InvalidGPRReg;
+ }
+
+ void set(FPRReg reg)
+ {
+ setBit(GPRInfo::numberOfRegisters + FPRInfo::toIndex(reg));
+ }
+
+ void setFPRByIndex(unsigned index)
+ {
+ ASSERT(index < FPRInfo::numberOfRegisters);
+ setBit(GPRInfo::numberOfRegisters + index);
+ }
+
+ void clear(FPRReg reg)
+ {
+ clearBit(GPRInfo::numberOfRegisters + FPRInfo::toIndex(reg));
+ }
+
+ bool get(FPRReg reg) const
+ {
+ return getBit(GPRInfo::numberOfRegisters + FPRInfo::toIndex(reg));
+ }
+
+ bool getFPRByIndex(unsigned index) const
+ {
+ ASSERT(index < FPRInfo::numberOfRegisters);
+ return getBit(GPRInfo::numberOfRegisters + index);
+ }
+
+ template<typename BankInfo>
+ void setByIndex(unsigned index)
+ {
+ set(BankInfo::toRegister(index));
+ }
+
+ template<typename BankInfo>
+ bool getByIndex(unsigned index)
+ {
+ return get(BankInfo::toRegister(index));
+ }
+
+ unsigned numberOfSetGPRs() const
+ {
+ unsigned result = 0;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (!getBit(i))
+ continue;
+ result++;
+ }
+ return result;
+ }
+
+ unsigned numberOfSetFPRs() const
+ {
+ unsigned result = 0;
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (!getBit(GPRInfo::numberOfRegisters + i))
+ continue;
+ result++;
+ }
+ return result;
+ }
+
+ unsigned numberOfSetRegisters() const
+ {
+ unsigned result = 0;
+ for (unsigned i = totalNumberOfRegisters; i--;) {
+ if (!getBit(i))
+ continue;
+ result++;
+ }
+ return result;
+ }
+
+private:
+ void setBit(unsigned i)
+ {
+ ASSERT(i < totalNumberOfRegisters);
+ m_set[i >> 3] |= (1 << (i & 7));
+ }
+
+ void clearBit(unsigned i)
+ {
+ ASSERT(i < totalNumberOfRegisters);
+ m_set[i >> 3] &= ~(1 << (i & 7));
+ }
+
+ bool getBit(unsigned i) const
+ {
+ ASSERT(i < totalNumberOfRegisters);
+ return !!(m_set[i >> 3] & (1 << (i & 7)));
+ }
+
+ RegisterSetPOD m_set;
+};
+
+} } // namespace JSC::DFG
+
+#else // ENABLE(DFG_JIT) -> so if DFG is disabled
+
+namespace JSC { namespace DFG {
+
+// Define RegisterSetPOD to something that is a POD, but is otherwise useless,
+// to make it easier to refer to this type in code that may be compiled when
+// the DFG is disabled.
+
+struct RegisterSetPOD { };
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGRegisterSet_h
+
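
Note: RegisterSet is a simple bitmap over the DFG's allocatable GPRs and FPRs. The speculative JIT records the registers live at a property access into one, JITCompiler::link() copies it into the stub info as a RegisterSetPOD, and DFGRepatch.cpp rebuilds a RegisterSet from that POD to pick scratch registers. A small usage sketch, assuming the usual register names from DFGGPRInfo.h/DFGFPRInfo.h (not part of the patch):

    RegisterSet used;
    used.set(GPRInfo::regT0);
    used.set(GPRInfo::regT1);
    used.set(FPRInfo::fpRegT0);
    ASSERT(used.numberOfSetGPRs() == 2);
    ASSERT(used.numberOfSetFPRs() == 1);
    GPRReg scratch = used.getFreeGPR();   // highest-indexed GPR not marked used,
                                          // or InvalidGPRReg if every GPR is taken
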
diff --git a/Source/JavaScriptCore/dfg/DFGRepatch.cpp b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
index e25c6aa27..cfc2cd664 100644
--- a/Source/JavaScriptCore/dfg/DFGRepatch.cpp
+++ b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
@@ -29,6 +29,7 @@
#if ENABLE(DFG_JIT)
#include "DFGCCallHelpers.h"
+#include "DFGScratchRegisterAllocator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "GCAwareJITStubRoutine.h"
@@ -161,11 +162,15 @@ static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stu
GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
+ GPRReg scratchGPR = RegisterSet(stubInfo.patch.dfg.usedRegisters).getFreeGPR();
bool needToRestoreScratch = false;
if (scratchGPR == InvalidGPRReg) {
+#if USE(JSVALUE64)
scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, resultGPR);
+#else
+ scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
+#endif
stubJit.push(scratchGPR);
needToRestoreScratch = true;
}
@@ -231,13 +236,17 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
+ GPRReg scratchGPR = RegisterSet(stubInfo.patch.dfg.usedRegisters).getFreeGPR();
bool needToRestoreScratch = false;
MacroAssembler stubJit;
if (scratchGPR == InvalidGPRReg) {
+#if USE(JSVALUE64)
scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, resultGPR);
+#else
+ scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
+#endif
stubJit.push(scratchGPR);
needToRestoreScratch = true;
}
@@ -384,7 +393,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
+ GPRReg scratchGPR = RegisterSet(stubInfo.patch.dfg.usedRegisters).getFreeGPR();
CCallHelpers stubJit(globalData, codeBlock);
@@ -404,6 +413,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
if (slot.cachedPropertyType() == PropertySlot::Getter
|| slot.cachedPropertyType() == PropertySlot::Custom) {
if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ ASSERT(scratchGPR != InvalidGPRReg);
ASSERT(baseGPR != scratchGPR);
if (isInlineOffset(slot.cachedOffset())) {
#if USE(JSVALUE64)
@@ -629,7 +639,7 @@ static void emitPutReplaceStub(
GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
+ GPRReg scratchGPR = RegisterSet(stubInfo.patch.dfg.usedRegisters).getFreeGPR();
bool needToRestoreScratch = false;
#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
GPRReg scratchGPR2;
@@ -641,7 +651,11 @@ static void emitPutReplaceStub(
MacroAssembler stubJit;
if (scratchGPR == InvalidGPRReg && (writeBarrierNeeded || isOutOfLineOffset(slot.cachedOffset()))) {
+#if USE(JSVALUE64)
scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR);
+#else
+ scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR, valueTagGPR);
+#endif
needToRestoreScratch = true;
stubJit.push(scratchGPR);
}
@@ -652,7 +666,11 @@ static void emitPutReplaceStub(
MacroAssembler::TrustedImmPtr(structure));
#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+#if USE(JSVALUE64)
scratchGPR2 = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR, scratchGPR);
+#else
+ scratchGPR2 = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR, valueTagGPR, scratchGPR);
+#endif
stubJit.push(scratchGPR2);
SpeculativeJIT::writeBarrier(stubJit, baseGPR, scratchGPR, scratchGPR2, WriteBarrierForPropertyAccess);
stubJit.pop(scratchGPR2);
@@ -722,89 +740,203 @@ static void emitPutTransitionStub(
GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
- bool needToRestoreScratch = false;
-
- ASSERT(scratchGPR != baseGPR);
+
+ ScratchRegisterAllocator allocator(stubInfo.patch.dfg.usedRegisters);
+ allocator.lock(baseGPR);
+#if USE(JSVALUE32_64)
+ allocator.lock(valueTagGPR);
+#endif
+ allocator.lock(valueGPR);
+
+ CCallHelpers stubJit(globalData);
- MacroAssembler stubJit;
+ GPRReg scratchGPR1 = allocator.allocateScratchGPR();
+ ASSERT(scratchGPR1 != baseGPR);
+ ASSERT(scratchGPR1 != valueGPR);
+
+ bool needSecondScratch = false;
+ bool needThirdScratch = false;
+#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ needSecondScratch = true;
+#endif
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity()) {
+ needSecondScratch = true;
+ needThirdScratch = true;
+ }
+
+ GPRReg scratchGPR2;
+ if (needSecondScratch) {
+ scratchGPR2 = allocator.allocateScratchGPR();
+ ASSERT(scratchGPR2 != baseGPR);
+ ASSERT(scratchGPR2 != valueGPR);
+ ASSERT(scratchGPR2 != scratchGPR1);
+ } else
+ scratchGPR2 = InvalidGPRReg;
+ GPRReg scratchGPR3;
+ if (needThirdScratch) {
+ scratchGPR3 = allocator.allocateScratchGPR();
+ ASSERT(scratchGPR3 != baseGPR);
+ ASSERT(scratchGPR3 != valueGPR);
+ ASSERT(scratchGPR3 != scratchGPR1);
+ ASSERT(scratchGPR3 != scratchGPR2);
+ } else
+ scratchGPR3 = InvalidGPRReg;
+ allocator.preserveReusedRegistersByPushing(stubJit);
+
MacroAssembler::JumpList failureCases;
- if (scratchGPR == InvalidGPRReg) {
- scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR);
- stubJit.push(scratchGPR);
- needToRestoreScratch = true;
- }
-
ASSERT(oldStructure->transitionWatchpointSetHasBeenInvalidated());
failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(oldStructure)));
addStructureTransitionCheck(
oldStructure->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
- scratchGPR);
+ scratchGPR1);
if (putKind == NotDirect) {
for (WriteBarrier<Structure>* it = prototypeChain->head(); *it; ++it) {
addStructureTransitionCheck(
(*it)->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
- scratchGPR);
+ scratchGPR1);
}
}
#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ ASSERT(needSecondScratch);
+ ASSERT(scratchGPR2 != InvalidGPRReg);
// Must always emit this write barrier as the structure transition itself requires it
- GPRReg scratch2 = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR, scratchGPR);
- stubJit.push(scratch2);
- SpeculativeJIT::writeBarrier(stubJit, baseGPR, scratchGPR, scratch2, WriteBarrierForPropertyAccess);
- stubJit.pop(scratch2);
+ SpeculativeJIT::writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, WriteBarrierForPropertyAccess);
#endif
+
+ MacroAssembler::JumpList slowPath;
+
+ bool scratchGPR1HasStorage = false;
+
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
+ size_t newSize = structure->outOfLineCapacity() * sizeof(JSValue);
+ CopiedAllocator* copiedAllocator = &globalData->heap.storageAllocator();
+
+ if (!oldStructure->outOfLineCapacity()) {
+ stubJit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR1);
+ slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize), scratchGPR1));
+ stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
+ stubJit.negPtr(scratchGPR1);
+ stubJit.addPtr(MacroAssembler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR1);
+ stubJit.subPtr(MacroAssembler::TrustedImm32(newSize), scratchGPR1);
+ } else {
+ size_t oldSize = oldStructure->outOfLineCapacity() * sizeof(JSValue);
+ ASSERT(newSize > oldSize);
+
+ // Optimistically assume that the old storage was the very last thing
+ // allocated.
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR3);
+ stubJit.loadPtr(&copiedAllocator->m_currentPayloadEnd, scratchGPR2);
+ stubJit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR1);
+ stubJit.subPtr(scratchGPR1, scratchGPR2);
+ stubJit.subPtr(MacroAssembler::TrustedImm32(oldSize), scratchGPR2);
+ MacroAssembler::Jump needFullRealloc =
+ stubJit.branchPtr(MacroAssembler::NotEqual, scratchGPR2, scratchGPR3);
+ slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize - oldSize), scratchGPR1));
+ stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
+ stubJit.move(scratchGPR2, scratchGPR1);
+ MacroAssembler::Jump doneRealloc = stubJit.jump();
+
+ needFullRealloc.link(&stubJit);
+ slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize), scratchGPR1));
+ stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
+ stubJit.negPtr(scratchGPR1);
+ stubJit.addPtr(MacroAssembler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR1);
+ stubJit.subPtr(MacroAssembler::TrustedImm32(newSize), scratchGPR1);
+ // We have scratchGPR1 = new storage, scratchGPR3 = old storage, scratchGPR2 = available
+ for (size_t offset = 0; offset < oldSize; offset += sizeof(JSValue)) {
+ stubJit.loadPtr(MacroAssembler::Address(scratchGPR3, offset), scratchGPR2);
+ stubJit.storePtr(scratchGPR2, MacroAssembler::Address(scratchGPR1, offset));
+ }
+
+ doneRealloc.link(&stubJit);
+ }
+
+ stubJit.storePtr(scratchGPR1, MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()));
+ scratchGPR1HasStorage = true;
+ }
stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
#if USE(JSVALUE64)
if (isInlineOffset(slot.cachedOffset()))
stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR);
- stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue)));
+ if (!scratchGPR1HasStorage)
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR1);
+ stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue)));
}
#elif USE(JSVALUE32_64)
if (isInlineOffset(slot.cachedOffset())) {
stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
} else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR);
- stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ if (!scratchGPR1HasStorage)
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR1);
+ stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR1, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
}
#endif
MacroAssembler::Jump success;
MacroAssembler::Jump failure;
- if (needToRestoreScratch) {
- stubJit.pop(scratchGPR);
+ if (allocator.didReuseRegisters()) {
+ allocator.restoreReusedRegistersByPopping(stubJit);
success = stubJit.jump();
failureCases.link(&stubJit);
- stubJit.pop(scratchGPR);
+ allocator.restoreReusedRegistersByPopping(stubJit);
failure = stubJit.jump();
} else
success = stubJit.jump();
-
+
+ MacroAssembler::Call operationCall;
+ MacroAssembler::Jump successInSlowPath;
+
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
+ slowPath.link(&stubJit);
+
+ allocator.restoreReusedRegistersByPopping(stubJit);
+ ScratchBuffer* scratchBuffer = globalData->scratchBufferForSize(allocator.desiredScratchBufferSize());
+ allocator.preserveUsedRegistersToScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
+#if USE(JSVALUE64)
+ stubJit.setupArgumentsWithExecState(baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR);
+#else
+ stubJit.setupArgumentsWithExecState(baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR, valueTagGPR);
+#endif
+ operationCall = stubJit.call();
+ allocator.restoreUsedRegistersFromScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
+ successInSlowPath = stubJit.jump();
+ }
+
LinkBuffer patchBuffer(*globalData, &stubJit, exec->codeBlock());
patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone));
- if (needToRestoreScratch)
+ if (allocator.didReuseRegisters())
patchBuffer.link(failure, failureLabel);
else
patchBuffer.link(failureCases, failureLabel);
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
+ patchBuffer.link(operationCall, operationReallocateStorageAndFinishPut);
+ patchBuffer.link(successInSlowPath, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone));
+ }
- stubRoutine = FINALIZE_CODE_FOR_STUB(
- patchBuffer,
- ("DFG PutById transition stub for CodeBlock %p, return point %p",
- exec->codeBlock(), stubInfo.callReturnLocation.labelAtOffset(
- stubInfo.patch.dfg.deltaCallToDone).executableAddress()));
+ stubRoutine =
+ createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("DFG PutById transition stub for CodeBlock %p, return point %p",
+ exec->codeBlock(), stubInfo.callReturnLocation.labelAtOffset(
+ stubInfo.patch.dfg.deltaCallToDone).executableAddress())),
+ *globalData,
+ exec->codeBlock()->ownerExecutable(),
+ structure->outOfLineCapacity() != oldStructure->outOfLineCapacity(),
+ structure);
}
static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
@@ -829,8 +961,11 @@ static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier
if (structure->isDictionary())
return false;
- // skip optimizing the case where we need a realloc
- if (oldStructure->outOfLineCapacity() != structure->outOfLineCapacity())
+ // Skip optimizing the case where we need a realloc, if we don't have
+ // enough registers to make it happen.
+ if (GPRInfo::numberOfRegisters < 6
+ && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity())
return false;
normalizePrototypeChain(exec, baseCell);
@@ -892,8 +1027,11 @@ static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identi
if (structure->isDictionary())
return false;
- // skip optimizing the case where we need a realloc
- if (oldStructure->outOfLineCapacity() != structure->outOfLineCapacity())
+ // Skip optimizing the case where we need a realloc, if we don't have
+ // enough registers to make it happen.
+ if (GPRInfo::numberOfRegisters < 6
+ && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity())
return false;
normalizePrototypeChain(exec, baseCell);
diff --git a/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h b/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h
new file mode 100644
index 000000000..9a65e8b7d
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGScratchRegisterAllocator_h
+#define DFGScratchRegisterAllocator_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGRegisterSet.h"
+#include "MacroAssembler.h"
+
+namespace JSC { namespace DFG {
+
+// This class provides a low-level register allocator for use in stubs.
+
+class ScratchRegisterAllocator {
+public:
+ ScratchRegisterAllocator(const RegisterSet& usedRegisters)
+ : m_usedRegisters(usedRegisters)
+ , m_didReuseRegisters(false)
+ {
+ }
+
+ template<typename T>
+ void lock(T reg) { m_lockedRegisters.set(reg); }
+
+ template<typename BankInfo>
+ typename BankInfo::RegisterType allocateScratch()
+ {
+ // First try to allocate a register that is totally free.
+ for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
+ typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
+ if (!m_lockedRegisters.get(reg)
+ && !m_usedRegisters.get(reg)
+ && !m_scratchRegisters.get(reg)) {
+ m_scratchRegisters.set(reg);
+ return reg;
+ }
+ }
+
+ // Since that failed, try to allocate a register that is not yet
+ // locked or used for scratch.
+ for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
+ typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
+ if (!m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg)) {
+ m_scratchRegisters.set(reg);
+ m_didReuseRegisters = true;
+ return reg;
+ }
+ }
+
+ // We failed.
+ CRASH();
+ // Make some silly compilers happy.
+ return static_cast<typename BankInfo::RegisterType>(-1);
+ }
+
+ GPRReg allocateScratchGPR() { return allocateScratch<GPRInfo>(); }
+ FPRReg allocateScratchFPR() { return allocateScratch<FPRInfo>(); }
+
+ bool didReuseRegisters() const
+ {
+ return m_didReuseRegisters;
+ }
+
+ void preserveReusedRegistersByPushing(MacroAssembler& jit)
+ {
+ if (!m_didReuseRegisters)
+ return;
+
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.getFPRByIndex(i)) {
+ jit.subPtr(MacroAssembler::TrustedImm32(8), MacroAssembler::stackPointerRegister);
+ jit.storeDouble(FPRInfo::toRegister(i), MacroAssembler::stackPointerRegister);
+ }
+ }
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+ if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.getGPRByIndex(i))
+ jit.push(GPRInfo::toRegister(i));
+ }
+ }
+
+ void restoreReusedRegistersByPopping(MacroAssembler& jit)
+ {
+ if (!m_didReuseRegisters)
+ return;
+
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.getGPRByIndex(i))
+ jit.pop(GPRInfo::toRegister(i));
+ }
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.getFPRByIndex(i)) {
+ jit.loadDouble(MacroAssembler::stackPointerRegister, FPRInfo::toRegister(i));
+ jit.addPtr(MacroAssembler::TrustedImm32(8), MacroAssembler::stackPointerRegister);
+ }
+ }
+ }
+
+ unsigned desiredScratchBufferSize() const { return m_usedRegisters.numberOfSetRegisters() * sizeof(JSValue); }
+
+ void preserveUsedRegistersToScratchBuffer(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg)
+ {
+ unsigned count = 0;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_usedRegisters.getGPRByIndex(i))
+ jit.storePtr(GPRInfo::toRegister(i), scratchBuffer->m_buffer + (count++));
+ if (scratchGPR == InvalidGPRReg && !m_lockedRegisters.getGPRByIndex(i) && !m_scratchRegisters.getGPRByIndex(i))
+ scratchGPR = GPRInfo::toRegister(i);
+ }
+ ASSERT(scratchGPR != InvalidGPRReg);
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (m_usedRegisters.getFPRByIndex(i)) {
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->m_buffer + (count++)), scratchGPR);
+ jit.storeDouble(FPRInfo::toRegister(i), scratchGPR);
+ }
+ }
+ ASSERT(count * sizeof(JSValue) == desiredScratchBufferSize());
+
+ jit.move(MacroAssembler::TrustedImmPtr(&scratchBuffer->m_activeLength), scratchGPR);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(static_cast<size_t>(count * sizeof(JSValue))), scratchGPR);
+ }
+
+ void restoreUsedRegistersFromScratchBuffer(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg)
+ {
+ if (scratchGPR == InvalidGPRReg) {
+ // Find a scratch register.
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_lockedRegisters.getGPRByIndex(i) || m_scratchRegisters.getGPRByIndex(i))
+ continue;
+ scratchGPR = GPRInfo::toRegister(i);
+ break;
+ }
+ }
+ ASSERT(scratchGPR != InvalidGPRReg);
+
+ jit.move(MacroAssembler::TrustedImmPtr(&scratchBuffer->m_activeLength), scratchGPR);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(0), scratchGPR);
+
+ // Restore double registers first.
+ unsigned count = m_usedRegisters.numberOfSetGPRs();
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (m_usedRegisters.getFPRByIndex(i)) {
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->m_buffer + (count++)), scratchGPR);
+ jit.loadDouble(scratchGPR, FPRInfo::toRegister(i));
+ }
+ }
+
+ count = 0;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_usedRegisters.getGPRByIndex(i))
+ jit.loadPtr(scratchBuffer->m_buffer + (count++), GPRInfo::toRegister(i));
+ }
+ }
+
+private:
+ RegisterSet m_usedRegisters;
+ RegisterSet m_lockedRegisters;
+ RegisterSet m_scratchRegisters;
+ bool m_didReuseRegisters;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGScratchRegisterAllocator_h
+
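
Note: ScratchRegisterAllocator replaces the single hard-coded scratchGPR in the stub generators. It hands out scratch registers that avoid both the recorded used set and any explicitly locked registers, and it spills and restores the registers it is forced to reuse. A condensed sketch of the intended usage, following emitPutTransitionStub() in the DFGRepatch.cpp hunk above (illustrative only):

    ScratchRegisterAllocator allocator(stubInfo.patch.dfg.usedRegisters);
    allocator.lock(baseGPR);     // never hand out the registers holding live operands
    allocator.lock(valueGPR);

    GPRReg scratchGPR1 = allocator.allocateScratchGPR();   // a free register if possible,
                                                           // otherwise a reused live one
    allocator.preserveReusedRegistersByPushing(stubJit);   // spill anything we reused

    // ... emit the fast-path stub code using scratchGPR1 ...

    allocator.restoreReusedRegistersByPopping(stubJit);    // undo the spills before exiting
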
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index c6ec62129..e8824b832 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -1911,8 +1911,8 @@ void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor&
void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements, TypedArraySignedness signedness, TypedArrayRounding rounding)
{
- Edge baseUse = node.child1();
- Edge valueUse = node.child3();
+ Edge baseUse = m_jit.graph().varArgChild(node, 0);
+ Edge valueUse = m_jit.graph().varArgChild(node, 2);
if (speculationRequirements != NoTypedArrayTypeSpecCheck)
speculationCheck(BadType, JSValueSource::unboxedCell(base), baseUse, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(base, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
@@ -2052,8 +2052,8 @@ void SpeculativeJIT::compileGetByValOnFloatTypedArray(const TypedArrayDescriptor
void SpeculativeJIT::compilePutByValForFloatTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements)
{
- Edge baseUse = node.child1();
- Edge valueUse = node.child3();
+ Edge baseUse = m_jit.graph().varArgChild(node, 0);
+ Edge valueUse = m_jit.graph().varArgChild(node, 2);
SpeculateDoubleOperand valueOp(this, valueUse);
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
index 28d8033cb..487addd7f 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -272,6 +272,22 @@ public:
{
use(nodeUse.index());
}
+
+ RegisterSet usedRegisters()
+ {
+ RegisterSet result;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ GPRReg gpr = GPRInfo::toRegister(i);
+ if (m_gprs.isInUse(gpr))
+ result.set(gpr);
+ }
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ FPRReg fpr = FPRInfo::toRegister(i);
+ if (m_fprs.isInUse(fpr))
+ result.set(fpr);
+ }
+ return result;
+ }
static void markCellCard(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2);
static void writeBarrier(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, WriteBarrierUseKind);
@@ -942,10 +958,10 @@ public:
void nonSpeculativeUInt32ToNumber(Node&);
#if USE(JSVALUE64)
- void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#elif USE(JSVALUE32_64)
- void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
index ec2377389..ed98e0800 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
@@ -503,7 +503,7 @@ void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
}
-void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
+void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
JITCompiler::DataLabelPtr structureToCompare;
JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
@@ -553,7 +553,7 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNon
codeOrigin, structureToCompare, structureCheck, propertyStorageLoad,
tagLoadWithPatch, payloadLoadWithPatch, slowPath.get(), doneLabel,
safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(resultTagGPR),
- safeCast<int8_t>(resultPayloadGPR), safeCast<int8_t>(scratchGPR),
+ safeCast<int8_t>(resultPayloadGPR), usedRegisters(),
spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
addSlowPathGenerator(slowPath.release());
}
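With the PropertyAccessRecord carrying a full register set instead of one scratch register index, the repatching side (see DFGRepatch.cpp and the new DFGScratchRegisterAllocator.h in the diffstat) can pick any register that is not marked as in use when it builds the stub. A hedged sketch of that selection, with illustrative register numbering rather than WebKit's GPRInfo:

    // Sketch: choose a scratch register outside the recorded used set.
    #include <cstdint>

    static const unsigned numberOfRegistersSketch = 16;

    int allocateScratchSketch(uint32_t usedRegisterBits)
    {
        for (unsigned reg = 0; reg < numberOfRegistersSketch; ++reg) {
            if (!(usedRegisterBits & (1u << reg)))
                return static_cast<int>(reg);   // free register, no spill needed
        }
        return -1;  // every register is live; the stub must spill one
    }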
@@ -595,6 +595,11 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR,
slowCases, this, optimizedCall, NoResult, valueTagGPR, valuePayloadGPR,
basePayloadGPR, identifier(identifierNumber));
}
+ RegisterSet currentlyUsedRegisters = usedRegisters();
+ currentlyUsedRegisters.clear(scratchGPR);
+ ASSERT(currentlyUsedRegisters.get(basePayloadGPR));
+ ASSERT(currentlyUsedRegisters.get(valueTagGPR));
+ ASSERT(currentlyUsedRegisters.get(valuePayloadGPR));
m_jit.addPropertyAccess(
PropertyAccessRecord(
codeOrigin, structureToCompare, structureCheck, propertyStorageLoad,
@@ -602,7 +607,7 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR,
JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()),
slowPath.get(), doneLabel, safeCast<int8_t>(basePayloadGPR),
safeCast<int8_t>(valueTagGPR), safeCast<int8_t>(valuePayloadGPR),
- safeCast<int8_t>(scratchGPR)));
+ usedRegisters()));
addSlowPathGenerator(slowPath.release());
}
@@ -2471,17 +2476,21 @@ void SpeculativeJIT::compile(Node& node)
}
case PutByVal: {
- if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ Edge child1 = m_jit.graph().varArgChild(node, 0);
+ Edge child2 = m_jit.graph().varArgChild(node, 1);
+ Edge child3 = m_jit.graph().varArgChild(node, 2);
+
+ if (!at(child1).prediction() || !at(child2).prediction()) {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
- if (!at(node.child2()).shouldSpeculateInteger()
- || !isActionableMutableArraySpeculation(at(node.child1()).prediction())
- || at(node.child1()).shouldSpeculateArguments()) {
- SpeculateCellOperand base(this, node.child1()); // Save a register, speculate cell. We'll probably be right.
- JSValueOperand property(this, node.child2());
- JSValueOperand value(this, node.child3());
+ if (!at(child2).shouldSpeculateInteger()
+ || !isActionableMutableArraySpeculation(at(child1).prediction())
+ || at(child1).shouldSpeculateArguments()) {
+ SpeculateCellOperand base(this, child1); // Save a register, speculate cell. We'll probably be right.
+ JSValueOperand property(this, child2);
+ JSValueOperand value(this, child3);
GPRReg baseGPR = base.gpr();
GPRReg propertyTagGPR = property.tagGPR();
GPRReg propertyPayloadGPR = property.payloadGPR();
@@ -2495,74 +2504,74 @@ void SpeculativeJIT::compile(Node& node)
break;
}
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
- if (at(node.child1()).shouldSpeculateInt8Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ SpeculateCellOperand base(this, child1);
+ SpeculateStrictInt32Operand property(this, child2);
+ if (at(child1).shouldSpeculateInt8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt16Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), isInt16ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (at(child1).shouldSpeculateInt16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), isInt16ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt32Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), isInt32ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (at(child1).shouldSpeculateInt32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), isInt32ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (at(child1).shouldSpeculateUint8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ClampedArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray, ClampRounding);
+ if (at(child1).shouldSpeculateUint8ClampedArray()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ClampedArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray, ClampRounding);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint16Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (at(child1).shouldSpeculateUint16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint32Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), isUint32ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (at(child1).shouldSpeculateUint32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), isUint32ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat32Array()) {
- compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), isFloat32ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (at(child1).shouldSpeculateFloat32Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), isFloat32ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat64Array()) {
- compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), isFloat64ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (at(child1).shouldSpeculateFloat64Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), isFloat64ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
if (!m_compileOkay)
return;
break;
}
- ASSERT(at(node.child1()).shouldSpeculateArray());
+ ASSERT(at(child1).shouldSpeculateArray());
- JSValueOperand value(this, node.child3());
+ JSValueOperand value(this, child3);
GPRTemporary scratch(this);
// Map base, property & value into registers, allocate a scratch register.
@@ -2575,12 +2584,12 @@ void SpeculativeJIT::compile(Node& node)
if (!m_compileOkay)
return;
- writeBarrier(baseReg, valueTagReg, node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+ writeBarrier(baseReg, valueTagReg, child3, WriteBarrierForPropertyAccess, scratchReg);
// Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
// If we have predicted the base to be type array, we can skip the check.
- if (!isArraySpeculation(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ if (!isArraySpeculation(m_state.forNode(child1).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), child1, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
base.use();
property.use();
@@ -2620,89 +2629,93 @@ void SpeculativeJIT::compile(Node& node)
}
case PutByValAlias: {
- if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ Edge child1 = m_jit.graph().varArgChild(node, 0);
+ Edge child2 = m_jit.graph().varArgChild(node, 1);
+ Edge child3 = m_jit.graph().varArgChild(node, 2);
+
+ if (!at(child1).prediction() || !at(child2).prediction()) {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
- ASSERT(isActionableMutableArraySpeculation(at(node.child1()).prediction()));
- ASSERT(at(node.child2()).shouldSpeculateInteger());
+ ASSERT(isActionableMutableArraySpeculation(at(child1).prediction()));
+ ASSERT(at(child2).shouldSpeculateInteger());
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
+ SpeculateCellOperand base(this, child1);
+ SpeculateStrictInt32Operand property(this, child2);
- if (at(node.child1()).shouldSpeculateInt8Array()) {
+ if (at(child1).shouldSpeculateInt8Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt16Array()) {
+ if (at(child1).shouldSpeculateInt16Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt32Array()) {
+ if (at(child1).shouldSpeculateInt32Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8Array()) {
+ if (at(child1).shouldSpeculateUint8Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
+ if (at(child1).shouldSpeculateUint8ClampedArray()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray, ClampRounding);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint16Array()) {
+ if (at(child1).shouldSpeculateUint16Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint32Array()) {
+ if (at(child1).shouldSpeculateUint32Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat32Array()) {
+ if (at(child1).shouldSpeculateFloat32Array()) {
compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), NoTypedArraySpecCheck);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat64Array()) {
+ if (at(child1).shouldSpeculateFloat64Array()) {
compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), NoTypedArraySpecCheck);
if (!m_compileOkay)
return;
break;
}
- ASSERT(at(node.child1()).shouldSpeculateArray());
+ ASSERT(at(child1).shouldSpeculateArray());
- JSValueOperand value(this, node.child3());
+ JSValueOperand value(this, child3);
GPRTemporary scratch(this, base);
GPRReg baseReg = base.gpr();
GPRReg scratchReg = scratch.gpr();
- writeBarrier(baseReg, value.tagGPR(), node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+ writeBarrier(baseReg, value.tagGPR(), child3, WriteBarrierForPropertyAccess, scratchReg);
// Get the array storage.
GPRReg storageReg = scratchReg;
@@ -3290,16 +3303,10 @@ void SpeculativeJIT::compile(Node& node)
GPRReg baseGPR = base.gpr();
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg scratchGPR;
-
- if (resultTagGPR == baseGPR)
- scratchGPR = resultPayloadGPR;
- else
- scratchGPR = resultTagGPR;
-
+
base.use();
- cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber());
+ cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node.identifierNumber());
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3313,18 +3320,12 @@ void SpeculativeJIT::compile(Node& node)
GPRReg basePayloadGPR = base.payloadGPR();
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg scratchGPR;
-
- if (resultTagGPR == basePayloadGPR)
- scratchGPR = resultPayloadGPR;
- else
- scratchGPR = resultTagGPR;
base.use();
JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag));
- cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), notCell);
+ cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node.identifierNumber(), notCell);
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3346,13 +3347,11 @@ void SpeculativeJIT::compile(Node& node)
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg scratchGPR = selectScratchGPR(baseGPR, resultTagGPR, resultPayloadGPR);
-
base.use();
flushRegisters();
- cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill);
+ cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill);
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3367,15 +3366,13 @@ void SpeculativeJIT::compile(Node& node)
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg scratchGPR = selectScratchGPR(baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR);
-
base.use();
flushRegisters();
JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag));
- cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), notCell, DontSpill);
+ cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node.identifierNumber(), notCell, DontSpill);
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
index b5058e35a..9e468e758 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
@@ -492,7 +492,7 @@ void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
jsValueResult(result.gpr(), m_compileIndex);
}
-void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
+void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
JITCompiler::DataLabelPtr structureToCompare;
JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
@@ -520,13 +520,9 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
PropertyAccessRecord(
codeOrigin, structureToCompare, structureCheck, propertyStorageLoad, loadWithPatch,
slowPath.get(), doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(resultGPR),
- safeCast<int8_t>(scratchGPR),
+ usedRegisters(),
spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
addSlowPathGenerator(slowPath.release());
-
-
- if (scratchGPR != resultGPR && scratchGPR != InvalidGPRReg && spillMode == NeedToSpill)
- unlock(scratchGPR);
}
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
@@ -568,11 +564,15 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
slowCases, this, optimizedCall, NoResult, valueGPR, baseGPR,
identifier(identifierNumber));
}
+ RegisterSet currentlyUsedRegisters = usedRegisters();
+ currentlyUsedRegisters.clear(scratchGPR);
+ ASSERT(currentlyUsedRegisters.get(baseGPR));
+ ASSERT(currentlyUsedRegisters.get(valueGPR));
m_jit.addPropertyAccess(
PropertyAccessRecord(
codeOrigin, structureToCompare, structureCheck, propertyStorageLoad,
JITCompiler::DataLabelCompact(storeWithPatch.label()), slowPath.get(), doneLabel,
- safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), safeCast<int8_t>(scratchGPR)));
+ safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), currentlyUsedRegisters));
addSlowPathGenerator(slowPath.release());
}
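For the put case, the snapshot is taken while the base and value operands are still locked, and the stub-owned scratch register is dropped from it; the asserts above check exactly that. An illustrative standalone version of the same check, with placeholder names and numbering rather than WebKit's API:

    // Drop the stub-owned scratch from the snapshot, then verify the operand
    // registers are still marked live.
    #include <cassert>
    #include <cstdint>

    void validatePutByIdSnapshot(uint32_t usedBits, unsigned scratchReg,
                                 unsigned baseReg, unsigned valueReg)
    {
        usedBits &= ~(1u << scratchReg);     // the stub owns the scratch register
        assert(usedBits & (1u << baseReg));  // base operand must still be locked
        assert(usedBits & (1u << valueReg)); // value operand must still be locked
    }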
@@ -2501,15 +2501,19 @@ void SpeculativeJIT::compile(Node& node)
}
case PutByVal: {
- if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ Edge child1 = m_jit.graph().varArgChild(node, 0);
+ Edge child2 = m_jit.graph().varArgChild(node, 1);
+ Edge child3 = m_jit.graph().varArgChild(node, 2);
+
+ if (!at(child1).prediction() || !at(child2).prediction()) {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
- if (!at(node.child2()).shouldSpeculateInteger() || !isActionableMutableArraySpeculation(at(node.child1()).prediction())) {
- JSValueOperand arg1(this, node.child1());
- JSValueOperand arg2(this, node.child2());
- JSValueOperand arg3(this, node.child3());
+ if (!at(child2).shouldSpeculateInteger() || !isActionableMutableArraySpeculation(at(child1).prediction())) {
+ JSValueOperand arg1(this, child1);
+ JSValueOperand arg2(this, child2);
+ JSValueOperand arg3(this, child3);
GPRReg arg1GPR = arg1.gpr();
GPRReg arg2GPR = arg2.gpr();
GPRReg arg3GPR = arg3.gpr();
@@ -2521,12 +2525,12 @@ void SpeculativeJIT::compile(Node& node)
break;
}
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
- if (at(node.child1()).shouldSpeculateArguments()) {
- JSValueOperand value(this, node.child3());
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
+ SpeculateCellOperand base(this, child1);
+ SpeculateStrictInt32Operand property(this, child2);
+ if (at(child1).shouldSpeculateArguments()) {
+ JSValueOperand value(this, child3);
+ SpeculateCellOperand base(this, child1);
+ SpeculateStrictInt32Operand property(this, child2);
GPRTemporary scratch(this);
GPRTemporary scratch2(this);
@@ -2539,9 +2543,9 @@ void SpeculativeJIT::compile(Node& node)
if (!m_compileOkay)
return;
- if (!isArgumentsSpeculation(m_state.forNode(node.child1()).m_type)) {
+ if (!isArgumentsSpeculation(m_state.forNode(child1).m_type)) {
speculationCheck(
- BadType, JSValueSource::unboxedCell(baseReg), node.child1(),
+ BadType, JSValueSource::unboxedCell(baseReg), child1,
m_jit.branchPtr(
MacroAssembler::NotEqual,
MacroAssembler::Address(baseReg, JSCell::classInfoOffset()),
@@ -2582,70 +2586,70 @@ void SpeculativeJIT::compile(Node& node)
break;
}
- if (at(node.child1()).shouldSpeculateInt8Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (at(child1).shouldSpeculateInt8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt16Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), isInt16ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (at(child1).shouldSpeculateInt16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), isInt16ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt32Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), isInt32ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (at(child1).shouldSpeculateInt32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), isInt32ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (at(child1).shouldSpeculateUint8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ClampedArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray, ClampRounding);
+ if (at(child1).shouldSpeculateUint8ClampedArray()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ClampedArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray, ClampRounding);
break;
}
- if (at(node.child1()).shouldSpeculateUint16Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (at(child1).shouldSpeculateUint16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint32Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), isUint32ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (at(child1).shouldSpeculateUint32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), isUint32ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat32Array()) {
- compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), isFloat32ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (at(child1).shouldSpeculateFloat32Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), isFloat32ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat64Array()) {
- compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), isFloat64ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (at(child1).shouldSpeculateFloat64Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), isFloat64ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
if (!m_compileOkay)
return;
break;
}
- ASSERT(at(node.child1()).shouldSpeculateArray());
+ ASSERT(at(child1).shouldSpeculateArray());
- JSValueOperand value(this, node.child3());
+ JSValueOperand value(this, child3);
GPRTemporary scratch(this);
// Map base, property & value into registers, allocate a scratch register.
@@ -2657,12 +2661,12 @@ void SpeculativeJIT::compile(Node& node)
if (!m_compileOkay)
return;
- writeBarrier(baseReg, value.gpr(), node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+ writeBarrier(baseReg, value.gpr(), child3, WriteBarrierForPropertyAccess, scratchReg);
// Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
// If we have predicted the base to be type array, we can skip the check.
- if (!isArraySpeculation(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueRegs(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ if (!isArraySpeculation(m_state.forNode(child1).m_type))
+ speculationCheck(BadType, JSValueRegs(baseReg), child1, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
base.use();
property.use();
@@ -2701,88 +2705,92 @@ void SpeculativeJIT::compile(Node& node)
}
case PutByValAlias: {
- if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ Edge child1 = m_jit.graph().varArgChild(node, 0);
+ Edge child2 = m_jit.graph().varArgChild(node, 1);
+ Edge child3 = m_jit.graph().varArgChild(node, 2);
+
+ if (!at(child1).prediction() || !at(child2).prediction()) {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
- ASSERT(isActionableMutableArraySpeculation(at(node.child1()).prediction()));
- ASSERT(at(node.child2()).shouldSpeculateInteger());
+ ASSERT(isActionableMutableArraySpeculation(at(child1).prediction()));
+ ASSERT(at(child2).shouldSpeculateInteger());
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
- if (at(node.child1()).shouldSpeculateInt8Array()) {
+ SpeculateCellOperand base(this, child1);
+ SpeculateStrictInt32Operand property(this, child2);
+ if (at(child1).shouldSpeculateInt8Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt16Array()) {
+ if (at(child1).shouldSpeculateInt16Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt32Array()) {
+ if (at(child1).shouldSpeculateInt32Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8Array()) {
+ if (at(child1).shouldSpeculateUint8Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
+ if (at(child1).shouldSpeculateUint8ClampedArray()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray, ClampRounding);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint16Array()) {
+ if (at(child1).shouldSpeculateUint16Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint32Array()) {
+ if (at(child1).shouldSpeculateUint32Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat32Array()) {
+ if (at(child1).shouldSpeculateFloat32Array()) {
compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), NoTypedArraySpecCheck);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat64Array()) {
+ if (at(child1).shouldSpeculateFloat64Array()) {
compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), NoTypedArraySpecCheck);
if (!m_compileOkay)
return;
break;
}
- ASSERT(at(node.child1()).shouldSpeculateArray());
+ ASSERT(at(child1).shouldSpeculateArray());
- JSValueOperand value(this, node.child3());
+ JSValueOperand value(this, child3);
GPRTemporary scratch(this);
GPRReg baseReg = base.gpr();
GPRReg scratchReg = scratch.gpr();
- writeBarrier(base.gpr(), value.gpr(), node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+ writeBarrier(base.gpr(), value.gpr(), child3, WriteBarrierForPropertyAccess, scratchReg);
// Get the array storage.
GPRReg storageReg = scratchReg;
@@ -3319,16 +3327,10 @@ void SpeculativeJIT::compile(Node& node)
GPRReg baseGPR = base.gpr();
GPRReg resultGPR = result.gpr();
- GPRReg scratchGPR;
-
- if (resultGPR == baseGPR)
- scratchGPR = tryAllocate();
- else
- scratchGPR = resultGPR;
base.use();
- cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber());
+ cachedGetById(node.codeOrigin, baseGPR, resultGPR, node.identifierNumber());
jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3339,18 +3341,12 @@ void SpeculativeJIT::compile(Node& node)
GPRReg baseGPR = base.gpr();
GPRReg resultGPR = result.gpr();
- GPRReg scratchGPR;
-
- if (resultGPR == baseGPR)
- scratchGPR = tryAllocate();
- else
- scratchGPR = resultGPR;
base.use();
JITCompiler::Jump notCell = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
- cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber(), notCell);
+ cachedGetById(node.codeOrigin, baseGPR, resultGPR, node.identifierNumber(), notCell);
jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
@@ -3371,13 +3367,11 @@ void SpeculativeJIT::compile(Node& node)
GPRReg resultGPR = result.gpr();
- GPRReg scratchGPR = selectScratchGPR(baseGPR, resultGPR);
-
base.use();
flushRegisters();
- cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill);
+ cachedGetById(node.codeOrigin, baseGPR, resultGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill);
jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3389,14 +3383,12 @@ void SpeculativeJIT::compile(Node& node)
GPRResult result(this);
GPRReg resultGPR = result.gpr();
- GPRReg scratchGPR = selectScratchGPR(baseGPR, resultGPR);
-
base.use();
flushRegisters();
JITCompiler::Jump notCell = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
- cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber(), notCell, DontSpill);
+ cachedGetById(node.codeOrigin, baseGPR, resultGPR, node.identifierNumber(), notCell, DontSpill);
jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
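The GetById hunks above drop the eager scratch selection (the resultTag/resultPayload reuse, the tryAllocate() and selectScratchGPR() calls) because a scratch register can now be derived from the recorded used-register set when the stub is generated. A small usage example of the allocateScratchSketch() function sketched earlier, assuming the base and result operands happen to occupy registers 3 and 5 in that illustrative numbering:

    // Usage of the earlier allocateScratchSketch() sketch.
    #include <cstdint>
    #include <cstdio>

    int allocateScratchSketch(uint32_t usedRegisterBits);  // defined in the sketch above

    int main()
    {
        uint32_t used = (1u << 3) | (1u << 5);  // only base and result are live
        std::printf("scratch = %d\n", allocateScratchSketch(used));  // prints 0
        return 0;
    }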