author    | Simon Hausmann <simon.hausmann@digia.com> | 2012-10-16 14:56:46 +0200
committer | Simon Hausmann <simon.hausmann@digia.com> | 2012-10-16 14:57:30 +0200
commit    | b297e0fa5c217c9467033b7c8b46891a52870120 (patch)
tree      | 43fc14689295e9e64f2719d05aad94e3049f6cd7 /Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
parent    | 69d517dbfa69903d8593cc1737f0474b21e3251e (diff)
download  | qtwebkit-b297e0fa5c217c9467033b7c8b46891a52870120.tar.gz
Revert "Imported WebKit commit 0dc6cd75e1d4836eaffbb520be96fac4847cc9d2 (http://svn.webkit.org/repository/webkit/trunk@131300)"
This reverts commit 5466563f4b5b6b86523e3f89bb7f77e5b5270c78.
Caused OOM issues on some CI machines :(
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp')
-rw-r--r-- | Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp | 560
1 files changed, 116 insertions, 444 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
index 0928dfa58..d7cec27c1 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
@@ -29,7 +29,6 @@
 #if ENABLE(DFG_JIT)
 
 #include "Arguments.h"
-#include "DFGCallArrayAllocatorSlowPathGenerator.h"
 #include "DFGSlowPathGenerator.h"
 
 namespace JSC { namespace DFG {
@@ -73,7 +72,7 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat
         }
 
         // Since we statically know that we're filling an integer, and values
-        // in the JSStack are boxed, this must be DataFormatJSInteger.
+        // in the RegisterFile are boxed, this must be DataFormatJSInteger.
         // We will check this with a jitAssert below.
         info.fillJSValue(*m_stream, gpr, DataFormatJSInteger);
         unlock(gpr);
@@ -1006,9 +1005,9 @@ void SpeculativeJIT::emitCall(Node& node)
     // arguments.
     int numPassedArgs = node.numChildren() - 1;
 
-    m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(JSStack::ArgumentCount));
-    m_jit.storePtr(GPRInfo::callFrameRegister, callFrameSlot(JSStack::CallerFrame));
-    m_jit.storePtr(calleeGPR, callFrameSlot(JSStack::Callee));
+    m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(RegisterFile::ArgumentCount));
+    m_jit.storePtr(GPRInfo::callFrameRegister, callFrameSlot(RegisterFile::CallerFrame));
+    m_jit.storePtr(calleeGPR, callFrameSlot(RegisterFile::Callee));
 
     for (int i = 0; i < numPassedArgs; i++) {
         Edge argEdge = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i];
@@ -1034,7 +1033,7 @@ void SpeculativeJIT::emitCall(Node& node)
     slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(JSValue::encode(JSValue())));
 
     m_jit.loadPtr(MacroAssembler::Address(calleeGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultGPR);
-    m_jit.storePtr(resultGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));
+    m_jit.storePtr(resultGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain));
 
     CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
     JITCompiler::Call fastCall = m_jit.nearCall();
@@ -2110,111 +2109,10 @@ void SpeculativeJIT::emitBranch(Node& node)
     }
 }
 
-MacroAssembler::JumpList SpeculativeJIT::compileContiguousGetByVal(Node&, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultReg)
-{
-    MacroAssembler::JumpList slowCases;
-    slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
-
-    m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr), resultReg);
-    slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultReg));
-
-    return slowCases;
-}
-
-MacroAssembler::JumpList SpeculativeJIT::compileArrayStorageGetByVal(Node&, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultReg)
-{
-    MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
-
-    m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), resultReg);
-    MacroAssembler::Jump hole = m_jit.branchTestPtr(MacroAssembler::Zero, resultReg);
-
-    MacroAssembler::JumpList slowCases;
-    slowCases.append(outOfBounds);
-    slowCases.append(hole);
-    return slowCases;
-}
-
-MacroAssembler::JumpList SpeculativeJIT::compileContiguousPutByVal(Node& node, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueReg, GPRReg tempReg)
-{
-    Array::Mode arrayMode = node.arrayMode();
-
-    MacroAssembler::JumpList slowCases;
-
-    if (!mayStoreToTail(arrayMode)) {
-        speculationCheck(
-            Uncountable, JSValueRegs(), NoNode,
-            m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
-    } else {
-        MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
-
-        slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength())));
-
-        if (isInBoundsAccess(arrayMode))
-            speculationCheck(Uncountable, JSValueRegs(), NoNode, slowCases);
-
-        m_jit.add32(TrustedImm32(1), propertyReg, tempReg);
-        m_jit.store32(tempReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
-
-        inBounds.link(&m_jit);
-    }
-
-    m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr));
-
-    if (isInBoundsAccess(arrayMode))
-        return MacroAssembler::JumpList();
-    return slowCases;
-}
-
-MacroAssembler::JumpList SpeculativeJIT::compileArrayStoragePutByVal(Node& node, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueReg, GPRReg tempReg)
-{
-    Array::Mode arrayMode = node.arrayMode();
-
-    MacroAssembler::JumpList slowCases;
-
-    MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
-    if (isInBoundsAccess(arrayMode))
-        speculationCheck(OutOfBounds, JSValueRegs(), NoNode, beyondArrayBounds);
-    else
-        slowCases.append(beyondArrayBounds);
-
-    // Check if we're writing to a hole; if so increment m_numValuesInVector.
-    if (!mayStoreToHole(arrayMode)) {
-        // This is uncountable because if we take this exit, then the baseline JIT
-        // will immediately count the hole store. So there is no need for exit
-        // profiling.
-        speculationCheck(
-            Uncountable, JSValueRegs(), NoNode,
-            m_jit.branchTestPtr(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
-    } else {
-        MacroAssembler::Jump notHoleValue = m_jit.branchTestPtr(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
-        if (isSlowPutAccess(arrayMode)) {
-            // This is sort of strange. If we wanted to optimize this code path, we would invert
-            // the above branch. But it's simply not worth it since this only happens if we're
-            // already having a bad time.
-            slowCases.append(m_jit.jump());
-        } else {
-            m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset()));
-
-            // If we're writing to a hole we might be growing the array;
-            MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
-            m_jit.add32(TrustedImm32(1), propertyReg, tempReg);
-            m_jit.store32(tempReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
-
-            lengthDoesNotNeedUpdate.link(&m_jit);
-        }
-        notHoleValue.link(&m_jit);
-    }
-
-    // Store the value to the array.
-    m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
-
-    return slowCases;
-}
-
 void SpeculativeJIT::compile(Node& node)
 {
     NodeType op = node.op();
-    
+
     switch (op) {
     case JSConstant:
         initConstantInfo(m_compileIndex);
@@ -2304,7 +2202,7 @@ void SpeculativeJIT::compile(Node& node)
         // SetLocal doubles as a hint as to where a node will be stored and
         // as a speculation point. So before we speculate make sure that we
         // know where the child of this node needs to go in the virtual
-        // stack.
+        // register file.
         compileMovHint(node);
 
         // As far as OSR is concerned, we're on the bytecode index corresponding
@@ -2333,7 +2231,7 @@ void SpeculativeJIT::compile(Node& node)
         // this SetLocal should not have executed. But for op_post_inc, it's just
         // fine, because this SetLocal's local (i.e. the LHS in a x = y++
         // statement) would be dead anyway - so the fact that DFG would have
-        // already made the assignment, and baked it into the stack during
+        // already made the assignment, and baked it into the register file during
         // OSR exit, would not be visible to the old JIT in any way.
 
         m_codeOriginForOSR = nextNode->codeOrigin;
@@ -2343,9 +2241,9 @@ void SpeculativeJIT::compile(Node& node)
             m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local()));
             noResult(m_compileIndex);
             // Indicate that it's no longer necessary to retrieve the value of
-            // this bytecode variable from registers or other locations in the stack,
+            // this bytecode variable from registers or other locations in the register file,
             // but that it is stored as a double.
-            recordSetLocal(node.local(), ValueSource(DoubleInJSStack));
+            recordSetLocal(node.local(), ValueSource(DoubleInRegisterFile));
             break;
         }
@@ -2354,7 +2252,7 @@ void SpeculativeJIT::compile(Node& node)
             SpeculateIntegerOperand value(this, node.child1());
             m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local()));
             noResult(m_compileIndex);
-            recordSetLocal(node.local(), ValueSource(Int32InJSStack));
+            recordSetLocal(node.local(), ValueSource(Int32InRegisterFile));
             break;
         }
         if (isCellSpeculation(predictedType)) {
@@ -2362,14 +2260,14 @@ void SpeculativeJIT::compile(Node& node)
             GPRReg cellGPR = cell.gpr();
             m_jit.storePtr(cellGPR, JITCompiler::addressFor(node.local()));
             noResult(m_compileIndex);
-            recordSetLocal(node.local(), ValueSource(CellInJSStack));
+            recordSetLocal(node.local(), ValueSource(CellInRegisterFile));
             break;
         }
         if (isBooleanSpeculation(predictedType)) {
             SpeculateBooleanOperand boolean(this, node.child1());
             m_jit.storePtr(boolean.gpr(), JITCompiler::addressFor(node.local()));
             noResult(m_compileIndex);
-            recordSetLocal(node.local(), ValueSource(BooleanInJSStack));
+            recordSetLocal(node.local(), ValueSource(BooleanInRegisterFile));
             break;
         }
     }
@@ -2378,7 +2276,7 @@ void SpeculativeJIT::compile(Node& node)
 
         m_jit.storePtr(value.gpr(), JITCompiler::addressFor(node.local()));
         noResult(m_compileIndex);
-        recordSetLocal(node.local(), ValueSource(ValueInJSStack));
+        recordSetLocal(node.local(), ValueSource(ValueInRegisterFile));
 
         // If we're storing an arguments object that has been optimized away,
         // our variable event stream for OSR exit now reflects the optimized
@@ -2697,51 +2595,6 @@ void SpeculativeJIT::compile(Node& node)
         jsValueResult(result.gpr(), m_compileIndex);
         break;
     }
-    case IN_BOUNDS_CONTIGUOUS_MODES: {
-        SpeculateStrictInt32Operand property(this, node.child2());
-        StorageOperand storage(this, node.child3());
-
-        GPRReg propertyReg = property.gpr();
-        GPRReg storageReg = storage.gpr();
-
-        if (!m_compileOkay)
-            return;
-
-        speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
-
-        GPRTemporary result(this);
-        m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr), result.gpr());
-        speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::Zero, result.gpr()));
-
-        jsValueResult(result.gpr(), m_compileIndex);
-        break;
-    }
-    case CONTIGUOUS_TO_TAIL_MODES:
-    case OUT_OF_BOUNDS_CONTIGUOUS_MODES:
-    case ALL_EFFECTFUL_CONTIGUOUS_MODES: {
-        SpeculateCellOperand base(this, node.child1());
-        SpeculateStrictInt32Operand property(this, node.child2());
-        StorageOperand storage(this, node.child3());
-
-        GPRReg baseReg = base.gpr();
-        GPRReg propertyReg = property.gpr();
-        GPRReg storageReg = storage.gpr();
-
-        if (!m_compileOkay)
-            return;
-
-        GPRTemporary result(this);
-        GPRReg resultReg = result.gpr();
-
-        MacroAssembler::JumpList slowCases =
-            compileContiguousGetByVal(node, baseReg, propertyReg, storageReg, resultReg);
-        addSlowPathGenerator(
-            slowPathCall(
-                slowCases, this, operationGetByValArrayInt,
-                result.gpr(), baseReg, propertyReg));
-
-        jsValueResult(resultReg, m_compileIndex);
-        break;
-    }
     case IN_BOUNDS_ARRAY_STORAGE_MODES: {
         SpeculateStrictInt32Operand property(this, node.child2());
         StorageOperand storage(this, node.child3());
@@ -2762,7 +2615,6 @@ void SpeculativeJIT::compile(Node& node)
         break;
     }
     case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES:
-    case SLOW_PUT_ARRAY_STORAGE_MODES:
     case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: {
         SpeculateCellOperand base(this, node.child1());
         SpeculateStrictInt32Operand property(this, node.child2());
@@ -2775,17 +2627,21 @@ void SpeculativeJIT::compile(Node& node)
         if (!m_compileOkay)
             return;
 
-        GPRTemporary result(this);
-        GPRReg resultReg = result.gpr();
+        MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
 
-        MacroAssembler::JumpList slowCases =
-            compileArrayStorageGetByVal(node, baseReg, propertyReg, storageReg, resultReg);
+        GPRTemporary result(this);
+        m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr());
+        MacroAssembler::Jump hole = m_jit.branchTestPtr(MacroAssembler::Zero, result.gpr());
+
+        MacroAssembler::JumpList slowCases;
+        slowCases.append(outOfBounds);
+        slowCases.append(hole);
         addSlowPathGenerator(
             slowPathCall(
                 slowCases, this, operationGetByValArrayInt,
                 result.gpr(), baseReg, propertyReg));
 
-        jsValueResult(resultReg, m_compileIndex);
+        jsValueResult(result.gpr(), m_compileIndex);
         break;
     }
     case Array::String:
@@ -2879,57 +2735,6 @@ void SpeculativeJIT::compile(Node& node)
         GPRReg propertyReg = property.gpr();
 
         switch (arrayMode) {
-        case ALL_CONTIGUOUS_MODES:
-        case ALL_EFFECTFUL_CONTIGUOUS_MODES: {
-            JSValueOperand value(this, child3);
-
-            GPRReg valueReg = value.gpr();
-
-            if (!m_compileOkay)
-                return;
-
-            if (Heap::isWriteBarrierEnabled()) {
-                GPRTemporary scratch(this);
-                writeBarrier(baseReg, value.gpr(), child3, WriteBarrierForPropertyAccess, scratch.gpr());
-            }
-
-            StorageOperand storage(this, child4);
-            GPRReg storageReg = storage.gpr();
-
-            if (node.op() == PutByValAlias) {
-                // Store the value to the array.
-                GPRReg propertyReg = property.gpr();
-                GPRReg valueReg = value.gpr();
-                m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr));
-
-                noResult(m_compileIndex);
-                break;
-            }
-
-            GPRTemporary temporary;
-            GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
-
-            MacroAssembler::JumpList slowCases =
-                compileContiguousPutByVal(
-                    node, baseReg, propertyReg, storageReg, valueReg, temporaryReg);
-
-            base.use();
-            property.use();
-            value.use();
-            storage.use();
-
-            if (!slowCases.empty()) {
-                addSlowPathGenerator(
-                    slowPathCall(
-                        slowCases, this,
-                        m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
-                        NoResult, baseReg, propertyReg, valueReg));
-            }
-
-            noResult(m_compileIndex, UseChildrenCalledExplicitly);
-            break;
-        }
-
         case ALL_ARRAY_STORAGE_MODES:
         case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: {
             JSValueOperand value(this, child3);
@@ -2957,19 +2762,53 @@ void SpeculativeJIT::compile(Node& node)
                 break;
             }
 
-            GPRTemporary temporary;
-            GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
-
-            MacroAssembler::JumpList slowCases =
-                compileArrayStoragePutByVal(
-                    node, baseReg, propertyReg, storageReg, valueReg, temporaryReg);
-
+            MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
+            if (isInBoundsAccess(arrayMode))
+                speculationCheck(OutOfBounds, JSValueRegs(), NoNode, beyondArrayBounds);
+
+            // Check if we're writing to a hole; if so increment m_numValuesInVector.
+            MacroAssembler::Jump isHoleValue;
+            if (!mayStoreToHole(arrayMode)) {
+                // This is uncountable because if we take this exit, then the baseline JIT
+                // will immediately count the hole store. So there is no need for exit
+                // profiling.
+                speculationCheck(
+                    Uncountable, JSValueRegs(), NoNode,
+                    m_jit.branchTestPtr(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
+            } else {
+                MacroAssembler::Jump notHoleValue = m_jit.branchTestPtr(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+                if (isSlowPutAccess(arrayMode)) {
+                    // This is sort of strange. If we wanted to optimize this code path, we would invert
+                    // the above branch. But it's simply not worth it since this only happens if we're
+                    // already having a bad time.
+                    isHoleValue = m_jit.jump();
+                } else {
+                    m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+
+                    // If we're writing to a hole we might be growing the array;
+                    MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
+                    m_jit.add32(TrustedImm32(1), propertyReg);
+                    m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
+                    m_jit.sub32(TrustedImm32(1), propertyReg);
+
+                    lengthDoesNotNeedUpdate.link(&m_jit);
+                }
+                notHoleValue.link(&m_jit);
+            }
+
             base.use();
             property.use();
             value.use();
             storage.use();
-
-            if (!slowCases.empty()) {
+
+            // Store the value to the array.
+            m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+
+            if (!isInBoundsAccess(arrayMode)) {
+                MacroAssembler::JumpList slowCases;
+                slowCases.append(beyondArrayBounds);
+                if (isSlowPutAccess(arrayMode))
+                    slowCases.append(isHoleValue);
                 addSlowPathGenerator(
                     slowPathCall(
                         slowCases, this,
@@ -3134,54 +2973,26 @@ void SpeculativeJIT::compile(Node& node)
         StorageOperand storage(this, node.child3());
         GPRReg storageGPR = storage.gpr();
 
-        switch (node.arrayMode()) {
-        case Array::ArrayWithContiguous:
-        case Array::ArrayWithContiguousOutOfBounds: {
-            m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
-            MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
-            m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr));
-            m_jit.add32(TrustedImm32(1), storageLengthGPR);
-            m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
-            m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
-
-            addSlowPathGenerator(
-                slowPathCall(
-                    slowPath, this, operationArrayPush, NoResult, storageLengthGPR,
-                    valueGPR, baseGPR));
-
-            jsValueResult(storageLengthGPR, m_compileIndex);
-            break;
-        }
-
-        case Array::ArrayWithArrayStorage:
-        case Array::ArrayWithArrayStorageOutOfBounds: {
-            m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
+        m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
 
-            // Refuse to handle bizarre lengths.
-            speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe)));
+        // Refuse to handle bizarre lengths.
+        speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe)));
 
-            MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
+        MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
 
-            m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+        m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
 
-            m_jit.add32(TrustedImm32(1), storageLengthGPR);
-            m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
-            m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
-            m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
+        m_jit.add32(TrustedImm32(1), storageLengthGPR);
+        m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
+        m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+        m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
 
-            addSlowPathGenerator(
-                slowPathCall(
-                    slowPath, this, operationArrayPush, NoResult, storageLengthGPR,
-                    valueGPR, baseGPR));
+        addSlowPathGenerator(
+            slowPathCall(
+                slowPath, this, operationArrayPush, NoResult, storageLengthGPR,
+                valueGPR, baseGPR));
 
-            jsValueResult(storageLengthGPR, m_compileIndex);
-            break;
-        }
-
-        default:
-            CRASH();
-            break;
-        }
+        jsValueResult(storageLengthGPR, m_compileIndex);
         break;
     }
@@ -3191,77 +3002,41 @@ void SpeculativeJIT::compile(Node& node)
         SpeculateCellOperand base(this, node.child1());
         StorageOperand storage(this, node.child2());
         GPRTemporary value(this);
+        GPRTemporary storageLength(this);
 
         GPRReg baseGPR = base.gpr();
         GPRReg storageGPR = storage.gpr();
         GPRReg valueGPR = value.gpr();
+        GPRReg storageLengthGPR = storageLength.gpr();
 
-        switch (node.arrayMode()) {
-        case Array::ArrayWithContiguous:
-        case Array::ArrayWithContiguousOutOfBounds: {
-            m_jit.load32(
-                MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valueGPR);
-            MacroAssembler::Jump undefinedCase =
-                m_jit.branchTest32(MacroAssembler::Zero, valueGPR);
-            m_jit.sub32(TrustedImm32(1), valueGPR);
-            m_jit.store32(
-                valueGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
-            m_jit.loadPtr(
-                MacroAssembler::BaseIndex(storageGPR, valueGPR, MacroAssembler::ScalePtr),
-                valueGPR);
-            MacroAssembler::Jump slowCase = m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR);
-
-            addSlowPathGenerator(
-                slowPathMove(
-                    undefinedCase, this,
-                    MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR));
-            addSlowPathGenerator(
-                slowPathCall(
-                    slowCase, this, operationArrayPopAndRecoverLength, valueGPR, baseGPR));
-
-            jsValueResult(valueGPR, m_compileIndex);
-            break;
-        }
-
-        case Array::ArrayWithArrayStorage:
-        case Array::ArrayWithArrayStorageOutOfBounds: {
-            GPRTemporary storageLength(this);
-            GPRReg storageLengthGPR = storageLength.gpr();
-
-            m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
+        m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
 
-            JITCompiler::Jump undefinedCase =
-                m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
+        JITCompiler::JumpList setUndefinedCases;
+        setUndefinedCases.append(m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR));
 
-            m_jit.sub32(TrustedImm32(1), storageLengthGPR);
+        m_jit.sub32(TrustedImm32(1), storageLengthGPR);
 
-            JITCompiler::JumpList slowCases;
-            slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())));
+        MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
 
-            m_jit.loadPtr(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR);
-            slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR));
+        m_jit.loadPtr(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR);
 
-            m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
+        m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
+
+        setUndefinedCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR));
 
-            m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
-            m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+        m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+        m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
 
-            addSlowPathGenerator(
-                slowPathMove(
-                    undefinedCase, this,
-                    MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR));
+        addSlowPathGenerator(
+            slowPathMove(
+                setUndefinedCases, this,
+                MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR));
 
-            addSlowPathGenerator(
-                slowPathCall(
-                    slowCases, this, operationArrayPop, valueGPR, baseGPR));
+        addSlowPathGenerator(
+            slowPathCall(
+                slowCase, this, operationArrayPop, valueGPR, baseGPR));
 
-            jsValueResult(valueGPR, m_compileIndex);
-            break;
-        }
-
-        default:
-            CRASH();
-            break;
-        }
+        jsValueResult(valueGPR, m_compileIndex);
         break;
     }
@@ -3312,9 +3087,9 @@ void SpeculativeJIT::compile(Node& node)
         m_jit.move(op1.gpr(), GPRInfo::returnValueGPR);
 
         // Grab the return address.
-        m_jit.emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, GPRInfo::regT1);
+        m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, GPRInfo::regT1);
         // Restore our caller's "r".
-        m_jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, GPRInfo::callFrameRegister);
+        m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, GPRInfo::callFrameRegister);
         // Return.
         m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT1);
         m_jit.ret();
@@ -3376,40 +3151,8 @@ void SpeculativeJIT::compile(Node& node)
     case NewArray: {
         JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node.codeOrigin);
-        if (!globalObject->isHavingABadTime()) {
+        if (!globalObject->isHavingABadTime())
             globalObject->havingABadTimeWatchpoint()->add(speculationWatchpoint());
-
-            ASSERT(hasContiguous(globalObject->arrayStructure()->indexingType()));
-
-            unsigned numElements = node.numChildren();
-
-            GPRTemporary result(this);
-            GPRTemporary storage(this);
-
-            GPRReg resultGPR = result.gpr();
-            GPRReg storageGPR = storage.gpr();
-
-            emitAllocateJSArray(globalObject->arrayStructure(), resultGPR, storageGPR, numElements);
-
-            // At this point, one way or another, resultGPR and storageGPR have pointers to
-            // the JSArray and the Butterfly, respectively.
-
-            for (unsigned operandIdx = 0; operandIdx < node.numChildren(); ++operandIdx) {
-                JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node.firstChild() + operandIdx]);
-                GPRReg opGPR = operand.gpr();
-                m_jit.storePtr(opGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx));
-            }
-
-            // Yuck, we should *really* have a way of also returning the storageGPR. But
-            // that's the least of what's wrong with this code. We really shouldn't be
-            // allocating the array after having computed - and probably spilled to the
-            // stack - all of the things that will go into the array. The solution to that
-            // bigger problem will also likely fix the redundancy in reloading the storage
-            // pointer that we currently have.
-
-            cellResult(resultGPR, m_compileIndex);
-            break;
-        }
 
         if (!node.numChildren()) {
             flushRegisters();
@@ -3460,61 +3203,15 @@
     case NewArrayWithSize: {
         JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node.codeOrigin);
-        if (!globalObject->isHavingABadTime()) {
+        if (!globalObject->isHavingABadTime())
             globalObject->havingABadTimeWatchpoint()->add(speculationWatchpoint());
-
-            SpeculateStrictInt32Operand size(this, node.child1());
-            GPRTemporary result(this);
-            GPRTemporary storage(this);
-            GPRTemporary scratch(this);
-
-            GPRReg sizeGPR = size.gpr();
-            GPRReg resultGPR = result.gpr();
-            GPRReg storageGPR = storage.gpr();
-            GPRReg scratchGPR = scratch.gpr();
-
-            MacroAssembler::JumpList slowCases;
-            slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX)));
-
-            ASSERT((1 << 3) == sizeof(JSValue));
-            m_jit.move(sizeGPR, scratchGPR);
-            m_jit.lshift32(TrustedImm32(3), scratchGPR);
-            m_jit.add32(TrustedImm32(sizeof(IndexingHeader)), scratchGPR, resultGPR);
-            slowCases.append(
-                emitAllocateBasicStorage(resultGPR, storageGPR));
-            m_jit.subPtr(scratchGPR, storageGPR);
-            emitAllocateBasicJSObject<JSArray, MarkedBlock::None>(
-                TrustedImmPtr(globalObject->arrayStructure()), resultGPR, scratchGPR,
-                storageGPR, sizeof(JSArray), slowCases);
-
-            m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
-            m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
-
-            addSlowPathGenerator(adoptPtr(
-                new CallArrayAllocatorWithVariableSizeSlowPathGenerator(
-                    slowCases, this, operationNewArrayWithSize, resultGPR,
-                    globalObject->arrayStructure(),
-                    globalObject->arrayStructureWithArrayStorage(),
-                    sizeGPR)));
-
-            cellResult(resultGPR, m_compileIndex);
-            break;
-        }
 
         SpeculateStrictInt32Operand size(this, node.child1());
         GPRReg sizeGPR = size.gpr();
         flushRegisters();
         GPRResult result(this);
-        GPRReg resultGPR = result.gpr();
-        GPRReg structureGPR = selectScratchGPR(sizeGPR);
-        MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX));
-        m_jit.move(TrustedImmPtr(globalObject->arrayStructure()), structureGPR);
-        MacroAssembler::Jump done = m_jit.jump();
-        bigLength.link(&m_jit);
-        m_jit.move(TrustedImmPtr(globalObject->arrayStructureWithArrayStorage()), structureGPR);
-        done.link(&m_jit);
-        callOperation(operationNewArrayWithSize, resultGPR, structureGPR, sizeGPR);
-        cellResult(resultGPR, m_compileIndex);
+        callOperation(operationNewArrayWithSize, result.gpr(), globalObject->arrayStructure(), sizeGPR);
+        cellResult(result.gpr(), m_compileIndex);
         break;
     }
@@ -3557,35 +3254,10 @@ void SpeculativeJIT::compile(Node& node)
     }
 
     case NewArrayBuffer: {
-        JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node.codeOrigin);
-        if (!globalObject->isHavingABadTime()) {
-            globalObject->havingABadTimeWatchpoint()->add(speculationWatchpoint());
-
-            unsigned numElements = node.numConstants();
-
-            GPRTemporary result(this);
-            GPRTemporary storage(this);
-
-            GPRReg resultGPR = result.gpr();
-            GPRReg storageGPR = storage.gpr();
-
-            emitAllocateJSArray(globalObject->arrayStructure(), resultGPR, storageGPR, numElements);
-
-            JSValue* data = m_jit.codeBlock()->constantBuffer(node.startConstant());
-            for (unsigned index = 0; index < node.numConstants(); ++index) {
-                m_jit.storePtr(
-                    ImmPtr(bitwise_cast<void*>(JSValue::encode(data[index]))),
-                    MacroAssembler::Address(storageGPR, sizeof(JSValue) * index));
-            }
-
-            cellResult(resultGPR, m_compileIndex);
-            break;
-        }
-
         flushRegisters();
         GPRResult result(this);
 
-        callOperation(operationNewArrayBuffer, result.gpr(), globalObject->arrayStructure(), node.startConstant(), node.numConstants());
+        callOperation(operationNewArrayBuffer, result.gpr(), node.startConstant(), node.numConstants());
 
         cellResult(result.gpr(), m_compileIndex);
         break;
@@ -3703,7 +3375,7 @@ void SpeculativeJIT::compile(Node& node)
     case GetCallee: {
         GPRTemporary result(this);
-        m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(JSStack::Callee)), result.gpr());
+        m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::Callee)), result.gpr());
         cellResult(result.gpr(), m_compileIndex);
         break;
     }
@@ -3712,7 +3384,7 @@ void SpeculativeJIT::compile(Node& node)
         GPRTemporary result(this);
         GPRReg resultGPR = result.gpr();
 
-        m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(JSStack::ScopeChain)), resultGPR);
+        m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::ScopeChain)), resultGPR);
         bool checkTopLevel = m_jit.codeBlock()->codeType() == FunctionCode && m_jit.codeBlock()->needsFullScopeChain();
         int skip = node.scopeChainDepth();
         ASSERT(skip || !checkTopLevel);
@@ -4301,14 +3973,14 @@ void SpeculativeJIT::compile(Node& node)
         // Fast case
         m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR);
 #if DFG_ENABLE(JIT_ASSERT)
-        JITCompiler::Jump isOutOfLine = m_jit.branch32(JITCompiler::GreaterThanOrEqual, resolveInfoGPR, TrustedImm32(firstOutOfLineOffset));
+        JITCompiler::Jump isOutOfLine = m_jit.branch32(JITCompiler::GreaterThanOrEqual, resolveInfoGPR, TrustedImm32(inlineStorageCapacity));
         m_jit.breakpoint();
         isOutOfLine.link(&m_jit);
 #endif
         m_jit.neg32(resolveInfoGPR);
         m_jit.signExtend32ToPtr(resolveInfoGPR, resolveInfoGPR);
         m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::butterflyOffset()), resultGPR);
-        m_jit.loadPtr(JITCompiler::BaseIndex(resultGPR, resolveInfoGPR, JITCompiler::ScalePtr, (firstOutOfLineOffset - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultGPR);
+        m_jit.loadPtr(JITCompiler::BaseIndex(resultGPR, resolveInfoGPR, JITCompiler::ScalePtr, (inlineStorageCapacity - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultGPR);
 
         addSlowPathGenerator(
             slowPathCall(
@@ -4434,7 +4106,7 @@ void SpeculativeJIT::compile(Node& node)
         }
 
         ASSERT(!node.codeOrigin.inlineCallFrame);
-        m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR);
+        m_jit.load32(JITCompiler::payloadFor(RegisterFile::ArgumentCount), resultGPR);
         m_jit.sub32(TrustedImm32(1), resultGPR);
         integerResult(resultGPR, m_compileIndex);
         break;
@@ -4457,7 +4129,7 @@ void SpeculativeJIT::compile(Node& node)
                             jsNumber(node.codeOrigin.inlineCallFrame->arguments.size() - 1)))),
                 resultGPR);
         } else {
-            m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR);
+            m_jit.load32(JITCompiler::payloadFor(RegisterFile::ArgumentCount), resultGPR);
             m_jit.sub32(TrustedImm32(1), resultGPR);
             m_jit.orPtr(GPRInfo::tagTypeNumberRegister, resultGPR);
         }
@@ -4506,7 +4178,7 @@ void SpeculativeJIT::compile(Node& node)
                 m_jit.branch32(
                     JITCompiler::AboveOrEqual,
                     resultGPR,
-                    JITCompiler::payloadFor(JSStack::ArgumentCount)));
+                    JITCompiler::payloadFor(RegisterFile::ArgumentCount)));
         }
 
         JITCompiler::JumpList slowArgument;
@@ -4571,7 +4243,7 @@ void SpeculativeJIT::compile(Node& node)
                 m_jit.branch32(
                     JITCompiler::AboveOrEqual,
                     resultGPR,
-                    JITCompiler::payloadFor(JSStack::ArgumentCount)));
+                    JITCompiler::payloadFor(RegisterFile::ArgumentCount)));
         }
 
         JITCompiler::JumpList slowArgument;