Diffstat (limited to 'Source/JavaScriptCore/jit/JITInlineMethods.h')
-rw-r--r--  Source/JavaScriptCore/jit/JITInlineMethods.h  87
1 file changed, 81 insertions(+), 6 deletions(-)
diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h
index e0310569d..998d5ac18 100644
--- a/Source/JavaScriptCore/jit/JITInlineMethods.h
+++ b/Source/JavaScriptCore/jit/JITInlineMethods.h
@@ -269,7 +269,7 @@ ALWAYS_INLINE void JIT::updateTopCallFrame()
#if USE(JSVALUE32_64)
storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
#else
- store32(Imm32(m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
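+ // The bytecode offset is a value the JIT chooses itself, not one taken from
+ // user code, so it can use the trusted (unblinded) immediate form.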
+ store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
#endif
}
storePtr(callFrameRegister, &m_globalData->topCallFrame);
@@ -459,6 +459,78 @@ inline void JIT::emitAllocateJSFunction(FunctionExecutable* executable, Register
#endif
}

+inline void JIT::emitAllocateBasicStorage(size_t size, RegisterID result, RegisterID storagePtr)
+{
+ CopiedAllocator* allocator = &m_globalData->heap.storageAllocator();
+
+ // FIXME: We need to check for wrap-around.
+ // Check to make sure that the allocation will fit in the current block.
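+ // result = m_currentOffset + size (the proposed new offset);
+ // storagePtr = one past the end of the current block.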
+ loadPtr(&allocator->m_currentOffset, result);
+ addPtr(TrustedImm32(size), result);
+ loadPtr(&allocator->m_currentBlock, storagePtr);
+ addPtr(TrustedImm32(HeapBlock::s_blockSize), storagePtr);
+ addSlowCase(branchPtr(AboveOrEqual, result, storagePtr));
+
+ // Load the original offset.
+ loadPtr(&allocator->m_currentOffset, result);
+
+ // Bump the pointer forward.
+ move(result, storagePtr);
+ addPtr(TrustedImm32(size), storagePtr);
+ storePtr(storagePtr, &allocator->m_currentOffset);
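+ // On exit, result holds the address of the newly reserved storage.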
+}
+
+inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr)
+{
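+ // Use at least 4 vector slots, presumably so small arrays have room to grow
+ // before the backing store must be reallocated.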
+ unsigned initialLength = std::max(length, 4U);
+ size_t initialStorage = JSArray::storageSize(initialLength);
+
+ // Allocate the cell for the array.
+ emitAllocateBasicJSObject<JSArray, false>(TrustedImmPtr(m_codeBlock->globalObject()->arrayStructure()), cellResult, storagePtr);
+
+ // Allocate the backing store for the array.
+ emitAllocateBasicStorage(initialStorage, storageResult, storagePtr);
+
+ // Store all the necessary info in the ArrayStorage.
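+ // With an index bias of zero, the allocation base is the storage pointer itself.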
+ storePtr(storageResult, Address(storageResult, ArrayStorage::allocBaseOffset()));
+ store32(Imm32(length), Address(storageResult, ArrayStorage::lengthOffset()));
+ store32(Imm32(length), Address(storageResult, ArrayStorage::numValuesInVectorOffset()));
+
+ // Store the newly allocated ArrayStorage.
+ storePtr(storageResult, Address(cellResult, JSArray::storageOffset()));
+
+ // Store the vector length and index bias.
+ store32(Imm32(initialLength), Address(cellResult, JSArray::vectorLengthOffset()));
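+ // Nothing has been shifted off the front of this array yet, so the bias is zero.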
+ store32(TrustedImm32(0), Address(cellResult, JSArray::indexBiasOffset()));
+
+ // Initialize the subclass data and the sparse value map.
+ storePtr(TrustedImmPtr(0), Address(cellResult, JSArray::subclassDataOffset()));
+ storePtr(TrustedImmPtr(0), Address(cellResult, JSArray::sparseValueMapOffset()));
+
+ // Store the values we have.
+ for (unsigned i = 0; i < length; i++) {
+#if USE(JSVALUE64)
+ loadPtr(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
+ storePtr(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
+#else
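+ // On 32-bit, a JSValue is a tag/payload pair, so copy each value as two 32-bit words.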
+ load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
+ store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
+ load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register) + sizeof(uint32_t)), storagePtr);
+ store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + sizeof(uint32_t)));
+#endif
+ }
+
+ // Zero out the remaining slots.
+ for (unsigned i = length; i < initialLength; i++) {
+#if USE(JSVALUE64)
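+ // On 64-bit, the empty JSValue is encoded as all zero bits.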
+ storePtr(TrustedImmPtr(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
+#else
+ store32(TrustedImm32(static_cast<int>(JSValue::EmptyValueTag)), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ store32(TrustedImm32(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+#endif
+ }
+}
+
#if ENABLE(VALUE_PROFILER)
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
{
@@ -485,11 +557,11 @@ inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
}

if (m_randomGenerator.getUint32() & 1)
- add32(Imm32(1), bucketCounterRegister);
+ add32(TrustedImm32(1), bucketCounterRegister);
else
- add32(Imm32(3), bucketCounterRegister);
- and32(Imm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
- move(ImmPtr(valueProfile->m_buckets), scratch);
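+ // The increments, the bucket index mask, and the bucket pointer are all fixed
+ // by the VM rather than derived from user code, so the trusted (unblinded)
+ // immediate forms are safe here.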
+ add32(TrustedImm32(3), bucketCounterRegister);
+ and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
+ move(TrustedImmPtr(valueProfile->m_buckets), scratch);
#if USE(JSVALUE64)
storePtr(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
#elif USE(JSVALUE32_64)
@@ -796,7 +868,10 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {
JSValue value = m_codeBlock->getConstant(src);
- move(ImmPtr(JSValue::encode(value)), dst);
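+ // Numeric constants carry bit patterns taken straight from the script, so
+ // they stay blinded (ImmPtr); other constants are pointers or fixed
+ // encodings whose bits the script cannot choose, so they can be emitted
+ // directly as TrustedImmPtr.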
+ if (!value.isNumber())
+ move(TrustedImmPtr(JSValue::encode(value)), dst);
+ else
+ move(ImmPtr(JSValue::encode(value)), dst);
killLastResultRegister();
return;
}