author    Simon Hausmann <simon.hausmann@nokia.com>  2012-03-12 14:11:15 +0100
committer Simon Hausmann <simon.hausmann@nokia.com>  2012-03-12 14:11:15 +0100
commit    dd91e772430dc294e3bf478c119ef8d43c0a3358 (patch)
tree      6f33ce4d5872a5691e0291eb45bf6ab373a5f567 /Source/JavaScriptCore/assembler
parent    ad0d549d4cc13433f77c1ac8f0ab379c83d93f28 (diff)
download  qtwebkit-dd91e772430dc294e3bf478c119ef8d43c0a3358.tar.gz
Imported WebKit commit 3db4eb1820ac8fb03065d7ea73a4d9db1e8fea1a (http://svn.webkit.org/repository/webkit/trunk@110422)
This includes build fixes for the latest qtbase/qtdeclarative as well as the final QML2 API.
Diffstat (limited to 'Source/JavaScriptCore/assembler')
-rw-r--r--  Source/JavaScriptCore/assembler/ARMAssembler.cpp                     4
-rw-r--r--  Source/JavaScriptCore/assembler/ARMAssembler.h                       3
-rw-r--r--  Source/JavaScriptCore/assembler/ARMv7Assembler.h                    18
-rw-r--r--  Source/JavaScriptCore/assembler/AbstractMacroAssembler.h            61
-rw-r--r--  Source/JavaScriptCore/assembler/AssemblerBuffer.h                    5
-rw-r--r--  Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h    4
-rw-r--r--  Source/JavaScriptCore/assembler/LinkBuffer.h                        26
-rw-r--r--  Source/JavaScriptCore/assembler/MIPSAssembler.h                      5
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssembler.h                   552
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARM.h                 43
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h               71
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h             16
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h                47
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerSH4.h                 50
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerX86.h                 14
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h           59
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h              43
-rw-r--r--  Source/JavaScriptCore/assembler/SH4Assembler.h                       5
-rw-r--r--  Source/JavaScriptCore/assembler/X86Assembler.h                      32
19 files changed, 934 insertions, 124 deletions
diff --git a/Source/JavaScriptCore/assembler/ARMAssembler.cpp b/Source/JavaScriptCore/assembler/ARMAssembler.cpp
index 4ded0e88e..b880f50bf 100644
--- a/Source/JavaScriptCore/assembler/ARMAssembler.cpp
+++ b/Source/JavaScriptCore/assembler/ARMAssembler.cpp
@@ -344,14 +344,14 @@ void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID b
fdtr_u(isLoad, srcDst, ARMRegisters::S0, 0);
}
-PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(JSGlobalData& globalData, void* ownerUID)
+PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
{
// 64-bit alignment is required for next constant pool and JIT code as well
m_buffer.flushWithoutBarrier(true);
if (!m_buffer.isAligned(8))
bkpt(0);
- RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID);
+ RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID, effort);
char* data = reinterpret_cast<char*>(result->start());
for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
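
The new JITCompilationEffort argument threads an allocation policy from the JIT's entry points down to the executable allocator. JITCompilationEffort.h itself is not shown in this diff; judging from the two values used elsewhere in the commit (JITCompilationMustSucceed as the LinkBuffer default, JITCompilationCanFail in its destructor assertion), it presumably boils down to:

    // Presumed shape of JITCompilationEffort.h, inferred from the call
    // sites in this commit; the header itself is not part of the diff.
    namespace JSC {

    enum JITCompilationEffort {
        JITCompilationCanFail,     // allocator may return 0; callers must check
        JITCompilationMustSucceed  // allocation failure is fatal
    };

    } // namespace JSC
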
diff --git a/Source/JavaScriptCore/assembler/ARMAssembler.h b/Source/JavaScriptCore/assembler/ARMAssembler.h
index ef199d2d1..a9ecf5091 100644
--- a/Source/JavaScriptCore/assembler/ARMAssembler.h
+++ b/Source/JavaScriptCore/assembler/ARMAssembler.h
@@ -30,6 +30,7 @@
#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#include "AssemblerBufferWithConstantPool.h"
+#include "JITCompilationEffort.h"
#include <wtf/Assertions.h>
namespace JSC {
@@ -679,7 +680,7 @@ namespace JSC {
return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
}
- PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData&, void* ownerUID);
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData&, void* ownerUID, JITCompilationEffort);
unsigned debugOffset() { return m_buffer.debugOffset(); }
diff --git a/Source/JavaScriptCore/assembler/ARMv7Assembler.h b/Source/JavaScriptCore/assembler/ARMv7Assembler.h
index 5f376bf3d..51788da08 100644
--- a/Source/JavaScriptCore/assembler/ARMv7Assembler.h
+++ b/Source/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -584,6 +584,7 @@ private:
OP_VMOV_T2 = 0xEEB0,
OP_VMOV_IMM_T2 = 0xEEB0,
OP_VMRS = 0xEEB0,
+ OP_VNEG_T2 = 0xEEB0,
OP_VSQRT_T1 = 0xEEB0,
OP_B_T3a = 0xF000,
OP_B_T4a = 0xF000,
@@ -601,6 +602,7 @@ private:
OP_SUB_S_imm_T3 = 0xF1B0,
OP_CMP_imm_T2 = 0xF1B0,
OP_RSB_imm_T2 = 0xF1C0,
+ OP_RSB_S_imm_T2 = 0xF1D0,
OP_ADD_imm_T4 = 0xF200,
OP_MOV_imm_T3 = 0xF240,
OP_SUB_imm_T4 = 0xF2A0,
@@ -649,6 +651,7 @@ private:
OP_VABS_T2b = 0x0A40,
OP_VCMPb = 0x0A40,
OP_VCVT_FPIVFPb = 0x0A40,
+ OP_VNEG_T2b = 0x0A40,
OP_VSUB_T2b = 0x0A40,
OP_VSQRT_T1b = 0x0A40,
OP_NOP_T2b = 0x8000,
@@ -1584,6 +1587,16 @@ public:
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
}
+ ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
+ {
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+ ASSERT(imm.isUInt12());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
+ }
+
// Not allowed in an IT (if then) block?
ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
{
@@ -1734,6 +1747,11 @@ public:
m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
}
+ void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
+ }
+
void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
{
m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
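
A note on the new sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn) overload: it emits OP_RSB_S_imm_T2, a flag-setting reverse subtract, so it computes rd = imm - rn, not rd = rn - imm. With imm = 0 this yields negation, which is exactly how the branchNeg32 added to MacroAssemblerARMv7.h later in this diff uses it. A plain-C++ model of the semantics (names hypothetical):

    #include <cstdint>

    // 'rsbs rd, rn, #imm' computes rd = imm - rn and sets the flags.
    int32_t rsb(int32_t rn, int32_t imm) { return imm - rn; }
    int32_t negate(int32_t rn) { return rsb(rn, 0); }  // 0 - rn == -rn
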
diff --git a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
index b4262e894..ab343977e 100644
--- a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
+++ b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -28,11 +28,21 @@
#include "CodeLocation.h"
#include "MacroAssemblerCodeRef.h"
+#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Noncopyable.h>
#include <wtf/UnusedParam.h>
#if ENABLE(ASSEMBLER)
+
+#if PLATFORM(QT)
+#define ENABLE_JIT_CONSTANT_BLINDING 0
+#endif
+
+#ifndef ENABLE_JIT_CONSTANT_BLINDING
+#define ENABLE_JIT_CONSTANT_BLINDING 1
+#endif
+
namespace JSC {
class LinkBuffer;
@@ -186,11 +196,19 @@ public:
const void* m_value;
};
- struct ImmPtr : public TrustedImmPtr {
+ struct ImmPtr :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImmPtr
+#else
+ public TrustedImmPtr
+#endif
+ {
explicit ImmPtr(const void* value)
: TrustedImmPtr(value)
{
}
+
+ TrustedImmPtr asTrustedImmPtr() { return *this; }
};
// TrustedImm32:
@@ -232,7 +250,13 @@ public:
};
- struct Imm32 : public TrustedImm32 {
+ struct Imm32 :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImm32
+#else
+ public TrustedImm32
+#endif
+ {
explicit Imm32(int32_t value)
: TrustedImm32(value)
{
@@ -243,6 +267,8 @@ public:
{
}
#endif
+ const TrustedImm32& asTrustedImm32() const { return *this; }
+
};
// Section 2: MacroAssembler code buffer handles
@@ -535,14 +561,41 @@ public:
return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
}
- void beginUninterruptedSequence() { }
- void endUninterruptedSequence() { }
+ void beginUninterruptedSequence() { m_inUninterruptedSequence = true; }
+ void endUninterruptedSequence() { m_inUninterruptedSequence = false; }
unsigned debugOffset() { return m_assembler.debugOffset(); }
protected:
+ AbstractMacroAssembler()
+ : m_inUninterruptedSequence(false)
+ , m_randomSource(cryptographicallyRandomNumber())
+ {
+ }
+
AssemblerType m_assembler;
+ bool inUninterruptedSequence()
+ {
+ return m_inUninterruptedSequence;
+ }
+
+ bool m_inUninterruptedSequence;
+
+
+ uint32_t random()
+ {
+ return m_randomSource.getUint32();
+ }
+
+ WeakRandom m_randomSource;
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ static bool scratchRegisterForBlinding() { return false; }
+ static bool shouldBlindForSpecificArch(uint32_t) { return true; }
+ static bool shouldBlindForSpecificArch(uint64_t) { return true; }
+#endif
+
friend class LinkBuffer;
friend class RepatchBuffer;
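
The switch from public to private inheritance is what makes blinding enforceable at compile time: with private inheritance an Imm32 no longer converts implicitly to TrustedImm32, so any entry point that accepts untrusted immediates must provide an explicit Imm32 overload (which can blind), and the trusted fast paths are only reachable through the new asTrustedImm32()/asTrustedImmPtr() accessors. A minimal standalone illustration of the mechanism, with the types simplified:

    #include <cstdint>

    struct TrustedImm32 { int32_t m_value; };

    struct Imm32 : private TrustedImm32 {
        explicit Imm32(int32_t value) : TrustedImm32{value} {}
        const TrustedImm32& asTrustedImm32() const { return *this; }
    };

    void op(TrustedImm32);  // trusted fast path (no blinding)
    void op(Imm32);         // untrusted path (may blind)

    // op(Imm32(42)) cannot silently resolve to the TrustedImm32 overload:
    // the base class is inaccessible, so only op(Imm32) is viable, and the
    // trusted value must be requested explicitly via asTrustedImm32().
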
diff --git a/Source/JavaScriptCore/assembler/AssemblerBuffer.h b/Source/JavaScriptCore/assembler/AssemblerBuffer.h
index 55706c1ab..d1deef234 100644
--- a/Source/JavaScriptCore/assembler/AssemblerBuffer.h
+++ b/Source/JavaScriptCore/assembler/AssemblerBuffer.h
@@ -28,6 +28,7 @@
#if ENABLE(ASSEMBLER)
+#include "JITCompilationEffort.h"
#include "JSGlobalData.h"
#include "stdint.h"
#include <string.h>
@@ -129,12 +130,12 @@ namespace JSC {
return AssemblerLabel(m_index);
}
- PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID)
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
{
if (!m_index)
return 0;
- RefPtr<ExecutableMemoryHandle> result = globalData.executableAllocator.allocate(globalData, m_index, ownerUID);
+ RefPtr<ExecutableMemoryHandle> result = globalData.executableAllocator.allocate(globalData, m_index, ownerUID, effort);
if (!result)
return 0;
diff --git a/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h b/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
index 68afa766b..e2ea261ee 100644
--- a/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
+++ b/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
@@ -195,10 +195,10 @@ public:
putIntegralUnchecked(value.low);
}
- PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID)
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
{
flushConstantPool(false);
- return AssemblerBuffer::executableCopy(globalData, ownerUID);
+ return AssemblerBuffer::executableCopy(globalData, ownerUID, effort);
}
void putShortWithConstantInt(uint16_t insn, uint32_t constant, bool isReusable = false)
diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.h b/Source/JavaScriptCore/assembler/LinkBuffer.h
index 2c07d13fc..6ec9a7eb9 100644
--- a/Source/JavaScriptCore/assembler/LinkBuffer.h
+++ b/Source/JavaScriptCore/assembler/LinkBuffer.h
@@ -34,6 +34,7 @@
#define GLOBAL_THUNK_ID reinterpret_cast<void*>(static_cast<intptr_t>(-1))
#define REGEXP_CODE_ID reinterpret_cast<void*>(static_cast<intptr_t>(-2))
+#include "JITCompilationEffort.h"
#include "MacroAssembler.h"
#include <wtf/DataLog.h>
#include <wtf/Noncopyable.h>
@@ -73,7 +74,7 @@ class LinkBuffer {
#endif
public:
- LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, void* ownerUID)
+ LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
: m_size(0)
#if ENABLE(BRANCH_COMPACTION)
, m_initialSize(0)
@@ -83,16 +84,27 @@ public:
, m_globalData(&globalData)
#ifndef NDEBUG
, m_completed(false)
+ , m_effort(effort)
#endif
{
- linkCode(ownerUID);
+ linkCode(ownerUID, effort);
}
~LinkBuffer()
{
- ASSERT(m_completed);
+ ASSERT(m_completed || (!m_executableMemory && m_effort == JITCompilationCanFail));
+ }
+
+ bool didFailToAllocate() const
+ {
+ return !m_executableMemory;
}
+ bool isValid() const
+ {
+ return !didFailToAllocate();
+ }
+
// These methods are used to link or set values at code generation time.
void link(Call call, FunctionPtr function)
@@ -218,11 +230,11 @@ private:
return m_code;
}
- void linkCode(void* ownerUID)
+ void linkCode(void* ownerUID, JITCompilationEffort effort)
{
ASSERT(!m_code);
#if !ENABLE(BRANCH_COMPACTION)
- m_executableMemory = m_assembler->m_assembler.executableCopy(*m_globalData, ownerUID);
+ m_executableMemory = m_assembler->m_assembler.executableCopy(*m_globalData, ownerUID, effort);
if (!m_executableMemory)
return;
m_code = m_executableMemory->start();
@@ -230,7 +242,7 @@ private:
ASSERT(m_code);
#else
m_initialSize = m_assembler->m_assembler.codeSize();
- m_executableMemory = m_globalData->executableAllocator.allocate(*m_globalData, m_initialSize, ownerUID);
+ m_executableMemory = m_globalData->executableAllocator.allocate(*m_globalData, m_initialSize, ownerUID, effort);
if (!m_executableMemory)
return;
m_code = (uint8_t*)m_executableMemory->start();
@@ -307,6 +319,7 @@ private:
{
#ifndef NDEBUG
ASSERT(!m_completed);
+ ASSERT(isValid());
m_completed = true;
#endif
@@ -375,6 +388,7 @@ private:
JSGlobalData* m_globalData;
#ifndef NDEBUG
bool m_completed;
+ JITCompilationEffort m_effort;
#endif
};
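
Taken together, these LinkBuffer changes give callers a non-fatal compilation path: pass JITCompilationCanFail and, if executable memory cannot be allocated, the LinkBuffer comes back invalid instead of crashing. A hedged usage sketch; globalData, masm and the recovery strategy are assumptions, not shown in this diff:

    // Sketch of the intended calling convention under JITCompilationCanFail.
    LinkBuffer linkBuffer(globalData, &masm, ownerUID, JITCompilationCanFail);
    if (!linkBuffer.isValid()) {
        // didFailToAllocate() is true; fall back, e.g. to the interpreter.
        return;
    }
    // ... link() / patch() calls, then finalize the code as usual.
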
diff --git a/Source/JavaScriptCore/assembler/MIPSAssembler.h b/Source/JavaScriptCore/assembler/MIPSAssembler.h
index b59fa0b8f..5e0645129 100644
--- a/Source/JavaScriptCore/assembler/MIPSAssembler.h
+++ b/Source/JavaScriptCore/assembler/MIPSAssembler.h
@@ -32,6 +32,7 @@
#if ENABLE(ASSEMBLER) && CPU(MIPS)
#include "AssemblerBuffer.h"
+#include "JITCompilationEffort.h"
#include <wtf/Assertions.h>
#include <wtf/SegmentedVector.h>
@@ -645,9 +646,9 @@ public:
return m_buffer.codeSize();
}
- PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID)
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
{
- RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID);
+ RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID, effort);
if (!result)
return 0;
diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.h b/Source/JavaScriptCore/assembler/MacroAssembler.h
index 347cd0ea0..4c54e29aa 100644
--- a/Source/JavaScriptCore/assembler/MacroAssembler.h
+++ b/Source/JavaScriptCore/assembler/MacroAssembler.h
@@ -60,7 +60,6 @@ typedef MacroAssemblerSH4 MacroAssemblerBase;
#error "The MacroAssembler is not supported on this platform."
#endif
-
namespace JSC {
class MacroAssembler : public MacroAssemblerBase {
@@ -73,6 +72,22 @@ public:
using MacroAssemblerBase::branchPtr;
using MacroAssemblerBase::branchTestPtr;
#endif
+ using MacroAssemblerBase::move;
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ using MacroAssemblerBase::add32;
+ using MacroAssemblerBase::and32;
+ using MacroAssemblerBase::branchAdd32;
+ using MacroAssemblerBase::branchMul32;
+ using MacroAssemblerBase::branchSub32;
+ using MacroAssemblerBase::lshift32;
+ using MacroAssemblerBase::or32;
+ using MacroAssemblerBase::rshift32;
+ using MacroAssemblerBase::store32;
+ using MacroAssemblerBase::sub32;
+ using MacroAssemblerBase::urshift32;
+ using MacroAssemblerBase::xor32;
+#endif
// Utilities used by the DFG JIT.
#if ENABLE(DFG_JIT)
@@ -148,19 +163,24 @@ public:
loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
}
+ Address addressForPoke(int index)
+ {
+ return Address(stackPointerRegister, (index * sizeof(void*)));
+ }
+
void poke(RegisterID src, int index = 0)
{
- storePtr(src, Address(stackPointerRegister, (index * sizeof(void*))));
+ storePtr(src, addressForPoke(index));
}
void poke(TrustedImm32 value, int index = 0)
{
- store32(value, Address(stackPointerRegister, (index * sizeof(void*))));
+ store32(value, addressForPoke(index));
}
void poke(TrustedImmPtr imm, int index = 0)
{
- storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*))));
+ storePtr(imm, addressForPoke(index));
}
@@ -169,6 +189,10 @@ public:
{
branchPtr(cond, op1, imm).linkTo(target, this);
}
+ void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
+ {
+ branchPtr(cond, op1, imm).linkTo(target, this);
+ }
void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
{
@@ -179,6 +203,11 @@ public:
{
branch32(cond, op1, imm).linkTo(target, this);
}
+
+ void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
+ {
+ branch32(cond, op1, imm).linkTo(target, this);
+ }
void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
{
@@ -190,6 +219,11 @@ public:
return branch32(commute(cond), right, left);
}
+ Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
+ {
+ return branch32(commute(cond), right, left);
+ }
+
void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
{
branchTestPtr(cond, reg).linkTo(target, this);
@@ -340,6 +374,11 @@ public:
return load32WithCompactAddressOffsetPatch(address, dest);
}
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ move(Imm32(imm.asTrustedImmPtr()), dest);
+ }
+
void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
{
compare32(cond, left, right, dest);
@@ -364,6 +403,11 @@ public:
{
store32(TrustedImm32(imm), address);
}
+
+ void storePtr(ImmPtr imm, Address address)
+ {
+ store32(Imm32(imm.asTrustedImmPtr()), address);
+ }
void storePtr(TrustedImmPtr imm, void* address)
{
@@ -375,7 +419,6 @@ public:
return store32WithAddressOffsetPatch(src, address);
}
-
Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
{
return branch32(cond, left, right);
@@ -385,6 +428,11 @@ public:
{
return branch32(cond, left, TrustedImm32(right));
}
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+ {
+ return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
+ }
Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
{
@@ -405,7 +453,7 @@ public:
{
return branch32(cond, left, TrustedImm32(right));
}
-
+
Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
{
return branch32(cond, left, TrustedImm32(right));
@@ -431,7 +479,6 @@ public:
return branchTest32(cond, address, mask);
}
-
Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
{
return branchAdd32(cond, src, dest);
@@ -446,8 +493,499 @@ public:
{
return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
}
+#else
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ using MacroAssemblerBase::addPtr;
+ using MacroAssemblerBase::andPtr;
+ using MacroAssemblerBase::branchSubPtr;
+ using MacroAssemblerBase::convertInt32ToDouble;
+ using MacroAssemblerBase::storePtr;
+ using MacroAssemblerBase::subPtr;
+ using MacroAssemblerBase::xorPtr;
+
+ bool shouldBlindDouble(double value)
+ {
+ // Don't trust NaN or +/-Infinity
+ if (!isfinite(value))
+ return true;
+
+ // Try to force normalisation, and check that there's no change
+ // in the bit pattern
+ if (bitwise_cast<uintptr_t>(value * 1.0) != bitwise_cast<uintptr_t>(value))
+ return true;
+
+ value = abs(value);
+ // Only allow a limited set of fractional components
+ double scaledValue = value * 8;
+ if (scaledValue / 8 != value)
+ return true;
+ double frac = scaledValue - floor(scaledValue);
+ if (frac != 0.0)
+ return true;
+
+ return value > 0xff;
+ }
+
+ bool shouldBlind(ImmPtr imm)
+ {
+ ASSERT(!inUninterruptedSequence());
+#if !defined(NDEBUG)
+ UNUSED_PARAM(imm);
+ // Debug always blind all constants, if only so we know
+ // if we've broken blinding during patch development.
+ return true;
+#endif
+
+ // First off we'll special case common, "safe" values to avoid hurting
+ // performance too much
+ uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+ switch (value) {
+ case 0xffff:
+ case 0xffffff:
+ case 0xffffffffL:
+ case 0xffffffffffL:
+ case 0xffffffffffffL:
+ case 0xffffffffffffffL:
+ case 0xffffffffffffffffL:
+ return false;
+ default: {
+ if (value <= 0xff)
+ return false;
+#if CPU(X86_64)
+ JSValue jsValue = JSValue::decode(reinterpret_cast<void*>(value));
+ if (jsValue.isInt32())
+ return shouldBlind(Imm32(jsValue.asInt32()));
+ if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
+ return false;
+
+ if (!shouldBlindDouble(bitwise_cast<double>(value)))
+ return false;
+#endif
+ }
+ }
+ return shouldBlindForSpecificArch(value);
+ }
+
+ struct RotatedImmPtr {
+ RotatedImmPtr(uintptr_t v1, uint8_t v2)
+ : value(v1)
+ , rotation(v2)
+ {
+ }
+ TrustedImmPtr value;
+ TrustedImm32 rotation;
+ };
+
+ RotatedImmPtr rotationBlindConstant(ImmPtr imm)
+ {
+ uint8_t rotation = random() % (sizeof(void*) * 8);
+ uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+ value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
+ return RotatedImmPtr(value, rotation);
+ }
+
+ void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
+ {
+ move(constant.value, dest);
+ rotateRightPtr(constant.rotation, dest);
+ }
+
+ void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
+ convertInt32ToDouble(scratchRegister, dest);
+ } else
+ convertInt32ToDouble(imm.asTrustedImm32(), dest);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ if (shouldBlind(imm))
+ loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+ else
+ move(imm.asTrustedImmPtr(), dest);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+ {
+ if (shouldBlind(right)) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
+ return branchPtr(cond, left, scratchRegister);
+ }
+ return branchPtr(cond, left, right.asTrustedImmPtr());
+ }
+
+ void storePtr(ImmPtr imm, Address dest)
+ {
+ if (shouldBlind(imm)) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+ storePtr(scratchRegister, dest);
+ } else
+ storePtr(imm.asTrustedImmPtr(), dest);
+ }
+
+#endif
+
#endif // !CPU(X86_64)
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ bool shouldBlind(Imm32 imm)
+ {
+ ASSERT(!inUninterruptedSequence());
+#if !defined(NDEBUG)
+ UNUSED_PARAM(imm);
+ // Debug always blind all constants, if only so we know
+ // if we've broken blinding during patch development.
+ return true;
+#else
+
+ // First off we'll special case common, "safe" values to avoid hurting
+ // performance too much
+ uint32_t value = imm.asTrustedImm32().m_value;
+ switch (value) {
+ case 0xffff:
+ case 0xffffff:
+ case 0xffffffff:
+ return false;
+ default:
+ if (value <= 0xff)
+ return false;
+ }
+ return shouldBlindForSpecificArch(value);
+#endif
+ }
+
+ struct BlindedImm32 {
+ BlindedImm32(int32_t v1, int32_t v2)
+ : value1(v1)
+ , value2(v2)
+ {
+ }
+ TrustedImm32 value1;
+ TrustedImm32 value2;
+ };
+
+ uint32_t keyForConstant(uint32_t value, uint32_t& mask)
+ {
+ uint32_t key = random();
+ if (value <= 0xff)
+ mask = 0xff;
+ else if (value <= 0xffff)
+ mask = 0xffff;
+ else if (value <= 0xffffff)
+ mask = 0xffffff;
+ else
+ mask = 0xffffffff;
+ return key & mask;
+ }
+
+ uint32_t keyForConstant(uint32_t value)
+ {
+ uint32_t mask = 0;
+ return keyForConstant(value, mask);
+ }
+
+ BlindedImm32 xorBlindConstant(Imm32 imm)
+ {
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t key = keyForConstant(baseValue);
+ return BlindedImm32(baseValue ^ key, key);
+ }
+
+ BlindedImm32 additionBlindedConstant(Imm32 imm)
+ {
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t key = keyForConstant(baseValue);
+ if (key > baseValue)
+ key = key - baseValue;
+ return BlindedImm32(baseValue - key, key);
+ }
+
+ BlindedImm32 andBlindedConstant(Imm32 imm)
+ {
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t mask = 0;
+ uint32_t key = keyForConstant(baseValue, mask);
+ ASSERT((baseValue & mask) == baseValue);
+ return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
+ }
+
+ BlindedImm32 orBlindedConstant(Imm32 imm)
+ {
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t mask = 0;
+ uint32_t key = keyForConstant(baseValue, mask);
+ ASSERT((baseValue & mask) == baseValue);
+ return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
+ }
+
+ void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
+ {
+ move(constant.value1, dest);
+ xor32(constant.value2, dest);
+ }
+
+ void add32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ add32(key.value1, dest);
+ add32(key.value2, dest);
+ } else
+ add32(imm.asTrustedImm32(), dest);
+ }
+
+ void addPtr(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ addPtr(key.value1, dest);
+ addPtr(key.value2, dest);
+ } else
+ addPtr(imm.asTrustedImm32(), dest);
+ }
+
+ void and32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = andBlindedConstant(imm);
+ and32(key.value1, dest);
+ and32(key.value2, dest);
+ } else
+ and32(imm.asTrustedImm32(), dest);
+ }
+
+ void andPtr(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = andBlindedConstant(imm);
+ andPtr(key.value1, dest);
+ andPtr(key.value2, dest);
+ } else
+ andPtr(imm.asTrustedImm32(), dest);
+ }
+
+ void and32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ if (src == dest)
+ return and32(imm.asTrustedImm32(), dest);
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ and32(src, dest);
+ } else
+ and32(imm.asTrustedImm32(), src, dest);
+ }
+
+ void move(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm))
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ else
+ move(imm.asTrustedImm32(), dest);
+ }
+
+ void or32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ if (src == dest)
+ return or32(imm, dest);
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ or32(src, dest);
+ } else
+ or32(imm.asTrustedImm32(), src, dest);
+ }
+
+ void or32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = orBlindedConstant(imm);
+ or32(key.value1, dest);
+ or32(key.value2, dest);
+ } else
+ or32(imm.asTrustedImm32(), dest);
+ }
+
+ void poke(Imm32 value, int index = 0)
+ {
+ store32(value, addressForPoke(index));
+ }
+
+ void poke(ImmPtr value, int index = 0)
+ {
+ storePtr(value, addressForPoke(index));
+ }
+
+ void store32(Imm32 imm, Address dest)
+ {
+ if (shouldBlind(imm)) {
+#if CPU(X86) || CPU(X86_64)
+ BlindedImm32 blind = xorBlindConstant(imm);
+ store32(blind.value1, dest);
+ xor32(blind.value2, dest);
+#else
+ RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding();
+ loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
+ store32(scratchRegister, dest);
+#endif
+ } else
+ store32(imm.asTrustedImm32(), dest);
+ }
+
+ void sub32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ sub32(key.value1, dest);
+ sub32(key.value2, dest);
+ } else
+ sub32(imm.asTrustedImm32(), dest);
+ }
+
+ void subPtr(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ subPtr(key.value1, dest);
+ subPtr(key.value2, dest);
+ } else
+ subPtr(imm.asTrustedImm32(), dest);
+ }
+
+ void xor32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 blind = xorBlindConstant(imm);
+ xor32(blind.value1, src, dest);
+ xor32(blind.value2, dest);
+ } else
+ xor32(imm.asTrustedImm32(), src, dest);
+ }
+
+ void xor32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 blind = xorBlindConstant(imm);
+ xor32(blind.value1, dest);
+ xor32(blind.value2, dest);
+ } else
+ xor32(imm.asTrustedImm32(), dest);
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
+ {
+ if (shouldBlind(right)) {
+ if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+ loadXorBlindedConstant(xorBlindConstant(right), scratchRegister);
+ return branch32(cond, left, scratchRegister);
+ }
+ // If we don't have a scratch register available for use, we'll just
+ // place a random number of nops.
+ uint32_t nopCount = random() & 3;
+ while (nopCount--)
+ nop();
+ return branch32(cond, left, right.asTrustedImm32());
+ }
+
+ return branch32(cond, left, right.asTrustedImm32());
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
+ {
+ if (src == dest) {
+ if (!scratchRegisterForBlinding()) {
+ // Release mode ASSERT, if this fails we will perform incorrect codegen.
+ CRASH();
+ }
+ }
+ if (shouldBlind(imm)) {
+ if (src == dest) {
+ if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+ move(src, scratchRegister);
+ src = scratchRegister;
+ }
+ }
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ return branchAdd32(cond, src, dest);
+ }
+ return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src == dest) {
+ if (!scratchRegisterForBlinding()) {
+ // Release mode ASSERT, if this fails we will perform incorrect codegen.
+ CRASH();
+ }
+ }
+ if (shouldBlind(imm)) {
+ if (src == dest) {
+ if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+ move(src, scratchRegister);
+ src = scratchRegister;
+ }
+ }
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ return branchMul32(cond, src, dest);
+ }
+ return branchMul32(cond, imm.asTrustedImm32(), src, dest);
+ }
+
+ // branchSub32 takes a scratch register as 32 bit platforms make use of this,
+ // with src == dst, and on x86-32 we don't have a platform scratch register.
+ Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
+ {
+ if (shouldBlind(imm)) {
+ ASSERT(scratch != dest);
+ ASSERT(scratch != src);
+ loadXorBlindedConstant(xorBlindConstant(imm), scratch);
+ return branchSub32(cond, src, scratch, dest);
+ }
+ return branchSub32(cond, src, imm.asTrustedImm32(), dest);
+ }
+
+ // Immediate shifts only have 5 controllable bits
+ // so we'll consider them safe for now.
+ TrustedImm32 trustedImm32ForShift(Imm32 imm)
+ {
+ return TrustedImm32(imm.asTrustedImm32().m_value & 31);
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ lshift32(trustedImm32ForShift(imm), dest);
+ }
+
+ void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
+ {
+ lshift32(src, trustedImm32ForShift(amount), dest);
+ }
+
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ rshift32(trustedImm32ForShift(imm), dest);
+ }
+
+ void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
+ {
+ rshift32(src, trustedImm32ForShift(amount), dest);
+ }
+
+ void urshift32(Imm32 imm, RegisterID dest)
+ {
+ urshift32(trustedImm32ForShift(imm), dest);
+ }
+
+ void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
+ {
+ urshift32(src, trustedImm32ForShift(amount), dest);
+ }
+#endif
};
} // namespace JSC
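
All three blinding schemes above rest on simple algebraic identities: the attacker-chosen constant never appears verbatim in the instruction stream, yet the pair of emitted operations recomputes it exactly. A self-contained check of those identities (32-bit here; the rotation scheme uses pointer width in the real code):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint32_t value = 0xdeadbeef;
        uint32_t key   = 0x1234;      // produced by keyForConstant()/random()

        // xorBlindConstant(): emit (value ^ key), then xor32(key).
        assert(((value ^ key) ^ key) == value);

        // additionBlindedConstant(): emit (value - key), then add32(key);
        // value1 + value2 == value by construction.
        assert(((value - key) + key) == value);

        // rotationBlindConstant(): emit rotl(value, r), then rotate right
        // by r (rotateRightPtr); r != 0 here keeps the C++ shifts defined.
        unsigned r = 13;
        uint32_t blinded  = (value << r) | (value >> (32 - r));
        uint32_t restored = (blinded >> r) | (blinded << (32 - r));
        assert(restored == value);
        return 0;
    }
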
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
index 51173895a..c0cd766cb 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -108,6 +108,11 @@ public:
add32(ARMRegisters::S1, dest);
}
+ void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.adds_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
void and32(RegisterID src, RegisterID dest)
{
m_assembler.ands_r(dest, dest, src);
@@ -122,6 +127,15 @@ public:
m_assembler.ands_r(dest, dest, w);
}
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
+ if (w & ARMAssembler::OP2_INV_IMM)
+ m_assembler.bics_r(dest, src, w & ~ARMAssembler::OP2_INV_IMM);
+ else
+ m_assembler.ands_r(dest, src, w);
+ }
+
void lshift32(RegisterID shift_amount, RegisterID dest)
{
ARMWord w = ARMAssembler::getOp2(0x1f);
@@ -156,11 +170,6 @@ public:
m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
}
- void not32(RegisterID dest)
- {
- m_assembler.mvns_r(dest, dest);
- }
-
void or32(RegisterID src, RegisterID dest)
{
m_assembler.orrs_r(dest, dest, src);
@@ -232,6 +241,11 @@ public:
sub32(ARMRegisters::S1, dest);
}
+ void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.subs_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
void xor32(RegisterID src, RegisterID dest)
{
m_assembler.eors_r(dest, dest, src);
@@ -239,7 +253,10 @@ public:
void xor32(TrustedImm32 imm, RegisterID dest)
{
- m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ if (imm.m_value == -1)
+ m_assembler.mvns_r(dest, dest);
+ else
+ m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
}
void countLeadingZeros32(RegisterID src, RegisterID dest)
@@ -547,6 +564,13 @@ public:
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(src, imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
{
if (src1 == dest) {
@@ -596,6 +620,13 @@ public:
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
+ Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(src, imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
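
A pattern that repeats across this commit (ARM here, and likewise ARMv7, MIPS, SH4 and x86): the dedicated not32() entry points are removed, and xor32() instead special-cases an immediate of -1, since XOR with all-ones is bitwise NOT. This keeps a single immediate-taking entry point, which the new blinding layer can wrap, while still emitting the cheap MVN/NOR/NOT forms when the constant happens to be -1. The identity itself:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        int32_t x = 0x1234abcd;
        assert((x ^ -1) == ~x);  // XOR with all-ones == bitwise NOT
        return 0;
    }
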
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
index d883abf4f..43ea2ed5a 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -51,14 +51,7 @@ public:
// Magic number is the biggest useful offset we can get on ARMv7 with
// a LDR_imm_T2 encoding
static const int MaximumCompactPtrAlignedAddressOffset = 124;
-
- MacroAssemblerARMv7()
- : m_inUninterruptedSequence(false)
- {
- }
- void beginUninterruptedSequence() { m_inUninterruptedSequence = true; }
- void endUninterruptedSequence() { m_inUninterruptedSequence = false; }
Vector<LinkRecord>& jumpsToLink() { return m_assembler.jumpsToLink(); }
void* unlinkedCode() { return m_assembler.unlinkedCode(); }
bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
@@ -303,11 +296,6 @@ public:
m_assembler.neg(srcDest, srcDest);
}
- void not32(RegisterID srcDest)
- {
- m_assembler.mvn(srcDest, srcDest);
- }
-
void or32(RegisterID src, RegisterID dest)
{
m_assembler.orr(dest, dest, src);
@@ -447,6 +435,11 @@ public:
void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
+ if (imm.m_value == -1) {
+ m_assembler.mvn(dest, src);
+ return;
+ }
+
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
if (armImm.isValid())
m_assembler.eor(dest, src, armImm);
@@ -463,7 +456,10 @@ public:
void xor32(TrustedImm32 imm, RegisterID dest)
{
- xor32(imm, dest, dest);
+ if (imm.m_value == -1)
+ m_assembler.mvn(dest, dest);
+ else
+ xor32(imm, dest, dest);
}
@@ -527,6 +523,7 @@ private:
unreachableForPlatform();
}
+protected:
void store32(RegisterID src, ArmAddress address)
{
if (address.type == ArmAddress::HasIndex)
@@ -541,6 +538,7 @@ private:
}
}
+private:
void store8(RegisterID src, ArmAddress address)
{
if (address.type == ArmAddress::HasIndex)
@@ -723,6 +721,26 @@ public:
store16(src, setupArmAddress(address));
}
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ static RegisterID scratchRegisterForBlinding() { return dataTempRegister; }
+ static bool shouldBlindForSpecificArch(uint32_t value)
+ {
+ ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);
+
+ // Couldn't be encoded as an immediate, so assume it's untrusted.
+ if (!immediate.isValid())
+ return true;
+
+ // If we can encode the immediate, we have less than 16 attacker
+ // controlled bits.
+ if (immediate.isEncodedImm())
+ return false;
+
+ // Don't let any more than 12 bits of an instruction word
+ // be controlled by an attacker.
+ return !immediate.isUInt12();
+ }
+#endif
// Floating-point operations:
@@ -789,7 +807,7 @@ public:
void storeDouble(FPRegisterID src, const void* address)
{
- move(ImmPtr(address), addressTempRegister);
+ move(TrustedImmPtr(address), addressTempRegister);
storeDouble(src, addressTempRegister);
}
@@ -883,6 +901,11 @@ public:
m_assembler.vabs(dest, src);
}
+ void negateDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vneg(dest, src);
+ }
+
void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
{
m_assembler.vmov(fpTempRegisterAsSingle(), src);
@@ -954,7 +977,7 @@ public:
// clamped to 0x80000000, so 2x dest is zero in this case. In the case of
// overflow the result will be equal to -2.
Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
- Jump noOverflow = branch32(NotEqual, dataTempRegister, Imm32(-2));
+ Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));
// For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
underflow.link(this);
@@ -1356,7 +1379,7 @@ public:
{
// Move the high bits of the address into addressTempRegister,
// and load the value into dataTempRegister.
- move(ImmPtr(dest.m_ptr), addressTempRegister);
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
// Do the add.
@@ -1369,7 +1392,7 @@ public:
// we'll need to reload it with the high bits of the address afterwards.
move(imm, addressTempRegister);
m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
- move(ImmPtr(dest.m_ptr), addressTempRegister);
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
}
// Store the result.
@@ -1401,6 +1424,13 @@ public:
return branchMul32(cond, dataTempRegister, src, dest);
}
+ Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+ {
+ ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
+ m_assembler.sub_S(srcDest, zero, srcDest);
+ return Jump(makeBranch(cond));
+ }
+
Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
{
m_assembler.orr_S(dest, dest, src);
@@ -1586,10 +1616,6 @@ public:
}
protected:
- bool inUninterruptedSequence()
- {
- return m_inUninterruptedSequence;
- }
ALWAYS_INLINE Jump jump()
{
@@ -1697,8 +1723,7 @@ private:
{
ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}
-
- bool m_inUninterruptedSequence;
+
};
} // namespace JSC
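
The ARMv7 blinding policy above encodes a bit-budget argument: a value that still fits Thumb-2's "encoded immediate" form hands an attacker at most 8 payload bits plus a shift, a plain UInt12 hands over 12, and anything larger must be blinded. A simplified standalone model of the decision tree; the predicate inputs stand in for ARMThumbImmediate's real encoders and are assumptions here:

    // Hypothetical model of shouldBlindForSpecificArch() on ARMv7; the three
    // booleans abstract ARMThumbImmediate's isValid/isEncodedImm/isUInt12.
    bool shouldBlindThumbImmediate(bool isValid, bool isEncodedImm, bool isUInt12)
    {
        if (!isValid)
            return true;     // not encodable at all: treat as untrusted
        if (isEncodedImm)
            return false;    // fewer than 16 attacker-controlled bits
        return !isUInt12;    // allow up to 12 bits, blind everything else
    }
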
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index 3d7d84534..ac62c4221 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -27,9 +27,9 @@
#define MacroAssemblerCodeRef_h
#include "ExecutableAllocator.h"
-#include "PassRefPtr.h"
-#include "RefPtr.h"
-#include "UnusedParam.h"
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefPtr.h>
+#include <wtf/UnusedParam.h>
// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
// instruction address on the platform (for example, check any alignment requirements).
@@ -280,6 +280,10 @@ public:
return result;
}
+ static MacroAssemblerCodePtr createLLIntCodePtr(void (*function)())
+ {
+ return createFromExecutableAddress(bitwise_cast<void*>(function));
+ }
explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
: m_value(ra.value())
{
@@ -340,6 +344,12 @@ public:
return MacroAssemblerCodeRef(codePtr);
}
+ // Helper for creating self-managed code refs from LLInt.
+ static MacroAssemblerCodeRef createLLIntCodeRef(void (*function)())
+ {
+ return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(bitwise_cast<void*>(function)));
+ }
+
ExecutableMemoryHandle* executableMemory() const
{
return m_executableMemory.get();
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
index f1aa6d4ad..910bc5a47 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
@@ -132,6 +132,11 @@ public:
}
}
+ void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, src, dest);
+ }
+
void add32(TrustedImm32 imm, Address address)
{
if (address.offset >= -32768 && address.offset <= 32767
@@ -291,11 +296,6 @@ public:
m_assembler.subu(srcDest, MIPSRegisters::zero, srcDest);
}
- void not32(RegisterID srcDest)
- {
- m_assembler.nor(srcDest, srcDest, MIPSRegisters::zero);
- }
-
void or32(RegisterID src, RegisterID dest)
{
m_assembler.orInsn(dest, dest, src);
@@ -373,6 +373,24 @@ public:
}
}
+ void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_isPointer && imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth) {
+ /*
+ addiu dest, src, imm
+ */
+ m_assembler.addiu(dest, src, -imm.m_value);
+ } else {
+ /*
+ li immTemp, imm
+ subu dest, src, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.subu(dest, src, immTempRegister);
+ }
+ }
+
void sub32(TrustedImm32 imm, Address address)
{
if (address.offset >= -32768 && address.offset <= 32767
@@ -458,6 +476,11 @@ public:
void xor32(TrustedImm32 imm, RegisterID dest)
{
+ if (imm.m_value == -1) {
+ m_assembler.nor(dest, dest, MIPSRegisters::zero);
+ return;
+ }
+
/*
li immTemp, imm
xor dest, dest, immTemp
@@ -1203,6 +1226,13 @@ public:
return branchAdd32(cond, immTempRegister, dest);
}
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ move(src, dest);
+ return branchAdd32(cond, immTempRegister, dest);
+ }
+
Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
@@ -1308,6 +1338,13 @@ public:
return branchSub32(cond, immTempRegister, dest);
}
+ Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ move(src, dest);
+ return branchSub32(cond, immTempRegister, dest);
+ }
+
Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
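
The asymmetric bounds in the new MIPS sub32 (imm >= -32767 && imm <= 32768, rather than the usual -32768..32767) are deliberate: the fast path negates the immediate and emits addiu dest, src, -imm, so it is -imm that must fit addiu's signed 16-bit field. A quick check:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // addiu takes a signed 16-bit immediate: [-32768, 32767].
        // sub32 emits addiu(dest, src, -imm), so imm itself must lie in
        // the negated range [-32767, 32768].
        int32_t imm = 32768;
        assert(-imm >= -32768 && -imm <= 32767);  // ok: -32768 fits
        imm = -32768;
        assert(!(-imm <= 32767));                 // 32768 would not fit
        return 0;
    }
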
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h b/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h
index a84f33748..2b5c0cc44 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h
@@ -209,11 +209,6 @@ public:
releaseScratch(scr);
}
- void not32(RegisterID src, RegisterID dest)
- {
- m_assembler.notlReg(src, dest);
- }
-
void or32(RegisterID src, RegisterID dest)
{
m_assembler.orlRegReg(src, dest);
@@ -380,6 +375,11 @@ public:
void xor32(TrustedImm32 imm, RegisterID srcDest)
{
+ if (imm.m_value == -1) {
+ m_assembler.notlReg(srcDest, srcDest);
+ return;
+ }
+
if ((srcDest != SH4Registers::r0) || (imm.m_value > 255) || (imm.m_value < 0)) {
RegisterID scr = claimScratch();
m_assembler.loadConstant((imm.m_value), scr);
@@ -1565,6 +1565,33 @@ public:
return branchAdd32(cond, scratchReg3, dest);
}
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (src != dest)
+ move(src, dest);
+
+ if (cond == Overflow) {
+ move(imm, scratchReg3);
+ m_assembler.addvlRegReg(scratchReg3, dest);
+ return branchTrue();
+ }
+
+ add32(imm, dest);
+
+ if (cond == Signed) {
+ m_assembler.cmppz(dest);
+ return branchFalse();
+ }
+
+ compare32(0, dest, Equal);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
@@ -1642,6 +1669,14 @@ public:
return branchSub32(cond, scratchReg3, dest);
}
+ Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, scratchReg3);
+ if (src != dest)
+ move(src, dest);
+ return branchSub32(cond, scratchReg3, dest);
+ }
+
Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
@@ -1681,11 +1716,6 @@ public:
m_assembler.neg(dst, dst);
}
- void not32(RegisterID dst)
- {
- m_assembler.notlReg(dst, dst);
- }
-
void urshift32(RegisterID shiftamount, RegisterID dest)
{
if (shiftamount == SH4Registers::r0)
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
index cb2450f62..088fe196b 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -93,12 +93,6 @@ public:
m_assembler.addsd_mr(address.m_ptr, dest);
}
- void loadDouble(const void* address, FPRegisterID dest)
- {
- ASSERT(isSSE2Present());
- m_assembler.movsd_mr(address, dest);
- }
-
void storeDouble(FPRegisterID src, const void* address)
{
ASSERT(isSSE2Present());
@@ -110,14 +104,6 @@ public:
m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
}
- void absDouble(FPRegisterID src, FPRegisterID dst)
- {
- ASSERT(src != dst);
- static const double negativeZeroConstant = -0.0;
- loadDouble(&negativeZeroConstant, dst);
- m_assembler.andnpd_rr(src, dst);
- }
-
void store32(TrustedImm32 imm, void* address)
{
m_assembler.movl_i32m(imm.m_value, address);
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
index e6c39598b..8cb442cc5 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -34,6 +34,11 @@
namespace JSC {
class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
+protected:
+#if CPU(X86_64)
+ static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
+#endif
+
static const int DoubleConditionBitInvert = 0x10;
static const int DoubleConditionBitSpecial = 0x20;
static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
@@ -86,6 +91,13 @@ public:
static const RegisterID stackPointerRegister = X86Registers::esp;
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
+#if CPU(X86_64)
+ static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
+#endif
+#endif
+
// Integer arithmetic operations:
//
// Operations are typically two operand - operation(source, srcDst)
@@ -223,16 +235,6 @@ public:
m_assembler.negl_m(srcDest.offset, srcDest.base);
}
- void not32(RegisterID srcDest)
- {
- m_assembler.notl_r(srcDest);
- }
-
- void not32(Address srcDest)
- {
- m_assembler.notl_m(srcDest.offset, srcDest.base);
- }
-
void or32(RegisterID src, RegisterID dest)
{
m_assembler.orl_rr(src, dest);
@@ -375,7 +377,6 @@ public:
m_assembler.subl_rm(src, dest.offset, dest.base);
}
-
void xor32(RegisterID src, RegisterID dest)
{
m_assembler.xorl_rr(src, dest);
@@ -383,11 +384,17 @@ public:
void xor32(TrustedImm32 imm, Address dest)
{
- m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
+ if (imm.m_value == -1)
+ m_assembler.notl_m(dest.offset, dest.base);
+ else
+ m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
}
void xor32(TrustedImm32 imm, RegisterID dest)
{
+ if (imm.m_value == -1)
+ m_assembler.notl_r(dest);
+ else
m_assembler.xorl_ir(imm.m_value, dest);
}
@@ -424,6 +431,23 @@ public:
m_assembler.sqrtsd_rr(src, dst);
}
+ void absDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(src != dst);
+ static const double negativeZeroConstant = -0.0;
+ loadDouble(&negativeZeroConstant, dst);
+ m_assembler.andnpd_rr(src, dst);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(src != dst);
+ static const double negativeZeroConstant = -0.0;
+ loadDouble(&negativeZeroConstant, dst);
+ m_assembler.xorpd_rr(src, dst);
+ }
+
+
// Memory access operations:
//
// Loads are of the form load(address, destination) and stores of the form
@@ -620,6 +644,17 @@ public:
m_assembler.movsd_rr(src, dest);
}
+ void loadDouble(const void* address, FPRegisterID dest)
+ {
+#if CPU(X86)
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address, dest);
+#else
+ move(TrustedImmPtr(address), scratchRegister);
+ loadDouble(scratchRegister, dest);
+#endif
+ }
+
void loadDouble(ImplicitAddress address, FPRegisterID dest)
{
ASSERT(isSSE2Present());
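
Both absDouble and negateDouble above work by bit-twiddling against the constant -0.0, whose only set bit is the IEEE-754 sign bit: ANDNPD with it clears the sign (absolute value), while XORPD flips it (negation). The same identities in scalar C++:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static uint64_t bits(double d) { uint64_t b; std::memcpy(&b, &d, 8); return b; }
    static double fromBits(uint64_t b) { double d; std::memcpy(&d, &b, 8); return d; }

    int main()
    {
        const uint64_t signBit = bits(-0.0);  // 0x8000000000000000
        double x = -3.5;
        // andnpd dst,src computes dst = ~dst & src; with dst preloaded
        // to -0.0 this clears the sign bit of src (absDouble):
        assert(fromBits(~signBit & bits(x)) == 3.5);
        // xorpd with -0.0 flips the sign bit (negateDouble):
        assert(fromBits(signBit ^ bits(x)) == 3.5);
        assert(fromBits(signBit ^ bits(3.5)) == -3.5);
        return 0;
    }
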
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index 846049999..a2b4311e5 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -35,9 +35,6 @@
namespace JSC {
class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
-protected:
- static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
-
public:
static const Scale ScalePtr = TimesEight;
@@ -88,12 +85,6 @@ public:
}
}
- void loadDouble(const void* address, FPRegisterID dest)
- {
- move(TrustedImmPtr(address), scratchRegister);
- loadDouble(scratchRegister, dest);
- }
-
void addDouble(AbsoluteAddress address, FPRegisterID dest)
{
move(TrustedImmPtr(address.m_ptr), scratchRegister);
@@ -106,14 +97,6 @@ public:
m_assembler.cvtsi2sd_rr(scratchRegister, dest);
}
- void absDouble(FPRegisterID src, FPRegisterID dst)
- {
- ASSERT(src != dst);
- static const double negativeZeroConstant = -0.0;
- loadDouble(&negativeZeroConstant, dst);
- m_assembler.andnpd_rr(src, dst);
- }
-
void store32(TrustedImm32 imm, void* address)
{
move(TrustedImmPtr(address), scratchRegister);
@@ -233,6 +216,11 @@ public:
move(src, dest);
orPtr(imm, dest);
}
+
+ void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
+ {
+ m_assembler.rorq_i8r(imm.m_value, srcDst);
+ }
void subPtr(RegisterID src, RegisterID dest)
{
@@ -254,13 +242,17 @@ public:
{
m_assembler.xorq_rr(src, dest);
}
+
+ void xorPtr(RegisterID src, Address dest)
+ {
+ m_assembler.xorq_rm(src, dest.offset, dest.base);
+ }
void xorPtr(TrustedImm32 imm, RegisterID srcDest)
{
m_assembler.xorq_ir(imm.m_value, srcDest);
}
-
void loadPtr(ImplicitAddress address, RegisterID dest)
{
m_assembler.movq_mr(address.offset, address.base, dest);
@@ -351,6 +343,13 @@ public:
m_assembler.movzbl_rr(dest, dest);
}
+ void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmpq_rr(right, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
{
move(TrustedImmPtr(dest.m_ptr), scratchRegister);
@@ -459,6 +458,12 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond)));
}
+ Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+ {
+ move(src1, dest);
+ return branchSubPtr(cond, src2, dest);
+ }
+
DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
{
m_assembler.movq_i64r(initialValue.asIntptr(), dest);
@@ -503,6 +508,8 @@ public:
return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation()));
}
+ static RegisterID scratchRegisterForBlinding() { return scratchRegister; }
+
private:
friend class LinkBuffer;
friend class RepatchBuffer;
diff --git a/Source/JavaScriptCore/assembler/SH4Assembler.h b/Source/JavaScriptCore/assembler/SH4Assembler.h
index 280a5de85..1cf96b735 100644
--- a/Source/JavaScriptCore/assembler/SH4Assembler.h
+++ b/Source/JavaScriptCore/assembler/SH4Assembler.h
@@ -31,6 +31,7 @@
#include "AssemblerBuffer.h"
#include "AssemblerBufferWithConstantPool.h"
+#include "JITCompilationEffort.h"
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
@@ -1514,9 +1515,9 @@ public:
return reinterpret_cast<void*>(readPCrelativeAddress((*instructionPtr & 0xff), instructionPtr));
}
- PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID)
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
{
- return m_buffer.executableCopy(globalData, ownerUID);
+ return m_buffer.executableCopy(globalData, ownerUID, effort);
}
void prefix(uint16_t pre)
diff --git a/Source/JavaScriptCore/assembler/X86Assembler.h b/Source/JavaScriptCore/assembler/X86Assembler.h
index 25f8602e8..b24fffb8f 100644
--- a/Source/JavaScriptCore/assembler/X86Assembler.h
+++ b/Source/JavaScriptCore/assembler/X86Assembler.h
@@ -29,6 +29,7 @@
#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
#include "AssemblerBuffer.h"
+#include "JITCompilationEffort.h"
#include <stdint.h>
#include <wtf/Assertions.h>
#include <wtf/Vector.h>
@@ -215,7 +216,12 @@ private:
GROUP1_OP_CMP = 7,
GROUP1A_OP_POP = 0,
-
+
+ GROUP2_OP_ROL = 0,
+ GROUP2_OP_ROR = 1,
+ GROUP2_OP_RCL = 2,
+ GROUP2_OP_RCR = 3,
+
GROUP2_OP_SHL = 4,
GROUP2_OP_SHR = 5,
GROUP2_OP_SAR = 7,
@@ -635,6 +641,22 @@ public:
m_formatter.immediate32(imm);
}
}
+
+ void xorq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset);
+ }
+
+ void rorq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_ROR, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_ROR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
#endif
void sarl_i8r(int imm, RegisterID dst)
@@ -1782,9 +1804,9 @@ public:
return b.m_offset - a.m_offset;
}
- PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID)
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
{
- return m_formatter.executableCopy(globalData, ownerUID);
+ return m_formatter.executableCopy(globalData, ownerUID, effort);
}
unsigned debugOffset() { return m_formatter.debugOffset(); }
@@ -2130,9 +2152,9 @@ private:
bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
void* data() const { return m_buffer.data(); }
- PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID)
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
{
- return m_buffer.executableCopy(globalData, ownerUID);
+ return m_buffer.executableCopy(globalData, ownerUID, effort);
}
unsigned debugOffset() { return m_buffer.debugOffset(); }
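
The imm == 1 special case in rorq_i8r follows x86 encoding: rotates by 1 have a dedicated, one-byte-shorter form (opcode 0xD1, OP_GROUP2_Ev1 in this assembler's naming) while general rotates take an imm8 (opcode 0xC1, OP_GROUP2_EvIb). Semantically the instruction is the rotate that undoes rotationBlindConstant's left rotation on x86-64, as a quick model shows:

    #include <cassert>
    #include <cstdint>

    // What 'rorq $r, reg' computes; r restricted to 1..63 here to avoid
    // the undefined shift-by-64 in plain C++.
    static uint64_t rorq(uint64_t v, unsigned r)
    {
        return (v >> r) | (v << (64 - r));
    }

    int main()
    {
        uint64_t v = 0x0123456789abcdefULL;
        unsigned r = 13;
        uint64_t blinded = (v << r) | (v >> (64 - r));  // rotationBlindConstant
        assert(rorq(blinded, r) == v);  // loadRotationBlindedConstant undoes it
        return 0;
    }
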