author     Oswald Buddenhagen <oswald.buddenhagen@qt.io>  2017-05-30 12:48:17 +0200
committer  Oswald Buddenhagen <oswald.buddenhagen@qt.io>  2017-05-30 12:48:17 +0200
commit     881da28418d380042aa95a97f0cbd42560a64f7c (patch)
tree       a794dff3274695e99c651902dde93d934ea7a5af /Source/JavaScriptCore/assembler/MacroAssemblerARM.h
parent     7e104c57a70fdf551bb3d22a5d637cdcbc69dbea (diff)
parent     0fcedcd17cc00d3dd44c718b3cb36c1033319671 (diff)
Merge 'wip/next' into dev
Change-Id: Iff9ee5e23bb326c4371ec8ed81d56f2f05d680e9
Diffstat (limited to 'Source/JavaScriptCore/assembler/MacroAssemblerARM.h')
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARM.h  208
1 file changed, 186 insertions(+), 22 deletions(-)
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
index d9093413f..749cbab11 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc.
+ * Copyright (C) 2008, 2013-2015 Apple Inc.
* Copyright (C) 2009, 2010 University of Szeged
* All rights reserved.
*
@@ -35,7 +35,7 @@
namespace JSC {
-class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
+class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler, MacroAssemblerARM> {
static const int DoubleConditionMask = 0x0f;
static const int DoubleConditionBitSpecial = 0x10;
COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
@@ -81,6 +81,7 @@ public:
};
static const RegisterID stackPointerRegister = ARMRegisters::sp;
+ static const RegisterID framePointerRegister = ARMRegisters::fp;
static const RegisterID linkRegister = ARMRegisters::lr;
static const Scale ScalePtr = TimesFour;
@@ -227,13 +228,24 @@ public:
store32(ARMRegisters::S1, ARMRegisters::S0);
}
+ void or32(TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
+ load32(Address(ARMRegisters::S0), ARMRegisters::S1);
+ or32(imm, ARMRegisters::S1); // It uses S0 as temporary register, we need to reload the address.
+ move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
+ store32(ARMRegisters::S1, ARMRegisters::S0);
+ }
+
void or32(TrustedImm32 imm, RegisterID dest)
{
+ ASSERT(dest != ARMRegisters::S0);
m_assembler.orrs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
}
void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
+ ASSERT(src != ARMRegisters::S0);
m_assembler.orrs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
}
@@ -262,7 +274,10 @@ public:
void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
{
- m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f));
+ if (!imm.m_value)
+ move(src, dest);
+ else
+ m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f));
}
void urshift32(RegisterID shiftAmount, RegisterID dest)
@@ -285,7 +300,10 @@ public:
void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
{
- m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
+ if (!imm.m_value)
+ move(src, dest);
+ else
+ m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
}
void sub32(RegisterID src, RegisterID dest)
@@ -363,7 +381,13 @@ public:
m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
- void load8Signed(BaseIndex address, RegisterID dest)
+ void load8(const void* address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address), ARMRegisters::S0);
+ m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, ARMRegisters::S0, 0);
+ }
+
+ void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
{
m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
@@ -378,7 +402,7 @@ public:
m_assembler.baseIndexTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
- void load16Signed(BaseIndex address, RegisterID dest)
+ void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
{
m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
@@ -393,7 +417,7 @@ public:
m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
-#if !CPU(ARM_FEATURE_UNALIGNED)
+#if CPU(ARMV5_OR_LOWER)
void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
@@ -407,6 +431,18 @@ public:
load16(address, dest);
}
+ void abortWithReason(AbortReason reason)
+ {
+ move(TrustedImm32(reason), ARMRegisters::S0);
+ breakpoint();
+ }
+
+ void abortWithReason(AbortReason reason, intptr_t misc)
+ {
+ move(TrustedImm32(misc), ARMRegisters::S1);
+ abortWithReason(reason);
+ }
+
ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
{
ConvertibleLoadLabel result(this);
@@ -452,6 +488,23 @@ public:
m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
+ void store8(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.dtrUp(ARMAssembler::StoreUint8, src, address.base, address.offset);
+ }
+
+ void store8(RegisterID src, const void* address)
+ {
+ move(TrustedImmPtr(address), ARMRegisters::S0);
+ m_assembler.dtrUp(ARMAssembler::StoreUint8, src, ARMRegisters::S0, 0);
+ }
+
+ void store8(TrustedImm32 imm, ImplicitAddress address)
+ {
+ move(imm, ARMRegisters::S1);
+ store8(ARMRegisters::S1, address);
+ }
+
void store8(TrustedImm32 imm, const void* address)
{
move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
@@ -504,6 +557,12 @@ public:
m_assembler.pop(dest);
}
+ void popPair(RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.pop(dest1);
+ m_assembler.pop(dest2);
+ }
+
void push(RegisterID src)
{
m_assembler.push(src);
@@ -521,6 +580,12 @@ public:
push(ARMRegisters::S0);
}
+ void pushPair(RegisterID src1, RegisterID src2)
+ {
+ m_assembler.push(src2);
+ m_assembler.push(src1);
+ }
+
void move(TrustedImm32 imm, RegisterID dest)
{
m_assembler.moveImm(imm.m_value, dest);
@@ -569,6 +634,13 @@ public:
return branch32(cond, ARMRegisters::S1, right);
}
+ Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ move(TrustedImmPtr(left.m_ptr), ARMRegisters::S1);
+ load8(Address(ARMRegisters::S1), ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
{
m_assembler.cmp(left, right);
@@ -617,6 +689,12 @@ public:
return branchTest32(cond, ARMRegisters::S1, mask);
}
+ Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load8(address, ARMRegisters::S1);
+ return branchTest32(cond, ARMRegisters::S1, mask);
+ }
+
Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
{
move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
@@ -626,14 +704,14 @@ public:
Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
{
- ASSERT((cond == Zero) || (cond == NonZero));
+ ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
m_assembler.tst(reg, mask);
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
{
- ASSERT((cond == Zero) || (cond == NonZero));
+ ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
if (w & ARMAssembler::Op2InvertedImmediate)
m_assembler.bics(ARMRegisters::S0, reg, w & ~ARMAssembler::Op2InvertedImmediate);
@@ -764,7 +842,7 @@ public:
return branchMul32(cond, src, dest, dest);
}
- Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
if (cond == Overflow) {
@@ -819,6 +897,11 @@ public:
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
+ PatchableJump patchableJump()
+ {
+ return PatchableJump(m_assembler.jmp(ARMAssembler::AL, 1));
+ }
+
PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
{
internalCompare32(reg, imm);
@@ -838,6 +921,11 @@ public:
return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
}
+ Call nearTailCall()
+ {
+ return Call(m_assembler.jmp(), Call::LinkableNearTail);
+ }
+
Call call(RegisterID target)
{
return Call(m_assembler.blx(target), Call::None);
@@ -876,7 +964,7 @@ public:
void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
{
if (mask.m_value == -1)
- m_assembler.cmp(0, reg);
+ m_assembler.tst(reg, reg);
else
m_assembler.tst(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
@@ -990,6 +1078,13 @@ public:
return dataLabel;
}
+ DataLabel32 moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
+ {
+ DataLabel32 dataLabel(this);
+ m_assembler.ldrUniqueImmediate(dest, static_cast<ARMWord>(initialValue.m_value));
+ return dataLabel;
+ }
+
Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
@@ -1007,6 +1102,15 @@ public:
return jump;
}
+ Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ load32(left, ARMRegisters::S1);
+ ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
+ dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
+ Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
+ return jump;
+ }
+
DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
{
DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
@@ -1035,6 +1139,7 @@ public:
return s_isVFPPresent;
}
static bool supportsFloatingPointAbs() { return false; }
+ static bool supportsFloatingPointRounding() { return false; }
void loadFloat(BaseIndex address, FPRegisterID dest)
{
@@ -1051,12 +1156,24 @@ public:
m_assembler.baseIndexTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
- void loadDouble(const void* address, FPRegisterID dest)
+ void loadDouble(TrustedImmPtr address, FPRegisterID dest)
{
- move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
+ move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
m_assembler.doubleDtrUp(ARMAssembler::LoadDouble, dest, ARMRegisters::S0, 0);
}
+ NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID)
+ {
+ ASSERT(!supportsFloatingPointRounding());
+ CRASH();
+ }
+
+ NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID)
+ {
+ ASSERT(!supportsFloatingPointRounding());
+ CRASH();
+ }
+
void storeFloat(FPRegisterID src, BaseIndex address)
{
m_assembler.baseIndexTransferFloat(ARMAssembler::StoreFloat, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
@@ -1072,9 +1189,9 @@ public:
m_assembler.baseIndexTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
- void storeDouble(FPRegisterID src, const void* address)
+ void storeDouble(FPRegisterID src, TrustedImmPtr address)
{
- move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
+ move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, ARMRegisters::S0, 0);
}
@@ -1084,6 +1201,12 @@ public:
m_assembler.vmov_f64(dest, src);
}
+ void moveZeroToDouble(FPRegisterID reg)
+ {
+ static double zeroConstant = 0.;
+ loadDouble(TrustedImmPtr(&zeroConstant), reg);
+ }
+
void addDouble(FPRegisterID src, FPRegisterID dest)
{
m_assembler.vadd_f64(dest, dest, src);
@@ -1102,7 +1225,7 @@ public:
void addDouble(AbsoluteAddress address, FPRegisterID dest)
{
- loadDouble(address.m_ptr, ARMRegisters::SD0);
+ loadDouble(TrustedImmPtr(address.m_ptr), ARMRegisters::SD0);
addDouble(ARMRegisters::SD0, dest);
}
@@ -1294,6 +1417,11 @@ public:
m_assembler.nop();
}
+ void memoryFence()
+ {
+ m_assembler.dmbSY();
+ }
+
static FunctionPtr readCallTarget(CodeLocationCall call)
{
return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation())));
@@ -1311,6 +1439,13 @@ public:
}
static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+ static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+
+ static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
{
@@ -1328,11 +1463,30 @@ public:
ARMAssembler::revertBranchPtrWithPatch(instructionStart.dataLocation(), reg, reinterpret_cast<uintptr_t>(initialValue) & 0xffff);
}
+ static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
{
UNREACHABLE_FOR_PLATFORM();
}
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+#if ENABLE(MASM_PROBE)
+ void probe(ProbeFunction, void* arg1, void* arg2);
+#endif // ENABLE(MASM_PROBE)
+
protected:
ARMAssembler::Condition ARMCondition(RelationalCondition cond)
{
@@ -1362,7 +1516,6 @@ protected:
private:
friend class LinkBuffer;
- friend class RepatchBuffer;
void internalCompare32(RegisterID left, TrustedImm32 right)
{
@@ -1375,18 +1528,29 @@ private:
static void linkCall(void* code, Call call, FunctionPtr function)
{
- ARMAssembler::linkCall(code, call.m_label, function.value());
+ if (call.isFlagSet(Call::Tail))
+ ARMAssembler::linkJump(code, call.m_label, function.value());
+ else
+ ARMAssembler::linkCall(code, call.m_label, function.value());
}
- static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+
+#if ENABLE(MASM_PROBE)
+ inline TrustedImm32 trustedImm32FromPtr(void* ptr)
{
- ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ return TrustedImm32(TrustedImmPtr(ptr));
}
- static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function)
{
- ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+ }
+
+ inline TrustedImm32 trustedImm32FromPtr(void (*function)())
+ {
+ return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
}
+#endif
static const bool s_isVFPPresent;
};
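
The new helpers above are driven through the ordinary MacroAssembler interface. The fragment below is a minimal illustrative sketch, not part of the commit: it assumes a JSC build in which MacroAssemblerARM.h is includable, the function name emitExampleSequence and the register choices are hypothetical, and only methods added or shown in this diff (pushPair/popPair, the store8 overloads, moveZeroToDouble, memoryFence) are used.

#include "MacroAssemblerARM.h"

using namespace JSC;

// Hypothetical helper: emits a short sequence using the APIs added in this patch.
static void emitExampleSequence(MacroAssemblerARM& masm)
{
    // pushPair pushes src2 first, so popPair(dest1, dest2) below restores the
    // same registers in matching order.
    masm.pushPair(ARMRegisters::r4, ARMRegisters::r5);

    // New store8(TrustedImm32, ImplicitAddress) overload: write a byte flag at fp+8.
    masm.store8(MacroAssemblerARM::TrustedImm32(1),
                MacroAssemblerARM::Address(ARMRegisters::fp, 8));

    // New moveZeroToDouble helper: loads a constant 0.0 into a VFP register.
    masm.moveZeroToDouble(ARMRegisters::SD0);

    // New memoryFence helper: emits a full data memory barrier (dmb sy).
    masm.memoryFence();

    masm.popPair(ARMRegisters::r4, ARMRegisters::r5);
}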