Diffstat (limited to 'Source/JavaScriptCore/assembler')
-rw-r--r--  Source/JavaScriptCore/assembler/ARM64Assembler.h                  | 3688
-rw-r--r--  Source/JavaScriptCore/assembler/ARMAssembler.cpp                  |  425
-rw-r--r--  Source/JavaScriptCore/assembler/ARMAssembler.h                    | 1187
-rw-r--r--  Source/JavaScriptCore/assembler/ARMv7Assembler.h                  | 2872
-rw-r--r--  Source/JavaScriptCore/assembler/AbortReason.h                     |   77
-rw-r--r--  Source/JavaScriptCore/assembler/AbstractMacroAssembler.h          | 1328
-rw-r--r--  Source/JavaScriptCore/assembler/AssemblerBuffer.h                 |  209
-rw-r--r--  Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h |  336
-rw-r--r--  Source/JavaScriptCore/assembler/CodeLocation.h                    |  218
-rw-r--r--  Source/JavaScriptCore/assembler/LinkBuffer.cpp                    |  311
-rw-r--r--  Source/JavaScriptCore/assembler/LinkBuffer.h                      |  355
-rw-r--r--  Source/JavaScriptCore/assembler/MIPSAssembler.h                   | 1090
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssembler.cpp                |   38
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssembler.h                  | 1607
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp             |  170
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARM.h               | 1523
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARM64.h             | 2949
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp           |  107
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h             | 2047
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h           |  467
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h              | 2822
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerSH4.h               | 2629
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerX86.h               |  372
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp       |  150
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h         | 1620
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h            |  884
-rw-r--r--  Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h   |   88
-rw-r--r--  Source/JavaScriptCore/assembler/RepatchBuffer.h                   |  201
-rw-r--r--  Source/JavaScriptCore/assembler/SH4Assembler.h                    | 2225
-rw-r--r--  Source/JavaScriptCore/assembler/X86Assembler.h                    | 2850
30 files changed, 34845 insertions, 0 deletions
diff --git a/Source/JavaScriptCore/assembler/ARM64Assembler.h b/Source/JavaScriptCore/assembler/ARM64Assembler.h
new file mode 100644
index 000000000..2b5fec622
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/ARM64Assembler.h
@@ -0,0 +1,3688 @@
+/*
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARM64Assembler_h
+#define ARM64Assembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(ARM64)
+
+#include "AssemblerBuffer.h"
+#include <limits.h>
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+#include <stdint.h>
+
+#define CHECK_DATASIZE_OF(datasize) ASSERT(datasize == 32 || datasize == 64)
+#define DATASIZE_OF(datasize) ((datasize == 64) ? Datasize_64 : Datasize_32)
+#define MEMOPSIZE_OF(datasize) ((datasize == 8 || datasize == 128) ? MemOpSize_8_or_128 : (datasize == 16) ? MemOpSize_16 : (datasize == 32) ? MemOpSize_32 : MemOpSize_64)
+#define CHECK_DATASIZE() CHECK_DATASIZE_OF(datasize)
+#define DATASIZE DATASIZE_OF(datasize)
+#define MEMOPSIZE MEMOPSIZE_OF(datasize)
+#define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128)
+#define MEMPAIROPSIZE_INT(datasize) ((datasize == 64) ? MemPairOp_64 : MemPairOp_32)
+#define MEMPAIROPSIZE_FP(datasize) ((datasize == 128) ? MemPairOp_V128 : (datasize == 64) ? MemPairOp_V64 : MemPairOp_32)
+
+namespace JSC {
+
+ALWAYS_INLINE bool isInt7(int32_t value)
+{
+ return value == ((value << 25) >> 25);
+}
+
+ALWAYS_INLINE bool isInt9(int32_t value)
+{
+ return value == ((value << 23) >> 23);
+}
+
+ALWAYS_INLINE bool isInt11(int32_t value)
+{
+ return value == ((value << 21) >> 21);
+}
+
+ALWAYS_INLINE bool isUInt5(int32_t value)
+{
+ return !(value & ~0x1f);
+}
+
+ALWAYS_INLINE bool isUInt12(int32_t value)
+{
+ return !(value & ~0xfff);
+}
+
+ALWAYS_INLINE bool isUInt12(intptr_t value)
+{
+ return !(value & ~0xfffL);
+}
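+
+// Illustrative note (not in the original source): the isInt predicates above
+// test whether a value fits an N-bit two's-complement field by round-tripping
+// it through a sign extension: shifting left by (32 - N) and arithmetic
+// shifting back reproduces the value only if bits 31..N-1 all equal the sign
+// bit. For N = 9 the representable range is -256..255, so:
+//     isInt9(255); // true:  (255 << 23) >> 23 == 255
+//     isInt9(256); // false: (256 << 23) >> 23 == -256
+// A generic sketch of the same check: template<int N> bool isIntN(int32_t v)
+// { return v == ((v << (32 - N)) >> (32 - N)); }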
+
+class UInt5 {
+public:
+ explicit UInt5(int value)
+ : m_value(value)
+ {
+ ASSERT(isUInt5(value));
+ }
+
+ operator int() { return m_value; }
+
+private:
+ int m_value;
+};
+
+class UInt12 {
+public:
+ explicit UInt12(int value)
+ : m_value(value)
+ {
+ ASSERT(isUInt12(value));
+ }
+
+ operator int() { return m_value; }
+
+private:
+ int m_value;
+};
+
+class PostIndex {
+public:
+ explicit PostIndex(int value)
+ : m_value(value)
+ {
+ ASSERT(isInt9(value));
+ }
+
+ operator int() { return m_value; }
+
+private:
+ int m_value;
+};
+
+class PreIndex {
+public:
+ explicit PreIndex(int value)
+ : m_value(value)
+ {
+ ASSERT(isInt9(value));
+ }
+
+ operator int() { return m_value; }
+
+private:
+ int m_value;
+};
+
+class PairPostIndex {
+public:
+ explicit PairPostIndex(int value)
+ : m_value(value)
+ {
+ ASSERT(isInt11(value));
+ }
+
+ operator int() { return m_value; }
+
+private:
+ int m_value;
+};
+
+class PairPreIndex {
+public:
+ explicit PairPreIndex(int value)
+ : m_value(value)
+ {
+ ASSERT(isInt11(value));
+ }
+
+ operator int() { return m_value; }
+
+private:
+ int m_value;
+};
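+
+// Illustrative note (not in the original source): these thin wrapper types
+// exist chiefly to select the intended addressing-mode overload at the call
+// site, and their constructors ASSERT the immediate is in range when it is
+// wrapped rather than when it is encoded. A hypothetical caller:
+//     assembler.ldr<64>(x0, x1, 16u);           // scaled unsigned offset: [x1, #16]
+//     assembler.ldr<64>(x0, x1, PostIndex(8));  // load from [x1], then x1 += 8
+//     assembler.ldr<64>(x0, x1, PreIndex(-16)); // x1 -= 16, then load from [x1]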
+
+class LogicalImmediate {
+public:
+ static LogicalImmediate create32(uint32_t value)
+ {
+ // Check for 0, -1 - these cannot be encoded.
+ if (!value || !~value)
+ return InvalidLogicalImmediate;
+
+ // First look for a 32-bit pattern, then for repeating 16-bit
+ // patterns, 8-bit, 4-bit, and finally 2-bit.
+
+ unsigned hsb, lsb;
+ bool inverted;
+ if (findBitRange<32>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<32>(hsb, lsb, inverted);
+
+ if ((value & 0xffff) != (value >> 16))
+ return InvalidLogicalImmediate;
+ value &= 0xffff;
+
+ if (findBitRange<16>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<16>(hsb, lsb, inverted);
+
+ if ((value & 0xff) != (value >> 8))
+ return InvalidLogicalImmediate;
+ value &= 0xff;
+
+ if (findBitRange<8>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<8>(hsb, lsb, inverted);
+
+ if ((value & 0xf) != (value >> 4))
+ return InvalidLogicalImmediate;
+ value &= 0xf;
+
+ if (findBitRange<4>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<4>(hsb, lsb, inverted);
+
+ if ((value & 0x3) != (value >> 2))
+ return InvalidLogicalImmediate;
+ value &= 0x3;
+
+ if (findBitRange<2>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<2>(hsb, lsb, inverted);
+
+ return InvalidLogicalImmediate;
+ }
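+
+ // Worked example (illustrative, not in the original source): for
+ // create32(0x00ff00ff), findBitRange<32> fails (the value holds four bit
+ // ranges), but the two halfwords match (both 0x00ff), so the search narrows
+ // to the 16-bit value 0xff. findBitRange<16> then finds hsb = 7, lsb = 0,
+ // not inverted, and encodeLogicalImmediate<16>(7, 0, false) yields the
+ // immr:imms encoding of the repeating 00000000'11111111 pattern.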
+
+ static LogicalImmediate create64(uint64_t value)
+ {
+ // Check for 0, -1 - these cannot be encoded.
+ if (!value || !~value)
+ return InvalidLogicalImmediate;
+
+ // Look for a contiguous bit range.
+ unsigned hsb, lsb;
+ bool inverted;
+ if (findBitRange<64>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<64>(hsb, lsb, inverted);
+
+ // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
+ if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32))
+ return create32(static_cast<uint32_t>(value));
+ return InvalidLogicalImmediate;
+ }
+
+ int value() const
+ {
+ ASSERT(isValid());
+ return m_value;
+ }
+
+ bool isValid() const
+ {
+ return m_value != InvalidLogicalImmediate;
+ }
+
+ bool is64bit() const
+ {
+ return m_value & (1 << 12);
+ }
+
+private:
+ LogicalImmediate(int value)
+ : m_value(value)
+ {
+ }
+
+ // Generate a mask with bits in the range hsb..0 set, for example:
+ // hsb:63 = 0xffffffffffffffff
+ // hsb:42 = 0x000007ffffffffff
+ // hsb: 0 = 0x0000000000000001
+ static uint64_t mask(unsigned hsb)
+ {
+ ASSERT(hsb < 64);
+ return 0xffffffffffffffffull >> (63 - hsb);
+ }
+
+ template<unsigned N>
+ static void partialHSB(uint64_t& value, unsigned& result)
+ {
+ if (value & (0xffffffffffffffffull << N)) {
+ result += N;
+ value >>= N;
+ }
+ }
+
+ // Find the bit number of the highest bit set in a non-zero value, for example:
+ // 0x8080808080808080 = hsb:63
+ // 0x0000000000000001 = hsb: 0
+ // 0x000007ffffe00000 = hsb:42
+ static unsigned highestSetBit(uint64_t value)
+ {
+ ASSERT(value);
+ unsigned hsb = 0;
+ partialHSB<32>(value, hsb);
+ partialHSB<16>(value, hsb);
+ partialHSB<8>(value, hsb);
+ partialHSB<4>(value, hsb);
+ partialHSB<2>(value, hsb);
+ partialHSB<1>(value, hsb);
+ return hsb;
+ }
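+
+ // Worked example (illustrative, not in the original source): for
+ // highestSetBit(0x000007ffffe00000):
+ //     partialHSB<32>: bit 42 lies above bit 31, so hsb = 32, value >>= 32 (0x7ff)
+ //     partialHSB<16>: nothing above bit 15, unchanged
+ //     partialHSB<8>:  bits above bit 7, so hsb = 40, value >>= 8 (0x7)
+ //     partialHSB<4>:  nothing above bit 3, unchanged
+ //     partialHSB<2>:  a bit above bit 1, so hsb = 42, value >>= 2 (0x1)
+ //     partialHSB<1>:  nothing above bit 0, unchanged
+ // giving hsb = 42, matching the example in the comment above.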
+
+ // This function takes a value and a bit width, where value obeys the following constraints:
+ // * bits outside of the width of the value must be zero.
+ // * bits within the width of value must be neither all clear nor all set.
+ // The input is inspected to detect values that consist of either two or three contiguous
+ // ranges of bits. The output range hsb..lsb describes the second range of the value:
+ // if the range is set, inverted will be false, and if the range is clear, inverted will
+ // be true. For example (with width 8):
+ // 00001111 = hsb:3, lsb:0, inverted:false
+ // 11110000 = hsb:3, lsb:0, inverted:true
+ // 00111100 = hsb:5, lsb:2, inverted:false
+ // 11000011 = hsb:5, lsb:2, inverted:true
+ template<unsigned width>
+ static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
+ {
+ ASSERT(value & mask(width - 1));
+ ASSERT(value != mask(width - 1));
+ ASSERT(!(value & ~mask(width - 1)));
+
+ // Detect cases where the top bit is set; if so, flip all the bits & set invert.
+ // This halves the number of patterns we need to look for.
+ const uint64_t msb = 1ull << (width - 1);
+ if ((inverted = (value & msb)))
+ value ^= mask(width - 1);
+
+ // Find the highest set bit in value, generate a corresponding mask & flip all
+ // bits under it.
+ hsb = highestSetBit(value);
+ value ^= mask(hsb);
+ if (!value) {
+ // If this cleared the value, then the range hsb..0 was all set.
+ lsb = 0;
+ return true;
+ }
+
+ // Try making one more mask, and flipping the bits!
+ lsb = highestSetBit(value);
+ value ^= mask(lsb);
+ if (!value) {
+ // Success - but lsb actually points to the hsb of a third range - add one
+ // to get to the lsb of the mid range.
+ ++lsb;
+ return true;
+ }
+
+ return false;
+ }
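+
+ // Worked example (illustrative, not in the original source): for the
+ // width-8 value 00111100 (0x3c), the top bit is clear, so inverted stays
+ // false. highestSetBit gives hsb = 5; xoring with mask(5) (0x3f) leaves
+ // 0x03, so a second range remains. highestSetBit(0x03) gives 1; xoring with
+ // mask(1) (0x03) clears the value, and lsb is bumped to 2, matching the
+ // "00111100 = hsb:5, lsb:2, inverted:false" row above.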
+
+ // Encodes the set of immN:immr:imms fields found in a logical immediate.
+ template<unsigned width>
+ static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
+ {
+ // Check width is a power of 2!
+ ASSERT(!(width & (width - 1)));
+ ASSERT(width <= 64 && width >= 2);
+ ASSERT(hsb >= lsb);
+ ASSERT(hsb < width);
+
+ int immN = 0;
+ int imms = 0;
+ int immr = 0;
+
+ // For 64-bit values this is easy - just set immN to true, and imms just
+ // contains the bit number of the highest set bit of the set range. For
+ // values with narrower widths, these are encoded by a leading set of
+ // one bits, followed by a zero bit, followed by the remaining set of bits
+ // being the high bit of the range. For a 32-bit immediate there are no
+ // leading one bits, just a zero followed by a five bit number. For a
+ // 16-bit immediate there is one one bit, a zero bit, and then a four bit
+ // bit-position, etc.
+ if (width == 64)
+ immN = 1;
+ else
+ imms = 63 & ~(width + width - 1);
+
+ if (inverted) {
+ // If width is 64 & hsb is 62, then we have a value something like:
+ // 0x80000000ffffffff (in this case with lsb 32).
+ // The ror should be by 1, imms (effectively set width minus 1) is
+ // 32. Set width is full width minus cleared width.
+ immr = (width - 1) - hsb;
+ imms |= (width - ((hsb - lsb) + 1)) - 1;
+ } else {
+ // If width is 64 & hsb is 62, then we have a value something like:
+ // 0x7fffffff00000000 (in this case with lsb 32).
+ // The value is effectively rol'ed by lsb, which is equivalent to
+ // a ror by width - lsb (or 0, in the case where lsb is 0). imms
+ // is hsb - lsb.
+ immr = (width - lsb) & (width - 1);
+ imms |= hsb - lsb;
+ }
+
+ return immN << 12 | immr << 6 | imms;
+ }
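+
+ // Worked example (illustrative, not in the original source): encoding the
+ // repeating 8-bit pattern 00001111 (hsb = 3, lsb = 0, not inverted):
+ //     immN = 0; imms starts as 63 & ~15 = 0b110000
+ //     immr = (8 - 0) & 7 = 0
+ //     imms |= 3 - 0, giving 0b110011
+ // so the function returns immN:immr:imms = 0:000000:110011.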
+
+ static const int InvalidLogicalImmediate = -1;
+
+ int m_value;
+};
+
+inline uint16_t getHalfword(uint64_t value, int which)
+{
+ return value >> (which << 4);
+}
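+
+// For example (illustrative, not in the original source),
+// getHalfword(0x0123456789abcdef, 1) shifts right by 1 << 4 = 16 bits and
+// truncates to uint16_t, returning 0x89ab; splitting a 64-bit value into its
+// four halfwords this way is the form a movz/movk sequence consumes.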
+
+namespace ARM64Registers {
+ typedef enum {
+ // Parameter/result registers
+ x0,
+ x1,
+ x2,
+ x3,
+ x4,
+ x5,
+ x6,
+ x7,
+ // Indirect result location register
+ x8,
+ // Temporary registers
+ x9,
+ x10,
+ x11,
+ x12,
+ x13,
+ x14,
+ x15,
+ // Intra-procedure-call scratch registers (temporary)
+ x16, ip0 = x16,
+ x17, ip1 = x17,
+ // Platform Register (temporary)
+ x18,
+ // Callee-saved
+ x19,
+ x20,
+ x21,
+ x22,
+ x23,
+ x24,
+ x25,
+ x26,
+ x27,
+ x28,
+ // Special
+ x29, fp = x29,
+ x30, lr = x30,
+ sp,
+ zr = 0x3f,
+ } RegisterID;
+
+ typedef enum {
+ // Parameter/result registers
+ q0,
+ q1,
+ q2,
+ q3,
+ q4,
+ q5,
+ q6,
+ q7,
+ // Callee-saved (up to 64-bits only!)
+ q8,
+ q9,
+ q10,
+ q11,
+ q12,
+ q13,
+ q14,
+ q15,
+ // Temporary registers
+ q16,
+ q17,
+ q18,
+ q19,
+ q20,
+ q21,
+ q22,
+ q23,
+ q24,
+ q25,
+ q26,
+ q27,
+ q28,
+ q29,
+ q30,
+ q31,
+ } FPRegisterID;
+
+ static bool isSp(RegisterID reg) { return reg == sp; }
+ static bool isZr(RegisterID reg) { return reg == zr; }
+}
+
+class ARM64Assembler {
+public:
+ typedef ARM64Registers::RegisterID RegisterID;
+ typedef ARM64Registers::FPRegisterID FPRegisterID;
+
+ static RegisterID firstRegister() { return ARM64Registers::x0; }
+ static RegisterID lastRegister() { return ARM64Registers::sp; }
+
+ static FPRegisterID firstFPRegister() { return ARM64Registers::q0; }
+ static FPRegisterID lastFPRegister() { return ARM64Registers::q31; }
+
+private:
+ static bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); }
+ static bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); }
+
+public:
+ ARM64Assembler()
+ : m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
+ {
+ }
+
+ AssemblerBuffer& buffer() { return m_buffer; }
+
+ // (HS, LO, HI, LS) -> (AE, B, A, BE)
+ // (VS, VC) -> (O, NO)
+ typedef enum {
+ ConditionEQ,
+ ConditionNE,
+ ConditionHS, ConditionCS = ConditionHS,
+ ConditionLO, ConditionCC = ConditionLO,
+ ConditionMI,
+ ConditionPL,
+ ConditionVS,
+ ConditionVC,
+ ConditionHI,
+ ConditionLS,
+ ConditionGE,
+ ConditionLT,
+ ConditionGT,
+ ConditionLE,
+ ConditionAL,
+ ConditionInvalid
+ } Condition;
+
+ static Condition invert(Condition cond)
+ {
+ return static_cast<Condition>(cond ^ 1);
+ }
+
+ typedef enum {
+ LSL,
+ LSR,
+ ASR,
+ ROR
+ } ShiftType;
+
+ typedef enum {
+ UXTB,
+ UXTH,
+ UXTW,
+ UXTX,
+ SXTB,
+ SXTH,
+ SXTW,
+ SXTX
+ } ExtendType;
+
+ enum SetFlags {
+ DontSetFlags,
+ S
+ };
+
+#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 4) | (index))
+#define JUMP_ENUM_SIZE(jump) ((jump) >> 4)
+ enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
+ JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
+ JumpCondition = JUMP_ENUM_WITH_SIZE(2, 2 * sizeof(uint32_t)),
+ JumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
+ JumpTestBit = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
+ JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
+ JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
+ JumpCompareAndBranchFixedSize = JUMP_ENUM_WITH_SIZE(7, 2 * sizeof(uint32_t)),
+ JumpTestBitFixedSize = JUMP_ENUM_WITH_SIZE(8, 2 * sizeof(uint32_t)),
+ };
+ enum JumpLinkType {
+ LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
+ LinkJumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
+ LinkJumpConditionDirect = JUMP_ENUM_WITH_SIZE(2, 1 * sizeof(uint32_t)),
+ LinkJumpCondition = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
+ LinkJumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
+ LinkJumpCompareAndBranchDirect = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
+ LinkJumpTestBit = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
+ LinkJumpTestBitDirect = JUMP_ENUM_WITH_SIZE(7, 1 * sizeof(uint32_t)),
+ };
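+
+ // Illustrative note (not in the original source): JUMP_ENUM_WITH_SIZE packs
+ // a jump's worst-case code size in bytes into the upper bits and a unique
+ // index into the low four bits, so e.g. JumpCondition is (8 << 4) | 2 and
+ // JUMP_ENUM_SIZE(JumpCondition) recovers 8 bytes, the two instructions (a
+ // b.cond plus a b) that a conditional jump may need at link time.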
+
+ class LinkRecord {
+ public:
+ LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
+ {
+ data.realTypes.m_from = from;
+ data.realTypes.m_to = to;
+ data.realTypes.m_type = type;
+ data.realTypes.m_linkType = LinkInvalid;
+ data.realTypes.m_condition = condition;
+ }
+ LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
+ {
+ data.realTypes.m_from = from;
+ data.realTypes.m_to = to;
+ data.realTypes.m_type = type;
+ data.realTypes.m_linkType = LinkInvalid;
+ data.realTypes.m_condition = condition;
+ data.realTypes.m_is64Bit = is64Bit;
+ data.realTypes.m_compareRegister = compareRegister;
+ }
+ LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
+ {
+ data.realTypes.m_from = from;
+ data.realTypes.m_to = to;
+ data.realTypes.m_type = type;
+ data.realTypes.m_linkType = LinkInvalid;
+ data.realTypes.m_condition = condition;
+ data.realTypes.m_bitNumber = bitNumber;
+ data.realTypes.m_compareRegister = compareRegister;
+ }
+ void operator=(const LinkRecord& other)
+ {
+ data.copyTypes.content[0] = other.data.copyTypes.content[0];
+ data.copyTypes.content[1] = other.data.copyTypes.content[1];
+ data.copyTypes.content[2] = other.data.copyTypes.content[2];
+ }
+ intptr_t from() const { return data.realTypes.m_from; }
+ void setFrom(intptr_t from) { data.realTypes.m_from = from; }
+ intptr_t to() const { return data.realTypes.m_to; }
+ JumpType type() const { return data.realTypes.m_type; }
+ JumpLinkType linkType() const { return data.realTypes.m_linkType; }
+ void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
+ Condition condition() const { return data.realTypes.m_condition; }
+ bool is64Bit() const { return data.realTypes.m_is64Bit; }
+ unsigned bitNumber() const { return data.realTypes.m_bitNumber; }
+ RegisterID compareRegister() const { return data.realTypes.m_compareRegister; }
+
+ private:
+ union {
+ struct RealTypes {
+ intptr_t m_from : 48;
+ intptr_t m_to : 48;
+ JumpType m_type : 8;
+ JumpLinkType m_linkType : 8;
+ Condition m_condition : 4;
+ unsigned m_bitNumber : 6;
+ RegisterID m_compareRegister : 6;
+ bool m_is64Bit : 1;
+ } realTypes;
+ struct CopyTypes {
+ uint64_t content[3];
+ } copyTypes;
+ COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
+ } data;
+ };
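+
+ // Illustrative note (not in the original source): LinkRecord packs its
+ // fields into bitfields so a record fits in three 64-bit words, and the
+ // custom operator= copies those words through the CopyTypes view, which is
+ // cheaper than repacking each bitfield member individually; the
+ // COMPILE_ASSERT guarantees the two union views stay the same size.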
+
+ // bits(N) VFPExpandImm(bits(8) imm8);
+ //
+ // Encoding of floating point immediates is a little complicated. Here's a
+ // high level description:
+ //     +/-m * 2^-n, where m and n are integers, 16 <= m <= 31, 0 <= n <= 7
+ // and the algorithm for expanding to a single precision float:
+ // return imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19);
+ //
+ // The trickiest bit is how the exponent is handled. The following table
+ // may help clarify things a little:
+ // 654 (imm8 bit numbers)
+ // 100 01111100 124 -3 1020 01111111100
+ // 101 01111101 125 -2 1021 01111111101
+ // 110 01111110 126 -1 1022 01111111110
+ // 111 01111111 127 0 1023 01111111111
+ // 000 10000000 128 1 1024 10000000000
+ // 001 10000001 129 2 1025 10000000001
+ // 010 10000010 130 3 1026 10000000010
+ // 011 10000011 131 4 1027 10000000011
+ // The first column shows the bit pattern stored in bits 6-4 of the arm
+ // encoded immediate. The second column shows the 8-bit IEEE 754 single
+ // -precision exponent in binary, the third column shows the raw decimal
+ // value. IEEE 754 single-precision numbers are stored with a bias of 127
+ // to the exponent, so the fourth column shows the resulting exponent.
+ // From this we can see that the exponent can be in the range -3..4,
+ // which agrees with the high level description given above. The fifth
+ // and sixth columns show the value stored in an IEEE 754 double-precision
+ // number to represent these exponents in decimal and binary, given the
+ // bias of 1023.
+ //
+ // Ultimately, detecting doubles that can be encoded as immediates on arm
+ // and encoding doubles is actually not too bad. A floating point value can
+ // be encoded by retaining the sign bit, the low three bits of the exponent
+ // and the high 4 bits of the mantissa. For an immediate to be validly
+ // encodable, the remainder of the mantissa must be zero, and the high bits
+ // of the exponent must all match the top retained exponent bit, bar the
+ // highest bit, which must be its inverse.
+ static bool canEncodeFPImm(double d)
+ {
+ // Discard the sign bit, the low two bits of the exponent & the highest
+ // four bits of the mantissa.
+ uint64_t masked = bitwise_cast<uint64_t>(d) & 0x7fc0ffffffffffffull;
+ return (masked == 0x3fc0000000000000ull) || (masked == 0x4000000000000000ull);
+ }
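+
+ // Worked examples (illustrative, not in the original source):
+ //     canEncodeFPImm(1.0) -> true  (bits 0x3ff0000000000000 mask to 0x3fc0000000000000)
+ //     canEncodeFPImm(3.0) -> true  (bits 0x4008000000000000 mask to 0x4000000000000000)
+ //     canEncodeFPImm(0.1) -> false (mantissa bits are set below the top four)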
+
+ template<int datasize>
+ static bool canEncodePImmOffset(int32_t offset)
+ {
+ int32_t maxPImm = 4095 * (datasize / 8);
+ if (offset < 0)
+ return false;
+ if (offset > maxPImm)
+ return false;
+ if (offset & ((datasize / 8) - 1))
+ return false;
+ return true;
+ }
+
+ static bool canEncodeSImmOffset(int32_t offset)
+ {
+ return isInt9(offset);
+ }
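+
+ // Illustrative note (not in the original source): these two predicates
+ // cover the two immediate addressing forms. For a 64-bit access,
+ // canEncodePImmOffset<64> accepts 8-byte-aligned offsets in 0..32760 (the
+ // scaled, unsigned 12-bit field of ldr/str), while canEncodeSImmOffset
+ // accepts any offset in -256..255 (the unscaled, signed 9-bit field of
+ // ldur/stur); a caller would try the scaled form first and fall back to
+ // the unscaled one.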
+
+private:
+ int encodeFPImm(double d)
+ {
+ ASSERT(canEncodeFPImm(d));
+ uint64_t u64 = bitwise_cast<uint64_t>(d);
+ return (static_cast<int>(u64 >> 56) & 0x80) | (static_cast<int>(u64 >> 48) & 0x7f);
+ }
+
+ template<int datasize>
+ int encodeShiftAmount(int amount)
+ {
+ ASSERT(!amount || datasize == (8 << amount));
+ return amount;
+ }
+
+ template<int datasize>
+ static int encodePositiveImmediate(unsigned pimm)
+ {
+ ASSERT(!(pimm & ((datasize / 8) - 1)));
+ return pimm / (datasize / 8);
+ }
+
+ enum Datasize {
+ Datasize_32,
+ Datasize_64,
+ Datasize_64_top,
+ Datasize_16
+ };
+
+ enum MemOpSize {
+ MemOpSize_8_or_128,
+ MemOpSize_16,
+ MemOpSize_32,
+ MemOpSize_64,
+ };
+
+ enum BranchType {
+ BranchType_JMP,
+ BranchType_CALL,
+ BranchType_RET
+ };
+
+ enum AddOp {
+ AddOp_ADD,
+ AddOp_SUB
+ };
+
+ enum BitfieldOp {
+ BitfieldOp_SBFM,
+ BitfieldOp_BFM,
+ BitfieldOp_UBFM
+ };
+
+ enum DataOp1Source {
+ DataOp_RBIT,
+ DataOp_REV16,
+ DataOp_REV32,
+ DataOp_REV64,
+ DataOp_CLZ,
+ DataOp_CLS
+ };
+
+ enum DataOp2Source {
+ DataOp_UDIV = 2,
+ DataOp_SDIV = 3,
+ DataOp_LSLV = 8,
+ DataOp_LSRV = 9,
+ DataOp_ASRV = 10,
+ DataOp_RORV = 11
+ };
+
+ enum DataOp3Source {
+ DataOp_MADD = 0,
+ DataOp_MSUB = 1,
+ DataOp_SMADDL = 2,
+ DataOp_SMSUBL = 3,
+ DataOp_SMULH = 4,
+ DataOp_UMADDL = 10,
+ DataOp_UMSUBL = 11,
+ DataOp_UMULH = 12
+ };
+
+ enum ExcepnOp {
+ ExcepnOp_EXCEPTION = 0,
+ ExcepnOp_BREAKPOINT = 1,
+ ExcepnOp_HALT = 2,
+ ExcepnOp_DCPS = 5
+ };
+
+ enum FPCmpOp {
+ FPCmpOp_FCMP = 0x00,
+ FPCmpOp_FCMP0 = 0x08,
+ FPCmpOp_FCMPE = 0x10,
+ FPCmpOp_FCMPE0 = 0x18
+ };
+
+ enum FPCondCmpOp {
+ FPCondCmpOp_FCMP,
+ FPCondCmpOp_FCMPE
+ };
+
+ enum FPDataOp1Source {
+ FPDataOp_FMOV = 0,
+ FPDataOp_FABS = 1,
+ FPDataOp_FNEG = 2,
+ FPDataOp_FSQRT = 3,
+ FPDataOp_FCVT_toSingle = 4,
+ FPDataOp_FCVT_toDouble = 5,
+ FPDataOp_FCVT_toHalf = 7,
+ FPDataOp_FRINTN = 8,
+ FPDataOp_FRINTP = 9,
+ FPDataOp_FRINTM = 10,
+ FPDataOp_FRINTZ = 11,
+ FPDataOp_FRINTA = 12,
+ FPDataOp_FRINTX = 14,
+ FPDataOp_FRINTI = 15
+ };
+
+ enum FPDataOp2Source {
+ FPDataOp_FMUL,
+ FPDataOp_FDIV,
+ FPDataOp_FADD,
+ FPDataOp_FSUB,
+ FPDataOp_FMAX,
+ FPDataOp_FMIN,
+ FPDataOp_FMAXNM,
+ FPDataOp_FMINNM,
+ FPDataOp_FNMUL
+ };
+
+ enum FPIntConvOp {
+ FPIntConvOp_FCVTNS = 0x00,
+ FPIntConvOp_FCVTNU = 0x01,
+ FPIntConvOp_SCVTF = 0x02,
+ FPIntConvOp_UCVTF = 0x03,
+ FPIntConvOp_FCVTAS = 0x04,
+ FPIntConvOp_FCVTAU = 0x05,
+ FPIntConvOp_FMOV_QtoX = 0x06,
+ FPIntConvOp_FMOV_XtoQ = 0x07,
+ FPIntConvOp_FCVTPS = 0x08,
+ FPIntConvOp_FCVTPU = 0x09,
+ FPIntConvOp_FMOV_QtoX_top = 0x0e,
+ FPIntConvOp_FMOV_XtoQ_top = 0x0f,
+ FPIntConvOp_FCVTMS = 0x10,
+ FPIntConvOp_FCVTMU = 0x11,
+ FPIntConvOp_FCVTZS = 0x18,
+ FPIntConvOp_FCVTZU = 0x19,
+ };
+
+ enum LogicalOp {
+ LogicalOp_AND,
+ LogicalOp_ORR,
+ LogicalOp_EOR,
+ LogicalOp_ANDS
+ };
+
+ enum MemOp {
+ MemOp_STORE,
+ MemOp_LOAD,
+ MemOp_STORE_V128,
+ MemOp_LOAD_V128,
+ MemOp_PREFETCH = 2, // size must be 3
+ MemOp_LOAD_signed64 = 2, // size may be 0, 1 or 2
+ MemOp_LOAD_signed32 = 3 // size may be 0 or 1
+ };
+
+ enum MemPairOpSize {
+ MemPairOp_32 = 0,
+ MemPairOp_LoadSigned_32 = 1,
+ MemPairOp_64 = 2,
+
+ MemPairOp_V32 = MemPairOp_32,
+ MemPairOp_V64 = 1,
+ MemPairOp_V128 = 2
+ };
+
+ enum MoveWideOp {
+ MoveWideOp_N = 0,
+ MoveWideOp_Z = 2,
+ MoveWideOp_K = 3
+ };
+
+ enum LdrLiteralOp {
+ LdrLiteralOp_32BIT = 0,
+ LdrLiteralOp_64BIT = 1,
+ LdrLiteralOp_LDRSW = 2,
+ LdrLiteralOp_128BIT = 2
+ };
+
+ static unsigned memPairOffsetShift(bool V, MemPairOpSize size)
+ {
+ // Return the log2 of the size in bytes; e.g. a 64-bit size returns 3.
+ if (V)
+ return size + 2;
+ return (size >> 1) + 2;
+ }
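+
+ // For example (illustrative, not in the original source): a 64-bit integer
+ // pair (V false, MemPairOp_64 = 2) yields (2 >> 1) + 2 = 3, so pair offsets
+ // scale by 8 bytes, while a 128-bit vector pair (V true, MemPairOp_V128 = 2)
+ // yields 2 + 2 = 4, a 16-byte scale.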
+
+public:
+ // Integer Instructions:
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void adc(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ CHECK_DATASIZE();
+ insn(addSubtractWithCarry(DATASIZE, AddOp_ADD, setFlags, rm, rn, rd));
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
+ {
+ CHECK_DATASIZE();
+ ASSERT(!shift || shift == 12);
+ insn(addSubtractImmediate(DATASIZE, AddOp_ADD, setFlags, shift == 12, imm12, rn, rd));
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ add<datasize, setFlags>(rd, rn, rm, LSL, 0);
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ CHECK_DATASIZE();
+ insn(addSubtractExtendedRegister(DATASIZE, AddOp_ADD, setFlags, rm, extend, amount, rn, rd));
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+ {
+ CHECK_DATASIZE();
+ if (isSp(rd) || isSp(rn)) {
+ ASSERT(shift == LSL);
+ ASSERT(!isSp(rm));
+ add<datasize, setFlags>(rd, rn, rm, UXTX, amount);
+ } else
+ insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd));
+ }
+
+ ALWAYS_INLINE void adr(RegisterID rd, int offset)
+ {
+ insn(pcRelative(false, offset, rd));
+ }
+
+ ALWAYS_INLINE void adrp(RegisterID rd, int offset)
+ {
+ ASSERT(!(offset & 0xfff));
+ insn(pcRelative(true, offset >> 12, rd));
+ nopCortexA53Fix843419();
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ and_<datasize, setFlags>(rd, rn, rm, LSL, 0);
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+ {
+ CHECK_DATASIZE();
+ insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, false, rm, amount, rn, rd));
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, LogicalImmediate imm)
+ {
+ CHECK_DATASIZE();
+ insn(logicalImmediate(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, imm.value(), rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, int shift)
+ {
+ ASSERT(shift < datasize);
+ sbfm<datasize>(rd, rn, shift, datasize - 1);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ asrv<datasize>(rd, rn, rm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void asrv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ CHECK_DATASIZE();
+ insn(dataProcessing2Source(DATASIZE, rm, DataOp_ASRV, rn, rd));
+ }
+
+ ALWAYS_INLINE void b(int32_t offset = 0)
+ {
+ ASSERT(!(offset & 3));
+ offset >>= 2;
+ ASSERT(offset == (offset << 6) >> 6);
+ insn(unconditionalBranchImmediate(false, offset));
+ }
+
+ ALWAYS_INLINE void b_cond(Condition cond, int32_t offset = 0)
+ {
+ ASSERT(!(offset & 3));
+ offset >>= 2;
+ ASSERT(offset == (offset << 13) >> 13);
+ insn(conditionalBranchImmediate(offset, cond));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void bfi(RegisterID rd, RegisterID rn, int lsb, int width)
+ {
+ bfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void bfm(RegisterID rd, RegisterID rn, int immr, int imms)
+ {
+ CHECK_DATASIZE();
+ insn(bitfield(DATASIZE, BitfieldOp_BFM, immr, imms, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void bfxil(RegisterID rd, RegisterID rn, int lsb, int width)
+ {
+ bfm<datasize>(rd, rn, lsb, lsb + width - 1);
+ }
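+
+ // Illustrative note (not in the original source): bfi and bfxil are the
+ // standard BFM aliases. For instance, bfi<64>(rd, rn, 8, 16) encodes
+ // bfm<64> with immr = (64 - 8) & 63 = 56 and imms = 15, inserting the low
+ // 16 bits of rn into bits 23..8 of rd, while bfxil<64>(rd, rn, 8, 16)
+ // extracts bits 23..8 of rn into the low 16 bits of rd.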
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ bic<datasize, setFlags>(rd, rn, rm, LSL, 0);
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+ {
+ CHECK_DATASIZE();
+ insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, true, rm, amount, rn, rd));
+ }
+
+ ALWAYS_INLINE void bl(int32_t offset = 0)
+ {
+ ASSERT(!(offset & 3));
+ offset >>= 2;
+ insn(unconditionalBranchImmediate(true, offset));
+ }
+
+ ALWAYS_INLINE void blr(RegisterID rn)
+ {
+ insn(unconditionalBranchRegister(BranchType_CALL, rn));
+ }
+
+ ALWAYS_INLINE void br(RegisterID rn)
+ {
+ insn(unconditionalBranchRegister(BranchType_JMP, rn));
+ }
+
+ ALWAYS_INLINE void brk(uint16_t imm)
+ {
+ insn(excepnGeneration(ExcepnOp_BREAKPOINT, imm, 0));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cbnz(RegisterID rt, int32_t offset = 0)
+ {
+ CHECK_DATASIZE();
+ ASSERT(!(offset & 3));
+ offset >>= 2;
+ insn(compareAndBranchImmediate(DATASIZE, true, offset, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cbz(RegisterID rt, int32_t offset = 0)
+ {
+ CHECK_DATASIZE();
+ ASSERT(!(offset & 3));
+ offset >>= 2;
+ insn(compareAndBranchImmediate(DATASIZE, false, offset, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ccmn(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
+ {
+ CHECK_DATASIZE();
+ insn(conditionalCompareRegister(DATASIZE, AddOp_ADD, rm, cond, rn, nzcv));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ccmn(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
+ {
+ CHECK_DATASIZE();
+ insn(conditionalCompareImmediate(DATASIZE, AddOp_ADD, imm, cond, rn, nzcv));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ccmp(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
+ {
+ CHECK_DATASIZE();
+ insn(conditionalCompareRegister(DATASIZE, AddOp_SUB, rm, cond, rn, nzcv));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ccmp(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
+ {
+ CHECK_DATASIZE();
+ insn(conditionalCompareImmediate(DATASIZE, AddOp_SUB, imm, cond, rn, nzcv));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cinc(RegisterID rd, RegisterID rn, Condition cond)
+ {
+ csinc<datasize>(rd, rn, rn, invert(cond));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cinv(RegisterID rd, RegisterID rn, Condition cond)
+ {
+ csinv<datasize>(rd, rn, rn, invert(cond));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cls(RegisterID rd, RegisterID rn)
+ {
+ CHECK_DATASIZE();
+ insn(dataProcessing1Source(DATASIZE, DataOp_CLS, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void clz(RegisterID rd, RegisterID rn)
+ {
+ CHECK_DATASIZE();
+ insn(dataProcessing1Source(DATASIZE, DataOp_CLZ, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cmn(RegisterID rn, UInt12 imm12, int shift = 0)
+ {
+ add<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm)
+ {
+ add<datasize, S>(ARM64Registers::zr, rn, rm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ add<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+ {
+ add<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cmp(RegisterID rn, UInt12 imm12, int shift = 0)
+ {
+ sub<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
+ {
+ sub<datasize, S>(ARM64Registers::zr, rn, rm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ sub<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+ {
+ sub<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cneg(RegisterID rd, RegisterID rn, Condition cond)
+ {
+ csneg<datasize>(rd, rn, rn, invert(cond));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void csel(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
+ {
+ CHECK_DATASIZE();
+ insn(conditionalSelect(DATASIZE, false, rm, cond, false, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void cset(RegisterID rd, Condition cond)
+ {
+ csinc<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void csetm(RegisterID rd, Condition cond)
+ {
+ csinv<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void csinc(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
+ {
+ CHECK_DATASIZE();
+ insn(conditionalSelect(DATASIZE, false, rm, cond, true, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void csinv(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
+ {
+ CHECK_DATASIZE();
+ insn(conditionalSelect(DATASIZE, true, rm, cond, false, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void csneg(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
+ {
+ CHECK_DATASIZE();
+ insn(conditionalSelect(DATASIZE, true, rm, cond, true, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ eon<datasize>(rd, rn, rm, LSL, 0);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+ {
+ CHECK_DATASIZE();
+ insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, true, rm, amount, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ eor<datasize>(rd, rn, rm, LSL, 0);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+ {
+ CHECK_DATASIZE();
+ insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, false, rm, amount, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, LogicalImmediate imm)
+ {
+ CHECK_DATASIZE();
+ insn(logicalImmediate(DATASIZE, LogicalOp_EOR, imm.value(), rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void extr(RegisterID rd, RegisterID rn, RegisterID rm, int lsb)
+ {
+ CHECK_DATASIZE();
+ insn(extract(DATASIZE, rm, lsb, rn, rd));
+ }
+
+ ALWAYS_INLINE void hint(int imm)
+ {
+ insn(hintPseudo(imm));
+ }
+
+ ALWAYS_INLINE void hlt(uint16_t imm)
+ {
+ insn(excepnGeneration(ExcepnOp_HALT, imm, 0));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm)
+ {
+ ldr<datasize>(rt, rn, rm, UXTX, 0);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PostIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PreIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldr_literal(RegisterID rt, int offset = 0)
+ {
+ CHECK_DATASIZE();
+ ASSERT(!(offset & 3));
+ insn(loadRegisterLiteral(datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, false, offset >> 2, rt));
+ }
+
+ ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm)
+ {
+ // Not calling the five-argument form of ldrb: when amount is omitted, S is false.
+ insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, UXTX, false, rn, rt));
+ }
+
+ ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ ASSERT_UNUSED(amount, !amount);
+ insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, extend, true, rn, rt));
+ }
+
+ ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, encodePositiveImmediate<8>(pimm), rn, rt));
+ }
+
+ ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PostIndex simm)
+ {
+ insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PreIndex simm)
+ {
+ insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm)
+ {
+ ldrh(rt, rn, rm, UXTX, 0);
+ }
+
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ ASSERT(!amount || amount == 1);
+ insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_LOAD, rm, extend, amount == 1, rn, rt));
+ }
+
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_LOAD, encodePositiveImmediate<16>(pimm), rn, rt));
+ }
+
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PostIndex simm)
+ {
+ insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PreIndex simm)
+ {
+ insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm)
+ {
+ CHECK_DATASIZE();
+ // Not calling the five-argument form of ldrsb: when amount is omitted, S is false.
+ insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, UXTX, false, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ CHECK_DATASIZE();
+ ASSERT_UNUSED(amount, !amount);
+ insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, true, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<8>(pimm), rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PostIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PreIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm)
+ {
+ ldrsh<datasize>(rt, rn, rm, UXTX, 0);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ CHECK_DATASIZE();
+ ASSERT(!amount || amount == 1);
+ insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, amount == 1, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<16>(pimm), rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PostIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPostIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PreIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPreIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm)
+ {
+ ldrsw(rt, rn, rm, UXTX, 0);
+ }
+
+ ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ ASSERT(!amount || amount == 2);
+ insn(loadStoreRegisterRegisterOffset(MemOpSize_32, false, MemOp_LOAD_signed64, rm, extend, amount == 2, rn, rt));
+ }
+
+ ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ insn(loadStoreRegisterUnsignedImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, encodePositiveImmediate<32>(pimm), rn, rt));
+ }
+
+ ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PostIndex simm)
+ {
+ insn(loadStoreRegisterPostIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PreIndex simm)
+ {
+ insn(loadStoreRegisterPreIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void ldrsw_literal(RegisterID rt, int offset = 0)
+ {
+ ASSERT(!(offset & 3));
+ insn(loadRegisterLiteral(LdrLiteralOp_LDRSW, false, offset >> 2, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldur(RegisterID rt, RegisterID rn, int simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void ldurb(RegisterID rt, RegisterID rn, int simm)
+ {
+ insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void ldurh(RegisterID rt, RegisterID rn, int simm)
+ {
+ insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldursb(RegisterID rt, RegisterID rn, int simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldursh(RegisterID rt, RegisterID rn, int simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void ldursw(RegisterID rt, RegisterID rn, int simm)
+ {
+ insn(loadStoreRegisterUnscaledImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, int shift)
+ {
+ ASSERT(shift < datasize);
+ ubfm<datasize>(rd, rn, (datasize - shift) & (datasize - 1), datasize - 1 - shift);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ lslv<datasize>(rd, rn, rm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void lslv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ CHECK_DATASIZE();
+ insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSLV, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, int shift)
+ {
+ ASSERT(shift < datasize);
+ ubfm<datasize>(rd, rn, shift, datasize - 1);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ lsrv<datasize>(rd, rn, rm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void lsrv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ CHECK_DATASIZE();
+ insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSRV, rn, rd));
+ }
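+
+ // Illustrative note (not in the original source): the immediate shifts
+ // above are UBFM aliases. For example, lsl<32>(rd, rn, 4) becomes
+ // ubfm<32>(rd, rn, 28, 27) and lsr<32>(rd, rn, 4) becomes
+ // ubfm<32>(rd, rn, 4, 31), per the standard ARM64 alias rules.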
+
+ template<int datasize>
+ ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+ {
+ CHECK_DATASIZE();
+ nopCortexA53Fix835769<datasize>();
+ insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void mneg(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ msub<datasize>(rd, rn, rm, ARM64Registers::zr);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
+ {
+ if (isSp(rd) || isSp(rm))
+ add<datasize>(rd, rm, UInt12(0));
+ else
+ orr<datasize>(rd, ARM64Registers::zr, rm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void movi(RegisterID rd, LogicalImmediate imm)
+ {
+ orr<datasize>(rd, ARM64Registers::zr, imm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void movk(RegisterID rd, uint16_t value, int shift = 0)
+ {
+ CHECK_DATASIZE();
+ ASSERT(!(shift & 0xf));
+ insn(moveWideImediate(DATASIZE, MoveWideOp_K, shift >> 4, value, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void movn(RegisterID rd, uint16_t value, int shift = 0)
+ {
+ CHECK_DATASIZE();
+ ASSERT(!(shift & 0xf));
+ insn(moveWideImediate(DATASIZE, MoveWideOp_N, shift >> 4, value, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void movz(RegisterID rd, uint16_t value, int shift = 0)
+ {
+ CHECK_DATASIZE();
+ ASSERT(!(shift & 0xf));
+ insn(moveWideImediate(DATASIZE, MoveWideOp_Z, shift >> 4, value, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+ {
+ CHECK_DATASIZE();
+ nopCortexA53Fix835769<datasize>();
+ insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void mul(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ madd<datasize>(rd, rn, rm, ARM64Registers::zr);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
+ {
+ orn<datasize>(rd, ARM64Registers::zr, rm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
+ {
+ orn<datasize>(rd, ARM64Registers::zr, rm, shift, amount);
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
+ {
+ sub<datasize, setFlags>(rd, ARM64Registers::zr, rm);
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
+ {
+ sub<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm)
+ {
+ sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm);
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
+ {
+ sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
+ }
+
+ ALWAYS_INLINE void nop()
+ {
+ insn(nopPseudo());
+ }
+
+ static void fillNops(void* base, size_t size)
+ {
+ RELEASE_ASSERT(!(size % sizeof(int32_t)));
+ size_t n = size / sizeof(int32_t);
+ for (int32_t* ptr = static_cast<int32_t*>(base); n--;)
+ *ptr++ = nopPseudo();
+ }
+
+ ALWAYS_INLINE void dmbSY()
+ {
+ insn(0xd5033fbf);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ orn<datasize>(rd, rn, rm, LSL, 0);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+ {
+ CHECK_DATASIZE();
+ insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, true, rm, amount, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ orr<datasize>(rd, rn, rm, LSL, 0);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+ {
+ CHECK_DATASIZE();
+ insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, false, rm, amount, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, LogicalImmediate imm)
+ {
+ CHECK_DATASIZE();
+ insn(logicalImmediate(DATASIZE, LogicalOp_ORR, imm.value(), rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void rbit(RegisterID rd, RegisterID rn)
+ {
+ CHECK_DATASIZE();
+ insn(dataProcessing1Source(DATASIZE, DataOp_RBIT, rn, rd));
+ }
+
+ ALWAYS_INLINE void ret(RegisterID rn = ARM64Registers::lr)
+ {
+ insn(unconditionalBranchRegister(BranchType_RET, rn));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void rev(RegisterID rd, RegisterID rn)
+ {
+ CHECK_DATASIZE();
+ if (datasize == 32) // 'rev' mnemonic means REV32 or REV64 depending on the operand width.
+ insn(dataProcessing1Source(Datasize_32, DataOp_REV32, rn, rd));
+ else
+ insn(dataProcessing1Source(Datasize_64, DataOp_REV64, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void rev16(RegisterID rd, RegisterID rn)
+ {
+ CHECK_DATASIZE();
+ insn(dataProcessing1Source(DATASIZE, DataOp_REV16, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void rev32(RegisterID rd, RegisterID rn)
+ {
+ ASSERT(datasize == 64); // 'rev32' only valid with 64-bit operands.
+ insn(dataProcessing1Source(Datasize_64, DataOp_REV32, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ rorv<datasize>(rd, rn, rm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ror(RegisterID rd, RegisterID rs, int shift)
+ {
+ extr<datasize>(rd, rs, rs, shift);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void rorv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ CHECK_DATASIZE();
+ insn(dataProcessing2Source(DATASIZE, rm, DataOp_RORV, rn, rd));
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void sbc(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ CHECK_DATASIZE();
+ insn(addSubtractWithCarry(DATASIZE, AddOp_SUB, setFlags, rm, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void sbfiz(RegisterID rd, RegisterID rn, int lsb, int width)
+ {
+ sbfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void sbfm(RegisterID rd, RegisterID rn, int immr, int imms)
+ {
+ CHECK_DATASIZE();
+ insn(bitfield(DATASIZE, BitfieldOp_SBFM, immr, imms, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void sbfx(RegisterID rd, RegisterID rn, int lsb, int width)
+ {
+ sbfm<datasize>(rd, rn, lsb, lsb + width - 1);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ CHECK_DATASIZE();
+ insn(dataProcessing2Source(DATASIZE, rm, DataOp_SDIV, rn, rd));
+ }
+
+ ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+ {
+ nopCortexA53Fix835769<64>();
+ insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd));
+ }
+
+ ALWAYS_INLINE void smnegl(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ smsubl(rd, rn, rm, ARM64Registers::zr);
+ }
+
+ ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+ {
+ nopCortexA53Fix835769<64>();
+ insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd));
+ }
+
+ ALWAYS_INLINE void smulh(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ insn(dataProcessing3Source(Datasize_64, DataOp_SMULH, rm, ARM64Registers::zr, rn, rd));
+ }
+
+ ALWAYS_INLINE void smull(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ smaddl(rd, rn, rm, ARM64Registers::zr);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm)
+ {
+ str<datasize>(rt, rn, rm, UXTX, 0);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PostIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PreIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm)
+ {
+ // Not calling the five-argument form of strb: when amount is omitted, S is false.
+ insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, UXTX, false, rn, rt));
+ }
+
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ ASSERT_UNUSED(amount, !amount);
+ insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, extend, true, rn, rt));
+ }
+
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_STORE, encodePositiveImmediate<8>(pimm), rn, rt));
+ }
+
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PostIndex simm)
+ {
+ insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PreIndex simm)
+ {
+ insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm)
+ {
+ strh(rt, rn, rm, UXTX, 0);
+ }
+
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ ASSERT(!amount || amount == 1);
+ insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_STORE, rm, extend, amount == 1, rn, rt));
+ }
+
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_STORE, encodePositiveImmediate<16>(pimm), rn, rt));
+ }
+
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PostIndex simm)
+ {
+ insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PreIndex simm)
+ {
+ insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void stur(RegisterID rt, RegisterID rn, int simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void sturb(RegisterID rt, RegisterID rn, int simm)
+ {
+ insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
+ }
+
+ ALWAYS_INLINE void sturh(RegisterID rt, RegisterID rn, int simm)
+ {
+ insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
+ {
+ CHECK_DATASIZE();
+ ASSERT(!shift || shift == 12);
+ insn(addSubtractImmediate(DATASIZE, AddOp_SUB, setFlags, shift == 12, imm12, rn, rd));
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+        ASSERT_WITH_MESSAGE(!isSp(rd) || setFlags == DontSetFlags, "SUBS with shifted register does not support SP for Xd; it uses XZR for register 31. The extended-register form supports SP for Xd, but only when SetFlags is not used; otherwise register 31 is XZR.");
+ ASSERT_WITH_MESSAGE(!isSp(rm), "No encoding of SUBS supports SP for the third operand.");
+
+ if (isSp(rd) || isSp(rn))
+ sub<datasize, setFlags>(rd, rn, rm, UXTX, 0);
+ else
+ sub<datasize, setFlags>(rd, rn, rm, LSL, 0);
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ CHECK_DATASIZE();
+ insn(addSubtractExtendedRegister(DATASIZE, AddOp_SUB, setFlags, rm, extend, amount, rn, rd));
+ }
+
+ template<int datasize, SetFlags setFlags = DontSetFlags>
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+ {
+ CHECK_DATASIZE();
+ ASSERT(!isSp(rd) && !isSp(rn) && !isSp(rm));
+ insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void sxtb(RegisterID rd, RegisterID rn)
+ {
+ sbfm<datasize>(rd, rn, 0, 7);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void sxth(RegisterID rd, RegisterID rn)
+ {
+ sbfm<datasize>(rd, rn, 0, 15);
+ }
+
+ ALWAYS_INLINE void sxtw(RegisterID rd, RegisterID rn)
+ {
+ sbfm<64>(rd, rn, 0, 31);
+ }
+
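+    // Test bit and branch: tbz branches by 'offset' bytes when bit 'imm' of rt
+    // is clear; tbnz branches when it is set.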
+ ALWAYS_INLINE void tbz(RegisterID rt, int imm, int offset = 0)
+ {
+ ASSERT(!(offset & 3));
+ offset >>= 2;
+ insn(testAndBranchImmediate(false, imm, offset, rt));
+ }
+
+ ALWAYS_INLINE void tbnz(RegisterID rt, int imm, int offset = 0)
+ {
+ ASSERT(!(offset & 3));
+ offset >>= 2;
+ insn(testAndBranchImmediate(true, imm, offset, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
+ {
+ and_<datasize, S>(ARM64Registers::zr, rn, rm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+ {
+ and_<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void tst(RegisterID rn, LogicalImmediate imm)
+ {
+ and_<datasize, S>(ARM64Registers::zr, rn, imm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ubfiz(RegisterID rd, RegisterID rn, int lsb, int width)
+ {
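+        // UBFIZ is an alias of UBFM with immr = (-lsb) mod datasize and imms = width - 1.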
+ ubfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ubfm(RegisterID rd, RegisterID rn, int immr, int imms)
+ {
+ CHECK_DATASIZE();
+ insn(bitfield(DATASIZE, BitfieldOp_UBFM, immr, imms, rn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, int lsb, int width)
+ {
+ ubfm<datasize>(rd, rn, lsb, lsb + width - 1);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ CHECK_DATASIZE();
+ insn(dataProcessing2Source(DATASIZE, rm, DataOp_UDIV, rn, rd));
+ }
+
+ ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+ {
+ nopCortexA53Fix835769<64>();
+ insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd));
+ }
+
+ ALWAYS_INLINE void umnegl(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ umsubl(rd, rn, rm, ARM64Registers::zr);
+ }
+
+ ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+ {
+ nopCortexA53Fix835769<64>();
+ insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd));
+ }
+
+ ALWAYS_INLINE void umulh(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ insn(dataProcessing3Source(Datasize_64, DataOp_UMULH, rm, ARM64Registers::zr, rn, rd));
+ }
+
+ ALWAYS_INLINE void umull(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ umaddl(rd, rn, rm, ARM64Registers::zr);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void uxtb(RegisterID rd, RegisterID rn)
+ {
+ ubfm<datasize>(rd, rn, 0, 7);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void uxth(RegisterID rd, RegisterID rn)
+ {
+ ubfm<datasize>(rd, rn, 0, 15);
+ }
+
+ ALWAYS_INLINE void uxtw(RegisterID rd, RegisterID rn)
+ {
+ ubfm<64>(rd, rn, 0, 31);
+ }
+
+ // Floating Point Instructions:
+
+ template<int datasize>
+ ALWAYS_INLINE void fabs(FPRegisterID vd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FABS, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FADD, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fccmp(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMP, nzcv));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fccmpe(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMPE, nzcv));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fcmp(FPRegisterID vn, FPRegisterID vm)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMP));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fcmp_0(FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMP0));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fcmpe(FPRegisterID vn, FPRegisterID vm)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMPE));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fcmpe_0(FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMPE0));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fcsel(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, Condition cond)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointConditionalSelect(DATASIZE, vm, cond, vn, vd));
+ }
+
+ template<int dstsize, int srcsize>
+ ALWAYS_INLINE void fcvt(FPRegisterID vd, FPRegisterID vn)
+ {
+ ASSERT(dstsize == 16 || dstsize == 32 || dstsize == 64);
+ ASSERT(srcsize == 16 || srcsize == 32 || srcsize == 64);
+ ASSERT(dstsize != srcsize);
+ Datasize type = (srcsize == 64) ? Datasize_64 : (srcsize == 32) ? Datasize_32 : Datasize_16;
+ FPDataOp1Source opcode = (dstsize == 64) ? FPDataOp_FCVT_toDouble : (dstsize == 32) ? FPDataOp_FCVT_toSingle : FPDataOp_FCVT_toHalf;
+ insn(floatingPointDataProcessing1Source(type, opcode, vn, vd));
+ }
+
+ template<int dstsize, int srcsize>
+ ALWAYS_INLINE void fcvtas(RegisterID rd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE_OF(dstsize);
+ CHECK_DATASIZE_OF(srcsize);
+ insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAS, vn, rd));
+ }
+
+ template<int dstsize, int srcsize>
+ ALWAYS_INLINE void fcvtau(RegisterID rd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE_OF(dstsize);
+ CHECK_DATASIZE_OF(srcsize);
+ insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAU, vn, rd));
+ }
+
+ template<int dstsize, int srcsize>
+ ALWAYS_INLINE void fcvtms(RegisterID rd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE_OF(dstsize);
+ CHECK_DATASIZE_OF(srcsize);
+ insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMS, vn, rd));
+ }
+
+ template<int dstsize, int srcsize>
+ ALWAYS_INLINE void fcvtmu(RegisterID rd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE_OF(dstsize);
+ CHECK_DATASIZE_OF(srcsize);
+ insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMU, vn, rd));
+ }
+
+ template<int dstsize, int srcsize>
+ ALWAYS_INLINE void fcvtns(RegisterID rd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE_OF(dstsize);
+ CHECK_DATASIZE_OF(srcsize);
+ insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNS, vn, rd));
+ }
+
+ template<int dstsize, int srcsize>
+ ALWAYS_INLINE void fcvtnu(RegisterID rd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE_OF(dstsize);
+ CHECK_DATASIZE_OF(srcsize);
+ insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNU, vn, rd));
+ }
+
+ template<int dstsize, int srcsize>
+ ALWAYS_INLINE void fcvtps(RegisterID rd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE_OF(dstsize);
+ CHECK_DATASIZE_OF(srcsize);
+ insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPS, vn, rd));
+ }
+
+ template<int dstsize, int srcsize>
+ ALWAYS_INLINE void fcvtpu(RegisterID rd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE_OF(dstsize);
+ CHECK_DATASIZE_OF(srcsize);
+ insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPU, vn, rd));
+ }
+
+ template<int dstsize, int srcsize>
+ ALWAYS_INLINE void fcvtzs(RegisterID rd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE_OF(dstsize);
+ CHECK_DATASIZE_OF(srcsize);
+ insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZS, vn, rd));
+ }
+
+ template<int dstsize, int srcsize>
+ ALWAYS_INLINE void fcvtzu(RegisterID rd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE_OF(dstsize);
+ CHECK_DATASIZE_OF(srcsize);
+ insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZU, vn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fdiv(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FDIV, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_ADD, va, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fmax(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAX, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fmaxnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAXNM, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fmin(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMIN, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fminnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMINNM, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fmov(FPRegisterID vd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FMOV, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fmov(FPRegisterID vd, RegisterID rn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_XtoQ, rn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fmov(RegisterID rd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_QtoX, vn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fmov(FPRegisterID vd, double imm)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointImmediate(DATASIZE, encodeFPImm(imm), vd));
+ }
+
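+    // fmov_top moves between a general purpose register and the upper 64 bits
+    // (D[1]) of a 128-bit vector register.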
+ ALWAYS_INLINE void fmov_top(FPRegisterID vd, RegisterID rn)
+ {
+ insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_XtoQ_top, rn, vd));
+ }
+
+ ALWAYS_INLINE void fmov_top(RegisterID rd, FPRegisterID vn)
+ {
+ insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_QtoX_top, vn, rd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_SUB, va, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMUL, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fneg(FPRegisterID vd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FNEG, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fnmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_ADD, va, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fnmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_SUB, va, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fnmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FNMUL, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void frinta(FPRegisterID vd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTA, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void frinti(FPRegisterID vd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTI, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void frintm(FPRegisterID vd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTM, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void frintn(FPRegisterID vd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTN, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void frintp(FPRegisterID vd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTP, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void frintx(FPRegisterID vd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTX, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void frintz(FPRegisterID vd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTZ, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fsqrt(FPRegisterID vd, FPRegisterID vn)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FSQRT, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void fsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+ {
+ CHECK_DATASIZE();
+ insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FSUB, vn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm)
+ {
+ ldr<datasize>(rt, rn, rm, UXTX, 0);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ CHECK_FP_MEMOP_DATASIZE();
+ insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ CHECK_FP_MEMOP_DATASIZE();
+ insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PostIndex simm)
+ {
+ CHECK_FP_MEMOP_DATASIZE();
+ insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PreIndex simm)
+ {
+ CHECK_FP_MEMOP_DATASIZE();
+ insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldr_literal(FPRegisterID rt, int offset = 0)
+ {
+ CHECK_FP_MEMOP_DATASIZE();
+ ASSERT(datasize >= 32);
+ ASSERT(!(offset & 3));
+ insn(loadRegisterLiteral(datasize == 128 ? LdrLiteralOp_128BIT : datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, true, offset >> 2, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldur(FPRegisterID rt, RegisterID rn, int simm)
+ {
+ CHECK_FP_MEMOP_DATASIZE();
+ insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
+ }
+
+ template<int dstsize, int srcsize>
+ ALWAYS_INLINE void scvtf(FPRegisterID vd, RegisterID rn)
+ {
+ CHECK_DATASIZE_OF(dstsize);
+ CHECK_DATASIZE_OF(srcsize);
+ insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_SCVTF, rn, vd));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm)
+ {
+ str<datasize>(rt, rn, rm, UXTX, 0);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+ {
+ CHECK_FP_MEMOP_DATASIZE();
+ insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ CHECK_FP_MEMOP_DATASIZE();
+ insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PostIndex simm)
+ {
+ CHECK_FP_MEMOP_DATASIZE();
+ insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PreIndex simm)
+ {
+ CHECK_FP_MEMOP_DATASIZE();
+ insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void stur(FPRegisterID rt, RegisterID rn, int simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
+ }
+
+ template<int dstsize, int srcsize>
+ ALWAYS_INLINE void ucvtf(FPRegisterID vd, RegisterID rn)
+ {
+ CHECK_DATASIZE_OF(dstsize);
+ CHECK_DATASIZE_OF(srcsize);
+ insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_UCVTF, rn, vd));
+ }
+
+ // Admin methods:
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ return m_buffer.label();
+ }
+
+ AssemblerLabel labelForWatchpoint()
+ {
+ AssemblerLabel result = m_buffer.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
+ }
+
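+    // Pads with nops until the label is clear of the tail of the last
+    // watchpoint, leaving room for the watchpoint to be replaced by a jump.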
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = m_buffer.label();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ nop();
+ result = m_buffer.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ ASSERT(!(alignment & 3));
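+        // brk #0 is used as padding; if the padding is ever executed it traps.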
+ while (!m_buffer.isAligned(alignment))
+ brk(0);
+ return label();
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ ASSERT(label.isSet());
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ void* unlinkedCode() { return m_buffer.data(); }
+ size_t codeSize() const { return m_buffer.codeSize(); }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ ASSERT(call.isSet());
+ return call.m_offset;
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
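+    //
+    // For example (illustrative only):
+    //     linkJump(jump, target);             // while the buffer is still writable
+    //     ... finalize into executable memory ...
+    //     relinkJump(jumpAddress, newTarget); // patches the finalized code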
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
+ {
+ ASSERT(to.isSet());
+ ASSERT(from.isSet());
+ m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
+ }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
+ {
+ ASSERT(to.isSet());
+ ASSERT(from.isSet());
+ m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, is64Bit, compareRegister));
+ }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
+ {
+ ASSERT(to.isSet());
+ ASSERT(from.isSet());
+ m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, bitNumber, compareRegister));
+ }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
+ {
+ ASSERT(from.isSet());
+ ASSERT(to.isSet());
+ relinkJumpOrCall<false>(addressOf(from), addressOf(to));
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+ relinkJumpOrCall<false>(addressOf(code, from), to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+ linkJumpOrCall<true>(addressOf(code, from) - 1, to);
+ }
+
+ static void linkPointer(void* code, AssemblerLabel where, void* valuePtr)
+ {
+ linkPointer(addressOf(code, where), valuePtr);
+ }
+
+ static void replaceWithJump(void* where, void* to)
+ {
+ intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(where)) >> 2;
+ ASSERT(static_cast<int>(offset) == offset);
+ *static_cast<int*>(where) = unconditionalBranchImmediate(false, static_cast<int>(offset));
+ cacheFlush(where, sizeof(int));
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return 4;
+ }
+
+ static void replaceWithLoad(void* where)
+ {
+ Datasize sf;
+ AddOp op;
+ SetFlags S;
+ int shift;
+ int imm12;
+ RegisterID rn;
+ RegisterID rd;
+ if (disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd)) {
+ ASSERT(sf == Datasize_64);
+ ASSERT(op == AddOp_ADD);
+ ASSERT(!S);
+ ASSERT(!shift);
+ ASSERT(!(imm12 & ~0xff8));
+ *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd);
+ cacheFlush(where, sizeof(int));
+ }
+#if !ASSERT_DISABLED
+ else {
+ MemOpSize size;
+ bool V;
+ MemOp opc;
+ int imm12;
+ RegisterID rn;
+ RegisterID rt;
+ ASSERT(disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt));
+ ASSERT(size == MemOpSize_64);
+ ASSERT(!V);
+ ASSERT(opc == MemOp_LOAD);
+ ASSERT(!(imm12 & ~0x1ff));
+ }
+#endif
+ }
+
+ static void replaceWithAddressComputation(void* where)
+ {
+ MemOpSize size;
+ bool V;
+ MemOp opc;
+ int imm12;
+ RegisterID rn;
+ RegisterID rt;
+ if (disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt)) {
+ ASSERT(size == MemOpSize_64);
+ ASSERT(!V);
+ ASSERT(opc == MemOp_LOAD);
+ ASSERT(!(imm12 & ~0x1ff));
+ *static_cast<int*>(where) = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt);
+ cacheFlush(where, sizeof(int));
+ }
+#if !ASSERT_DISABLED
+ else {
+ Datasize sf;
+ AddOp op;
+ SetFlags S;
+ int shift;
+ int imm12;
+ RegisterID rn;
+ RegisterID rd;
+ ASSERT(disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd));
+ ASSERT(sf == Datasize_64);
+ ASSERT(op == AddOp_ADD);
+ ASSERT(!S);
+ ASSERT(!shift);
+ ASSERT(!(imm12 & ~0xff8));
+ }
+#endif
+ }
+
+ static void repatchPointer(void* where, void* valuePtr)
+ {
+ linkPointer(static_cast<int*>(where), valuePtr, true);
+ }
+
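+    // Rewrites a movz/movk/movk sequence so that it materializes the low
+    // 48 bits of valuePtr in rd, 16 bits per instruction.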
+ static void setPointer(int* address, void* valuePtr, RegisterID rd, bool flush)
+ {
+ uintptr_t value = reinterpret_cast<uintptr_t>(valuePtr);
+ address[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
+ address[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd);
+ address[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd);
+
+ if (flush)
+ cacheFlush(address, sizeof(int) * 3);
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ int* address = static_cast<int*>(where);
+
+ Datasize sf;
+ MoveWideOp opc;
+ int hw;
+ uint16_t imm16;
+ RegisterID rd;
+ bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
+ ASSERT_UNUSED(expected, expected && !sf && (opc == MoveWideOp_Z || opc == MoveWideOp_N) && !hw);
+ ASSERT(checkMovk<Datasize_32>(address[1], 1, rd));
+
+ if (value >= 0) {
+ address[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
+ address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
+ } else {
+ address[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd);
+ address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
+ }
+
+ cacheFlush(where, sizeof(int) * 2);
+ }
+
+ static void* readPointer(void* where)
+ {
+ int* address = static_cast<int*>(where);
+
+ Datasize sf;
+ MoveWideOp opc;
+ int hw;
+ uint16_t imm16;
+ RegisterID rdFirst, rd;
+
+ bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rdFirst);
+ ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
+ uintptr_t result = imm16;
+
+ expected = disassembleMoveWideImediate(address + 1, sf, opc, hw, imm16, rd);
+ ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 1 && rd == rdFirst);
+ result |= static_cast<uintptr_t>(imm16) << 16;
+
+ expected = disassembleMoveWideImediate(address + 2, sf, opc, hw, imm16, rd);
+ ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 2 && rd == rdFirst);
+ result |= static_cast<uintptr_t>(imm16) << 32;
+
+ return reinterpret_cast<void*>(result);
+ }
+
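+    // The call sequence is movz/movk/movk followed by blr; 'from' is the
+    // return address, so the pointer starts four instructions back.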
+ static void* readCallTarget(void* from)
+ {
+ return readPointer(reinterpret_cast<int*>(from) - 4);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ relinkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
+ cacheFlush(from, sizeof(int));
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, to);
+ cacheFlush(reinterpret_cast<int*>(from) - 1, sizeof(int));
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ ASSERT(!(value & ~0x3ff8));
+
+ MemOpSize size;
+ bool V;
+ MemOp opc;
+ int imm12;
+ RegisterID rn;
+ RegisterID rt;
+ bool expected = disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt);
+ ASSERT_UNUSED(expected, expected && size >= MemOpSize_32 && !V && opc == MemOp_LOAD); // expect 32/64 bit load to GPR.
+
+ if (size == MemOpSize_32)
+ imm12 = encodePositiveImmediate<32>(value);
+ else
+ imm12 = encodePositiveImmediate<64>(value);
+ *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt);
+
+ cacheFlush(where, sizeof(int));
+ }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+#if OS(LINUX) && COMPILER(GCC_OR_CLANG)
+ static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+ {
+ __builtin___clear_cache(reinterpret_cast<void*>(begin), reinterpret_cast<void*>(end));
+ }
+#endif
+
+ static void cacheFlush(void* code, size_t size)
+ {
+#if OS(IOS)
+ sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
+#elif OS(LINUX)
+ size_t page = pageSize();
+ uintptr_t current = reinterpret_cast<uintptr_t>(code);
+ uintptr_t end = current + size;
+ uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+ if (end <= firstPageEnd) {
+ linuxPageFlush(current, end);
+ return;
+ }
+
+ linuxPageFlush(current, firstPageEnd);
+
+ for (current = firstPageEnd; current + page < end; current += page)
+ linuxPageFlush(current, current + page);
+
+ linuxPageFlush(current, end);
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+ }
+
+ // Assembler admin methods:
+
+ static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+
+ static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
+ {
+ return a.from() < b.from();
+ }
+
+ static bool canCompact(JumpType jumpType)
+ {
+ // Fixed jumps cannot be compacted
+ return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit);
+ }
+
+ static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+ {
+ switch (jumpType) {
+ case JumpFixed:
+ return LinkInvalid;
+ case JumpNoConditionFixedSize:
+ return LinkJumpNoCondition;
+ case JumpConditionFixedSize:
+ return LinkJumpCondition;
+ case JumpCompareAndBranchFixedSize:
+ return LinkJumpCompareAndBranch;
+ case JumpTestBitFixedSize:
+ return LinkJumpTestBit;
+ case JumpNoCondition:
+ return LinkJumpNoCondition;
+ case JumpCondition: {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
+ ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
+ intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));
+
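+            // The signed byte offset must fit in 21 bits (a 19-bit imm19 times
+            // 4 bytes per instruction) for a direct conditional branch.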
+ if (((relative << 43) >> 43) == relative)
+ return LinkJumpConditionDirect;
+
+ return LinkJumpCondition;
+ }
+ case JumpCompareAndBranch: {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
+ ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
+ intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));
+
+ if (((relative << 43) >> 43) == relative)
+ return LinkJumpCompareAndBranchDirect;
+
+ return LinkJumpCompareAndBranch;
+ }
+ case JumpTestBit: {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
+ ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
+ intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));
+
+ if (((relative << 50) >> 50) == relative)
+ return LinkJumpTestBitDirect;
+
+ return LinkJumpTestBit;
+ }
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ return LinkJumpNoCondition;
+ }
+
+ static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+ {
+ JumpLinkType linkType = computeJumpType(record.type(), from, to);
+ record.setLinkType(linkType);
+ return linkType;
+ }
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
+ {
+ std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
+ return m_jumpsToLink;
+ }
+
+ static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
+ {
+ switch (record.linkType()) {
+ case LinkJumpNoCondition:
+ linkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
+ break;
+ case LinkJumpConditionDirect:
+ linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), to);
+ break;
+ case LinkJumpCondition:
+ linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, to);
+ break;
+ case LinkJumpCompareAndBranchDirect:
+ linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), to);
+ break;
+ case LinkJumpCompareAndBranch:
+ linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
+ break;
+ case LinkJumpTestBitDirect:
+ linkTestAndBranch<true>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), to);
+ break;
+ case LinkJumpTestBit:
+ linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+private:
+ template<Datasize size>
+ static bool checkMovk(int insn, int _hw, RegisterID _rd)
+ {
+ Datasize sf;
+ MoveWideOp opc;
+ int hw;
+ uint16_t imm16;
+ RegisterID rd;
+ bool expected = disassembleMoveWideImediate(&insn, sf, opc, hw, imm16, rd);
+
+ return expected
+ && sf == size
+ && opc == MoveWideOp_K
+ && hw == _hw
+ && rd == _rd;
+ }
+
+ static void linkPointer(int* address, void* valuePtr, bool flush = false)
+ {
+ Datasize sf;
+ MoveWideOp opc;
+ int hw;
+ uint16_t imm16;
+ RegisterID rd;
+ bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
+ ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
+ ASSERT(checkMovk<Datasize_64>(address[1], 1, rd));
+ ASSERT(checkMovk<Datasize_64>(address[2], 2, rd));
+
+ setPointer(address, valuePtr, rd, flush);
+ }
+
+ template<bool isCall>
+ static void linkJumpOrCall(int* from, void* to)
+ {
+ bool link;
+ int imm26;
+ bool isUnconditionalBranchImmediateOrNop = disassembleUnconditionalBranchImmediate(from, link, imm26) || disassembleNop(from);
+
+ ASSERT_UNUSED(isUnconditionalBranchImmediateOrNop, isUnconditionalBranchImmediateOrNop);
+ ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from));
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
+ ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
+ intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
+ ASSERT(static_cast<int>(offset) == offset);
+
+ *from = unconditionalBranchImmediate(isCall, static_cast<int>(offset));
+ }
+
+ template<bool isDirect>
+ static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
+ ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
+ intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
+ ASSERT(((offset << 38) >> 38) == offset);
+
+ bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
+ ASSERT(!isDirect || useDirect);
+
+ if (useDirect || isDirect) {
+ *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt);
+ if (!isDirect)
+ *(from + 1) = nopPseudo();
+ } else {
+ *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt);
+ linkJumpOrCall<false>(from + 1, to);
+ }
+ }
+
+ template<bool isDirect>
+ static void linkConditionalBranch(Condition condition, int* from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
+ ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
+ intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
+ ASSERT(((offset << 38) >> 38) == offset);
+
+ bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
+ ASSERT(!isDirect || useDirect);
+
+ if (useDirect || isDirect) {
+ *from = conditionalBranchImmediate(static_cast<int>(offset), condition);
+ if (!isDirect)
+ *(from + 1) = nopPseudo();
+ } else {
+ *from = conditionalBranchImmediate(2, invert(condition));
+ linkJumpOrCall<false>(from + 1, to);
+ }
+ }
+
+ template<bool isDirect>
+ static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
+ ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
+ intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
+ ASSERT(static_cast<int>(offset) == offset);
+ ASSERT(((offset << 38) >> 38) == offset);
+
+ bool useDirect = ((offset << 50) >> 50) == offset; // Fits in 14 bits
+ ASSERT(!isDirect || useDirect);
+
+ if (useDirect || isDirect) {
+ *from = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt);
+ if (!isDirect)
+ *(from + 1) = nopPseudo();
+ } else {
+ *from = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt);
+ linkJumpOrCall<false>(from + 1, to);
+ }
+ }
+
+ template<bool isCall>
+ static void relinkJumpOrCall(int* from, void* to)
+ {
+ if (!isCall && disassembleNop(from)) {
+ unsigned op01;
+ int imm19;
+ Condition condition;
+ bool isConditionalBranchImmediate = disassembleConditionalBranchImmediate(from - 1, op01, imm19, condition);
+
+ if (isConditionalBranchImmediate) {
+ ASSERT_UNUSED(op01, !op01);
+ ASSERT_UNUSED(isCall, !isCall);
+
+ if (imm19 == 8)
+ condition = invert(condition);
+
+ linkConditionalBranch<false>(condition, from - 1, to);
+ return;
+ }
+
+ Datasize opSize;
+ bool op;
+ RegisterID rt;
+ bool isCompareAndBranchImmediate = disassembleCompareAndBranchImmediate(from - 1, opSize, op, imm19, rt);
+
+ if (isCompareAndBranchImmediate) {
+ if (imm19 == 8)
+ op = !op;
+
+ linkCompareAndBranch<false>(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, to);
+ return;
+ }
+
+ int imm14;
+ unsigned bitNumber;
+ bool isTestAndBranchImmediate = disassembleTestAndBranchImmediate(from - 1, op, bitNumber, imm14, rt);
+
+ if (isTestAndBranchImmediate) {
+ if (imm14 == 8)
+ op = !op;
+
+ linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, to);
+ return;
+ }
+ }
+
+ linkJumpOrCall<isCall>(from, to);
+ }
+
+ static int* addressOf(void* code, AssemblerLabel label)
+ {
+ return reinterpret_cast<int*>(static_cast<char*>(code) + label.m_offset);
+ }
+
+ int* addressOf(AssemblerLabel label)
+ {
+ return addressOf(m_buffer.data(), label);
+ }
+
+ static RegisterID disassembleXOrSp(int reg) { return reg == 31 ? ARM64Registers::sp : static_cast<RegisterID>(reg); }
+ static RegisterID disassembleXOrZr(int reg) { return reg == 31 ? ARM64Registers::zr : static_cast<RegisterID>(reg); }
+ static RegisterID disassembleXOrZrOrSp(bool useZr, int reg) { return reg == 31 ? (useZr ? ARM64Registers::zr : ARM64Registers::sp) : static_cast<RegisterID>(reg); }
+
+ static bool disassembleAddSubtractImmediate(void* address, Datasize& sf, AddOp& op, SetFlags& S, int& shift, int& imm12, RegisterID& rn, RegisterID& rd)
+ {
+ int insn = *static_cast<int*>(address);
+ sf = static_cast<Datasize>((insn >> 31) & 1);
+ op = static_cast<AddOp>((insn >> 30) & 1);
+ S = static_cast<SetFlags>((insn >> 29) & 1);
+ shift = (insn >> 22) & 3;
+ imm12 = (insn >> 10) & 0x3ff;
+ rn = disassembleXOrSp((insn >> 5) & 0x1f);
+ rd = disassembleXOrZrOrSp(S, insn & 0x1f);
+ return (insn & 0x1f000000) == 0x11000000;
+ }
+
+ static bool disassembleLoadStoreRegisterUnsignedImmediate(void* address, MemOpSize& size, bool& V, MemOp& opc, int& imm12, RegisterID& rn, RegisterID& rt)
+ {
+ int insn = *static_cast<int*>(address);
+ size = static_cast<MemOpSize>((insn >> 30) & 3);
+ V = (insn >> 26) & 1;
+ opc = static_cast<MemOp>((insn >> 22) & 3);
+ imm12 = (insn >> 10) & 0xfff;
+ rn = disassembleXOrSp((insn >> 5) & 0x1f);
+ rt = disassembleXOrZr(insn & 0x1f);
+ return (insn & 0x3b000000) == 0x39000000;
+ }
+
+ static bool disassembleMoveWideImediate(void* address, Datasize& sf, MoveWideOp& opc, int& hw, uint16_t& imm16, RegisterID& rd)
+ {
+ int insn = *static_cast<int*>(address);
+ sf = static_cast<Datasize>((insn >> 31) & 1);
+ opc = static_cast<MoveWideOp>((insn >> 29) & 3);
+ hw = (insn >> 21) & 3;
+ imm16 = insn >> 5;
+ rd = disassembleXOrZr(insn & 0x1f);
+ return (insn & 0x1f800000) == 0x12800000;
+ }
+
+ static bool disassembleNop(void* address)
+ {
+ unsigned insn = *static_cast<unsigned*>(address);
+ return insn == 0xd503201f;
+ }
+
+ static bool disassembleCompareAndBranchImmediate(void* address, Datasize& sf, bool& op, int& imm19, RegisterID& rt)
+ {
+ int insn = *static_cast<int*>(address);
+ sf = static_cast<Datasize>((insn >> 31) & 1);
+ op = (insn >> 24) & 0x1;
+ imm19 = (insn << 8) >> 13;
+ rt = static_cast<RegisterID>(insn & 0x1f);
+ return (insn & 0x7e000000) == 0x34000000;
+ }
+
+ static bool disassembleConditionalBranchImmediate(void* address, unsigned& op01, int& imm19, Condition &condition)
+ {
+ int insn = *static_cast<int*>(address);
+ op01 = ((insn >> 23) & 0x2) | ((insn >> 4) & 0x1);
+ imm19 = (insn << 8) >> 13;
+ condition = static_cast<Condition>(insn & 0xf);
+ return (insn & 0xfe000000) == 0x54000000;
+ }
+
+ static bool disassembleTestAndBranchImmediate(void* address, bool& op, unsigned& bitNumber, int& imm14, RegisterID& rt)
+ {
+ int insn = *static_cast<int*>(address);
+ op = (insn >> 24) & 0x1;
+ imm14 = (insn << 13) >> 18;
+ bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f));
+ rt = static_cast<RegisterID>(insn & 0x1f);
+ return (insn & 0x7e000000) == 0x36000000;
+ }
+
+ static bool disassembleUnconditionalBranchImmediate(void* address, bool& op, int& imm26)
+ {
+ int insn = *static_cast<int*>(address);
+ op = (insn >> 31) & 1;
+ imm26 = (insn << 6) >> 6;
+ return (insn & 0x7c000000) == 0x14000000;
+ }
+
+ static int xOrSp(RegisterID reg) { ASSERT(!isZr(reg)); return reg; }
+ static int xOrZr(RegisterID reg) { ASSERT(!isSp(reg)); return reg & 31; }
+ static FPRegisterID xOrZrAsFPR(RegisterID reg) { return static_cast<FPRegisterID>(xOrZr(reg)); }
+ static int xOrZrOrSp(bool useZr, RegisterID reg) { return useZr ? xOrZr(reg) : xOrSp(reg); }
+
+ ALWAYS_INLINE void insn(int instruction)
+ {
+ m_buffer.putInt(instruction);
+ }
+
+ ALWAYS_INLINE static int addSubtractExtendedRegister(Datasize sf, AddOp op, SetFlags S, RegisterID rm, ExtendType option, int imm3, RegisterID rn, RegisterID rd)
+ {
+ ASSERT(imm3 < 5);
+        // The only allocated value for opt is 0.
+ const int opt = 0;
+ return (0x0b200000 | sf << 31 | op << 30 | S << 29 | opt << 22 | xOrZr(rm) << 16 | option << 13 | (imm3 & 0x7) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
+ }
+
+ ALWAYS_INLINE static int addSubtractImmediate(Datasize sf, AddOp op, SetFlags S, int shift, int imm12, RegisterID rn, RegisterID rd)
+ {
+ ASSERT(shift < 2);
+ ASSERT(isUInt12(imm12));
+ return (0x11000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
+ }
+
+ ALWAYS_INLINE static int addSubtractShiftedRegister(Datasize sf, AddOp op, SetFlags S, ShiftType shift, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
+ {
+ ASSERT(shift < 3);
+ ASSERT(!(imm6 & (sf ? ~63 : ~31)));
+ return (0x0b000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+ }
+
+ ALWAYS_INLINE static int addSubtractWithCarry(Datasize sf, AddOp op, SetFlags S, RegisterID rm, RegisterID rn, RegisterID rd)
+ {
+ const int opcode2 = 0;
+ return (0x1a000000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | opcode2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+ }
+
+ ALWAYS_INLINE static int bitfield(Datasize sf, BitfieldOp opc, int immr, int imms, RegisterID rn, RegisterID rd)
+ {
+ ASSERT(immr < (sf ? 64 : 32));
+ ASSERT(imms < (sf ? 64 : 32));
+ const int N = sf;
+ return (0x13000000 | sf << 31 | opc << 29 | N << 22 | immr << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+ }
+
+ // 'op' means negate
+ ALWAYS_INLINE static int compareAndBranchImmediate(Datasize sf, bool op, int32_t imm19, RegisterID rt)
+ {
+ ASSERT(imm19 == (imm19 << 13) >> 13);
+ return (0x34000000 | sf << 31 | op << 24 | (imm19 & 0x7ffff) << 5 | xOrZr(rt));
+ }
+
+ ALWAYS_INLINE static int conditionalBranchImmediate(int32_t imm19, Condition cond)
+ {
+ ASSERT(imm19 == (imm19 << 13) >> 13);
+ ASSERT(!(cond & ~15));
+ // The only allocated values for o1 & o0 are 0.
+ const int o1 = 0;
+ const int o0 = 0;
+ return (0x54000000 | o1 << 24 | (imm19 & 0x7ffff) << 5 | o0 << 4 | cond);
+ }
+
+ ALWAYS_INLINE static int conditionalCompareImmediate(Datasize sf, AddOp op, int imm5, Condition cond, RegisterID rn, int nzcv)
+ {
+ ASSERT(!(imm5 & ~0x1f));
+ ASSERT(nzcv < 16);
+ const int S = 1;
+ const int o2 = 0;
+ const int o3 = 0;
+ return (0x1a400800 | sf << 31 | op << 30 | S << 29 | (imm5 & 0x1f) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
+ }
+
+ ALWAYS_INLINE static int conditionalCompareRegister(Datasize sf, AddOp op, RegisterID rm, Condition cond, RegisterID rn, int nzcv)
+ {
+ ASSERT(nzcv < 16);
+ const int S = 1;
+ const int o2 = 0;
+ const int o3 = 0;
+ return (0x1a400000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
+ }
+
+ // 'op' means negate
+ // 'op2' means increment
+ ALWAYS_INLINE static int conditionalSelect(Datasize sf, bool op, RegisterID rm, Condition cond, bool op2, RegisterID rn, RegisterID rd)
+ {
+ const int S = 0;
+ return (0x1a800000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | op2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+ }
+
+ ALWAYS_INLINE static int dataProcessing1Source(Datasize sf, DataOp1Source opcode, RegisterID rn, RegisterID rd)
+ {
+ const int S = 0;
+ const int opcode2 = 0;
+ return (0x5ac00000 | sf << 31 | S << 29 | opcode2 << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+ }
+
+ ALWAYS_INLINE static int dataProcessing2Source(Datasize sf, RegisterID rm, DataOp2Source opcode, RegisterID rn, RegisterID rd)
+ {
+ const int S = 0;
+ return (0x1ac00000 | sf << 31 | S << 29 | xOrZr(rm) << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+ }
+
+ ALWAYS_INLINE static int dataProcessing3Source(Datasize sf, DataOp3Source opcode, RegisterID rm, RegisterID ra, RegisterID rn, RegisterID rd)
+ {
+ int op54 = opcode >> 4;
+ int op31 = (opcode >> 1) & 7;
+ int op0 = opcode & 1;
+ return (0x1b000000 | sf << 31 | op54 << 29 | op31 << 21 | xOrZr(rm) << 16 | op0 << 15 | xOrZr(ra) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+ }
+
+ ALWAYS_INLINE static int excepnGeneration(ExcepnOp opc, uint16_t imm16, int LL)
+ {
+ ASSERT((opc == ExcepnOp_BREAKPOINT || opc == ExcepnOp_HALT) ? !LL : (LL && (LL < 4)));
+ const int op2 = 0;
+ return (0xd4000000 | opc << 21 | imm16 << 5 | op2 << 2 | LL);
+ }
+
+ ALWAYS_INLINE static int extract(Datasize sf, RegisterID rm, int imms, RegisterID rn, RegisterID rd)
+ {
+ ASSERT(imms < (sf ? 64 : 32));
+ const int op21 = 0;
+ const int N = sf;
+ const int o0 = 0;
+ return (0x13800000 | sf << 31 | op21 << 29 | N << 22 | o0 << 21 | xOrZr(rm) << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+ }
+
+ ALWAYS_INLINE static int floatingPointCompare(Datasize type, FPRegisterID rm, FPRegisterID rn, FPCmpOp opcode2)
+ {
+ const int M = 0;
+ const int S = 0;
+ const int op = 0;
+ return (0x1e202000 | M << 31 | S << 29 | type << 22 | rm << 16 | op << 14 | rn << 5 | opcode2);
+ }
+
+ ALWAYS_INLINE static int floatingPointConditionalCompare(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPCondCmpOp op, int nzcv)
+ {
+ ASSERT(nzcv < 16);
+ const int M = 0;
+ const int S = 0;
+ return (0x1e200400 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | op << 4 | nzcv);
+ }
+
+ ALWAYS_INLINE static int floatingPointConditionalSelect(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPRegisterID rd)
+ {
+ const int M = 0;
+ const int S = 0;
+ return (0x1e200c00 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | rd);
+ }
+
+ ALWAYS_INLINE static int floatingPointImmediate(Datasize type, int imm8, FPRegisterID rd)
+ {
+ const int M = 0;
+ const int S = 0;
+ const int imm5 = 0;
+ return (0x1e201000 | M << 31 | S << 29 | type << 22 | (imm8 & 0xff) << 13 | imm5 << 5 | rd);
+ }
+
+ ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, FPRegisterID rd)
+ {
+ const int S = 0;
+ return (0x1e200000 | sf << 31 | S << 29 | type << 22 | rmodeOpcode << 16 | rn << 5 | rd);
+ }
+
+ ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, RegisterID rd)
+ {
+ return floatingPointIntegerConversions(sf, type, rmodeOpcode, rn, xOrZrAsFPR(rd));
+ }
+
+ ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, RegisterID rn, FPRegisterID rd)
+ {
+ return floatingPointIntegerConversions(sf, type, rmodeOpcode, xOrZrAsFPR(rn), rd);
+ }
+
+ ALWAYS_INLINE static int floatingPointDataProcessing1Source(Datasize type, FPDataOp1Source opcode, FPRegisterID rn, FPRegisterID rd)
+ {
+ const int M = 0;
+ const int S = 0;
+ return (0x1e204000 | M << 31 | S << 29 | type << 22 | opcode << 15 | rn << 5 | rd);
+ }
+
+ ALWAYS_INLINE static int floatingPointDataProcessing2Source(Datasize type, FPRegisterID rm, FPDataOp2Source opcode, FPRegisterID rn, FPRegisterID rd)
+ {
+ const int M = 0;
+ const int S = 0;
+ return (0x1e200800 | M << 31 | S << 29 | type << 22 | rm << 16 | opcode << 12 | rn << 5 | rd);
+ }
+
+ // 'o1' means negate
+ ALWAYS_INLINE static int floatingPointDataProcessing3Source(Datasize type, bool o1, FPRegisterID rm, AddOp o2, FPRegisterID ra, FPRegisterID rn, FPRegisterID rd)
+ {
+ const int M = 0;
+ const int S = 0;
+ return (0x1f000000 | M << 31 | S << 29 | type << 22 | o1 << 21 | rm << 16 | o2 << 15 | ra << 10 | rn << 5 | rd);
+ }
+
+ // 'V' means vector
+ ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, FPRegisterID rt)
+ {
+ ASSERT(((imm19 << 13) >> 13) == imm19);
+ return (0x18000000 | opc << 30 | V << 26 | (imm19 & 0x7ffff) << 5 | rt);
+ }
+
+ ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, RegisterID rt)
+ {
+ return loadRegisterLiteral(opc, V, imm19, xOrZrAsFPR(rt));
+ }
+
+ // 'V' means vector
+ ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
+ {
+ ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // A signed 32-bit load must extend from 8/16 bits.
+ ASSERT(isInt9(imm9));
+ return (0x38000400 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
+ }
+
+ ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
+ {
+ return loadStoreRegisterPostIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
+ }
+
+ // 'V' means vector
+ ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
+ {
+ ASSERT(size < 3);
+        ASSERT(opc == (opc & 1)); // Only load or store; load signed 64 is handled via size.
+        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There is no signed integer store.
+ unsigned immedShiftAmount = memPairOffsetShift(V, size);
+ int imm7 = immediate >> immedShiftAmount;
+ ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
+ return (0x28800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
+ }
+
+ ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
+ {
+ return loadStoreRegisterPairPostIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
+ }
+
+ // 'V' means vector
+ ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
+ {
+ ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // A signed 32-bit load must extend from 8/16 bits.
+ ASSERT(isInt9(imm9));
+ return (0x38000c00 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
+ }
+
+ ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
+ {
+ return loadStoreRegisterPreIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
+ }
+
+ // 'V' means vector
+ ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
+ {
+ ASSERT(size < 3);
+        ASSERT(opc == (opc & 1)); // Only load or store; load signed 64 is handled via size.
+        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There is no signed integer store.
+ unsigned immedShiftAmount = memPairOffsetShift(V, size);
+ int imm7 = immediate >> immedShiftAmount;
+ ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
+ return (0x29800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
+ }
+
+ ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
+ {
+ return loadStoreRegisterPairPreIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
+ }
+
+ // 'V' means vector
+ // 'S' means shift rm
+ ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt)
+ {
+ ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // A signed 32-bit load must extend from 8/16 bits.
+ ASSERT(option & 2); // The ExtendType for the address must be 32/64 bit, signed or unsigned - not 8/16bit.
+ return (0x38200800 | size << 30 | V << 26 | opc << 22 | xOrZr(rm) << 16 | option << 13 | S << 12 | xOrSp(rn) << 5 | rt);
+ }
+
+ ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, RegisterID rt)
+ {
+ return loadStoreRegisterRegisterOffset(size, V, opc, rm, option, S, rn, xOrZrAsFPR(rt));
+ }
+
+ // 'V' means vector
+ ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
+ {
+ ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // A signed 32-bit load must extend from 8/16 bits.
+ ASSERT(isInt9(imm9));
+ return (0x38000000 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
+ }
+
+ ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
+ {
+ ASSERT(isInt9(imm9));
+ return loadStoreRegisterUnscaledImmediate(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
+ }
+
+ // 'V' means vector
+ ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, FPRegisterID rt)
+ {
+ ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // A signed 32-bit load must extend from 8/16 bits.
+ ASSERT(isUInt12(imm12));
+ return (0x39000000 | size << 30 | V << 26 | opc << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | rt);
+ }
+
+ ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, RegisterID rt)
+ {
+ return loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, xOrZrAsFPR(rt));
+ }
+
+ ALWAYS_INLINE static int logicalImmediate(Datasize sf, LogicalOp opc, int N_immr_imms, RegisterID rn, RegisterID rd)
+ {
+ ASSERT(!(N_immr_imms & (sf ? ~0x1fff : ~0xfff)));
+ return (0x12000000 | sf << 31 | opc << 29 | N_immr_imms << 10 | xOrZr(rn) << 5 | xOrZrOrSp(opc == LogicalOp_ANDS, rd));
+ }
+
+ // 'N' means negate rm
+ ALWAYS_INLINE static int logicalShiftedRegister(Datasize sf, LogicalOp opc, ShiftType shift, bool N, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
+ {
+ ASSERT(!(imm6 & (sf ? ~63 : ~31)));
+ return (0x0a000000 | sf << 31 | opc << 29 | shift << 22 | N << 21 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+ }
+
+ ALWAYS_INLINE static int moveWideImediate(Datasize sf, MoveWideOp opc, int hw, uint16_t imm16, RegisterID rd)
+ {
+ ASSERT(hw < (sf ? 4 : 2));
+ return (0x12800000 | sf << 31 | opc << 29 | hw << 21 | (int)imm16 << 5 | xOrZr(rd));
+ }
+
+ // 'op' means link
+ ALWAYS_INLINE static int unconditionalBranchImmediate(bool op, int32_t imm26)
+ {
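+        // The shift pair below checks that imm26 fits in a signed 26-bit field.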
+ ASSERT(imm26 == (imm26 << 6) >> 6);
+ return (0x14000000 | op << 31 | (imm26 & 0x3ffffff));
+ }
+
+ // 'op' means page
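+    // op == 0 encodes ADR (imm21 is a byte offset); op == 1 encodes ADRP
+    // (imm21 is a signed offset in 4KB pages).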
+ ALWAYS_INLINE static int pcRelative(bool op, int32_t imm21, RegisterID rd)
+ {
+ ASSERT(imm21 == (imm21 << 11) >> 11);
+ int32_t immlo = imm21 & 3;
+ int32_t immhi = (imm21 >> 2) & 0x7ffff;
+ return (0x10000000 | op << 31 | immlo << 29 | immhi << 5 | xOrZr(rd));
+ }
+
+ ALWAYS_INLINE static int system(bool L, int op0, int op1, int crn, int crm, int op2, RegisterID rt)
+ {
+ return (0xd5000000 | L << 21 | op0 << 19 | op1 << 16 | crn << 12 | crm << 8 | op2 << 5 | xOrZr(rt));
+ }
+
+ ALWAYS_INLINE static int hintPseudo(int imm)
+ {
+ ASSERT(!(imm & ~0x7f));
+ return system(0, 0, 3, 2, (imm >> 3) & 0xf, imm & 0x7, ARM64Registers::zr);
+ }
+
+ ALWAYS_INLINE static int nopPseudo()
+ {
+ return hintPseudo(0);
+ }
+
+ // 'op' means negate
+ ALWAYS_INLINE static int testAndBranchImmediate(bool op, int b50, int imm14, RegisterID rt)
+ {
+ ASSERT(!(b50 & ~0x3f));
+ ASSERT(imm14 == (imm14 << 18) >> 18);
+ int b5 = b50 >> 5;
+ int b40 = b50 & 0x1f;
+ return (0x36000000 | b5 << 31 | op << 24 | b40 << 19 | (imm14 & 0x3fff) << 5 | xOrZr(rt));
+ }
+
+ ALWAYS_INLINE static int unconditionalBranchRegister(BranchType opc, RegisterID rn)
+ {
+        // The only allocated value for op2 is 0x1f; op3 and op4 must be 0.
+ const int op2 = 0x1f;
+ const int op3 = 0;
+ const int op4 = 0;
+ return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4);
+ }
+
+ // Workaround for Cortex-A53 erratum (835769). Emit an extra nop if the
+ // last instruction in the buffer is a load, store or prefetch. Needed
+ // before 64-bit multiply-accumulate instructions.
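+    // For example, "ldr x0, [x1]" encodes as 0xf9400020, and
+    // (0xf9400020 & 0x0a000000) == 0x08000000 matches the mask checked below,
+    // so a nop is inserted between it and a following 64-bit madd/msub.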
+ template<int datasize>
+ ALWAYS_INLINE void nopCortexA53Fix835769()
+ {
+#if CPU(ARM64_CORTEXA53)
+ CHECK_DATASIZE();
+ if (datasize == 64) {
+ if (LIKELY(m_buffer.codeSize() >= sizeof(int32_t))) {
+ // From ARMv8 Reference Manual, Section C4.1: the encoding of the
+ // instructions in the Loads and stores instruction group is:
+ // ---- 1-0- ---- ---- ---- ---- ---- ----
+ if (UNLIKELY((*reinterpret_cast_ptr<int32_t*>(reinterpret_cast_ptr<char*>(m_buffer.data()) + m_buffer.codeSize() - sizeof(int32_t)) & 0x0a000000) == 0x08000000))
+ nop();
+ }
+ }
+#endif
+ }
+
+ // Workaround for Cortex-A53 erratum (843419). Emit extra nops to avoid
+ // wrong address access after ADRP instruction.
+ ALWAYS_INLINE void nopCortexA53Fix843419()
+ {
+#if CPU(ARM64_CORTEXA53)
+ nop();
+ nop();
+ nop();
+#endif
+ }
+
+ AssemblerBuffer m_buffer;
+ Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#undef CHECK_DATASIZE_OF
+#undef DATASIZE_OF
+#undef MEMOPSIZE_OF
+#undef CHECK_DATASIZE
+#undef DATASIZE
+#undef MEMOPSIZE
+#undef CHECK_FP_MEMOP_DATASIZE
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM64)
+
+#endif // ARM64Assembler_h
diff --git a/Source/JavaScriptCore/assembler/ARMAssembler.cpp b/Source/JavaScriptCore/assembler/ARMAssembler.cpp
new file mode 100644
index 000000000..f9100d4c9
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/ARMAssembler.cpp
@@ -0,0 +1,425 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "ARMAssembler.h"
+
+namespace JSC {
+
+// Patching helpers
+
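+// The 12-bit offset field of a pool-referencing ldr initially holds the
+// constant's slot index, pre-encoded as (index << 1) | 1 by
+// patchConstantPoolLoad(ARMWord, ARMWord). The PC reads two words ahead, so
+// the final byte offset is (diff + index - 2) * sizeof(ARMWord); when the
+// constant immediately follows the ldr (diff == 1, index == 0), the offset is
+// 4 bytes downwards, hence the DataTransferUp bit is cleared below.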
+void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+{
+ ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
+ ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
+ ARMWord index = (*ldr & 0xfff) >> 1;
+
+ ASSERT(diff >= 1);
+ if (diff >= 2 || index > 0) {
+ diff = (diff + index - 2) * sizeof(ARMWord);
+ ASSERT(diff <= 0xfff);
+ *ldr = (*ldr & ~0xfff) | diff;
+ } else
+ *ldr = (*ldr & ~(0xfff | ARMAssembler::DataTransferUp)) | sizeof(ARMWord);
+}
+
+// Handle immediates
+
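+// An ARM data-processing immediate is an 8-bit value rotated right by an even
+// amount; the 4-bit rotate field stores half the rotation. For example,
+// getOp2(0x0000ff00) returns Op2Immediate | 0xff | (12 << 8), i.e. 0xff
+// rotated right by 24 bits.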
+ARMWord ARMAssembler::getOp2(ARMWord imm)
+{
+ int rol;
+
+ if (imm <= 0xff)
+ return Op2Immediate | imm;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol = 8;
+ }
+ else {
+ imm = (imm << 24) | (imm >> 8);
+ rol = 0;
+ }
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ return Op2Immediate | (imm >> 24) | (rol << 8);
+
+ return InvalidImmediate;
+}
+
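+// genInt materializes a constant that can be split into two 8-bit rotated
+// chunks. For example, genInt(reg, 0x0000ffff, true) emits
+// "mov reg, #0xff00" followed by "orr reg, reg, #0xff".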
+int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
+{
+    // Step 1: Search for a non-immediate part.
+ ARMWord mask;
+ ARMWord imm1;
+ ARMWord imm2;
+ int rol;
+
+ mask = 0xff000000;
+ rol = 8;
+    while (1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = 4 + (rol >> 1);
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3) {
+ // rol 8
+ imm = (imm << 8) | (imm >> 24);
+ mask = 0xff00;
+ rol = 24;
+ while (1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = (rol >> 1) - 8;
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3)
+ return 0;
+ }
+ break;
+ }
+ }
+
+ ASSERT((imm & 0xff) == 0);
+
+ if ((imm & 0xff000000) == 0) {
+ imm1 = Op2Immediate | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
+ imm2 = Op2Immediate | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
+ } else if (imm & 0xc0000000) {
+ imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ } else {
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ }
+
+ if (positive) {
+ mov(reg, imm1);
+ orr(reg, reg, imm2);
+ } else {
+ mvn(reg, imm1);
+ bic(reg, reg, imm2);
+ }
+
+ return 1;
+}
+
+ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
+{
+ ARMWord tmp;
+
+    // Do it with one instruction, if possible.
+ tmp = getOp2(imm);
+ if (tmp != InvalidImmediate)
+ return tmp;
+
+ tmp = getOp2(~imm);
+ if (tmp != InvalidImmediate) {
+ if (invert)
+ return tmp | Op2InvertedImmediate;
+ mvn(tmpReg, tmp);
+ return tmpReg;
+ }
+
+ return encodeComplexImm(imm, tmpReg);
+}
+
+void ARMAssembler::moveImm(ARMWord imm, int dest)
+{
+ ARMWord tmp;
+
+    // Do it with one instruction, if possible.
+ tmp = getOp2(imm);
+ if (tmp != InvalidImmediate) {
+ mov(dest, tmp);
+ return;
+ }
+
+ tmp = getOp2(~imm);
+ if (tmp != InvalidImmediate) {
+ mvn(dest, tmp);
+ return;
+ }
+
+ encodeComplexImm(imm, dest);
+}
+
+ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
+{
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ ARMWord tmp = getImm16Op2(imm);
+ if (tmp != InvalidImmediate) {
+ movw(dest, tmp);
+ return dest;
+ }
+ movw(dest, getImm16Op2(imm & 0xffff));
+ movt(dest, getImm16Op2(imm >> 16));
+ return dest;
+#else
+    // Do it with two instructions.
+ if (genInt(dest, imm, true))
+ return dest;
+ if (genInt(dest, ~imm, false))
+ return dest;
+
+ ldrImmediate(dest, imm);
+ return dest;
+#endif
+}
+
+// Memory load/store helpers
+
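+// Offsets that do not fit the 12-bit displacement have their high bits added
+// into the scratch register S0 first. For example, a 32-bit load at
+// base + 0x12345 becomes "add S0, base, #0x12000" followed by
+// "ldr rd, [S0, #0x345]".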
+void ARMAssembler::dataTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+ if (offset >= 0) {
+ if (offset <= 0xfff)
+ dtrUp(transferType, srcDst, base, offset);
+ else if (offset <= 0xfffff) {
+ add(ARMRegisters::S0, base, Op2Immediate | (offset >> 12) | (10 << 8));
+ dtrUp(transferType, srcDst, ARMRegisters::S0, (offset & 0xfff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ } else {
+ if (offset >= -0xfff)
+ dtrDown(transferType, srcDst, base, -offset);
+ else if (offset >= -0xfffff) {
+ sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 12) | (10 << 8));
+ dtrDown(transferType, srcDst, ARMRegisters::S0, (-offset & 0xfff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ }
+}
+
+void ARMAssembler::baseIndexTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ ASSERT(scale >= 0 && scale <= 3);
+ ARMWord op2 = lsl(index, scale);
+
+ if (!offset) {
+ dtrUpRegister(transferType, srcDst, base, op2);
+ return;
+ }
+
+ if (offset <= 0xfffff && offset >= -0xfffff) {
+ add(ARMRegisters::S0, base, op2);
+ dataTransfer32(transferType, srcDst, ARMRegisters::S0, offset);
+ return;
+ }
+
+ moveImm(offset, ARMRegisters::S0);
+ add(ARMRegisters::S0, ARMRegisters::S0, op2);
+ dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+}
+
+void ARMAssembler::dataTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+ if (offset >= 0) {
+ if (offset <= 0xff)
+ halfDtrUp(transferType, srcDst, base, getOp2Half(offset));
+ else if (offset <= 0xffff) {
+ add(ARMRegisters::S0, base, Op2Immediate | (offset >> 8) | (12 << 8));
+ halfDtrUp(transferType, srcDst, ARMRegisters::S0, getOp2Half(offset & 0xff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ } else {
+ if (offset >= -0xff)
+ halfDtrDown(transferType, srcDst, base, getOp2Half(-offset));
+ else if (offset >= -0xffff) {
+ sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 8) | (12 << 8));
+ halfDtrDown(transferType, srcDst, ARMRegisters::S0, getOp2Half(-offset & 0xff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ }
+}
+
+void ARMAssembler::baseIndexTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ if (!scale && !offset) {
+ halfDtrUpRegister(transferType, srcDst, base, index);
+ return;
+ }
+
+ ARMWord op2 = lsl(index, scale);
+
+ if (offset <= 0xffff && offset >= -0xffff) {
+ add(ARMRegisters::S0, base, op2);
+ dataTransfer16(transferType, srcDst, ARMRegisters::S0, offset);
+ return;
+ }
+
+ moveImm(offset, ARMRegisters::S0);
+ add(ARMRegisters::S0, ARMRegisters::S0, op2);
+ halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+}
+
+void ARMAssembler::dataTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, int32_t offset)
+{
+ // VFP cannot directly access memory that is not four-byte-aligned
+ if (!(offset & 0x3)) {
+ if (offset <= 0x3ff && offset >= 0) {
+ doubleDtrUp(transferType, srcDst, base, offset >> 2);
+ return;
+ }
+ if (offset <= 0x3ffff && offset >= 0) {
+ add(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
+ doubleDtrUp(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+ return;
+ }
+ offset = -offset;
+
+ if (offset <= 0x3ff && offset >= 0) {
+ doubleDtrDown(transferType, srcDst, base, offset >> 2);
+ return;
+ }
+ if (offset <= 0x3ffff && offset >= 0) {
+ sub(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
+ doubleDtrDown(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+ return;
+ }
+ offset = -offset;
+ }
+
+ moveImm(offset, ARMRegisters::S0);
+ add(ARMRegisters::S0, ARMRegisters::S0, base);
+ doubleDtrUp(transferType, srcDst, ARMRegisters::S0, 0);
+}
+
+void ARMAssembler::baseIndexTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ add(ARMRegisters::S1, base, lsl(index, scale));
+ dataTransferFloat(transferType, srcDst, ARMRegisters::S1, offset);
+}
+
+void ARMAssembler::prepareExecutableCopy(void* to)
+{
+    // 64-bit alignment is required for the next constant pool and for the JIT code as well.
+ m_buffer.flushWithoutBarrier(true);
+ if (!m_buffer.isAligned(8))
+ bkpt(0);
+
+ char* data = reinterpret_cast<char*>(m_buffer.data());
+ ptrdiff_t delta = reinterpret_cast<char*>(to) - data;
+
+ for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+        // The last bit is set if the constant must be placed in the constant pool.
+ int pos = (iter->m_offset) & (~0x1);
+ ARMWord* ldrAddr = reinterpret_cast_ptr<ARMWord*>(data + pos);
+ ARMWord* addr = getLdrImmAddress(ldrAddr);
+ if (*addr != InvalidBranchTarget) {
+ if (!(iter->m_offset & 1)) {
+ intptr_t difference = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetchOffset);
+
+ if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
+ *ldrAddr = B | getConditionalField(*ldrAddr) | (difference & BranchOffsetMask);
+ continue;
+ }
+ }
+ *addr = reinterpret_cast<ARMWord>(data + delta + *addr);
+ }
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
diff --git a/Source/JavaScriptCore/assembler/ARMAssembler.h b/Source/JavaScriptCore/assembler/ARMAssembler.h
new file mode 100644
index 000000000..b314ea690
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/ARMAssembler.h
@@ -0,0 +1,1187 @@
+/*
+ * Copyright (C) 2009, 2010 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "AssemblerBufferWithConstantPool.h"
+#include "JITCompilationEffort.h"
+#include <wtf/Assertions.h>
+namespace JSC {
+
+ typedef uint32_t ARMWord;
+
+ #define FOR_EACH_CPU_REGISTER(V) \
+ FOR_EACH_CPU_GPREGISTER(V) \
+ FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ FOR_EACH_CPU_FPREGISTER(V)
+
+ #define FOR_EACH_CPU_GPREGISTER(V) \
+ V(void*, r0) \
+ V(void*, r1) \
+ V(void*, r2) \
+ V(void*, r3) \
+ V(void*, r4) \
+ V(void*, r5) \
+ V(void*, r6) \
+ V(void*, r7) \
+ V(void*, r8) \
+ V(void*, r9) \
+ V(void*, r10) \
+ V(void*, fp) \
+ V(void*, ip) \
+ V(void*, sp) \
+ V(void*, lr) \
+ V(void*, pc) \
+
+ #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ V(void*, apsr) \
+ V(void*, fpscr) \
+
+ #define FOR_EACH_CPU_FPREGISTER(V) \
+ V(double, d0) \
+ V(double, d1) \
+ V(double, d2) \
+ V(double, d3) \
+ V(double, d4) \
+ V(double, d5) \
+ V(double, d6) \
+ V(double, d7) \
+ V(double, d8) \
+ V(double, d9) \
+ V(double, d10) \
+ V(double, d11) \
+ V(double, d12) \
+ V(double, d13) \
+ V(double, d14) \
+ V(double, d15) \
+ V(double, d16) \
+ V(double, d17) \
+ V(double, d18) \
+ V(double, d19) \
+ V(double, d20) \
+ V(double, d21) \
+ V(double, d22) \
+ V(double, d23) \
+ V(double, d24) \
+ V(double, d25) \
+ V(double, d26) \
+ V(double, d27) \
+ V(double, d28) \
+ V(double, d29) \
+ V(double, d30) \
+ V(double, d31) \
+
+ namespace ARMRegisters {
+
+ typedef enum {
+ #define DECLARE_REGISTER(_type, _regName) _regName,
+ FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+
+ // Pseudonyms for some of the registers.
+ S0 = r6,
+ r11 = fp, // frame pointer
+ r12 = ip, S1 = ip,
+ r13 = sp,
+ r14 = lr,
+ r15 = pc
+ } RegisterID;
+
+ typedef enum {
+ #define DECLARE_REGISTER(_type, _regName) _regName,
+ FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+
+ // Pseudonyms for some of the registers.
+ SD0 = d7, /* Same as thumb assembler. */
+ } FPRegisterID;
+
+ } // namespace ARMRegisters
+
+ class ARMAssembler {
+ public:
+ typedef ARMRegisters::RegisterID RegisterID;
+ typedef ARMRegisters::FPRegisterID FPRegisterID;
+ typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer;
+ typedef SegmentedVector<AssemblerLabel, 64> Jumps;
+
+ ARMAssembler()
+ : m_indexOfTailOfLastWatchpoint(1)
+ {
+ }
+
+ ARMBuffer& buffer() { return m_buffer; }
+
+ static RegisterID firstRegister() { return ARMRegisters::r0; }
+ static RegisterID lastRegister() { return ARMRegisters::r15; }
+
+ static FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
+ static FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
+
+ // ARM conditional constants
+ typedef enum {
+ EQ = 0x00000000, // Zero / Equal.
+ NE = 0x10000000, // Non-zero / Not equal.
+ CS = 0x20000000, // Unsigned higher or same.
+ CC = 0x30000000, // Unsigned lower.
+ MI = 0x40000000, // Negative.
+ PL = 0x50000000, // Positive or zero.
+ VS = 0x60000000, // Overflowed.
+ VC = 0x70000000, // Not overflowed.
+ HI = 0x80000000, // Unsigned higher.
+ LS = 0x90000000, // Unsigned lower or same.
+ GE = 0xa0000000, // Signed greater than or equal.
+ LT = 0xb0000000, // Signed less than.
+ GT = 0xc0000000, // Signed greater than.
+ LE = 0xd0000000, // Signed less than or equal.
+ AL = 0xe0000000 // Unconditional / Always execute.
+ } Condition;
+
+ // ARM instruction constants
+ enum {
+ AND = (0x0 << 21),
+ EOR = (0x1 << 21),
+ SUB = (0x2 << 21),
+ RSB = (0x3 << 21),
+ ADD = (0x4 << 21),
+ ADC = (0x5 << 21),
+ SBC = (0x6 << 21),
+ RSC = (0x7 << 21),
+ TST = (0x8 << 21),
+ TEQ = (0x9 << 21),
+ CMP = (0xa << 21),
+ CMN = (0xb << 21),
+ ORR = (0xc << 21),
+ MOV = (0xd << 21),
+ BIC = (0xe << 21),
+ MVN = (0xf << 21),
+ MUL = 0x00000090,
+ MULL = 0x00c00090,
+ VMOV_F64 = 0x0eb00b40,
+ VADD_F64 = 0x0e300b00,
+ VDIV_F64 = 0x0e800b00,
+ VSUB_F64 = 0x0e300b40,
+ VMUL_F64 = 0x0e200b00,
+ VCMP_F64 = 0x0eb40b40,
+ VSQRT_F64 = 0x0eb10bc0,
+ VABS_F64 = 0x0eb00bc0,
+ VNEG_F64 = 0x0eb10b40,
+ STMDB = 0x09200000,
+ LDMIA = 0x08b00000,
+ B = 0x0a000000,
+ BL = 0x0b000000,
+ BX = 0x012fff10,
+ VMOV_VFP64 = 0x0c400a10,
+ VMOV_ARM64 = 0x0c500a10,
+ VMOV_VFP32 = 0x0e000a10,
+ VMOV_ARM32 = 0x0e100a10,
+ VCVT_F64_S32 = 0x0eb80bc0,
+ VCVT_S32_F64 = 0x0ebd0bc0,
+ VCVT_U32_F64 = 0x0ebc0bc0,
+ VCVT_F32_F64 = 0x0eb70bc0,
+ VCVT_F64_F32 = 0x0eb70ac0,
+ VMRS_APSR = 0x0ef1fa10,
+ CLZ = 0x016f0f10,
+ BKPT = 0xe1200070,
+ BLX = 0x012fff30,
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ MOVW = 0x03000000,
+ MOVT = 0x03400000,
+#endif
+ NOP = 0xe1a00000,
+ DMB_SY = 0xf57ff05f,
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+ SDIV = 0x0710f010,
+ UDIV = 0x0730f010,
+#endif
+ };
+
+ enum {
+ Op2Immediate = (1 << 25),
+ ImmediateForHalfWordTransfer = (1 << 22),
+ Op2InvertedImmediate = (1 << 26),
+ SetConditionalCodes = (1 << 20),
+ Op2IsRegisterArgument = (1 << 25),
+ // Data transfer flags.
+ DataTransferUp = (1 << 23),
+ DataTransferWriteBack = (1 << 21),
+ DataTransferPostUpdate = (1 << 24),
+ DataTransferLoad = (1 << 20),
+ ByteDataTransfer = (1 << 22),
+ };
+
+ enum DataTransferTypeA {
+ LoadUint32 = 0x05000000 | DataTransferLoad,
+ LoadUint8 = 0x05400000 | DataTransferLoad,
+ StoreUint32 = 0x05000000,
+ StoreUint8 = 0x05400000,
+ };
+
+ enum DataTransferTypeB {
+ LoadUint16 = 0x010000b0 | DataTransferLoad,
+ LoadInt16 = 0x010000f0 | DataTransferLoad,
+ LoadInt8 = 0x010000d0 | DataTransferLoad,
+ StoreUint16 = 0x010000b0,
+ };
+
+ enum DataTransferTypeFloat {
+ LoadFloat = 0x0d000a00 | DataTransferLoad,
+ LoadDouble = 0x0d000b00 | DataTransferLoad,
+ StoreFloat = 0x0d000a00,
+ StoreDouble = 0x0d000b00,
+ };
+
+ // Masks of ARM instructions
+ enum {
+ BranchOffsetMask = 0x00ffffff,
+ ConditionalFieldMask = 0xf0000000,
+ DataTransferOffsetMask = 0xfff,
+ };
+
+ enum {
+ MinimumBranchOffsetDistance = -0x00800000,
+ MaximumBranchOffsetDistance = 0x007fffff,
+ };
+
+ enum {
+ padForAlign8 = 0x00,
+ padForAlign16 = 0x0000,
+ padForAlign32 = 0xe12fff7f // 'bkpt 0xffff' instruction.
+ };
+
+ static const ARMWord InvalidImmediate = 0xf0000000;
+ static const ARMWord InvalidBranchTarget = 0xffffffff;
+ static const int DefaultPrefetchOffset = 2;
+
+ static const ARMWord BlxInstructionMask = 0x012fff30;
+ static const ARMWord LdrOrAddInstructionMask = 0x0ff00000;
+ static const ARMWord LdrPcImmediateInstructionMask = 0x0f7f0000;
+
+ static const ARMWord AddImmediateInstruction = 0x02800000;
+ static const ARMWord BlxInstruction = 0x012fff30;
+ static const ARMWord LdrImmediateInstruction = 0x05900000;
+ static const ARMWord LdrPcImmediateInstruction = 0x051f0000;
+
+        // Instruction formatting
+
+ void emitInstruction(ARMWord op, int rd, int rn, ARMWord op2)
+ {
+ ASSERT(((op2 & ~Op2Immediate) <= 0xfff) || (((op2 & ~ImmediateForHalfWordTransfer) <= 0xfff)));
+ m_buffer.putInt(op | RN(rn) | RD(rd) | op2);
+ }
+
+ void emitDoublePrecisionInstruction(ARMWord op, int dd, int dn, int dm)
+ {
+ ASSERT((dd >= 0 && dd <= 31) && (dn >= 0 && dn <= 31) && (dm >= 0 && dm <= 31));
+ m_buffer.putInt(op | ((dd & 0xf) << 12) | ((dd & 0x10) << (22 - 4))
+ | ((dn & 0xf) << 16) | ((dn & 0x10) << (7 - 4))
+ | (dm & 0xf) | ((dm & 0x10) << (5 - 4)));
+ }
+
+ void emitSinglePrecisionInstruction(ARMWord op, int sd, int sn, int sm)
+ {
+ ASSERT((sd >= 0 && sd <= 31) && (sn >= 0 && sn <= 31) && (sm >= 0 && sm <= 31));
+ m_buffer.putInt(op | ((sd >> 1) << 12) | ((sd & 0x1) << 22)
+ | ((sn >> 1) << 16) | ((sn & 0x1) << 7)
+ | (sm >> 1) | ((sm & 0x1) << 5));
+ }
+
+ void bitAnd(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | AND, rd, rn, op2);
+ }
+
+ void bitAnds(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | AND | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void eor(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | EOR, rd, rn, op2);
+ }
+
+ void eors(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | EOR | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void sub(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SUB, rd, rn, op2);
+ }
+
+ void subs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SUB | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void rsb(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSB, rd, rn, op2);
+ }
+
+ void rsbs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSB | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void add(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADD, rd, rn, op2);
+ }
+
+ void adds(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADD | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void adc(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADC, rd, rn, op2);
+ }
+
+ void adcs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void sbc(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SBC, rd, rn, op2);
+ }
+
+ void sbcs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SBC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void rsc(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSC, rd, rn, op2);
+ }
+
+ void rscs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void tst(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | TST | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void teq(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | TEQ | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void cmp(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | CMP | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void cmn(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | CMN | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void orr(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ORR, rd, rn, op2);
+ }
+
+ void orrs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ORR | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void mov(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MOV, rd, ARMRegisters::r0, op2);
+ }
+
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ void movw(int rd, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT((op2 | 0xf0fff) == 0xf0fff);
+ m_buffer.putInt(toARMWord(cc) | MOVW | RD(rd) | op2);
+ }
+
+ void movt(int rd, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT((op2 | 0xf0fff) == 0xf0fff);
+ m_buffer.putInt(toARMWord(cc) | MOVT | RD(rd) | op2);
+ }
+#endif
+
+ void movs(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MOV | SetConditionalCodes, rd, ARMRegisters::r0, op2);
+ }
+
+ void bic(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BIC, rd, rn, op2);
+ }
+
+ void bics(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BIC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void mvn(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MVN, rd, ARMRegisters::r0, op2);
+ }
+
+ void mvns(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MVN | SetConditionalCodes, rd, ARMRegisters::r0, op2);
+ }
+
+ void mul(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | MUL | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void muls(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | MUL | SetConditionalCodes | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void mull(int rdhi, int rdlo, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
+ }
+
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+ template<int datasize>
+ void sdiv(int rd, int rn, int rm, Condition cc = AL)
+ {
+ static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(rm != ARMRegisters::pc);
+ m_buffer.putInt(toARMWord(cc) | SDIV | RN(rd) | RM(rn) | RS(rm));
+ }
+
+ void udiv(int rd, int rn, int rm, Condition cc = AL)
+ {
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(rm != ARMRegisters::pc);
+ m_buffer.putInt(toARMWord(cc) | UDIV | RN(rd) | RM(rn) | RS(rm));
+ }
+#endif
+
+ void vmov_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VMOV_F64, dd, 0, dm);
+ }
+
+ void vadd_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VADD_F64, dd, dn, dm);
+ }
+
+ void vdiv_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VDIV_F64, dd, dn, dm);
+ }
+
+ void vsub_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VSUB_F64, dd, dn, dm);
+ }
+
+ void vmul_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VMUL_F64, dd, dn, dm);
+ }
+
+ void vcmp_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCMP_F64, dd, 0, dm);
+ }
+
+ void vsqrt_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VSQRT_F64, dd, 0, dm);
+ }
+
+ void vabs_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VABS_F64, dd, 0, dm);
+ }
+
+ void vneg_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VNEG_F64, dd, 0, dm);
+ }
+
+ void ldrImmediate(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(toARMWord(cc) | LoadUint32 | DataTransferUp | RN(ARMRegisters::pc) | RD(rd), imm, true);
+ }
+
+ void ldrUniqueImmediate(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(toARMWord(cc) | LoadUint32 | DataTransferUp | RN(ARMRegisters::pc) | RD(rd), imm);
+ }
+
+ void dtrUp(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rb, op2);
+ }
+
+ void dtrUpRegister(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp | Op2IsRegisterArgument, rd, rb, rm);
+ }
+
+ void dtrDown(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType, rd, rb, op2);
+ }
+
+ void dtrDownRegister(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | Op2IsRegisterArgument, rd, rb, rm);
+ }
+
+ void halfDtrUp(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rb, op2);
+ }
+
+ void halfDtrUpRegister(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rn, rm);
+ }
+
+ void halfDtrDown(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType, rd, rb, op2);
+ }
+
+ void halfDtrDownRegister(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType, rd, rn, rm);
+ }
+
+ void doubleDtrUp(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT(op2 <= 0xff && rd <= 15);
+ /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
+ m_buffer.putInt(toARMWord(cc) | DataTransferUp | type | (rd << 12) | RN(rb) | op2);
+ }
+
+ void doubleDtrDown(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT(op2 <= 0xff && rd <= 15);
+ /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
+ m_buffer.putInt(toARMWord(cc) | type | (rd << 12) | RN(rb) | op2);
+ }
+
+ void push(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(toARMWord(cc) | StoreUint32 | DataTransferWriteBack | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+ }
+
+ void pop(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(toARMWord(cc) | (LoadUint32 ^ DataTransferPostUpdate) | DataTransferUp | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+ }
+
+ inline void poke(int reg, Condition cc = AL)
+ {
+ dtrDown(StoreUint32, ARMRegisters::sp, 0, reg, cc);
+ }
+
+ inline void peek(int reg, Condition cc = AL)
+ {
+ dtrUp(LoadUint32, reg, ARMRegisters::sp, 0, cc);
+ }
+
+ void vmov_vfp64(int sm, int rt, int rt2, Condition cc = AL)
+ {
+ ASSERT(rt != rt2);
+ m_buffer.putInt(toARMWord(cc) | VMOV_VFP64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
+ }
+
+ void vmov_arm64(int rt, int rt2, int sm, Condition cc = AL)
+ {
+ ASSERT(rt != rt2);
+ m_buffer.putInt(toARMWord(cc) | VMOV_ARM64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
+ }
+
+ void vmov_vfp32(int sn, int rt, Condition cc = AL)
+ {
+ ASSERT(rt <= 15);
+ emitSinglePrecisionInstruction(toARMWord(cc) | VMOV_VFP32, rt << 1, sn, 0);
+ }
+
+ void vmov_arm32(int rt, int sn, Condition cc = AL)
+ {
+ ASSERT(rt <= 15);
+ emitSinglePrecisionInstruction(toARMWord(cc) | VMOV_ARM32, rt << 1, sn, 0);
+ }
+
+ void vcvt_f64_s32(int dd, int sm, Condition cc = AL)
+ {
+ ASSERT(!(sm & 0x1)); // sm must be divisible by 2
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F64_S32, dd, 0, (sm >> 1));
+ }
+
+ void vcvt_s32_f64(int sd, int dm, Condition cc = AL)
+ {
+ ASSERT(!(sd & 0x1)); // sd must be divisible by 2
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_S32_F64, (sd >> 1), 0, dm);
+ }
+
+ void vcvt_u32_f64(int sd, int dm, Condition cc = AL)
+ {
+ ASSERT(!(sd & 0x1)); // sd must be divisible by 2
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_U32_F64, (sd >> 1), 0, dm);
+ }
+
+ void vcvt_f64_f32(int dd, int sm, Condition cc = AL)
+ {
+ ASSERT(dd <= 15 && sm <= 15);
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F64_F32, dd, 0, sm);
+ }
+
+ void vcvt_f32_f64(int dd, int sm, Condition cc = AL)
+ {
+ ASSERT(dd <= 15 && sm <= 15);
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F32_F64, dd, 0, sm);
+ }
+
+ void vmrs_apsr(Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | VMRS_APSR);
+ }
+
+ void clz(int rd, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | CLZ | RD(rd) | RM(rm));
+ }
+
+ void bkpt(ARMWord value)
+ {
+ m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf));
+ }
+
+ void nop()
+ {
+ m_buffer.putInt(NOP);
+ }
+
+ void dmbSY()
+ {
+ m_buffer.putInt(DMB_SY);
+ }
+
+ void bx(int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BX, 0, 0, RM(rm));
+ }
+
+ AssemblerLabel blx(int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BLX, 0, 0, RM(rm));
+ return m_buffer.label();
+ }
+
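+        // The helpers below build the shifted-register form of op2. For
+        // example, lsl(ARMRegisters::r1, 2) encodes the operand "r1, LSL #2".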
+ static ARMWord lsl(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x00;
+ }
+
+ static ARMWord lsr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x20;
+ }
+
+ static ARMWord asr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x40;
+ }
+
+ static ARMWord lslRegister(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x10;
+ }
+
+ static ARMWord lsrRegister(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x30;
+ }
+
+ static ARMWord asrRegister(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x50;
+ }
+
+ // General helpers
+
+ size_t codeSize() const
+ {
+ return m_buffer.codeSize();
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ m_buffer.ensureSpace(insnSpace, constSpace);
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_buffer.sizeOfConstantPool();
+ }
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ m_buffer.ensureSpaceForAnyInstruction();
+ return m_buffer.label();
+ }
+
+ AssemblerLabel labelForWatchpoint()
+ {
+ m_buffer.ensureSpaceForAnyInstruction(maxJumpReplacementSize() / sizeof(ARMWord));
+ AssemblerLabel result = m_buffer.label();
+ if (result.m_offset != (m_indexOfTailOfLastWatchpoint - maxJumpReplacementSize()))
+ result = label();
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return label();
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = labelIgnoringWatchpoints();
+ while (result.m_offset + 1 < m_indexOfTailOfLastWatchpoint) {
+ nop();
+                // The space for these instructions is ensured by labelForWatchpoint.
+ result = m_buffer.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_buffer.isAligned(alignment))
+ mov(ARMRegisters::r0, ARMRegisters::r0);
+
+ return label();
+ }
+
+ AssemblerLabel loadBranchTarget(int rd, Condition cc = AL, int useConstantPool = 0)
+ {
+ ensureSpace(sizeof(ARMWord), sizeof(ARMWord));
+ m_jumps.append(m_buffer.codeSize() | (useConstantPool & 0x1));
+ ldrUniqueImmediate(rd, InvalidBranchTarget, cc);
+ return m_buffer.label();
+ }
+
+ AssemblerLabel jmp(Condition cc = AL, int useConstantPool = 0)
+ {
+ return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
+ }
+
+ void prepareExecutableCopy(void* to);
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ // DFG assembly helpers for moving data between fp and registers.
+ void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn)
+ {
+ vmov_arm64(rd1, rd2, rn);
+ }
+
+ void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2)
+ {
+ vmov_vfp64(rd, rn1, rn2);
+ }
+
+ // Patching helpers
+
+ static ARMWord* getLdrImmAddress(ARMWord* insn)
+ {
+ // Check for call
+ if ((*insn & LdrPcImmediateInstructionMask) != LdrPcImmediateInstruction) {
+ // Must be BLX
+ ASSERT((*insn & BlxInstructionMask) == BlxInstruction);
+ insn--;
+ }
+
+ // Must be an ldr ..., [pc +/- imm]
+ ASSERT((*insn & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+
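+        // The PC reads two instructions (8 bytes) ahead of the executing
+        // instruction, hence the DefaultPrefetchOffset of 2 words.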
+ ARMWord addr = reinterpret_cast<ARMWord>(insn) + DefaultPrefetchOffset * sizeof(ARMWord);
+ if (*insn & DataTransferUp)
+ return reinterpret_cast<ARMWord*>(addr + (*insn & DataTransferOffsetMask));
+ return reinterpret_cast<ARMWord*>(addr - (*insn & DataTransferOffsetMask));
+ }
+
+ static ARMWord* getLdrImmAddressOnPool(ARMWord* insn, uint32_t* constPool)
+ {
+ // Must be an ldr ..., [pc +/- imm]
+ ASSERT((*insn & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+
+ if (*insn & 0x1)
+ return reinterpret_cast<ARMWord*>(constPool + ((*insn & DataTransferOffsetMask) >> 1));
+ return getLdrImmAddress(insn);
+ }
+
+ static void patchPointerInternal(intptr_t from, void* to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+ ARMWord* addr = getLdrImmAddress(insn);
+ *addr = reinterpret_cast<ARMWord>(to);
+ }
+
+ static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
+ {
+ value = (value << 1) + 1;
+ ASSERT(!(value & ~DataTransferOffsetMask));
+ return (load & ~DataTransferOffsetMask) | value;
+ }
+
+ static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+
+ // Read pointers
+ static void* readPointer(void* from)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(from);
+ ARMWord* address = getLdrImmAddress(instruction);
+ return *reinterpret_cast<void**>(address);
+ }
+
+ // Patch pointers
+
+ static void linkPointer(void* code, AssemblerLabel from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
+ }
+
+ static void repatchInt32(void* where, int32_t to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(where), reinterpret_cast<void*>(to));
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(where);
+ ASSERT((*instruction & 0x0f700000) == LoadUint32);
+ if (value >= 0)
+ *instruction = (*instruction & 0xff7ff000) | DataTransferUp | value;
+ else
+ *instruction = (*instruction & 0xff7ff000) | -value;
+ cacheFlush(instruction, sizeof(ARMWord));
+ }
+
+ static void repatchPointer(void* from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(from), to);
+ }
+
+ // Linkers
+ static intptr_t getAbsoluteJumpAddress(void* base, int offset = 0)
+ {
+ return reinterpret_cast<intptr_t>(base) + offset - sizeof(ARMWord);
+ }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(getAbsoluteJumpAddress(m_buffer.data(), from.m_offset));
+ ARMWord* addr = getLdrImmAddressOnPool(insn, m_buffer.poolAddress());
+ *addr = toARMWord(to.m_offset);
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(from), to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(from), to);
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ return reinterpret_cast<void*>(readPointer(reinterpret_cast<void*>(getAbsoluteJumpAddress(from))));
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+ intptr_t difference = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(instruction) + DefaultPrefetchOffset * sizeof(ARMWord));
+
+ if (!(difference & 1)) {
+ difference >>= 2;
+ if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
+ // Direct branch.
+ instruction[0] = B | AL | (difference & BranchOffsetMask);
+ cacheFlush(instruction, sizeof(ARMWord));
+ return;
+ }
+ }
+
+ // Load target.
+ instruction[0] = LoadUint32 | AL | RN(ARMRegisters::pc) | RD(ARMRegisters::pc) | 4;
+ instruction[1] = reinterpret_cast<ARMWord>(to);
+ cacheFlush(instruction, sizeof(ARMWord) * 2);
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return sizeof(ARMWord) * 2;
+ }
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+ cacheFlush(instruction, sizeof(ARMWord));
+
+ ASSERT((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction || (*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction);
+ if ((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction) {
+ *instruction = (*instruction & ~LdrOrAddInstructionMask) | LdrImmediateInstruction;
+ cacheFlush(instruction, sizeof(ARMWord));
+ }
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+ cacheFlush(instruction, sizeof(ARMWord));
+
+ ASSERT((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction || (*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction);
+ if ((*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction) {
+ *instruction = (*instruction & ~LdrOrAddInstructionMask) | AddImmediateInstruction;
+ cacheFlush(instruction, sizeof(ARMWord));
+ }
+ }
+
+ static void revertBranchPtrWithPatch(void* instructionStart, RegisterID rn, ARMWord imm)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+
+ ASSERT((instruction[2] & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+ instruction[0] = toARMWord(AL) | ((instruction[2] & 0x0fff0fff) + sizeof(ARMWord)) | RD(ARMRegisters::S1);
+ *getLdrImmAddress(instruction) = imm;
+ instruction[1] = toARMWord(AL) | CMP | SetConditionalCodes | RN(rn) | RM(ARMRegisters::S1);
+ cacheFlush(instruction, 2 * sizeof(ARMWord));
+ }
+
+ // Address operations
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+ }
+
+ // Address differences
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ return call.m_offset;
+ }
+
+ // Handle immediates
+
+ static ARMWord getOp2(ARMWord imm);
+
+ // Fast case if imm is known to be between 0 and 0xff
+ static ARMWord getOp2Byte(ARMWord imm)
+ {
+ ASSERT(imm <= 0xff);
+ return Op2Immediate | imm;
+ }
+
+ static ARMWord getOp2Half(ARMWord imm)
+ {
+ ASSERT(imm <= 0xff);
+ return ImmediateForHalfWordTransfer | (imm & 0x0f) | ((imm & 0xf0) << 4);
+ }
+
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ static ARMWord getImm16Op2(ARMWord imm)
+ {
+ if (imm <= 0xffff)
+ return (imm & 0xf000) << 4 | (imm & 0xfff);
+ return InvalidImmediate;
+ }
+#endif
+ ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
+ void moveImm(ARMWord imm, int dest);
+ ARMWord encodeComplexImm(ARMWord imm, int dest);
+
+ // Memory load/store helpers
+
+ void dataTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+ void dataTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+ void dataTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+
+        // Constant pool handlers
+
+ static ARMWord placeConstantPoolBarrier(int offset)
+ {
+ offset = (offset - sizeof(ARMWord)) >> 2;
+ ASSERT((offset <= MaximumBranchOffsetDistance && offset >= MinimumBranchOffsetDistance));
+ return AL | B | (offset & BranchOffsetMask);
+ }
+
+#if OS(LINUX) && COMPILER(GCC_OR_CLANG)
+ static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+ {
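+            // r7 = 0xf0002 selects the ARM-private __ARM_NR_cacheflush
+            // syscall; r0 and r1 delimit the range and r2 must be zero.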
+ asm volatile(
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, #0xf0000\n"
+ "add r7, r7, #0x2\n"
+ "mov r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (begin), "r" (end)
+ : "r0", "r1", "r2");
+ }
+#endif
+
+ static void cacheFlush(void* code, size_t size)
+ {
+#if OS(LINUX) && COMPILER(GCC_OR_CLANG)
+ size_t page = pageSize();
+ uintptr_t current = reinterpret_cast<uintptr_t>(code);
+ uintptr_t end = current + size;
+ uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+ if (end <= firstPageEnd) {
+ linuxPageFlush(current, end);
+ return;
+ }
+
+ linuxPageFlush(current, firstPageEnd);
+
+ for (current = firstPageEnd; current + page < end; current += page)
+ linuxPageFlush(current, current + page);
+
+ linuxPageFlush(current, end);
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+ }
+
+ private:
+ static ARMWord RM(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg;
+ }
+
+ static ARMWord RS(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 8;
+ }
+
+ static ARMWord RD(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 12;
+ }
+
+ static ARMWord RN(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 16;
+ }
+
+ static ARMWord getConditionalField(ARMWord i)
+ {
+ return i & ConditionalFieldMask;
+ }
+
+ static ARMWord toARMWord(Condition cc)
+ {
+ return static_cast<ARMWord>(cc);
+ }
+
+ static ARMWord toARMWord(uint32_t u)
+ {
+ return static_cast<ARMWord>(u);
+ }
+
+ int genInt(int reg, ARMWord imm, bool positive);
+
+ ARMBuffer m_buffer;
+ Jumps m_jumps;
+ uint32_t m_indexOfTailOfLastWatchpoint;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#endif // ARMAssembler_h
diff --git a/Source/JavaScriptCore/assembler/ARMv7Assembler.h b/Source/JavaScriptCore/assembler/ARMv7Assembler.h
new file mode 100644
index 000000000..1d731f98b
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -0,0 +1,2872 @@
+/*
+ * Copyright (C) 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#include "AssemblerBuffer.h"
+#include <limits.h>
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+#include <stdint.h>
+
+namespace JSC {
+
+namespace ARMRegisters {
+
+ #define FOR_EACH_CPU_REGISTER(V) \
+ FOR_EACH_CPU_GPREGISTER(V) \
+ FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ FOR_EACH_CPU_FPREGISTER(V)
+
+    // The following are defined as pairs of:
+    // 1. the type of the storage needed to save the register value by the JIT probe, and
+    // 2. the name of the register.
+ #define FOR_EACH_CPU_GPREGISTER(V) \
+ V(void*, r0) \
+ V(void*, r1) \
+ V(void*, r2) \
+ V(void*, r3) \
+ V(void*, r4) \
+ V(void*, r5) \
+ V(void*, r6) \
+ V(void*, r7) \
+ V(void*, r8) \
+ V(void*, r9) \
+ V(void*, r10) \
+ V(void*, r11) \
+ V(void*, ip) \
+ V(void*, sp) \
+ V(void*, lr) \
+ V(void*, pc)
+
+ #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ V(void*, apsr) \
+ V(void*, fpscr) \
+
+ #define FOR_EACH_CPU_FPREGISTER(V) \
+ V(double, d0) \
+ V(double, d1) \
+ V(double, d2) \
+ V(double, d3) \
+ V(double, d4) \
+ V(double, d5) \
+ V(double, d6) \
+ V(double, d7) \
+ V(double, d8) \
+ V(double, d9) \
+ V(double, d10) \
+ V(double, d11) \
+ V(double, d12) \
+ V(double, d13) \
+ V(double, d14) \
+ V(double, d15) \
+ V(double, d16) \
+ V(double, d17) \
+ V(double, d18) \
+ V(double, d19) \
+ V(double, d20) \
+ V(double, d21) \
+ V(double, d22) \
+ V(double, d23) \
+ V(double, d24) \
+ V(double, d25) \
+ V(double, d26) \
+ V(double, d27) \
+ V(double, d28) \
+ V(double, d29) \
+ V(double, d30) \
+ V(double, d31)
+
+ typedef enum {
+ #define DECLARE_REGISTER(_type, _regName) _regName,
+ FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+
+ fp = r7, // frame pointer
+ sb = r9, // static base
+ sl = r10, // stack limit
+ r12 = ip,
+ r13 = sp,
+ r14 = lr,
+ r15 = pc
+ } RegisterID;
+
+ typedef enum {
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ s9,
+ s10,
+ s11,
+ s12,
+ s13,
+ s14,
+ s15,
+ s16,
+ s17,
+ s18,
+ s19,
+ s20,
+ s21,
+ s22,
+ s23,
+ s24,
+ s25,
+ s26,
+ s27,
+ s28,
+ s29,
+ s30,
+ s31,
+ } FPSingleRegisterID;
+
+ typedef enum {
+ #define DECLARE_REGISTER(_type, _regName) _regName,
+ FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+ } FPDoubleRegisterID;
+
+ typedef enum {
+ q0,
+ q1,
+ q2,
+ q3,
+ q4,
+ q5,
+ q6,
+ q7,
+ q8,
+ q9,
+ q10,
+ q11,
+ q12,
+ q13,
+ q14,
+ q15,
+ q16,
+ q17,
+ q18,
+ q19,
+ q20,
+ q21,
+ q22,
+ q23,
+ q24,
+ q25,
+ q26,
+ q27,
+ q28,
+ q29,
+ q30,
+ q31,
+ } FPQuadRegisterID;
+
+ inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
+ {
+ ASSERT(reg < d16);
+ return (FPSingleRegisterID)(reg << 1);
+ }
+
+ inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
+ {
+ ASSERT(!(reg & 1));
+ return (FPDoubleRegisterID)(reg >> 1);
+ }
+
+} // namespace ARMRegisters
+
+class ARMv7Assembler;
+class ARMThumbImmediate {
+ friend class ARMv7Assembler;
+
+ typedef uint8_t ThumbImmediateType;
+ static const ThumbImmediateType TypeInvalid = 0;
+ static const ThumbImmediateType TypeEncoded = 1;
+ static const ThumbImmediateType TypeUInt16 = 2;
+
+ typedef union {
+ int16_t asInt;
+ struct {
+ unsigned imm8 : 8;
+ unsigned imm3 : 3;
+ unsigned i : 1;
+ unsigned imm4 : 4;
+ };
+ // If this is an encoded immediate, then it may describe a shift, or a pattern.
+ struct {
+ unsigned shiftValue7 : 7;
+ unsigned shiftAmount : 5;
+ };
+ struct {
+ unsigned immediate : 8;
+ unsigned pattern : 4;
+ };
+ } ThumbImmediateValue;
+
+    // byte0 contains the least significant byte; an array is not used so that client code stays endian-agnostic.
+ typedef union {
+ int32_t asInt;
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+ uint8_t byte3;
+ };
+ } PatternBytes;
+
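+    // Count leading zeros by binary search, halving the window over 16, 8, 4,
+    // 2 and 1 bits. For example, countLeadingZeros(0x00010000) returns 15.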
+ ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
+ {
+        if (value & ~((1 << N) - 1)) /* check whether any of the top N bits (of 2N bits) are set */
+ value >>= N; /* if any were set, lose the bottom N */
+ else /* if none of the top N bits are set, */
+ zeros += N; /* then we have identified N leading zeros */
+ }
+
+ static int32_t countLeadingZeros(uint32_t value)
+ {
+ if (!value)
+ return 32;
+
+ int32_t zeros = 0;
+ countLeadingZerosPartial(value, zeros, 16);
+ countLeadingZerosPartial(value, zeros, 8);
+ countLeadingZerosPartial(value, zeros, 4);
+ countLeadingZerosPartial(value, zeros, 2);
+ countLeadingZerosPartial(value, zeros, 1);
+ return zeros;
+ }
+
+ ARMThumbImmediate()
+ : m_type(TypeInvalid)
+ {
+ m_value.asInt = 0;
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
+ : m_type(type)
+ , m_value(value)
+ {
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
+ : m_type(TypeUInt16)
+ {
+        // Make sure this constructor is only reached with type TypeUInt16;
+        // this extra parameter makes the code a little clearer by making it
+        // explicit at call sites which type is being constructed.
+ ASSERT_UNUSED(type, type == TypeUInt16);
+
+ m_value.asInt = value;
+ }
+
+public:
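+    // A Thumb-2 "modified immediate" is either an 8-bit value rotated right
+    // by 8-31 bits, or an 8-bit value XY replicated as one of the patterns
+    // 0x000000XY, 0x00XY00XY, 0xXY00XY00 or 0xXYXYXYXY.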
+ static ARMThumbImmediate makeEncodedImm(uint32_t value)
+ {
+ ThumbImmediateValue encoding;
+ encoding.asInt = 0;
+
+ // okay, these are easy.
+ if (value < 256) {
+ encoding.immediate = value;
+ encoding.pattern = 0;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ int32_t leadingZeros = countLeadingZeros(value);
+ // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
+ ASSERT(leadingZeros < 24);
+
+ // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
+ // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
+ // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
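+ // For example, 0x2a00 has 18 leading zeros, so rightShiftAmount is 6:
+ // 0x2a00 >> 6 is 0xa8, whose top bit is the implicit one dropped by the
+ // assignment to shiftValue7, and the rotation is 8 + 18 = 26 - the 8-bit
+ // value 0xa8 rotated right by 26 bits reproduces 0x2a00.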
+ int32_t rightShiftAmount = 24 - leadingZeros;
+ if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
+ // Shift the value down to the low byte position. The assign to
+ // shiftValue7 drops the implicit top bit.
+ encoding.shiftValue7 = value >> rightShiftAmount;
+ // The encoded shift amount is the magnitude of a right rotate.
+ encoding.shiftAmount = 8 + leadingZeros;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ PatternBytes bytes;
+ bytes.asInt = value;
+
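+ // The remaining encodable values are byte-replication patterns: e.g.
+ // 0x00ab00ab matches pattern 1, 0xab00ab00 pattern 2 and 0xabababab
+ // pattern 3, each stored as immediate 0xab plus the pattern selector.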
+ if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 3;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 1;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
+ encoding.immediate = bytes.byte1;
+ encoding.pattern = 2;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ return ARMThumbImmediate();
+ }
+
+ static ARMThumbImmediate makeUInt12(int32_t value)
+ {
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : ARMThumbImmediate();
+ }
+
+ static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
+ {
+ // If this is not a 12-bit unsigned int, try making an encoded immediate.
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : makeEncodedImm(value);
+ }
+
+ // The 'make' methods, above, return a !isValid() value if the argument
+ // cannot be represented as the requested type. This method cannot fail,
+ // since any uint16_t argument can always be represented.
+ static ARMThumbImmediate makeUInt16(uint16_t value)
+ {
+ return ARMThumbImmediate(TypeUInt16, value);
+ }
+
+ bool isValid()
+ {
+ return m_type != TypeInvalid;
+ }
+
+ uint16_t asUInt16() const { return m_value.asInt; }
+
+ // These methods rely on the format of encoded byte values.
+ bool isUInt3() { return !(m_value.asInt & 0xfff8); }
+ bool isUInt4() { return !(m_value.asInt & 0xfff0); }
+ bool isUInt5() { return !(m_value.asInt & 0xffe0); }
+ bool isUInt6() { return !(m_value.asInt & 0xffc0); }
+ bool isUInt7() { return !(m_value.asInt & 0xff80); }
+ bool isUInt8() { return !(m_value.asInt & 0xff00); }
+ bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
+ bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
+ bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
+ bool isUInt16() { return m_type == TypeUInt16; }
+ uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
+ uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
+ uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
+ uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
+ uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
+ uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
+ uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
+ uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
+ uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
+ uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
+
+ bool isEncodedImm() { return m_type == TypeEncoded; }
+
+private:
+ ThumbImmediateType m_type;
+ ThumbImmediateValue m_value;
+};
+
+typedef enum {
+ SRType_LSL,
+ SRType_LSR,
+ SRType_ASR,
+ SRType_ROR,
+
+ SRType_RRX = SRType_ROR
+} ARMShiftType;
+
+class ShiftTypeAndAmount {
+ friend class ARMv7Assembler;
+
+public:
+ ShiftTypeAndAmount()
+ {
+ m_u.type = (ARMShiftType)0;
+ m_u.amount = 0;
+ }
+
+ ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
+ {
+ m_u.type = type;
+ m_u.amount = amount & 31;
+ }
+
+ unsigned lo4() { return m_u.lo4; }
+ unsigned hi4() { return m_u.hi4; }
+
+private:
+ union {
+ struct {
+ unsigned lo4 : 4;
+ unsigned hi4 : 4;
+ };
+ struct {
+ unsigned type : 2;
+ unsigned amount : 6;
+ };
+ } m_u;
+};
+
+class ARMv7Assembler {
+public:
+ typedef ARMRegisters::RegisterID RegisterID;
+ typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
+ typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
+ typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
+ typedef FPDoubleRegisterID FPRegisterID;
+
+ static RegisterID firstRegister() { return ARMRegisters::r0; }
+ static RegisterID lastRegister() { return ARMRegisters::r13; }
+
+ static FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
+ static FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
+
+ // (HS, LO, HI, LS) -> (AE, B, A, BE)
+ // (VS, VC) -> (O, NO)
+ typedef enum {
+ ConditionEQ, // Zero / Equal.
+ ConditionNE, // Non-zero / Not equal.
+ ConditionHS, ConditionCS = ConditionHS, // Unsigned higher or same.
+ ConditionLO, ConditionCC = ConditionLO, // Unsigned lower.
+ ConditionMI, // Negative.
+ ConditionPL, // Positive or zero.
+ ConditionVS, // Overflowed.
+ ConditionVC, // Not overflowed.
+ ConditionHI, // Unsigned higher.
+ ConditionLS, // Unsigned lower or same.
+ ConditionGE, // Signed greater than or equal.
+ ConditionLT, // Signed less than.
+ ConditionGT, // Signed greater than.
+ ConditionLE, // Signed less than or equal.
+ ConditionAL, // Unconditional / Always execute.
+ ConditionInvalid
+ } Condition;
+
+#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
+#define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
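+// For example, JumpCondition below packs index 2 with a worst-case size of
+// 12 bytes: JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)) == (12 << 3) | 2,
+// and JUMP_ENUM_SIZE recovers the 12.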
+ enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
+ JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
+ JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
+ JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
+ JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
+ };
+ enum JumpLinkType {
+ LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
+ LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
+ LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
+ LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
+ LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
+ LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
+ LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
+ LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
+ };
+
+ class LinkRecord {
+ public:
+ LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
+ {
+ data.realTypes.m_from = from;
+ data.realTypes.m_to = to;
+ data.realTypes.m_type = type;
+ data.realTypes.m_linkType = LinkInvalid;
+ data.realTypes.m_condition = condition;
+ }
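+ // Copy the union as raw words rather than relying on the default
+ // assignment, presumably to avoid a field-by-field copy of the bitfields.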
+ void operator=(const LinkRecord& other)
+ {
+ data.copyTypes.content[0] = other.data.copyTypes.content[0];
+ data.copyTypes.content[1] = other.data.copyTypes.content[1];
+ data.copyTypes.content[2] = other.data.copyTypes.content[2];
+ }
+ intptr_t from() const { return data.realTypes.m_from; }
+ void setFrom(intptr_t from) { data.realTypes.m_from = from; }
+ intptr_t to() const { return data.realTypes.m_to; }
+ JumpType type() const { return data.realTypes.m_type; }
+ JumpLinkType linkType() const { return data.realTypes.m_linkType; }
+ void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
+ Condition condition() const { return data.realTypes.m_condition; }
+ private:
+ union {
+ struct RealTypes {
+ intptr_t m_from : 31;
+ intptr_t m_to : 31;
+ JumpType m_type : 8;
+ JumpLinkType m_linkType : 8;
+ Condition m_condition : 16;
+ } realTypes;
+ struct CopyTypes {
+ uint32_t content[3];
+ } copyTypes;
+ COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
+ } data;
+ };
+
+ ARMv7Assembler()
+ : m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
+ {
+ }
+
+ AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
+
+private:
+
+ // ARMv7, Appx-A.6.3
+ static bool BadReg(RegisterID reg)
+ {
+ return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
+ }
+
+ uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
+ {
+ uint32_t rdMask = (rdNum >> 1) << highBitsShift;
+ if (rdNum & 1)
+ rdMask |= 1 << lowBitShift;
+ return rdMask;
+ }
+
+ uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
+ {
+ uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
+ if (rdNum & 16)
+ rdMask |= 1 << highBitShift;
+ return rdMask;
+ }
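+
+ // These helpers build the split register fields of VFP encodings: e.g.
+ // d17 (number 0b10001) contributes its low four bits at lowBitsShift
+ // and, since bit 4 is set, one extra bit at highBitShift; single
+ // registers split the other way, separating the low bit from the
+ // upper four.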
+
+ typedef enum {
+ OP_ADD_reg_T1 = 0x1800,
+ OP_SUB_reg_T1 = 0x1A00,
+ OP_ADD_imm_T1 = 0x1C00,
+ OP_SUB_imm_T1 = 0x1E00,
+ OP_MOV_imm_T1 = 0x2000,
+ OP_CMP_imm_T1 = 0x2800,
+ OP_ADD_imm_T2 = 0x3000,
+ OP_SUB_imm_T2 = 0x3800,
+ OP_AND_reg_T1 = 0x4000,
+ OP_EOR_reg_T1 = 0x4040,
+ OP_TST_reg_T1 = 0x4200,
+ OP_RSB_imm_T1 = 0x4240,
+ OP_CMP_reg_T1 = 0x4280,
+ OP_ORR_reg_T1 = 0x4300,
+ OP_MVN_reg_T1 = 0x43C0,
+ OP_ADD_reg_T2 = 0x4400,
+ OP_MOV_reg_T1 = 0x4600,
+ OP_BLX = 0x4700,
+ OP_BX = 0x4700,
+ OP_STR_reg_T1 = 0x5000,
+ OP_STRH_reg_T1 = 0x5200,
+ OP_STRB_reg_T1 = 0x5400,
+ OP_LDRSB_reg_T1 = 0x5600,
+ OP_LDR_reg_T1 = 0x5800,
+ OP_LDRH_reg_T1 = 0x5A00,
+ OP_LDRB_reg_T1 = 0x5C00,
+ OP_LDRSH_reg_T1 = 0x5E00,
+ OP_STR_imm_T1 = 0x6000,
+ OP_LDR_imm_T1 = 0x6800,
+ OP_STRB_imm_T1 = 0x7000,
+ OP_LDRB_imm_T1 = 0x7800,
+ OP_STRH_imm_T1 = 0x8000,
+ OP_LDRH_imm_T1 = 0x8800,
+ OP_STR_imm_T2 = 0x9000,
+ OP_LDR_imm_T2 = 0x9800,
+ OP_ADD_SP_imm_T1 = 0xA800,
+ OP_ADD_SP_imm_T2 = 0xB000,
+ OP_SUB_SP_imm_T1 = 0xB080,
+ OP_PUSH_T1 = 0xB400,
+ OP_POP_T1 = 0xBC00,
+ OP_BKPT = 0xBE00,
+ OP_IT = 0xBF00,
+ OP_NOP_T1 = 0xBF00,
+ } OpcodeID;
+
+ typedef enum {
+ OP_B_T1 = 0xD000,
+ OP_B_T2 = 0xE000,
+ OP_POP_T2 = 0xE8BD,
+ OP_PUSH_T2 = 0xE92D,
+ OP_AND_reg_T2 = 0xEA00,
+ OP_TST_reg_T2 = 0xEA10,
+ OP_ORR_reg_T2 = 0xEA40,
+ OP_ORR_S_reg_T2 = 0xEA50,
+ OP_ASR_imm_T1 = 0xEA4F,
+ OP_LSL_imm_T1 = 0xEA4F,
+ OP_LSR_imm_T1 = 0xEA4F,
+ OP_ROR_imm_T1 = 0xEA4F,
+ OP_MVN_reg_T2 = 0xEA6F,
+ OP_EOR_reg_T2 = 0xEA80,
+ OP_ADD_reg_T3 = 0xEB00,
+ OP_ADD_S_reg_T3 = 0xEB10,
+ OP_SUB_reg_T2 = 0xEBA0,
+ OP_SUB_S_reg_T2 = 0xEBB0,
+ OP_CMP_reg_T2 = 0xEBB0,
+ OP_VMOV_CtoD = 0xEC00,
+ OP_VMOV_DtoC = 0xEC10,
+ OP_FSTS = 0xED00,
+ OP_VSTR = 0xED00,
+ OP_FLDS = 0xED10,
+ OP_VLDR = 0xED10,
+ OP_VMOV_CtoS = 0xEE00,
+ OP_VMOV_StoC = 0xEE10,
+ OP_VMUL_T2 = 0xEE20,
+ OP_VADD_T2 = 0xEE30,
+ OP_VSUB_T2 = 0xEE30,
+ OP_VDIV = 0xEE80,
+ OP_VABS_T2 = 0xEEB0,
+ OP_VCMP = 0xEEB0,
+ OP_VCVT_FPIVFP = 0xEEB0,
+ OP_VMOV_T2 = 0xEEB0,
+ OP_VMOV_IMM_T2 = 0xEEB0,
+ OP_VMRS = 0xEEB0,
+ OP_VNEG_T2 = 0xEEB0,
+ OP_VSQRT_T1 = 0xEEB0,
+ OP_VCVTSD_T1 = 0xEEB0,
+ OP_VCVTDS_T1 = 0xEEB0,
+ OP_B_T3a = 0xF000,
+ OP_B_T4a = 0xF000,
+ OP_AND_imm_T1 = 0xF000,
+ OP_TST_imm = 0xF010,
+ OP_ORR_imm_T1 = 0xF040,
+ OP_MOV_imm_T2 = 0xF040,
+ OP_MVN_imm = 0xF060,
+ OP_EOR_imm_T1 = 0xF080,
+ OP_ADD_imm_T3 = 0xF100,
+ OP_ADD_S_imm_T3 = 0xF110,
+ OP_CMN_imm = 0xF110,
+ OP_ADC_imm = 0xF140,
+ OP_SUB_imm_T3 = 0xF1A0,
+ OP_SUB_S_imm_T3 = 0xF1B0,
+ OP_CMP_imm_T2 = 0xF1B0,
+ OP_RSB_imm_T2 = 0xF1C0,
+ OP_RSB_S_imm_T2 = 0xF1D0,
+ OP_ADD_imm_T4 = 0xF200,
+ OP_MOV_imm_T3 = 0xF240,
+ OP_SUB_imm_T4 = 0xF2A0,
+ OP_MOVT = 0xF2C0,
+ OP_UBFX_T1 = 0xF3C0,
+ OP_NOP_T2a = 0xF3AF,
+ OP_DMB_SY_T2a = 0xF3BF,
+ OP_STRB_imm_T3 = 0xF800,
+ OP_STRB_reg_T2 = 0xF800,
+ OP_LDRB_imm_T3 = 0xF810,
+ OP_LDRB_reg_T2 = 0xF810,
+ OP_STRH_imm_T3 = 0xF820,
+ OP_STRH_reg_T2 = 0xF820,
+ OP_LDRH_reg_T2 = 0xF830,
+ OP_LDRH_imm_T3 = 0xF830,
+ OP_STR_imm_T4 = 0xF840,
+ OP_STR_reg_T2 = 0xF840,
+ OP_LDR_imm_T4 = 0xF850,
+ OP_LDR_reg_T2 = 0xF850,
+ OP_STRB_imm_T2 = 0xF880,
+ OP_LDRB_imm_T2 = 0xF890,
+ OP_STRH_imm_T2 = 0xF8A0,
+ OP_LDRH_imm_T2 = 0xF8B0,
+ OP_STR_imm_T3 = 0xF8C0,
+ OP_LDR_imm_T3 = 0xF8D0,
+ OP_LDRSB_reg_T2 = 0xF910,
+ OP_LDRSH_reg_T2 = 0xF930,
+ OP_LSL_reg_T2 = 0xFA00,
+ OP_LSR_reg_T2 = 0xFA20,
+ OP_ASR_reg_T2 = 0xFA40,
+ OP_ROR_reg_T2 = 0xFA60,
+ OP_CLZ = 0xFAB0,
+ OP_SMULL_T1 = 0xFB80,
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+ OP_SDIV_T1 = 0xFB90,
+ OP_UDIV_T1 = 0xFBB0,
+#endif
+ } OpcodeID1;
+
+ typedef enum {
+ OP_VADD_T2b = 0x0A00,
+ OP_VDIVb = 0x0A00,
+ OP_FLDSb = 0x0A00,
+ OP_VLDRb = 0x0A00,
+ OP_VMOV_IMM_T2b = 0x0A00,
+ OP_VMOV_T2b = 0x0A40,
+ OP_VMUL_T2b = 0x0A00,
+ OP_FSTSb = 0x0A00,
+ OP_VSTRb = 0x0A00,
+ OP_VMOV_StoCb = 0x0A10,
+ OP_VMOV_CtoSb = 0x0A10,
+ OP_VMOV_DtoCb = 0x0A10,
+ OP_VMOV_CtoDb = 0x0A10,
+ OP_VMRSb = 0x0A10,
+ OP_VABS_T2b = 0x0A40,
+ OP_VCMPb = 0x0A40,
+ OP_VCVT_FPIVFPb = 0x0A40,
+ OP_VNEG_T2b = 0x0A40,
+ OP_VSUB_T2b = 0x0A40,
+ OP_VSQRT_T1b = 0x0A40,
+ OP_VCVTSD_T1b = 0x0A40,
+ OP_VCVTDS_T1b = 0x0A40,
+ OP_NOP_T2b = 0x8000,
+ OP_DMB_SY_T2b = 0x8F5F,
+ OP_B_T3b = 0x8000,
+ OP_B_T4b = 0x9000,
+ } OpcodeID2;
+
+ struct FourFours {
+ FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
+ {
+ m_u.f0 = f0;
+ m_u.f1 = f1;
+ m_u.f2 = f2;
+ m_u.f3 = f3;
+ }
+
+ union {
+ unsigned value;
+ struct {
+ unsigned f0 : 4;
+ unsigned f1 : 4;
+ unsigned f2 : 4;
+ unsigned f3 : 4;
+ };
+ } m_u;
+ };
+
+ class ARMInstructionFormatter;
+
+ // false means else!
+ static bool ifThenElseConditionBit(Condition condition, bool isIf)
+ {
+ return isIf ? (condition & 1) : !(condition & 1);
+ }
+ static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | (ifThenElseConditionBit(condition, inst4if) << 1)
+ | 1;
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | 2;
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ static uint8_t ifThenElse(Condition condition, bool inst2if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | 4;
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+
+ static uint8_t ifThenElse(Condition condition)
+ {
+ int mask = 8;
+ return (condition << 4) | mask;
+ }
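+
+ // For example, ifThenElse(ConditionEQ, true) yields mask 0b0100, so
+ // it(ConditionEQ, true) encodes as ITT EQ (0xbf04), while a bare
+ // it(ConditionNE) encodes as IT NE (0xbf18, mask 0b1000).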
+
+public:
+
+ void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
+ }
+
+ void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if (rn == ARMRegisters::sp && imm.isUInt16()) {
+ ASSERT(!(imm.getUInt16() & 3));
+ if (!(rd & 8) && imm.isUInt10()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
+ return;
+ } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
+ m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
+ return;
+ }
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
+ }
+ }
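+
+ // Illustrative selection: add(r0, r0, #1) fits the one-word T1 form,
+ // add(r0, r0, #200) the one-word T2 form, and add(r8, r8, #0xff00) the
+ // two-word T3 form (0xff00 is a valid modified immediate); a plain
+ // 12-bit value such as #0xabc, which is not a modified immediate,
+ // falls through to T4.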
+
+ ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // NOTE: In an IT block, add doesn't modify the flags register.
+ ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (rd == ARMRegisters::sp) {
+ mov(rd, rn);
+ rn = rd;
+ }
+
+ if (rd == rn)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
+ else if (rd == rm)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
+ else if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+ else
+ add(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+ else
+ add_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
+ }
+
+ ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
+ else
+ ARM_and(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ ALWAYS_INLINE AssemblerLabel b()
+ {
+ m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
+ return m_formatter.label();
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
+ {
+ ASSERT(rm != ARMRegisters::pc);
+ m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
+ return m_formatter.label();
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
+ return m_formatter.label();
+ }
+
+ void bkpt(uint8_t imm = 0)
+ {
+ m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
+ }
+
+ ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
+ }
+
+ ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!(rn & 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
+ else
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
+ }
+
+ ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ cmp(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ void eor(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
+ else
+ eor(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void it(Condition cond)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
+ }
+
+ ALWAYS_INLINE void it(Condition cond, bool inst2if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
+ }
+
+ ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
+ }
+
+ ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+ ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate);
+ }
+
+ ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt7());
+ ASSERT(!((rt | rn) & 8));
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed load;
+ // if index is not set then this is a post-index load.
+ //
+ // If wback is set rn is updated - this is a pre or post index load,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // REG[rt] = MEM[index ? _tmp : _reg]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback || (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
+ }
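+
+ // For example, ldr(r0, r1, -4, true, true) - "LDR r0, [r1, #-4]!" -
+ // packs the magnitude 4 with wback (bit 8) and index (bit 10) set and
+ // add (bit 9) clear, giving the T4 offset field 0xd04.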
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
+ ASSERT(imm.isUInt12());
+ ASSERT(!(imm.getUInt12() & 1));
+
+ if (!((rt | rn) & 8) && imm.isUInt6())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed load;
+ // if index is not set then this is a post-index load.
+ //
+ // If wback is set rn is updated - this is a pre or post index load,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // REG[rt] = MEM[index ? _tmp : _reg]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback || (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
+ }
+
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(!BadReg(rt)); // Memory hint
+ ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDRB (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt5())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback || (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+
+ ASSERT(!(offset & ~0xff));
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
+ }
+
+ ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDRB (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
+ }
+
+#if OS(LINUX)
+ static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm)
+ {
+ uint16_t* address = static_cast<uint16_t*>(instructionStart);
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
+ address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16);
+ address[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ address[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16);
+ address[4] = OP_CMP_reg_T2 | left;
+ cacheFlush(address, sizeof(uint16_t) * 5);
+ }
+#else
+ static void revertJumpTo_movT3(void* instructionStart, RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ uint16_t* address = static_cast<uint16_t*>(instructionStart);
+ address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm);
+ address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm);
+ cacheFlush(address, sizeof(uint16_t) * 2);
+ }
+#endif
+
+ ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!BadReg(rd));
+
+ if ((rd < 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
+ else if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
+ else
+ movT3(rd, imm);
+ }
+
+ ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
+ }
+
+ ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isUInt16());
+ ASSERT(!BadReg(rd));
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
+ }
+
+ ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
+ }
+
+ ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
+ {
+ if (!((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
+ else
+ mvn(rd, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
+ {
+ ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
+ sub(rd, zero, rm);
+ }
+
+ ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
+ }
+
+ ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void orr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+ else
+ orr(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+ else
+ orr_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ ALWAYS_INLINE void pop(RegisterID dest)
+ {
+ if (dest < ARMRegisters::r8)
+ m_formatter.oneWordOp7Imm9(OP_POP_T1, 1 << dest);
+ else {
+ // Load postindexed with writeback.
+ ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
+ }
+ }
+
+ ALWAYS_INLINE void pop(uint32_t registerList)
+ {
+ ASSERT(WTF::bitCount(registerList) > 1);
+ ASSERT(!((1 << ARMRegisters::pc) & registerList) || !((1 << ARMRegisters::lr) & registerList));
+ ASSERT(!((1 << ARMRegisters::sp) & registerList));
+ m_formatter.twoWordOp16Imm16(OP_POP_T2, registerList);
+ }
+
+ ALWAYS_INLINE void push(RegisterID src)
+ {
+ if (src < ARMRegisters::r8)
+ m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 1 << src);
+ else if (src == ARMRegisters::lr)
+ m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 0x100);
+ else {
+ // Store preindexed with writeback.
+ str(src, ARMRegisters::sp, -sizeof(void*), true, true);
+ }
+ }
+
+ ALWAYS_INLINE void push(uint32_t registerList)
+ {
+ ASSERT(WTF::bitCount(registerList) > 1);
+ ASSERT(!((1 << ARMRegisters::pc) & registerList));
+ ASSERT(!((1 << ARMRegisters::sp) & registerList));
+ m_formatter.twoWordOp16Imm16(OP_PUSH_T2, registerList);
+ }
+
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+ template<int datasize>
+ ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
+ }
+#endif
+
+ ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rdLo));
+ ASSERT(!BadReg(rdHi));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ ASSERT(rdLo != rdHi);
+ m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+ // if index is not set then this is a post-index store.
+ //
+ // If wback is set rn is updated - this is a pre or post index store,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback || (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+ // if index is not set then this is a post-index store.
+ //
+ // If wback is set rn is updated - this is a pre or post index store,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback || (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt6())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+ // if index is not set then this is a post-index store.
+ //
+ // If wback is set rn is updated - this is a pre or post index store,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback || (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT(!(offset & ~0xff));
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+ ASSERT(!(imm.getUInt16() & 3));
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
+ }
+ }
+
+ ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
+ {
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+ ASSERT(imm.isUInt12());
+
+ if (!((rd | rn) & 8) && !imm.getUInt12())
+ m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
+ else
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
+ }
+
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // NOTE: In an IT block, sub doesn't modify the flags register.
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+ else
+ sub(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+ ASSERT(!(imm.getUInt16() & 3));
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
+ }
+
+ ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
+ {
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+ ASSERT(imm.isUInt12());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+ else
+ sub_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ tst(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
+ }
+
+ ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
+ {
+ ASSERT(lsb < 32);
+ ASSERT((width >= 1) && (width <= 32));
+ ASSERT((lsb + width) <= 32);
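+ // e.g. ubfx(r0, r1, 4, 8) computes r0 = (r1 >> 4) & 0xff.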
+ m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
+ }
+
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+ ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_UDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
+ }
+#endif
+
+ void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
+ }
+
+ void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
+ }
+
+ void vcmpz(FPDoubleRegisterID rd)
+ {
+ m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
+ }
+
+ void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
+ {
+ // The boolean arguments to vcvtOp are (toInt, unsigned, roundZero).
+ m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
+ }
+
+ void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ // The boolean arguments to vcvtOp are (toInt, unsigned, roundZero).
+ m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
+ }
+
+ void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ // The boolean arguments to vcvtOp are (toInt, unsigned, roundZero).
+ m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
+ }
+
+ void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
+ }
+
+ void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
+ }
+
+ void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
+ }
+
+ void vmov(RegisterID rd, FPSingleRegisterID rn)
+ {
+ ASSERT(!BadReg(rd));
+ m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
+ }
+
+ void vmov(FPSingleRegisterID rd, RegisterID rn)
+ {
+ ASSERT(!BadReg(rn));
+ m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
+ }
+
+ void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
+ {
+ ASSERT(!BadReg(rd1));
+ ASSERT(!BadReg(rd2));
+ m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
+ }
+
+ void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
+ {
+ ASSERT(!BadReg(rn1));
+ ASSERT(!BadReg(rn2));
+ m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
+ }
+
+ void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
+ {
+ m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
+ }
+
+ void vmrs(RegisterID reg = ARMRegisters::pc)
+ {
+ ASSERT(reg != ARMRegisters::sp);
+ m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
+ }
+
+ void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
+ }
+
+ void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
+ }
+
+ void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
+ }
+
+ void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
+ }
+
+ void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
+ }
+
+ void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
+ }
+
+ void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
+ }
+
+ void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
+ }
+
+ void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
+ }
+
+ void nop()
+ {
+ m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
+ }
+
+ void nopw()
+ {
+ m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
+ }
+
+ void dmbSY()
+ {
+ m_formatter.twoWordOp16Op16(OP_DMB_SY_T2a, OP_DMB_SY_T2b);
+ }
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ return m_formatter.label();
+ }
+
+ AssemblerLabel labelForWatchpoint()
+ {
+ AssemblerLabel result = m_formatter.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = m_formatter.label();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ if (UNLIKELY(static_cast<int>(result.m_offset) + 4 <= m_indexOfTailOfLastWatchpoint))
+ nopw();
+ else
+ nop();
+ result = m_formatter.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_formatter.isAligned(alignment))
+ bkpt();
+
+ return label();
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ ASSERT(label.isSet());
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+
+ // Assembler admin methods:
+
+ static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
+ {
+ return a.from() < b.from();
+ }
+
+ static bool canCompact(JumpType jumpType)
+ {
+ // The following cannot be compacted:
+ // JumpFixed: represents custom jump sequence
+ // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
+ // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
+ return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
+ }
+
+ static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+ {
+ if (jumpType == JumpFixed)
+ return LinkInvalid;
+
+ // for patchable jump we must leave space for the longest code sequence
+ if (jumpType == JumpNoConditionFixedSize)
+ return LinkBX;
+ if (jumpType == JumpConditionFixedSize)
+ return LinkConditionalBX;
+
+ const int paddingSize = JUMP_ENUM_SIZE(jumpType);
+
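+ // The candidate encodings differ in reach: roughly +-256 bytes for T1
+ // (16-bit conditional), +-2KB for T2 (16-bit unconditional), +-1MB for
+ // T3 (32-bit conditional) and +-16MB for T4 (32-bit unconditional);
+ // targets beyond that fall back to the BX-based sequences.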
+ if (jumpType == JumpCondition) {
+ // 2-byte conditional T1
+ const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
+ if (canBeJumpT1(jumpT1Location, to))
+ return LinkJumpT1;
+ // 4-byte conditional T3
+ const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
+ if (canBeJumpT3(jumpT3Location, to))
+ return LinkJumpT3;
+ // 4-byte conditional T4 with IT
+ const uint16_t* conditionalJumpT4Location =
+ reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
+ if (canBeJumpT4(conditionalJumpT4Location, to))
+ return LinkConditionalJumpT4;
+ } else {
+ // 2-byte unconditional T2
+ const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
+ if (canBeJumpT2(jumpT2Location, to))
+ return LinkJumpT2;
+ // 4-byte unconditional T4
+ const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
+ if (canBeJumpT4(jumpT4Location, to))
+ return LinkJumpT4;
+ // use long jump sequence
+ return LinkBX;
+ }
+
+ ASSERT(jumpType == JumpCondition);
+ return LinkConditionalBX;
+ }
+
+ static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+ {
+ JumpLinkType linkType = computeJumpType(record.type(), from, to);
+ record.setLinkType(linkType);
+ return linkType;
+ }
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
+ {
+ std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
+ return m_jumpsToLink;
+ }
+
+ static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
+ {
+ switch (record.linkType()) {
+ case LinkJumpT1:
+ linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkJumpT2:
+ linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkJumpT3:
+ linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkJumpT4:
+ linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkConditionalJumpT4:
+ linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkConditionalBX:
+ linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkBX:
+ linkBX(reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ void* unlinkedCode() { return m_formatter.data(); }
+ size_t codeSize() const { return m_formatter.codeSize(); }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ ASSERT(call.isSet());
+ return call.m_offset;
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
+ {
+ ASSERT(to.isSet());
+ ASSERT(from.isSet());
+ m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+
+ uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ linkJumpAbsolute(location, to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
+ ASSERT(from.isSet());
+
+ setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to, false);
+ }
+
+ static void linkPointer(void* code, AssemblerLabel where, void* value)
+ {
+ setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
+
+ linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);
+
+ cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+
+ setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ setInt32(where, value, true);
+ }
+
+ static void repatchCompact(void* where, int32_t offset)
+ {
+ ASSERT(offset >= -255 && offset <= 255);
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+
+ offset |= (add << 9);
+ offset |= (1 << 10);
+ offset |= (1 << 11);
+
+ uint16_t* location = reinterpret_cast<uint16_t*>(where);
+ location[1] &= ~((1 << 12) - 1);
+ location[1] |= offset;
+ cacheFlush(location, sizeof(uint16_t) * 2);
+ }
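+
+    // Editor's note (not in the original source): the OR-ins above rebuild the
+    // low 12 bits of the second halfword of what is assumed to be a T4-form
+    // immediate load: bits 0..7 hold the 8-bit magnitude, bit 9 is the
+    // add/subtract (U) bit, bit 10 selects index addressing (set here), bit 11
+    // is a required 1 in this encoding, and bit 8 (write-back) is left clear.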
+
+ static void repatchPointer(void* where, void* value)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ setPointer(where, value, true);
+ }
+
+ static void* readPointer(void* where)
+ {
+ return reinterpret_cast<void*>(readInt32(where));
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+ ASSERT(!(bitwise_cast<uintptr_t>(to) & 1));
+
+#if OS(LINUX)
+ if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
+ linkJumpT4(ptr, to);
+ cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
+ } else {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
+ linkBX(ptr, to);
+ cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
+ }
+#else
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
+ linkJumpT4(ptr, to);
+ cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
+#endif
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+#if OS(LINUX)
+ return 10;
+#else
+ return 4;
+#endif
+ }
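+
+    // Editor's note (not in the original source): the two sizes above match the
+    // sequences replaceWithJump() may plant - 4 bytes for a single B.W (T4)
+    // branch, and on Linux a 10-byte fallback of five halfwords (see linkBX())
+    // when the target is out of T4 range:
+    //
+    //     movw ip, #lo16(target)   ; 2 halfwords
+    //     movt ip, #hi16(target)   ; 2 halfwords
+    //     bx   ip                  ; 1 halfword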
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
+ switch (ptr[0] & 0xFFF0) {
+ case OP_LDR_imm_T3:
+ break;
+ case OP_ADD_imm_T3:
+ ASSERT(!(ptr[1] & 0xF000));
+ ptr[0] &= 0x000F;
+ ptr[0] |= OP_LDR_imm_T3;
+ ptr[1] |= (ptr[1] & 0x0F00) << 4;
+ ptr[1] &= 0xF0FF;
+ cacheFlush(ptr, sizeof(uint16_t) * 2);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
+ switch (ptr[0] & 0xFFF0) {
+ case OP_LDR_imm_T3:
+ ASSERT(!(ptr[1] & 0x0F00));
+ ptr[0] &= 0x000F;
+ ptr[0] |= OP_ADD_imm_T3;
+ ptr[1] |= (ptr[1] & 0xF000) >> 4;
+ ptr[1] &= 0x0FFF;
+ cacheFlush(ptr, sizeof(uint16_t) * 2);
+ break;
+ case OP_ADD_imm_T3:
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ unsigned debugOffset() { return m_formatter.debugOffset(); }
+
+#if OS(LINUX)
+ static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+ {
+ asm volatile(
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "movw r7, #0x2\n"
+ "movt r7, #0xf\n"
+ "movs r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (begin), "r" (end)
+ : "r0", "r1", "r2");
+ }
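+
+    // Editor's note (not in the original source): the movw/movt pair loads r7
+    // with 0xf0002, the private __ARM_NR_cacheflush syscall number on ARM
+    // Linux; r0/r1 delimit the [begin, end) range and r2 carries flags, which
+    // must be 0. cacheFlush() below walks the range one page at a time,
+    // presumably so a single large request cannot fail partway through a range
+    // spanning multiple mappings.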
+#endif
+
+ static void cacheFlush(void* code, size_t size)
+ {
+#if OS(IOS)
+ sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
+#elif OS(LINUX)
+ size_t page = pageSize();
+ uintptr_t current = reinterpret_cast<uintptr_t>(code);
+ uintptr_t end = current + size;
+ uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+ if (end <= firstPageEnd) {
+ linuxPageFlush(current, end);
+ return;
+ }
+
+ linuxPageFlush(current, firstPageEnd);
+
+ for (current = firstPageEnd; current + page < end; current += page)
+ linuxPageFlush(current, current + page);
+
+ linuxPageFlush(current, end);
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+ }
+
+private:
+ // VFP operations commonly take one or more 5-bit operands, typically representing a
+ // floating point register number. This will commonly be encoded in the instruction
+ // in two parts, with one single bit field, and one 4-bit field. In the case of
+ // double precision operands the high bit of the register number will be encoded
+    // separately, and for single precision operands the low bit of the register number
+    // will be encoded individually.
+ // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
+ // field to be encoded together in the instruction (the low 4-bits of a double
+ // register number, or the high 4-bits of a single register number), and bit 4
+ // contains the bit value to be encoded individually.
+ struct VFPOperand {
+ explicit VFPOperand(uint32_t value)
+ : m_value(value)
+ {
+ ASSERT(!(m_value & ~0x1f));
+ }
+
+ VFPOperand(FPDoubleRegisterID reg)
+ : m_value(reg)
+ {
+ }
+
+ VFPOperand(RegisterID reg)
+ : m_value(reg)
+ {
+ }
+
+ VFPOperand(FPSingleRegisterID reg)
+ : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
+ {
+ }
+
+ uint32_t bits1()
+ {
+ return m_value >> 4;
+ }
+
+ uint32_t bits4()
+ {
+ return m_value & 0xf;
+ }
+
+ uint32_t m_value;
+ };
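+
+    // For illustration (not in the original source): a single-precision register
+    // such as s3 (reg == 3, binary 00011) is rotated so its low bit becomes bit 4:
+    //
+    //     ((3 & 1) << 4) | (3 >> 1) == 0b10001
+    //
+    // after which bits4() yields 0b0001 for the in-instruction field and bits1()
+    // yields 1 for the separately-encoded bit.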
+
+ VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
+ {
+ // Cannot specify rounding when converting to float.
+ ASSERT(toInteger || !isRoundZero);
+
+ uint32_t op = 0x8;
+ if (toInteger) {
+ // opc2 indicates both toInteger & isUnsigned.
+ op |= isUnsigned ? 0x4 : 0x5;
+ // 'op' field in instruction is isRoundZero
+ if (isRoundZero)
+ op |= 0x10;
+ } else {
+ ASSERT(!isRoundZero);
+ // 'op' field in instruction is isUnsigned
+ if (!isUnsigned)
+ op |= 0x10;
+ }
+ return VFPOperand(op);
+ }
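+
+    // For illustration (not in the original source): vcvtOp(true, false, true)
+    // describes a convert-to-signed-integer with round-towards-zero, yielding
+    // 0x8 | 0x5 | 0x10 == 0x1d, i.e. the opc2 bits plus the 'op' bit set.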
+
+ static void setInt32(void* code, uint32_t value, bool flush)
+ {
+ uint16_t* location = reinterpret_cast<uint16_t*>(code);
+ ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
+
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
+ location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
+ location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
+
+ if (flush)
+ cacheFlush(location - 4, 4 * sizeof(uint16_t));
+ }
+
+ static int32_t readInt32(void* code)
+ {
+ uint16_t* location = reinterpret_cast<uint16_t*>(code);
+ ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
+
+ ARMThumbImmediate lo16;
+ ARMThumbImmediate hi16;
+ decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
+ decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
+ decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
+ decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
+ uint32_t result = hi16.asUInt16();
+ result <<= 16;
+ result |= lo16.asUInt16();
+ return static_cast<int32_t>(result);
+ }
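+
+    // Editor's note (not in the original source): setInt32() and readInt32()
+    // both assume 'code' points just past a movw/movt pair, laid out as four
+    // 16-bit halfwords:
+    //
+    //     location[-4..-3]: MOVW rX, #lo16(value)
+    //     location[-2..-1]: MOVT rX, #hi16(value)
+    //
+    // which is what lets a full 32-bit pointer be repatched in place, e.g.
+    // setPointer(addressAfterThePair, newValue, true).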
+
+ static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
+ {
+ // Requires us to have planted a LDR_imm_T1
+ ASSERT(imm.isValid());
+ ASSERT(imm.isUInt7());
+ uint16_t* location = reinterpret_cast<uint16_t*>(code);
+ location[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
+ location[0] |= (imm.getUInt7() >> 2) << 6;
+ cacheFlush(location, sizeof(uint16_t));
+ }
+
+ static void setPointer(void* code, void* value, bool flush)
+ {
+ setInt32(code, reinterpret_cast<uint32_t>(value), flush);
+ }
+
+ static bool isB(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
+ }
+
+ static bool isBX(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return (instruction[0] & 0xff87) == OP_BX;
+ }
+
+ static bool isMOV_imm_T3(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
+ }
+
+ static bool isMOVT(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
+ }
+
+ static bool isNOP_T1(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return instruction[0] == OP_NOP_T1;
+ }
+
+ static bool isNOP_T2(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
+ }
+
+ static bool canBeJumpT1(const uint16_t* instruction, const void* target)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T1 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
+ return ((relative << 23) >> 23) == relative;
+ }
+
+ static bool canBeJumpT2(const uint16_t* instruction, const void* target)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T2 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
+ return ((relative << 20) >> 20) == relative;
+ }
+
+ static bool canBeJumpT3(const uint16_t* instruction, const void* target)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ return ((relative << 11) >> 11) == relative;
+ }
+
+ static bool canBeJumpT4(const uint16_t* instruction, const void* target)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ return ((relative << 7) >> 7) == relative;
+ }
+
+ static void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT1(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T1 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
+ }
+
+ static void linkJumpT2(uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT2(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T2 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
+ }
+
+ static void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT3(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
+ instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
+ }
+
+ static void linkJumpT4(uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT4(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // ARM encoding for the top two bits below the sign bit is 'peculiar'.
+ if (relative >= 0)
+ relative ^= 0xC00000;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+ instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+ }
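+
+    // Editor's note (not in the original source) on the 'peculiar' encoding: in
+    // the T4 form the two bits below the sign bit are stored as J1/J2, defined
+    // by the architecture as I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S). When
+    // S == 0 the stored bits are therefore the complement of the offset bits,
+    // which is exactly what 'relative ^= 0xC00000' pre-applies; when S == 1
+    // they pass through unchanged.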
+
+ static void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ instruction[-3] = ifThenElse(cond) | OP_IT;
+ linkJumpT4(instruction, target);
+ }
+
+ static void linkBX(uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+ instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+ instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+ instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+ }
+
+ static void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ linkBX(instruction, target);
+ instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
+ }
+
+ static void linkJumpAbsolute(uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
+ || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
+
+ if (canBeJumpT4(instruction, target)) {
+ // There may be a better way to fix this, but right now put the NOPs first, since in the
+            // case of a conditional branch this will be coming after an ITTT predicating *three*
+            // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
+            // variable width encoding - the previous instruction might *look* like an ITTT but
+ // actually be the second half of a 2-word op.
+ instruction[-5] = OP_NOP_T1;
+ instruction[-4] = OP_NOP_T2a;
+ instruction[-3] = OP_NOP_T2b;
+ linkJumpT4(instruction, target);
+ } else {
+ const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+ instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+ instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+ instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+ }
+ }
+
+ static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
+ {
+ return op | (imm.m_value.i << 10) | imm.m_value.imm4;
+ }
+
+ static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
+ {
+ result.m_value.i = (value >> 10) & 1;
+ result.m_value.imm4 = value & 15;
+ }
+
+ static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
+ {
+ return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
+ }
+
+ static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
+ {
+ result.m_value.imm3 = (value >> 12) & 7;
+ result.m_value.imm8 = value & 255;
+ }
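+
+    // For illustration (not in the original source): a 16-bit constant such as
+    // 0x1234 travels through MOVW as imm4:i:imm3:imm8, i.e.
+    //
+    //     imm4 = 0x1, i = 0, imm3 = 0x2, imm8 = 0x34
+    //
+    // with imm4 and i packed into the first halfword by ...EncodedImmFirst and
+    // imm3/imm8 into the second by ...EncodedImmSecond.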
+
+ class ARMInstructionFormatter {
+ public:
+ ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
+ {
+ m_buffer.putShort(op | (rd << 8) | imm);
+ }
+
+ ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
+ }
+
+ ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
+ {
+ m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
+ }
+
+ ALWAYS_INLINE void oneWordOp7Imm9(OpcodeID op, uint16_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
+ }
+
+ ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (reg1 << 3) | reg2);
+ }
+
+ ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
+ {
+ m_buffer.putShort(op | reg);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
+ {
+ m_buffer.putShort(op);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
+ {
+ m_buffer.putShort(op1);
+ m_buffer.putShort(op2);
+ }
+
+ ALWAYS_INLINE void twoWordOp16Imm16(OpcodeID1 op1, uint16_t imm)
+ {
+ m_buffer.putShort(op1);
+ m_buffer.putShort(imm);
+ }
+
+ ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
+ {
+ ARMThumbImmediate newImm = imm;
+ newImm.m_value.imm4 = imm4;
+
+ m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
+ m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
+ }
+
+ ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
+ {
+ m_buffer.putShort(op | reg1);
+ m_buffer.putShort((reg2 << 12) | imm);
+ }
+
+ ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
+ {
+ m_buffer.putShort(op | reg1);
+ m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
+ }
+
+ // Formats up instructions of the pattern:
+ // 111111111B11aaaa:bbbb222SA2C2cccc
+ // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
+ // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
+ ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
+ {
+ ASSERT(!(op1 & 0x004f));
+ ASSERT(!(op2 & 0xf1af));
+ m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
+ m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
+ }
+
+    // ARM VFP addresses can be offset by an 8-bit immediate scaled by 4, with a
+    // separate up/down bit (i.e. +/-(0..255) 32-bit words).
+ ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
+ {
+ bool up = true;
+ if (imm < 0) {
+ imm = -imm;
+ up = false;
+ }
+
+ uint32_t offset = imm;
+ ASSERT(!(offset & ~0x3fc));
+ offset >>= 2;
+
+ m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
+ m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
+ }
+
+ // Administrative methods:
+
+ size_t codeSize() const { return m_buffer.codeSize(); }
+ AssemblerLabel label() const { return m_buffer.label(); }
+ bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+ void* data() const { return m_buffer.data(); }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#endif // ARMv7Assembler_h
diff --git a/Source/JavaScriptCore/assembler/AbortReason.h b/Source/JavaScriptCore/assembler/AbortReason.h
new file mode 100644
index 000000000..1a5f068c7
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/AbortReason.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AbortReason_h
+#define AbortReason_h
+
+namespace JSC {
+
+// It's important to not change the values of existing abort reasons unless we really
+// have to. For this reason there is a BASIC-style numbering that should allow us to
+// sneak new reasons in without changing the numbering of existing reasons - at least
+// for a while.
+enum AbortReason {
+ AHCallFrameMisaligned = 10,
+ AHIndexingTypeIsValid = 20,
+ AHInsaneArgumentCount = 30,
+ AHIsNotCell = 40,
+ AHIsNotInt32 = 50,
+ AHIsNotJSDouble = 60,
+ AHIsNotJSInt32 = 70,
+ AHIsNotJSNumber = 80,
+ AHIsNotNull = 90,
+ AHStackPointerMisaligned = 100,
+ AHStructureIDIsValid = 110,
+ AHTagMaskNotInPlace = 120,
+ AHTagTypeNumberNotInPlace = 130,
+ AHTypeInfoInlineTypeFlagsAreValid = 140,
+ AHTypeInfoIsValid = 150,
+ DFGBailedAtTopOfBlock = 161,
+ DFGBailedAtEndOfNode = 162,
+ DFGBasicStorageAllocatorZeroSize = 170,
+ DFGIsNotCell = 180,
+ DFGIneffectiveWatchpoint = 190,
+ DFGNegativeStringLength = 200,
+ DFGSlowPathGeneratorFellThrough = 210,
+ DFGUnreachableBasicBlock = 220,
+ DFGUnreasonableOSREntryJumpDestination = 230,
+ DFGVarargsThrowingPathDidNotThrow = 235,
+ JITDivOperandsAreNotNumbers = 240,
+ JITGetByValResultIsNotEmpty = 250,
+ JITNotSupported = 260,
+ JITOffsetIsNotOutOfLine = 270,
+ JITUnreasonableLoopHintJumpTarget = 280,
+ RPWUnreasonableJumpTarget = 290,
+ RepatchIneffectiveWatchpoint = 300,
+ RepatchInsaneArgumentCount = 310,
+ TGInvalidPointer = 320,
+ TGNotSupported = 330,
+ YARRNoInputConsumed = 340,
+};
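+
+// Editor's note (not in the original source): these codes are consumed by the
+// MacroAssembler's abortWithReason(), which plants code that crashes
+// deterministically while leaving the reason value recoverable at the crash
+// site, e.g. jit.abortWithReason(AHIsNotCell).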
+
+} // namespace JSC
+
+#endif // AbortReason_h
+
diff --git a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
new file mode 100644
index 000000000..6e82dcc5e
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -0,0 +1,1328 @@
+/*
+ * Copyright (C) 2008, 2012, 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AbstractMacroAssembler_h
+#define AbstractMacroAssembler_h
+
+#include "AbortReason.h"
+#include "AssemblerBuffer.h"
+#include "CodeLocation.h"
+#include "MacroAssemblerCodeRef.h"
+#include "Options.h"
+#include "WeakRandom.h"
+#include <wtf/CryptographicallyRandomNumber.h>
+#include <wtf/Noncopyable.h>
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+inline bool isARMv7IDIVSupported()
+{
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline bool isARM64()
+{
+#if CPU(ARM64)
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline bool isX86()
+{
+#if CPU(X86_64) || CPU(X86)
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline bool optimizeForARMv7IDIVSupported()
+{
+ return isARMv7IDIVSupported() && Options::enableArchitectureSpecificOptimizations();
+}
+
+inline bool optimizeForARM64()
+{
+ return isARM64() && Options::enableArchitectureSpecificOptimizations();
+}
+
+inline bool optimizeForX86()
+{
+ return isX86() && Options::enableArchitectureSpecificOptimizations();
+}
+
+class LinkBuffer;
+class RepatchBuffer;
+class Watchpoint;
+namespace DFG {
+struct OSRExit;
+}
+
+template <class AssemblerType, class MacroAssemblerType>
+class AbstractMacroAssembler {
+public:
+ friend class JITWriteBarrierBase;
+ typedef AbstractMacroAssembler<AssemblerType, MacroAssemblerType> AbstractMacroAssemblerType;
+ typedef AssemblerType AssemblerType_T;
+
+ typedef MacroAssemblerCodePtr CodePtr;
+ typedef MacroAssemblerCodeRef CodeRef;
+
+ class Jump;
+
+ typedef typename AssemblerType::RegisterID RegisterID;
+ typedef typename AssemblerType::FPRegisterID FPRegisterID;
+
+ static RegisterID firstRegister() { return AssemblerType::firstRegister(); }
+ static RegisterID lastRegister() { return AssemblerType::lastRegister(); }
+
+ static FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); }
+ static FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); }
+
+ // Section 1: MacroAssembler operand types
+ //
+ // The following types are used as operands to MacroAssembler operations,
+ // describing immediate and memory operands to the instructions to be planted.
+
+ enum Scale {
+ TimesOne,
+ TimesTwo,
+ TimesFour,
+ TimesEight,
+ };
+
+ static Scale timesPtr()
+ {
+ if (sizeof(void*) == 4)
+ return TimesFour;
+ return TimesEight;
+ }
+
+ // Address:
+ //
+ // Describes a simple base-offset address.
+ struct Address {
+ explicit Address(RegisterID base, int32_t offset = 0)
+ : base(base)
+ , offset(offset)
+ {
+ }
+
+ Address withOffset(int32_t additionalOffset)
+ {
+ return Address(base, offset + additionalOffset);
+ }
+
+ RegisterID base;
+ int32_t offset;
+ };
+
+ struct ExtendedAddress {
+ explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
+ : base(base)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ intptr_t offset;
+ };
+
+ // ImplicitAddress:
+ //
+ // This class is used for explicit 'load' and 'store' operations
+ // (as opposed to situations in which a memory operand is provided
+ // to a generic operation, such as an integer arithmetic instruction).
+ //
+ // In the case of a load (or store) operation we want to permit
+ // addresses to be implicitly constructed, e.g. the two calls:
+ //
+ // load32(Address(addrReg), destReg);
+ // load32(addrReg, destReg);
+ //
+    // are equivalent, and the explicit wrapping of the Address in the former
+ // is unnecessary.
+ struct ImplicitAddress {
+ ImplicitAddress(RegisterID base)
+ : base(base)
+ , offset(0)
+ {
+ }
+
+ ImplicitAddress(Address address)
+ : base(address.base)
+ , offset(address.offset)
+ {
+ }
+
+ RegisterID base;
+ int32_t offset;
+ };
+
+ // BaseIndex:
+ //
+ // Describes a complex addressing mode.
+ struct BaseIndex {
+ BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
+ : base(base)
+ , index(index)
+ , scale(scale)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ RegisterID index;
+ Scale scale;
+ int32_t offset;
+
+ BaseIndex withOffset(int32_t additionalOffset)
+ {
+ return BaseIndex(base, index, scale, offset + additionalOffset);
+ }
+ };
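+
+    // For illustration (not in the original source): BaseIndex resolves to
+    // base + (index << scale) + offset, so a typical indexed array load is:
+    //
+    //     load32(BaseIndex(storageReg, indexReg, TimesFour), resultReg);
+    //
+    // where storageReg/indexReg/resultReg are whatever registers the caller
+    // has allocated.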
+
+ // AbsoluteAddress:
+ //
+    // Describes a memory operand given by a pointer. For regular load & store
+ // operations an unwrapped void* will be used, rather than using this.
+ struct AbsoluteAddress {
+ explicit AbsoluteAddress(const void* ptr)
+ : m_ptr(ptr)
+ {
+ }
+
+ const void* m_ptr;
+ };
+
+ // TrustedImmPtr:
+ //
+    // A pointer-sized immediate operand to an instruction - this is wrapped
+    // in a class requiring explicit construction in order to differentiate
+    // from pointers used as absolute addresses to memory operations.
+ struct TrustedImmPtr {
+ TrustedImmPtr() { }
+
+ explicit TrustedImmPtr(const void* value)
+ : m_value(value)
+ {
+ }
+
+ // This is only here so that TrustedImmPtr(0) does not confuse the C++
+ // overload handling rules.
+ explicit TrustedImmPtr(int value)
+ : m_value(0)
+ {
+ ASSERT_UNUSED(value, !value);
+ }
+
+ explicit TrustedImmPtr(size_t value)
+ : m_value(reinterpret_cast<void*>(value))
+ {
+ }
+
+ intptr_t asIntptr()
+ {
+ return reinterpret_cast<intptr_t>(m_value);
+ }
+
+ const void* m_value;
+ };
+
+ struct ImmPtr : private TrustedImmPtr
+ {
+ explicit ImmPtr(const void* value)
+ : TrustedImmPtr(value)
+ {
+ }
+
+ TrustedImmPtr asTrustedImmPtr() { return *this; }
+ };
+
+ // TrustedImm32:
+ //
+ // A 32bit immediate operand to an instruction - this is wrapped in a
+ // class requiring explicit construction in order to prevent RegisterIDs
+ // (which are implemented as an enum) from accidentally being passed as
+ // immediate values.
+ struct TrustedImm32 {
+ TrustedImm32() { }
+
+ explicit TrustedImm32(int32_t value)
+ : m_value(value)
+ {
+ }
+
+#if !CPU(X86_64)
+ explicit TrustedImm32(TrustedImmPtr ptr)
+ : m_value(ptr.asIntptr())
+ {
+ }
+#endif
+
+ int32_t m_value;
+ };
+
+
+ struct Imm32 : private TrustedImm32 {
+ explicit Imm32(int32_t value)
+ : TrustedImm32(value)
+ {
+ }
+#if !CPU(X86_64)
+ explicit Imm32(TrustedImmPtr ptr)
+ : TrustedImm32(ptr)
+ {
+ }
+#endif
+ const TrustedImm32& asTrustedImm32() const { return *this; }
+
+ };
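+
+    // Editor's note (not in the original source): the Trusted/untrusted split
+    // exists for value blinding. A TrustedImm32 is emitted verbatim, whereas a
+    // plain Imm32 - which may carry attacker-influenced data - can be blinded
+    // by the MacroAssembler, e.g. materialized as (value ^ key) followed by a
+    // re-XOR with the same random key, so the raw value never appears as an
+    // immediate in executable memory.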
+
+ // TrustedImm64:
+ //
+ // A 64bit immediate operand to an instruction - this is wrapped in a
+ // class requiring explicit construction in order to prevent RegisterIDs
+ // (which are implemented as an enum) from accidentally being passed as
+ // immediate values.
+ struct TrustedImm64 {
+ TrustedImm64() { }
+
+ explicit TrustedImm64(int64_t value)
+ : m_value(value)
+ {
+ }
+
+#if CPU(X86_64) || CPU(ARM64)
+ explicit TrustedImm64(TrustedImmPtr ptr)
+ : m_value(ptr.asIntptr())
+ {
+ }
+#endif
+
+ int64_t m_value;
+ };
+
+ struct Imm64 : private TrustedImm64
+ {
+ explicit Imm64(int64_t value)
+ : TrustedImm64(value)
+ {
+ }
+#if CPU(X86_64) || CPU(ARM64)
+ explicit Imm64(TrustedImmPtr ptr)
+ : TrustedImm64(ptr)
+ {
+ }
+#endif
+ const TrustedImm64& asTrustedImm64() const { return *this; }
+ };
+
+ // Section 2: MacroAssembler code buffer handles
+ //
+ // The following types are used to reference items in the code buffer
+ // during JIT code generation. For example, the type Jump is used to
+ // track the location of a jump instruction so that it may later be
+ // linked to a label marking its destination.
+
+
+ // Label:
+ //
+ // A Label records a point in the generated instruction stream, typically such that
+ // it may be used as a destination for a jump.
+ class Label {
+ template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend struct DFG::OSRExit;
+ friend class Jump;
+ friend class MacroAssemblerCodeRef;
+ friend class LinkBuffer;
+ friend class Watchpoint;
+
+ public:
+ Label()
+ {
+ }
+
+ Label(AbstractMacroAssemblerType* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ masm->invalidateAllTempRegisters();
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // ConvertibleLoadLabel:
+ //
+ // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
+ // so that:
+ //
+ // loadPtr(Address(a, i), b)
+ //
+ // becomes:
+ //
+ // addPtr(TrustedImmPtr(i), a, b)
+ class ConvertibleLoadLabel {
+ template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+
+ public:
+ ConvertibleLoadLabel()
+ {
+ }
+
+ ConvertibleLoadLabel(AbstractMacroAssemblerType* masm)
+ : m_label(masm->m_assembler.labelIgnoringWatchpoints())
+ {
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // DataLabelPtr:
+ //
+ // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
+ // patched after the code has been generated.
+ class DataLabelPtr {
+ template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabelPtr()
+ {
+ }
+
+ DataLabelPtr(AbstractMacroAssemblerType* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // DataLabel32:
+ //
+ // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be
+ // patched after the code has been generated.
+ class DataLabel32 {
+ template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabel32()
+ {
+ }
+
+ DataLabel32(AbstractMacroAssemblerType* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ AssemblerLabel label() const { return m_label; }
+
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // DataLabelCompact:
+ //
+ // A DataLabelCompact is used to refer to a location in the code containing a
+ // compact immediate to be patched after the code has been generated.
+ class DataLabelCompact {
+ template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabelCompact()
+ {
+ }
+
+ DataLabelCompact(AbstractMacroAssemblerType* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ DataLabelCompact(AssemblerLabel label)
+ : m_label(label)
+ {
+ }
+
+ AssemblerLabel label() const { return m_label; }
+
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // Call:
+ //
+ // A Call object is a reference to a call instruction that has been planted
+ // into the code buffer - it is typically used to link the call, setting the
+ // relative offset such that when executed it will call to the desired
+ // destination.
+ class Call {
+ template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ friend class AbstractMacroAssembler;
+
+ public:
+ enum Flags {
+ None = 0x0,
+ Linkable = 0x1,
+ Near = 0x2,
+ LinkableNear = 0x3,
+ };
+
+ Call()
+ : m_flags(None)
+ {
+ }
+
+ Call(AssemblerLabel jmp, Flags flags)
+ : m_label(jmp)
+ , m_flags(flags)
+ {
+ }
+
+ bool isFlagSet(Flags flag)
+ {
+ return m_flags & flag;
+ }
+
+ static Call fromTailJump(Jump jump)
+ {
+ return Call(jump.m_label, Linkable);
+ }
+
+ AssemblerLabel m_label;
+ private:
+ Flags m_flags;
+ };
+
+ // Jump:
+ //
+ // A jump object is a reference to a jump instruction that has been planted
+ // into the code buffer - it is typically used to link the jump, setting the
+ // relative offset such that when executed it will jump to the desired
+ // destination.
+ class Jump {
+ template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class Call;
+ friend struct DFG::OSRExit;
+ friend class LinkBuffer;
+ public:
+ Jump()
+ {
+ }
+
+#if CPU(ARM_THUMB2)
+        // FIXME: this information should be stored in the instruction stream, not in the Jump object.
+ Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
+ : m_label(jmp)
+ , m_type(type)
+ , m_condition(condition)
+ {
+ }
+#elif CPU(ARM64)
+ Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
+ : m_label(jmp)
+ , m_type(type)
+ , m_condition(condition)
+ {
+ }
+
+ Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
+ : m_label(jmp)
+ , m_type(type)
+ , m_condition(condition)
+ , m_is64Bit(is64Bit)
+ , m_compareRegister(compareRegister)
+ {
+ ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
+ }
+
+ Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
+ : m_label(jmp)
+ , m_type(type)
+ , m_condition(condition)
+ , m_bitNumber(bitNumber)
+ , m_compareRegister(compareRegister)
+ {
+ ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
+ }
+#elif CPU(SH4)
+ Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
+ : m_label(jmp)
+ , m_type(type)
+ {
+ }
+#else
+ Jump(AssemblerLabel jmp)
+ : m_label(jmp)
+ {
+ }
+#endif
+
+ Label label() const
+ {
+ Label result;
+ result.m_label = m_label;
+ return result;
+ }
+
+ void link(AbstractMacroAssemblerType* masm) const
+ {
+ masm->invalidateAllTempRegisters();
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
+#endif
+
+#if CPU(ARM_THUMB2)
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(ARM64)
+ if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
+ else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
+ else
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(SH4)
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
+#else
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
+#endif
+ }
+
+ void linkTo(Label label, AbstractMacroAssemblerType* masm) const
+ {
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
+#endif
+
+#if CPU(ARM_THUMB2)
+ masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#elif CPU(ARM64)
+ if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+ masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
+ else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+ masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
+ else
+ masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#else
+ masm->m_assembler.linkJump(m_label, label.m_label);
+#endif
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+
+ private:
+ AssemblerLabel m_label;
+#if CPU(ARM_THUMB2)
+ ARMv7Assembler::JumpType m_type;
+ ARMv7Assembler::Condition m_condition;
+#elif CPU(ARM64)
+ ARM64Assembler::JumpType m_type;
+ ARM64Assembler::Condition m_condition;
+ bool m_is64Bit;
+ unsigned m_bitNumber;
+ ARM64Assembler::RegisterID m_compareRegister;
+#endif
+#if CPU(SH4)
+ SH4Assembler::JumpType m_type;
+#endif
+ };
+
+ struct PatchableJump {
+ PatchableJump()
+ {
+ }
+
+ explicit PatchableJump(Jump jump)
+ : m_jump(jump)
+ {
+ }
+
+ operator Jump&() { return m_jump; }
+
+ Jump m_jump;
+ };
+
+ // JumpList:
+ //
+ // A JumpList is a set of Jump objects.
+ // All jumps in the set will be linked to the same destination.
+ class JumpList {
+ friend class LinkBuffer;
+
+ public:
+ typedef Vector<Jump, 2> JumpVector;
+
+ JumpList() { }
+
+ JumpList(Jump jump)
+ {
+ if (jump.isSet())
+ append(jump);
+ }
+
+ void link(AbstractMacroAssemblerType* masm)
+ {
+ size_t size = m_jumps.size();
+ for (size_t i = 0; i < size; ++i)
+ m_jumps[i].link(masm);
+ m_jumps.clear();
+ }
+
+ void linkTo(Label label, AbstractMacroAssemblerType* masm)
+ {
+ size_t size = m_jumps.size();
+ for (size_t i = 0; i < size; ++i)
+ m_jumps[i].linkTo(label, masm);
+ m_jumps.clear();
+ }
+
+ void append(Jump jump)
+ {
+ m_jumps.append(jump);
+ }
+
+ void append(const JumpList& other)
+ {
+ m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
+ }
+
+ bool empty()
+ {
+ return !m_jumps.size();
+ }
+
+ void clear()
+ {
+ m_jumps.clear();
+ }
+
+ const JumpVector& jumps() const { return m_jumps; }
+
+ private:
+ JumpVector m_jumps;
+ };
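+
+    // For illustration (not in the original source), the usual JumpList pattern
+    // gathers several failure branches and binds them to a single target:
+    //
+    //     JumpList slowCases;
+    //     slowCases.append(branch32(AboveOrEqual, indexReg, lengthReg));
+    //     slowCases.append(branchTest32(Zero, valueReg));
+    //     // ... fast path ...
+    //     slowCases.link(this); // every appended jump now targets this point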
+
+
+ // Section 3: Misc admin methods
+#if ENABLE(DFG_JIT)
+ Label labelIgnoringWatchpoints()
+ {
+ Label result;
+ result.m_label = m_assembler.labelIgnoringWatchpoints();
+ return result;
+ }
+#else
+ Label labelIgnoringWatchpoints()
+ {
+ return label();
+ }
+#endif
+
+ Label label()
+ {
+ return Label(this);
+ }
+
+ void padBeforePatch()
+ {
+ // Rely on the fact that asking for a label already does the padding.
+ (void)label();
+ }
+
+ Label watchpointLabel()
+ {
+ Label result;
+ result.m_label = m_assembler.labelForWatchpoint();
+ return result;
+ }
+
+ Label align()
+ {
+ m_assembler.align(16);
+ return Label(this);
+ }
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ class RegisterAllocationOffset {
+ public:
+ RegisterAllocationOffset(unsigned offset)
+ : m_offset(offset)
+ {
+ }
+
+ void checkOffsets(unsigned low, unsigned high)
+ {
+ RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
+ }
+
+ private:
+ unsigned m_offset;
+ };
+
+ void addRegisterAllocationAtOffset(unsigned offset)
+ {
+ m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
+ }
+
+ void clearRegisterAllocationOffsets()
+ {
+ m_registerAllocationForOffsets.clear();
+ }
+
+ void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
+ {
+ if (offset1 > offset2)
+ std::swap(offset1, offset2);
+
+ size_t size = m_registerAllocationForOffsets.size();
+ for (size_t i = 0; i < size; ++i)
+ m_registerAllocationForOffsets[i].checkOffsets(offset1, offset2);
+ }
+#endif
+
+ template<typename T, typename U>
+ static ptrdiff_t differenceBetween(T from, U to)
+ {
+ return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ }
+
+ static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
+ {
+ return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
+ }
+
+ unsigned debugOffset() { return m_assembler.debugOffset(); }
+
+ ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
+ {
+ AssemblerType::cacheFlush(code, size);
+ }
+
+#if ENABLE(MASM_PROBE)
+
+ struct CPUState {
+ #define DECLARE_REGISTER(_type, _regName) \
+ _type _regName;
+ FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+
+ static const char* registerName(RegisterID regID)
+ {
+ switch (regID) {
+ #define DECLARE_REGISTER(_type, _regName) \
+ case RegisterID::_regName: \
+ return #_regName;
+ FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ static const char* registerName(FPRegisterID regID)
+ {
+ switch (regID) {
+ #define DECLARE_REGISTER(_type, _regName) \
+ case FPRegisterID::_regName: \
+ return #_regName;
+ FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ void* registerValue(RegisterID regID)
+ {
+ switch (regID) {
+ #define DECLARE_REGISTER(_type, _regName) \
+ case RegisterID::_regName: \
+ return _regName;
+ FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ double registerValue(FPRegisterID regID)
+ {
+ switch (regID) {
+ #define DECLARE_REGISTER(_type, _regName) \
+ case FPRegisterID::_regName: \
+ return _regName;
+ FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ };
+
+ struct ProbeContext;
+ typedef void (*ProbeFunction)(struct ProbeContext*);
+
+ struct ProbeContext {
+ ProbeFunction probeFunction;
+ void* arg1;
+ void* arg2;
+ CPUState cpu;
+
+ void print(int indentation = 0)
+ {
+ #define INDENT MacroAssemblerType::printIndent(indentation)
+
+ INDENT, dataLogF("ProbeContext %p {\n", this);
+ indentation++;
+ {
+ INDENT, dataLogF("probeFunction: %p\n", probeFunction);
+ INDENT, dataLogF("arg1: %p %llu\n", arg1, reinterpret_cast<int64_t>(arg1));
+ INDENT, dataLogF("arg2: %p %llu\n", arg2, reinterpret_cast<int64_t>(arg2));
+ MacroAssemblerType::printCPU(cpu, indentation);
+ }
+ indentation--;
+ INDENT, dataLog("}\n");
+
+ #undef INDENT
+ }
+ };
+
+ static void printIndent(int indentation)
+ {
+ for (; indentation > 0; indentation--)
+ dataLog(" ");
+ }
+
+ static void printCPU(CPUState& cpu, int indentation = 0)
+ {
+ #define INDENT printIndent(indentation)
+
+ INDENT, dataLog("cpu: {\n");
+ MacroAssemblerType::printCPURegisters(cpu, indentation + 1);
+ INDENT, dataLog("}\n");
+
+ #undef INDENT
+ }
+
+ // This is a marker type only used with print(). See print() below for details.
+ struct AllRegisters { };
+
+ // Emits code which will print debugging info at runtime. The type of values that
+ // can be printed is encapsulated in the PrintArg struct below. Here are some
+ // examples:
+ //
+ // print("Hello world\n"); // Emits code to print the string.
+ //
+ // CodeBlock* cb = ...;
+ // print(cb); // Emits code to print the pointer value.
+ //
+ // RegisterID regID = ...;
+ // print(regID); // Emits code to print the register value (not the id).
+ //
+ // // Emits code to print all registers. Unlike other items, this prints
+ // // multiple lines as follows:
+ // // cpu {
+ // // eax: 0x123456789
+ // // ebx: 0x000000abc
+ // // ...
+ // // }
+ // print(AllRegisters());
+ //
+ // // Print multiple things at once. This incurs the probe overhead only once
+ // // to print all the items.
+ // print("cb:", cb, " regID:", regID, " cpu:\n", AllRegisters());
+
+ template<typename... Arguments>
+ void print(Arguments... args)
+ {
+ printInternal(static_cast<MacroAssemblerType*>(this), args...);
+ }
+
+ // This function will be called by printCPU() to print the contents of the
+ // target specific registers which are saved away in the CPUState struct.
+    // printCPURegisters() should make use of printIndent() to print the
+ // registers with the appropriate amount of indentation.
+ //
+ // Note: printCPURegisters() should be implemented by the target specific
+ // MacroAssembler. This prototype is only provided here to document the
+ // interface.
+
+ static void printCPURegisters(CPUState&, int indentation = 0);
+
+ // This function will be called by print() to print the contents of a
+ // specific register (from the CPUState) in line with other items in the
+ // print stream. Hence, no indentation is needed.
+ //
+ // Note: printRegister() should be implemented by the target specific
+ // MacroAssembler. These prototypes are only provided here to document their
+ // interface.
+
+ static void printRegister(CPUState&, RegisterID);
+ static void printRegister(CPUState&, FPRegisterID);
+
+ // This function emits code to preserve the CPUState (e.g. registers),
+ // call a user supplied probe function, and restore the CPUState before
+ // continuing with other JIT generated code.
+ //
+ // The user supplied probe function will be called with a single pointer to
+ // a ProbeContext struct (defined above) which contains, among other things,
+ // the preserved CPUState. This allows the user probe function to inspect
+ // the CPUState at that point in the JIT generated code.
+ //
+ // If the user probe function alters the register values in the ProbeContext,
+ // the altered values will be loaded into the CPU registers when the probe
+ // returns.
+ //
+ // The ProbeContext is stack allocated and is only valid for the duration
+ // of the call to the user probe function.
+ //
+ // Note: probe() should be implemented by the target specific MacroAssembler.
+ // This prototype is only provided here to document the interface.
+
+ void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
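+
+    // For illustration (not in the original source), a probe can inspect a
+    // register mid-stream without perturbing the surrounding JIT code (register
+    // names are per-target; r0 is used here as an ARM example):
+    //
+    //     static void dumpR0(ProbeContext* context)
+    //     {
+    //         dataLogF("r0 = %p\n", context->cpu.registerValue(ARMRegisters::r0));
+    //     }
+    //     ...
+    //     jit.probe(dumpR0); // arg1/arg2 default to 0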
+
+#endif // ENABLE(MASM_PROBE)
+
+ AssemblerType m_assembler;
+
+protected:
+ AbstractMacroAssembler()
+ : m_randomSource(cryptographicallyRandomNumber())
+ {
+ invalidateAllTempRegisters();
+ }
+
+ uint32_t random()
+ {
+ return m_randomSource.getUint32();
+ }
+
+ WeakRandom m_randomSource;
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
+#endif
+
+ static bool haveScratchRegisterForBlinding()
+ {
+ return false;
+ }
+ static RegisterID scratchRegisterForBlinding()
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return firstRegister();
+ }
+ static bool canBlind() { return false; }
+ static bool shouldBlindForSpecificArch(uint32_t) { return false; }
+ static bool shouldBlindForSpecificArch(uint64_t) { return false; }
+
+ class CachedTempRegister {
+ friend class DataLabelPtr;
+ friend class DataLabel32;
+ friend class DataLabelCompact;
+ friend class Jump;
+ friend class Label;
+
+ public:
+ CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID)
+ : m_masm(masm)
+ , m_registerID(registerID)
+ , m_value(0)
+ , m_validBit(1 << static_cast<unsigned>(registerID))
+ {
+ ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
+ }
+
+ ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }
+
+ ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }
+
+ bool value(intptr_t& value)
+ {
+ value = m_value;
+ return m_masm->isTempRegisterValid(m_validBit);
+ }
+
+ void setValue(intptr_t value)
+ {
+ m_value = value;
+ m_masm->setTempRegisterValid(m_validBit);
+ }
+
+ ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }
+
+ private:
+ AbstractMacroAssemblerType* m_masm;
+ RegisterID m_registerID;
+ intptr_t m_value;
+ unsigned m_validBit;
+ };
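+
+    // Editor's note (not in the original source): CachedTempRegister lets a
+    // concrete MacroAssembler remember the constant last materialized into a
+    // scratch register. A sketch of the intended use (names illustrative):
+    //
+    //     intptr_t cached;
+    //     if (m_cachedMemoryTempRegister.value(cached) && cached == wanted)
+    //         return m_cachedMemoryTempRegister.registerIDNoInvalidate();
+    //     move(TrustedImmPtr(reinterpret_cast<void*>(wanted)), reg);
+    //     m_cachedMemoryTempRegister.setValue(wanted);
+    //
+    // Labels and jump links call invalidateAllTempRegisters() because control
+    // flow can join from paths where the cached value no longer holds.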
+
+ ALWAYS_INLINE void invalidateAllTempRegisters()
+ {
+ m_tempRegistersValidBits = 0;
+ }
+
+ ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
+ {
+ return (m_tempRegistersValidBits & registerMask);
+ }
+
+ ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
+ {
+ m_tempRegistersValidBits &= ~registerMask;
+ }
+
+ ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
+ {
+ m_tempRegistersValidBits |= registerMask;
+ }
+
+ unsigned m_tempRegistersValidBits;
+
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkJump(void* code, Jump jump, CodeLocationLabel target)
+ {
+ AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
+ }
+
+ static void linkPointer(void* code, AssemblerLabel label, void* value)
+ {
+ AssemblerType::linkPointer(code, label, value);
+ }
+
+ static void* getLinkerAddress(void* code, AssemblerLabel label)
+ {
+ return AssemblerType::getRelocatedAddress(code, label);
+ }
+
+ static unsigned getLinkerCallReturnOffset(Call call)
+ {
+ return AssemblerType::getCallReturnOffset(call.m_label);
+ }
+
+ static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
+ }
+
+ static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+ {
+ AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
+ }
+
+ static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
+ }
+
+ static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
+ }
+
+ static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
+ {
+ return AssemblerType::readPointer(dataLabelPtr.dataLocation());
+ }
+
+ static void replaceWithLoad(CodeLocationConvertibleLoad label)
+ {
+ AssemblerType::replaceWithLoad(label.dataLocation());
+ }
+
+ static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
+ {
+ AssemblerType::replaceWithAddressComputation(label.dataLocation());
+ }
+
+private:
+
+#if ENABLE(MASM_PROBE)
+
+ struct PrintArg {
+
+ enum class Type {
+ AllRegisters,
+ RegisterID,
+ FPRegisterID,
+ ConstCharPtr,
+ ConstVoidPtr,
+ IntptrValue,
+ UintptrValue,
+ };
+
+ PrintArg(AllRegisters&)
+ : type(Type::AllRegisters)
+ {
+ }
+
+ PrintArg(RegisterID regID)
+ : type(Type::RegisterID)
+ {
+ u.gpRegisterID = regID;
+ }
+
+ PrintArg(FPRegisterID regID)
+ : type(Type::FPRegisterID)
+ {
+ u.fpRegisterID = regID;
+ }
+
+ PrintArg(const char* ptr)
+ : type(Type::ConstCharPtr)
+ {
+ u.constCharPtr = ptr;
+ }
+
+ PrintArg(const void* ptr)
+ : type(Type::ConstVoidPtr)
+ {
+ u.constVoidPtr = ptr;
+ }
+
+ PrintArg(int value)
+ : type(Type::IntptrValue)
+ {
+ u.intptrValue = value;
+ }
+
+ PrintArg(unsigned value)
+ : type(Type::UintptrValue)
+ {
+            u.uintptrValue = value;
+ }
+
+ PrintArg(intptr_t value)
+ : type(Type::IntptrValue)
+ {
+ u.intptrValue = value;
+ }
+
+ PrintArg(uintptr_t value)
+ : type(Type::UintptrValue)
+ {
+ u.uintptrValue = value;
+ }
+
+ Type type;
+ union {
+ RegisterID gpRegisterID;
+ FPRegisterID fpRegisterID;
+ const char* constCharPtr;
+ const void* constVoidPtr;
+ intptr_t intptrValue;
+ uintptr_t uintptrValue;
+ } u;
+ };
+
+ typedef Vector<PrintArg> PrintArgsList;
+
+ template<typename FirstArg, typename... Arguments>
+ static void appendPrintArg(PrintArgsList* argsList, FirstArg& firstArg, Arguments... otherArgs)
+ {
+ argsList->append(PrintArg(firstArg));
+ appendPrintArg(argsList, otherArgs...);
+ }
+
+ static void appendPrintArg(PrintArgsList*) { }
+
+
+ template<typename... Arguments>
+ static void printInternal(MacroAssemblerType* masm, Arguments... args)
+ {
+ auto argsList = std::make_unique<PrintArgsList>();
+ appendPrintArg(argsList.get(), args...);
+ masm->probe(printCallback, argsList.release());
+ }
+
+ static void printCallback(ProbeContext* context)
+ {
+ typedef PrintArg Arg;
+ PrintArgsList& argsList =
+ *reinterpret_cast<PrintArgsList*>(context->arg1);
+ for (size_t i = 0; i < argsList.size(); i++) {
+ auto& arg = argsList[i];
+ switch (arg.type) {
+ case Arg::Type::AllRegisters:
+ MacroAssemblerType::printCPU(context->cpu);
+ break;
+ case Arg::Type::RegisterID:
+ MacroAssemblerType::printRegister(context->cpu, arg.u.gpRegisterID);
+ break;
+ case Arg::Type::FPRegisterID:
+ MacroAssemblerType::printRegister(context->cpu, arg.u.fpRegisterID);
+ break;
+ case Arg::Type::ConstCharPtr:
+ dataLog(arg.u.constCharPtr);
+ break;
+ case Arg::Type::ConstVoidPtr:
+ dataLogF("%p", arg.u.constVoidPtr);
+ break;
+ case Arg::Type::IntptrValue:
+ dataLog(arg.u.intptrValue);
+ break;
+ case Arg::Type::UintptrValue:
+ dataLog(arg.u.uintptrValue);
+ break;
+ }
+ }
+ }
+
+#endif // ENABLE(MASM_PROBE)
+
+}; // class AbstractMacroAssembler
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AbstractMacroAssembler_h
diff --git a/Source/JavaScriptCore/assembler/AssemblerBuffer.h b/Source/JavaScriptCore/assembler/AssemblerBuffer.h
new file mode 100644
index 000000000..3632a5b6e
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/AssemblerBuffer.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBuffer_h
+#define AssemblerBuffer_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "ExecutableAllocator.h"
+#include "JITCompilationEffort.h"
+#include "stdint.h"
+#include <string.h>
+#include <wtf/Assertions.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/StdLibExtras.h>
+
+namespace JSC {
+
+ struct AssemblerLabel {
+ AssemblerLabel()
+ : m_offset(std::numeric_limits<uint32_t>::max())
+ {
+ }
+
+ explicit AssemblerLabel(uint32_t offset)
+ : m_offset(offset)
+ {
+ }
+
+ bool isSet() const { return (m_offset != std::numeric_limits<uint32_t>::max()); }
+
+ AssemblerLabel labelAtOffset(int offset) const
+ {
+ return AssemblerLabel(m_offset + offset);
+ }
+
+ uint32_t m_offset;
+ };
+
+ class AssemblerData {
+ public:
+ AssemblerData()
+ : m_buffer(nullptr)
+ , m_capacity(0)
+ {
+ }
+
+ AssemblerData(unsigned initialCapacity)
+ {
+ m_capacity = initialCapacity;
+ m_buffer = static_cast<char*>(fastMalloc(m_capacity));
+ }
+
+ AssemblerData(AssemblerData&& other)
+ {
+ m_buffer = other.m_buffer;
+ other.m_buffer = nullptr;
+ m_capacity = other.m_capacity;
+ other.m_capacity = 0;
+ }
+
+ AssemblerData& operator=(AssemblerData&& other)
+ {
+ m_buffer = other.m_buffer;
+ other.m_buffer = nullptr;
+ m_capacity = other.m_capacity;
+ other.m_capacity = 0;
+ return *this;
+ }
+
+ ~AssemblerData()
+ {
+ fastFree(m_buffer);
+ }
+
+ char* buffer() const { return m_buffer; }
+
+ unsigned capacity() const { return m_capacity; }
+
+ void grow(unsigned extraCapacity = 0)
+ {
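+            // Grow geometrically (1.5x the current capacity), plus any extra
+            // capacity the caller requests up front.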
+ m_capacity = m_capacity + m_capacity / 2 + extraCapacity;
+ m_buffer = static_cast<char*>(fastRealloc(m_buffer, m_capacity));
+ }
+
+ private:
+ char* m_buffer;
+ unsigned m_capacity;
+ };
+
+ class AssemblerBuffer {
+ static const int initialCapacity = 128;
+ public:
+ AssemblerBuffer()
+ : m_storage(initialCapacity)
+ , m_index(0)
+ {
+ }
+
+ bool isAvailable(int space)
+ {
+ return m_index + space <= m_storage.capacity();
+ }
+
+ void ensureSpace(int space)
+ {
+ if (!isAvailable(space))
+ grow();
+ }
+
+ bool isAligned(int alignment) const
+ {
+ return !(m_index & (alignment - 1));
+ }
+
+ void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); }
+ void putByte(int8_t value) { putIntegral(value); }
+ void putShortUnchecked(int16_t value) { putIntegralUnchecked(value); }
+ void putShort(int16_t value) { putIntegral(value); }
+ void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); }
+ void putInt(int32_t value) { putIntegral(value); }
+ void putInt64Unchecked(int64_t value) { putIntegralUnchecked(value); }
+ void putInt64(int64_t value) { putIntegral(value); }
+
+ void* data() const
+ {
+ return m_storage.buffer();
+ }
+
+ size_t codeSize() const
+ {
+ return m_index;
+ }
+
+ AssemblerLabel label() const
+ {
+ return AssemblerLabel(m_index);
+ }
+
+ unsigned debugOffset() { return m_index; }
+
+ AssemblerData releaseAssemblerData() { return WTF::move(m_storage); }
+
+ protected:
+ template<typename IntegralType>
+ void putIntegral(IntegralType value)
+ {
+ unsigned nextIndex = m_index + sizeof(IntegralType);
+ if (UNLIKELY(nextIndex > m_storage.capacity()))
+ grow();
+ ASSERT(isAvailable(sizeof(IntegralType)));
+ *reinterpret_cast_ptr<IntegralType*>(m_storage.buffer() + m_index) = value;
+ m_index = nextIndex;
+ }
+
+ template<typename IntegralType>
+ void putIntegralUnchecked(IntegralType value)
+ {
+ ASSERT(isAvailable(sizeof(IntegralType)));
+ *reinterpret_cast_ptr<IntegralType*>(m_storage.buffer() + m_index) = value;
+ m_index += sizeof(IntegralType);
+ }
+
+ void append(const char* data, int size)
+ {
+ if (!isAvailable(size))
+ grow(size);
+
+ memcpy(m_storage.buffer() + m_index, data, size);
+ m_index += size;
+ }
+
+ void grow(int extraCapacity = 0)
+ {
+ m_storage.grow(extraCapacity);
+ }
+
+ private:
+ AssemblerData m_storage;
+ unsigned m_index;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBuffer_h
diff --git a/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h b/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
new file mode 100644
index 000000000..053884b01
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBufferWithConstantPool_h
+#define AssemblerBufferWithConstantPool_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "AssemblerBuffer.h"
+#include <wtf/SegmentedVector.h>
+
+#define ASSEMBLER_HAS_CONSTANT_POOL 1
+
+namespace JSC {
+
+/*
+   A constant pool can store 4- or 8-byte data items. The values can be
+   constants or addresses. The addresses should be 32 or 64 bits wide. The
+   constants should be double-precision floats or integers that would be
+   expensive to encode as a short sequence of machine instructions.
+
+   TODO: The pool is designed to handle both 32- and 64-bit values, but
+   currently only 4-byte constants are implemented and tested.
+
+   The AssemblerBuffer can contain multiple constant pools. Each pool is
+   inserted into the instruction stream, protected from the execution flow
+   by a jump instruction.
+
+   The flush mechanism is called when no space remains to insert the next
+   instruction into the pool. Three values are used to determine when the
+   constant pool itself has to be inserted into the instruction stream
+   (AssemblerBuffer):
+
+   - maxPoolSize: size of the constant pool in bytes; this value cannot be
+     larger than the maximum offset of a PC-relative memory load
+
+   - barrierSize: size in bytes of the jump instruction which protects the
+     constant pool from execution
+
+   - maxInstructionSize: maximum length of a machine instruction in bytes
+
+   A few callbacks handle the target-architecture-specific address
+   handling:
+
+   - TYPE patchConstantPoolLoad(TYPE load, int value):
+     patch the 'load' instruction with the index of the constant in the
+     constant pool and return the patched instruction.
+
+   - void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr):
+     patch the PC-relative load instruction at address 'loadAddr' with the
+     final relative offset. The offset can be computed from 'constPoolAddr'
+     (the address of the constant pool) and the index of the constant
+     (which was previously stored in the load instruction itself).
+
+   - TYPE placeConstantPoolBarrier(int size):
+     return a constant pool barrier instruction which jumps over the
+     constant pool.
+
+   The 'put*WithConstant*' functions should be used to place data into the
+   constant pool.
+*/
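+
+/*
+   A minimal usage sketch (illustrative only; 'MyAssembler' and the opcode
+   value are hypothetical, not defined in this file):
+
+     AssemblerBufferWithConstantPool<2048, 4, 4, MyAssembler> buffer;
+     // Emit an instruction whose 32-bit constant is placed in the pool; the
+     // pool is flushed automatically before it can go out of load range.
+     buffer.putIntWithConstantInt(LOAD_FROM_POOL_OPCODE, 0xdeadbeef, true);
+     // After an unconditional jump, a barrier-free flush is cheap:
+     buffer.flushWithoutBarrier();
+*/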
+
+template <int maxPoolSize, int barrierSize, int maxInstructionSize, class AssemblerType>
+class AssemblerBufferWithConstantPool : public AssemblerBuffer {
+ typedef SegmentedVector<uint32_t, 512> LoadOffsets;
+ using AssemblerBuffer::putIntegral;
+ using AssemblerBuffer::putIntegralUnchecked;
+public:
+ typedef struct {
+ short high;
+ short low;
+ } TwoShorts;
+
+ enum {
+ UniqueConst,
+ ReusableConst,
+ UnusedEntry,
+ };
+
+ AssemblerBufferWithConstantPool()
+ : AssemblerBuffer()
+ , m_numConsts(0)
+ , m_maxDistance(maxPoolSize)
+ , m_lastConstDelta(0)
+ {
+ m_pool = static_cast<uint32_t*>(fastMalloc(maxPoolSize));
+ m_mask = static_cast<char*>(fastMalloc(maxPoolSize / sizeof(uint32_t)));
+ }
+
+ ~AssemblerBufferWithConstantPool()
+ {
+ fastFree(m_mask);
+ fastFree(m_pool);
+ }
+
+ void ensureSpace(int space)
+ {
+ flushIfNoSpaceFor(space);
+ AssemblerBuffer::ensureSpace(space);
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ flushIfNoSpaceFor(insnSpace, constSpace);
+ AssemblerBuffer::ensureSpace(insnSpace);
+ }
+
+ void ensureSpaceForAnyInstruction(int amount = 1)
+ {
+ flushIfNoSpaceFor(amount * maxInstructionSize, amount * sizeof(uint64_t));
+ }
+
+ bool isAligned(int alignment)
+ {
+ flushIfNoSpaceFor(alignment);
+ return AssemblerBuffer::isAligned(alignment);
+ }
+
+ void putByteUnchecked(int value)
+ {
+ AssemblerBuffer::putByteUnchecked(value);
+ correctDeltas(1);
+ }
+
+ void putByte(int value)
+ {
+ flushIfNoSpaceFor(1);
+ AssemblerBuffer::putByte(value);
+ correctDeltas(1);
+ }
+
+ void putShortUnchecked(int value)
+ {
+ AssemblerBuffer::putShortUnchecked(value);
+ correctDeltas(2);
+ }
+
+ void putShort(int value)
+ {
+ flushIfNoSpaceFor(2);
+ AssemblerBuffer::putShort(value);
+ correctDeltas(2);
+ }
+
+ void putIntUnchecked(int value)
+ {
+ AssemblerBuffer::putIntUnchecked(value);
+ correctDeltas(4);
+ }
+
+ void putInt(int value)
+ {
+ flushIfNoSpaceFor(4);
+ AssemblerBuffer::putInt(value);
+ correctDeltas(4);
+ }
+
+ void putInt64Unchecked(int64_t value)
+ {
+ AssemblerBuffer::putInt64Unchecked(value);
+ correctDeltas(8);
+ }
+
+ void putIntegral(TwoShorts value)
+ {
+ putIntegral(value.high);
+ putIntegral(value.low);
+ }
+
+ void putIntegralUnchecked(TwoShorts value)
+ {
+ putIntegralUnchecked(value.high);
+ putIntegralUnchecked(value.low);
+ }
+
+ void putShortWithConstantInt(uint16_t insn, uint32_t constant, bool isReusable = false)
+ {
+ putIntegralWithConstantInt(insn, constant, isReusable);
+ }
+
+ void putIntWithConstantInt(uint32_t insn, uint32_t constant, bool isReusable = false)
+ {
+ putIntegralWithConstantInt(insn, constant, isReusable);
+ }
+
+    // This flushing mechanism can be called after any unconditional jump.
+    void flushWithoutBarrier(bool isForced = false)
+    {
+        // Flush if the constant pool is more than 60% full, to avoid overuse of
+        // this function. (5 * m_numConsts > 3 * capacity, where capacity is
+        // maxPoolSize / sizeof(uint32_t), is equivalent to m_numConsts being
+        // over 60% of capacity.)
+ if (isForced || 5 * static_cast<uint32_t>(m_numConsts) > 3 * maxPoolSize / sizeof(uint32_t))
+ flushConstantPool(false);
+ }
+
+ uint32_t* poolAddress()
+ {
+ return m_pool;
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_numConsts;
+ }
+
+ void flushConstantPool(bool useBarrier = true)
+ {
+ if (!m_numConsts)
+ return;
+ int alignPool = (codeSize() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
+
+ if (alignPool)
+ alignPool = sizeof(uint64_t) - alignPool;
+
+ // Callback to protect the constant pool from execution
+ if (useBarrier)
+ putIntegral(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool));
+
+ if (alignPool) {
+ if (alignPool & 1)
+ AssemblerBuffer::putByte(AssemblerType::padForAlign8);
+ if (alignPool & 2)
+ AssemblerBuffer::putShort(AssemblerType::padForAlign16);
+ if (alignPool & 4)
+ AssemblerBuffer::putInt(AssemblerType::padForAlign32);
+ }
+
+ int constPoolOffset = codeSize();
+ append(reinterpret_cast<char*>(m_pool), m_numConsts * sizeof(uint32_t));
+
+ // Patch each PC relative load
+ for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) {
+ void* loadAddr = reinterpret_cast<char*>(data()) + *iter;
+ AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<char*>(data()) + constPoolOffset);
+ }
+
+ m_loadOffsets.clear();
+ m_numConsts = 0;
+ }
+
+private:
+ void correctDeltas(int insnSize)
+ {
+ m_maxDistance -= insnSize;
+ m_lastConstDelta -= insnSize;
+ if (m_lastConstDelta < 0)
+ m_lastConstDelta = 0;
+ }
+
+ void correctDeltas(int insnSize, int constSize)
+ {
+ correctDeltas(insnSize);
+
+ m_maxDistance -= m_lastConstDelta;
+ m_lastConstDelta = constSize;
+ }
+
+ template<typename IntegralType>
+ void putIntegralWithConstantInt(IntegralType insn, uint32_t constant, bool isReusable)
+ {
+ if (!m_numConsts)
+ m_maxDistance = maxPoolSize;
+ flushIfNoSpaceFor(sizeof(IntegralType), 4);
+
+ m_loadOffsets.append(codeSize());
+ if (isReusable) {
+ for (int i = 0; i < m_numConsts; ++i) {
+ if (m_mask[i] == ReusableConst && m_pool[i] == constant) {
+ putIntegral(static_cast<IntegralType>(AssemblerType::patchConstantPoolLoad(insn, i)));
+ correctDeltas(sizeof(IntegralType));
+ return;
+ }
+ }
+ }
+
+ m_pool[m_numConsts] = constant;
+ m_mask[m_numConsts] = static_cast<char>(isReusable ? ReusableConst : UniqueConst);
+
+ putIntegral(static_cast<IntegralType>(AssemblerType::patchConstantPoolLoad(insn, m_numConsts)));
+ ++m_numConsts;
+
+ correctDeltas(sizeof(IntegralType), 4);
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ int lastConstDelta = m_lastConstDelta > nextInsnSize ? m_lastConstDelta - nextInsnSize : 0;
+ if ((m_maxDistance < nextInsnSize + lastConstDelta + barrierSize + (int)sizeof(uint32_t)))
+ flushConstantPool();
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ if ((m_maxDistance < nextInsnSize + m_lastConstDelta + nextConstSize + barrierSize + (int)sizeof(uint32_t)) ||
+ (m_numConsts * sizeof(uint32_t) + nextConstSize >= maxPoolSize))
+ flushConstantPool();
+ }
+
+ uint32_t* m_pool;
+ char* m_mask;
+ LoadOffsets m_loadOffsets;
+
+ int m_numConsts;
+ int m_maxDistance;
+ int m_lastConstDelta;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBufferWithConstantPool_h
diff --git a/Source/JavaScriptCore/assembler/CodeLocation.h b/Source/JavaScriptCore/assembler/CodeLocation.h
new file mode 100644
index 000000000..86d1f2b75
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/CodeLocation.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CodeLocation_h
+#define CodeLocation_h
+
+#include "MacroAssemblerCodeRef.h"
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+class CodeLocationInstruction;
+class CodeLocationLabel;
+class CodeLocationJump;
+class CodeLocationCall;
+class CodeLocationNearCall;
+class CodeLocationDataLabelCompact;
+class CodeLocationDataLabel32;
+class CodeLocationDataLabelPtr;
+class CodeLocationConvertibleLoad;
+
+// The CodeLocation* types are all pretty much do-nothing wrappers around
+// CodePtr (or MacroAssemblerCodePtr, to give it its full name). These
+// classes only exist to provide type-safety when linking and patching code.
+//
+// The one new piece of functionality introduced by these classes is the
+// ability to create (or put another way, to re-discover) another CodeLocation
+// at an offset from one you already know. When patching code to optimize it
+// we often want to patch a number of instructions that are short, fixed
+// offsets apart. To reduce memory overhead we will only retain a pointer to
+// one of the instructions, and we will use the *AtOffset methods provided by
+// CodeLocationCommon to find the other points in the code to modify.
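+//
+// For example (a sketch; the offset and the retained call are assumed to come
+// from link time), a data label at a known fixed offset before a call can be
+// re-discovered from the call's location alone:
+//
+//     CodeLocationCall call = ...; // the one location we retained
+//     CodeLocationDataLabelPtr ptr = call.dataLabelPtrAtOffset(-16);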
+class CodeLocationCommon : public MacroAssemblerCodePtr {
+public:
+ CodeLocationInstruction instructionAtOffset(int offset);
+ CodeLocationLabel labelAtOffset(int offset);
+ CodeLocationJump jumpAtOffset(int offset);
+ CodeLocationCall callAtOffset(int offset);
+ CodeLocationNearCall nearCallAtOffset(int offset);
+ CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
+ CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
+ CodeLocationDataLabelCompact dataLabelCompactAtOffset(int offset);
+ CodeLocationConvertibleLoad convertibleLoadAtOffset(int offset);
+
+protected:
+ CodeLocationCommon()
+ {
+ }
+
+ CodeLocationCommon(MacroAssemblerCodePtr location)
+ : MacroAssemblerCodePtr(location)
+ {
+ }
+};
+
+class CodeLocationInstruction : public CodeLocationCommon {
+public:
+ CodeLocationInstruction() {}
+ explicit CodeLocationInstruction(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationInstruction(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationLabel : public CodeLocationCommon {
+public:
+ CodeLocationLabel() {}
+ explicit CodeLocationLabel(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationLabel(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationJump : public CodeLocationCommon {
+public:
+ CodeLocationJump() {}
+ explicit CodeLocationJump(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationJump(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationCall : public CodeLocationCommon {
+public:
+ CodeLocationCall() {}
+ explicit CodeLocationCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationNearCall : public CodeLocationCommon {
+public:
+ CodeLocationNearCall() {}
+ explicit CodeLocationNearCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationNearCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabel32 : public CodeLocationCommon {
+public:
+ CodeLocationDataLabel32() {}
+ explicit CodeLocationDataLabel32(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabel32(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabelCompact : public CodeLocationCommon {
+public:
+ CodeLocationDataLabelCompact() { }
+ explicit CodeLocationDataLabelCompact(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) { }
+ explicit CodeLocationDataLabelCompact(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) { }
+};
+
+class CodeLocationDataLabelPtr : public CodeLocationCommon {
+public:
+ CodeLocationDataLabelPtr() {}
+ explicit CodeLocationDataLabelPtr(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabelPtr(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationConvertibleLoad : public CodeLocationCommon {
+public:
+ CodeLocationConvertibleLoad() { }
+ explicit CodeLocationConvertibleLoad(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) { }
+ explicit CodeLocationConvertibleLoad(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) { }
+};
+
+inline CodeLocationInstruction CodeLocationCommon::instructionAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationInstruction(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationLabel CodeLocationCommon::labelAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationLabel(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationJump CodeLocationCommon::jumpAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationJump(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabelPtr(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabel32 CodeLocationCommon::dataLabel32AtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabel32(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabelCompact CodeLocationCommon::dataLabelCompactAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabelCompact(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationConvertibleLoad CodeLocationCommon::convertibleLoadAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationConvertibleLoad(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // CodeLocation_h
diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.cpp b/Source/JavaScriptCore/assembler/LinkBuffer.cpp
new file mode 100644
index 000000000..d53ef451b
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/LinkBuffer.cpp
@@ -0,0 +1,311 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LinkBuffer.h"
+
+#if ENABLE(ASSEMBLER)
+
+#include "CodeBlock.h"
+#include "JITCode.h"
+#include "JSCInlines.h"
+#include "Options.h"
+#include "VM.h"
+#include <wtf/CompilationThread.h>
+
+namespace JSC {
+
+bool shouldShowDisassemblyFor(CodeBlock* codeBlock)
+{
+ if (JITCode::isOptimizingJIT(codeBlock->jitType()) && Options::showDFGDisassembly())
+ return true;
+ return Options::showDisassembly();
+}
+
+LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly()
+{
+ performFinalization();
+
+ ASSERT(m_didAllocate);
+ if (m_executableMemory)
+ return CodeRef(m_executableMemory);
+
+ return CodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(m_code));
+}
+
+LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...)
+{
+ CodeRef result = finalizeCodeWithoutDisassembly();
+
+ if (m_alreadyDisassembled)
+ return result;
+
+ StringPrintStream out;
+ out.printf("Generated JIT code for ");
+ va_list argList;
+ va_start(argList, format);
+ out.vprintf(format, argList);
+ va_end(argList);
+ out.printf(":\n");
+
+ out.printf(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size());
+
+ CString header = out.toCString();
+
+ if (Options::asyncDisassembly()) {
+ disassembleAsynchronously(header, result, m_size, " ");
+ return result;
+ }
+
+ dataLog(header);
+ disassemble(result.code(), m_size, " ", WTF::dataFile());
+
+ return result;
+}
+
+#if ENABLE(BRANCH_COMPACTION)
+static ALWAYS_INLINE void recordLinkOffsets(AssemblerData& assemblerData, int32_t regionStart, int32_t regionEnd, int32_t offset)
+{
+ int32_t ptr = regionStart / sizeof(int32_t);
+ const int32_t end = regionEnd / sizeof(int32_t);
+ int32_t* offsets = reinterpret_cast<int32_t*>(assemblerData.buffer());
+ while (ptr < end)
+ offsets[ptr++] = offset;
+}
+
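+// Branch compaction copies the assembled code to its final location while
+// shrinking every jump that admits a shorter encoding. It works in two passes:
+// the first copies the instruction regions between jumps, picks a (possibly
+// compacted) encoding for each jump, and records per region how far the code
+// has slid; the second links each jump using the recorded offsets.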
+template <typename InstructionType>
+void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
+{
+ m_initialSize = macroAssembler.m_assembler.codeSize();
+ allocate(m_initialSize, ownerUID, effort);
+ if (didFailToAllocate())
+ return;
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = macroAssembler.jumpsToLink();
+ m_assemblerStorage = macroAssembler.m_assembler.buffer().releaseAssemblerData();
+ uint8_t* inData = reinterpret_cast<uint8_t*>(m_assemblerStorage.buffer());
+ uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
+ int readPtr = 0;
+ int writePtr = 0;
+ unsigned jumpCount = jumpsToLink.size();
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ int offset = readPtr - writePtr;
+ ASSERT(!(offset & 1));
+
+ // Copy the instructions from the last jump to the current one.
+ size_t regionSize = jumpsToLink[i].from() - readPtr;
+ InstructionType* copySource = reinterpret_cast_ptr<InstructionType*>(inData + readPtr);
+ InstructionType* copyEnd = reinterpret_cast_ptr<InstructionType*>(inData + readPtr + regionSize);
+ InstructionType* copyDst = reinterpret_cast_ptr<InstructionType*>(outData + writePtr);
+ ASSERT(!(regionSize % 2));
+ ASSERT(!(readPtr % 2));
+ ASSERT(!(writePtr % 2));
+ while (copySource != copyEnd)
+ *copyDst++ = *copySource++;
+ recordLinkOffsets(m_assemblerStorage, readPtr, jumpsToLink[i].from(), offset);
+ readPtr += regionSize;
+ writePtr += regionSize;
+
+        // Calculate the absolute address of the jump target. For backwards
+        // branches we need to be precise; for forward branches we can be pessimistic.
+ const uint8_t* target;
+ if (jumpsToLink[i].to() >= jumpsToLink[i].from())
+ target = outData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
+ else
+ target = outData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());
+
+ JumpLinkType jumpLinkType = MacroAssembler::computeJumpType(jumpsToLink[i], outData + writePtr, target);
+ // Compact branch if we can...
+ if (MacroAssembler::canCompact(jumpsToLink[i].type())) {
+ // Step back in the write stream
+ int32_t delta = MacroAssembler::jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
+ if (delta) {
+ writePtr -= delta;
+ recordLinkOffsets(m_assemblerStorage, jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+ }
+ }
+ jumpsToLink[i].setFrom(writePtr);
+ }
+ // Copy everything after the last jump
+ memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr);
+ recordLinkOffsets(m_assemblerStorage, readPtr, m_initialSize, readPtr - writePtr);
+
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ uint8_t* location = outData + jumpsToLink[i].from();
+ uint8_t* target = outData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());
+ MacroAssembler::link(jumpsToLink[i], location, target);
+ }
+
+ jumpsToLink.clear();
+ shrink(writePtr + m_initialSize - readPtr);
+
+#if DUMP_LINK_STATISTICS
+ dumpLinkStatistics(m_code, m_initialSize, m_size);
+#endif
+#if DUMP_CODE
+ dumpCode(m_code, m_size);
+#endif
+}
+#endif
+
+
+void LinkBuffer::linkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
+{
+#if !ENABLE(BRANCH_COMPACTION)
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+ macroAssembler.m_assembler.buffer().flushConstantPool(false);
+#endif
+ AssemblerBuffer& buffer = macroAssembler.m_assembler.buffer();
+ allocate(buffer.codeSize(), ownerUID, effort);
+ if (!m_didAllocate)
+ return;
+ ASSERT(m_code);
+#if CPU(ARM_TRADITIONAL)
+ macroAssembler.m_assembler.prepareExecutableCopy(m_code);
+#endif
+ memcpy(m_code, buffer.data(), buffer.codeSize());
+#if CPU(MIPS)
+ macroAssembler.m_assembler.relocateJumps(buffer.data(), m_code);
+#endif
+#elif CPU(ARM_THUMB2)
+ copyCompactAndLinkCode<uint16_t>(macroAssembler, ownerUID, effort);
+#elif CPU(ARM64)
+ copyCompactAndLinkCode<uint32_t>(macroAssembler, ownerUID, effort);
+#endif
+}
+
+void LinkBuffer::allocate(size_t initialSize, void* ownerUID, JITCompilationEffort effort)
+{
+ if (m_code) {
+ if (initialSize > m_size)
+ return;
+
+ m_didAllocate = true;
+ m_size = initialSize;
+ return;
+ }
+
+ m_executableMemory = m_vm->executableAllocator.allocate(*m_vm, initialSize, ownerUID, effort);
+ if (!m_executableMemory)
+ return;
+ ExecutableAllocator::makeWritable(m_executableMemory->start(), m_executableMemory->sizeInBytes());
+ m_code = m_executableMemory->start();
+ m_size = initialSize;
+ m_didAllocate = true;
+}
+
+void LinkBuffer::shrink(size_t newSize)
+{
+ if (!m_executableMemory)
+ return;
+ m_size = newSize;
+ m_executableMemory->shrink(m_size);
+}
+
+void LinkBuffer::performFinalization()
+{
+#ifndef NDEBUG
+ ASSERT(!isCompilationThread());
+ ASSERT(!m_completed);
+ ASSERT(isValid());
+ m_completed = true;
+#endif
+
+#if ENABLE(BRANCH_COMPACTION)
+ ExecutableAllocator::makeExecutable(code(), m_initialSize);
+#else
+ ExecutableAllocator::makeExecutable(code(), m_size);
+#endif
+ MacroAssembler::cacheFlush(code(), m_size);
+}
+
+#if DUMP_LINK_STATISTICS
+void LinkBuffer::dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize)
+{
+ static unsigned linkCount = 0;
+ static unsigned totalInitialSize = 0;
+ static unsigned totalFinalSize = 0;
+ linkCount++;
+ totalInitialSize += initialSize;
+ totalFinalSize += finalSize;
+ dataLogF("link %p: orig %u, compact %u (delta %u, %.2f%%)\n",
+ code, static_cast<unsigned>(initialSize), static_cast<unsigned>(finalSize),
+ static_cast<unsigned>(initialSize - finalSize),
+ 100.0 * (initialSize - finalSize) / initialSize);
+ dataLogF("\ttotal %u: orig %u, compact %u (delta %u, %.2f%%)\n",
+ linkCount, totalInitialSize, totalFinalSize, totalInitialSize - totalFinalSize,
+ 100.0 * (totalInitialSize - totalFinalSize) / totalInitialSize);
+}
+#endif
+
+#if DUMP_CODE
+void LinkBuffer::dumpCode(void* code, size_t size)
+{
+#if CPU(ARM_THUMB2)
+ // Dump the generated code in an asm file format that can be assembled and then disassembled
+ // for debugging purposes. For example, save this output as jit.s:
+ // gcc -arch armv7 -c jit.s
+ // otool -tv jit.o
+ static unsigned codeCount = 0;
+ unsigned short* tcode = static_cast<unsigned short*>(code);
+ size_t tsize = size / sizeof(short);
+ char nameBuf[128];
+ snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
+ dataLogF("\t.syntax unified\n"
+ "\t.section\t__TEXT,__text,regular,pure_instructions\n"
+ "\t.globl\t%s\n"
+ "\t.align 2\n"
+ "\t.code 16\n"
+ "\t.thumb_func\t%s\n"
+ "# %p\n"
+ "%s:\n", nameBuf, nameBuf, code, nameBuf);
+
+ for (unsigned i = 0; i < tsize; i++)
+ dataLogF("\t.short\t0x%x\n", tcode[i]);
+#elif CPU(ARM_TRADITIONAL)
+ // gcc -c jit.s
+ // objdump -D jit.o
+ static unsigned codeCount = 0;
+ unsigned int* tcode = static_cast<unsigned int*>(code);
+ size_t tsize = size / sizeof(unsigned int);
+ char nameBuf[128];
+ snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
+ dataLogF("\t.globl\t%s\n"
+ "\t.align 4\n"
+ "\t.code 32\n"
+ "\t.text\n"
+ "# %p\n"
+ "%s:\n", nameBuf, code, nameBuf);
+
+ for (unsigned i = 0; i < tsize; i++)
+ dataLogF("\t.long\t0x%x\n", tcode[i]);
+#endif
+}
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+
diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.h b/Source/JavaScriptCore/assembler/LinkBuffer.h
new file mode 100644
index 000000000..9b0d4c437
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/LinkBuffer.h
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2009, 2010, 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LinkBuffer_h
+#define LinkBuffer_h
+
+#if ENABLE(ASSEMBLER)
+
+#define DUMP_LINK_STATISTICS 0
+#define DUMP_CODE 0
+
+#define GLOBAL_THUNK_ID reinterpret_cast<void*>(static_cast<intptr_t>(-1))
+#define REGEXP_CODE_ID reinterpret_cast<void*>(static_cast<intptr_t>(-2))
+#define CSS_CODE_ID reinterpret_cast<void*>(static_cast<intptr_t>(-3))
+
+#include "JITCompilationEffort.h"
+#include "MacroAssembler.h"
+#include <wtf/DataLog.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class CodeBlock;
+class VM;
+
+// LinkBuffer:
+//
+// This class assists in linking code generated by the macro assembler, once code generation
+// has been completed, and the code has been copied to its final location in memory. At this
+// time pointers to labels within the code may be resolved, and relative offsets to external
+// addresses may be fixed.
+//
+// Specifically:
+// * Jump objects may be linked to external targets,
+//  * The address of a Jump object may be taken, such that it can later be relinked.
+// * The return address of a Call may be acquired.
+// * The address of a Label pointing into the code may be resolved.
+// * The value referenced by a DataLabel may be set.
+//
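+// A typical flow looks like this (a sketch; 'vm', 'masm', 'slowCall' and
+// 'slowFunction' are assumed to exist in the caller):
+//
+//     LinkBuffer linkBuffer(vm, masm, ownerUID, JITCompilationCanFail);
+//     if (linkBuffer.didFailToAllocate())
+//         return; // allocation failed and the caller allowed that
+//     linkBuffer.link(slowCall, FunctionPtr(slowFunction));
+//     CodeRef code = FINALIZE_CODE(linkBuffer, ("my thunk"));
+//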
+class LinkBuffer {
+ WTF_MAKE_NONCOPYABLE(LinkBuffer); WTF_MAKE_FAST_ALLOCATED;
+
+ typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssemblerCodePtr CodePtr;
+ typedef MacroAssembler::Label Label;
+ typedef MacroAssembler::Jump Jump;
+ typedef MacroAssembler::PatchableJump PatchableJump;
+ typedef MacroAssembler::JumpList JumpList;
+ typedef MacroAssembler::Call Call;
+ typedef MacroAssembler::DataLabelCompact DataLabelCompact;
+ typedef MacroAssembler::DataLabel32 DataLabel32;
+ typedef MacroAssembler::DataLabelPtr DataLabelPtr;
+ typedef MacroAssembler::ConvertibleLoadLabel ConvertibleLoadLabel;
+#if ENABLE(BRANCH_COMPACTION)
+ typedef MacroAssembler::LinkRecord LinkRecord;
+ typedef MacroAssembler::JumpLinkType JumpLinkType;
+#endif
+
+public:
+ LinkBuffer(VM& vm, MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
+ : m_size(0)
+#if ENABLE(BRANCH_COMPACTION)
+ , m_initialSize(0)
+#endif
+ , m_didAllocate(false)
+ , m_code(0)
+ , m_vm(&vm)
+#ifndef NDEBUG
+ , m_completed(false)
+#endif
+ {
+ linkCode(macroAssembler, ownerUID, effort);
+ }
+
+ LinkBuffer(VM& vm, MacroAssembler& macroAssembler, void* code, size_t size)
+ : m_size(size)
+#if ENABLE(BRANCH_COMPACTION)
+ , m_initialSize(0)
+#endif
+ , m_didAllocate(false)
+ , m_code(code)
+ , m_vm(&vm)
+#ifndef NDEBUG
+ , m_completed(false)
+#endif
+ {
+ linkCode(macroAssembler, 0, JITCompilationCanFail);
+ }
+
+ ~LinkBuffer()
+ {
+ }
+
+ bool didFailToAllocate() const
+ {
+ return !m_didAllocate;
+ }
+
+ bool isValid() const
+ {
+ return !didFailToAllocate();
+ }
+
+ // These methods are used to link or set values at code generation time.
+
+ void link(Call call, FunctionPtr function)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ call.m_label = applyOffset(call.m_label);
+ MacroAssembler::linkCall(code(), call, function);
+ }
+
+ void link(Call call, CodeLocationLabel label)
+ {
+ link(call, FunctionPtr(label.executableAddress()));
+ }
+
+ void link(Jump jump, CodeLocationLabel label)
+ {
+ jump.m_label = applyOffset(jump.m_label);
+ MacroAssembler::linkJump(code(), jump, label);
+ }
+
+ void link(JumpList list, CodeLocationLabel label)
+ {
+ for (unsigned i = 0; i < list.m_jumps.size(); ++i)
+ link(list.m_jumps[i], label);
+ }
+
+ void patch(DataLabelPtr label, void* value)
+ {
+ AssemblerLabel target = applyOffset(label.m_label);
+ MacroAssembler::linkPointer(code(), target, value);
+ }
+
+ void patch(DataLabelPtr label, CodeLocationLabel value)
+ {
+ AssemblerLabel target = applyOffset(label.m_label);
+ MacroAssembler::linkPointer(code(), target, value.executableAddress());
+ }
+
+ // These methods are used to obtain handles to allow the code to be relinked / repatched later.
+
+ CodeLocationLabel entrypoint()
+ {
+ return CodeLocationLabel(code());
+ }
+
+ CodeLocationCall locationOf(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(!call.isFlagSet(Call::Near));
+ return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)));
+ }
+
+ CodeLocationNearCall locationOfNearCall(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(call.isFlagSet(Call::Near));
+ return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)));
+ }
+
+ CodeLocationLabel locationOf(PatchableJump jump)
+ {
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(jump.m_jump.m_label)));
+ }
+
+ CodeLocationLabel locationOf(Label label)
+ {
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
+ {
+ return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationDataLabel32 locationOf(DataLabel32 label)
+ {
+ return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationDataLabelCompact locationOf(DataLabelCompact label)
+ {
+ return CodeLocationDataLabelCompact(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationConvertibleLoad locationOf(ConvertibleLoadLabel label)
+ {
+ return CodeLocationConvertibleLoad(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ // This method obtains the return address of the call, given as an offset from
+ // the start of the code.
+ unsigned returnAddressOffset(Call call)
+ {
+ call.m_label = applyOffset(call.m_label);
+ return MacroAssembler::getLinkerCallReturnOffset(call);
+ }
+
+ uint32_t offsetOf(Label label)
+ {
+ return applyOffset(label.m_label).m_offset;
+ }
+
+ unsigned offsetOf(PatchableJump jump)
+ {
+ return applyOffset(jump.m_jump.m_label).m_offset;
+ }
+
+ // Upon completion of all patching 'FINALIZE_CODE()' should be called once to
+ // complete generation of the code. Alternatively, call
+ // finalizeCodeWithoutDisassembly() directly if you have your own way of
+ // displaying disassembly.
+
+ JS_EXPORT_PRIVATE CodeRef finalizeCodeWithoutDisassembly();
+ JS_EXPORT_PRIVATE CodeRef finalizeCodeWithDisassembly(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+
+ CodePtr trampolineAt(Label label)
+ {
+ return CodePtr(MacroAssembler::AssemblerType_T::getRelocatedAddress(code(), applyOffset(label.m_label)));
+ }
+
+ void* debugAddress()
+ {
+ return m_code;
+ }
+
+ // FIXME: this does not account for the AssemblerData size!
+ size_t size()
+ {
+ return m_size;
+ }
+
+ bool wasAlreadyDisassembled() const { return m_alreadyDisassembled; }
+ void didAlreadyDisassemble() { m_alreadyDisassembled = true; }
+
+private:
+#if ENABLE(BRANCH_COMPACTION)
+ int executableOffsetFor(int location)
+ {
+ if (!location)
+ return 0;
+ return bitwise_cast<int32_t*>(m_assemblerStorage.buffer())[location / sizeof(int32_t) - 1];
+ }
+#endif
+
+ template <typename T> T applyOffset(T src)
+ {
+#if ENABLE(BRANCH_COMPACTION)
+ src.m_offset -= executableOffsetFor(src.m_offset);
+#endif
+ return src;
+ }
+
+ // Keep this private! - the underlying code should only be obtained externally via finalizeCode().
+ void* code()
+ {
+ return m_code;
+ }
+
+ void allocate(size_t initialSize, void* ownerUID, JITCompilationEffort);
+ void shrink(size_t newSize);
+
+ JS_EXPORT_PRIVATE void linkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort);
+#if ENABLE(BRANCH_COMPACTION)
+ template <typename InstructionType>
+ void copyCompactAndLinkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort);
+#endif
+
+ void performFinalization();
+
+#if DUMP_LINK_STATISTICS
+ static void dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize);
+#endif
+
+#if DUMP_CODE
+ static void dumpCode(void* code, size_t);
+#endif
+
+ RefPtr<ExecutableMemoryHandle> m_executableMemory;
+ size_t m_size;
+#if ENABLE(BRANCH_COMPACTION)
+ size_t m_initialSize;
+ AssemblerData m_assemblerStorage;
+#endif
+ bool m_didAllocate;
+ void* m_code;
+ VM* m_vm;
+#ifndef NDEBUG
+ bool m_completed;
+#endif
+ bool m_alreadyDisassembled { false };
+};
+
+#define FINALIZE_CODE_IF(condition, linkBufferReference, dataLogFArgumentsForHeading) \
+ (UNLIKELY((condition)) \
+ ? ((linkBufferReference).finalizeCodeWithDisassembly dataLogFArgumentsForHeading) \
+ : (linkBufferReference).finalizeCodeWithoutDisassembly())
+
+bool shouldShowDisassemblyFor(CodeBlock*);
+
+#define FINALIZE_CODE_FOR(codeBlock, linkBufferReference, dataLogFArgumentsForHeading) \
+ FINALIZE_CODE_IF(shouldShowDisassemblyFor(codeBlock) || Options::asyncDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
+
+// Use this to finalize code, like so:
+//
+// CodeRef code = FINALIZE_CODE(linkBuffer, ("my super thingy number %d", number));
+//
+// Which, in disassembly mode, will print:
+//
+// Generated JIT code for my super thingy number 42:
+//     Code at [0x123456, 0x234567):
+// 0x123456: mov $0, 0
+// 0x12345a: ret
+//
+// ... and so on.
+//
+// Note that the dataLogFArgumentsForHeading are only evaluated when showDisassembly
+// is true, so you can hide expensive disassembly-only computations inside there.
+
+#define FINALIZE_CODE(linkBufferReference, dataLogFArgumentsForHeading) \
+ FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::showDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
+
+#define FINALIZE_DFG_CODE(linkBufferReference, dataLogFArgumentsForHeading) \
+ FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::showDisassembly() || Options::showDFGDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // LinkBuffer_h
diff --git a/Source/JavaScriptCore/assembler/MIPSAssembler.h b/Source/JavaScriptCore/assembler/MIPSAssembler.h
new file mode 100644
index 000000000..caad1524d
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MIPSAssembler.h
@@ -0,0 +1,1090 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MIPSAssembler_h
+#define MIPSAssembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#include "AssemblerBuffer.h"
+#include "JITCompilationEffort.h"
+#include <wtf/Assertions.h>
+#include <wtf/SegmentedVector.h>
+
+namespace JSC {
+
+typedef uint32_t MIPSWord;
+
+namespace MIPSRegisters {
+typedef enum {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ r16,
+ r17,
+ r18,
+ r19,
+ r20,
+ r21,
+ r22,
+ r23,
+ r24,
+ r25,
+ r26,
+ r27,
+ r28,
+ r29,
+ r30,
+ r31,
+ zero = r0,
+ at = r1,
+ v0 = r2,
+ v1 = r3,
+ a0 = r4,
+ a1 = r5,
+ a2 = r6,
+ a3 = r7,
+ t0 = r8,
+ t1 = r9,
+ t2 = r10,
+ t3 = r11,
+ t4 = r12,
+ t5 = r13,
+ t6 = r14,
+ t7 = r15,
+ s0 = r16,
+ s1 = r17,
+ s2 = r18,
+ s3 = r19,
+ s4 = r20,
+ s5 = r21,
+ s6 = r22,
+ s7 = r23,
+ t8 = r24,
+ t9 = r25,
+ k0 = r26,
+ k1 = r27,
+ gp = r28,
+ sp = r29,
+ fp = r30,
+ ra = r31
+} RegisterID;
+
+typedef enum {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31
+} FPRegisterID;
+
+} // namespace MIPSRegisters
+
+class MIPSAssembler {
+public:
+ typedef MIPSRegisters::RegisterID RegisterID;
+ typedef MIPSRegisters::FPRegisterID FPRegisterID;
+ typedef SegmentedVector<AssemblerLabel, 64> Jumps;
+
+ static RegisterID firstRegister() { return MIPSRegisters::r0; }
+ static RegisterID lastRegister() { return MIPSRegisters::r31; }
+
+ static FPRegisterID firstFPRegister() { return MIPSRegisters::f0; }
+ static FPRegisterID lastFPRegister() { return MIPSRegisters::f31; }
+
+ MIPSAssembler()
+ : m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
+ {
+ }
+
+ AssemblerBuffer& buffer() { return m_buffer; }
+
+ // MIPS instruction opcode field position
+ enum {
+ OP_SH_RD = 11,
+ OP_SH_RT = 16,
+ OP_SH_RS = 21,
+ OP_SH_SHAMT = 6,
+ OP_SH_CODE = 16,
+ OP_SH_FD = 6,
+ OP_SH_FS = 11,
+ OP_SH_FT = 16
+ };
+
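+    // Emit one 32-bit instruction word. If appending caused the underlying
+    // buffer to reallocate, relocate the recorded jumps so they stay valid
+    // against the new base address.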
+ void emitInst(MIPSWord op)
+ {
+ void* oldBase = m_buffer.data();
+
+ m_buffer.putInt(op);
+
+ void* newBase = m_buffer.data();
+ if (oldBase != newBase)
+ relocateJumps(oldBase, newBase);
+ }
+
+ void nop()
+ {
+ emitInst(0x00000000);
+ }
+
+ void sync()
+ {
+ emitInst(0x0000000f);
+ }
+
+ /* Need to insert one load data delay nop for mips1. */
+ void loadDelayNop()
+ {
+#if WTF_MIPS_ISA(1)
+ nop();
+#endif
+ }
+
+ /* Need to insert one coprocessor access delay nop for mips1. */
+ void copDelayNop()
+ {
+#if WTF_MIPS_ISA(1)
+ nop();
+#endif
+ }
+
+ void move(RegisterID rd, RegisterID rs)
+ {
+ /* addu */
+ emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS));
+ }
+
+ /* Set an immediate value to a register. This may generate 1 or 2
+ instructions. */
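+    /* For example, li(t0, 0x12345678) emits lui t0, 0x1234 followed by
+       ori t0, t0, 0x5678, while li(t0, 42) emits a single addiu. */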
+ void li(RegisterID dest, int imm)
+ {
+ if (imm >= -32768 && imm <= 32767)
+ addiu(dest, MIPSRegisters::zero, imm);
+ else if (imm >= 0 && imm < 65536)
+ ori(dest, MIPSRegisters::zero, imm);
+ else {
+ lui(dest, imm >> 16);
+ if (imm & 0xffff)
+ ori(dest, dest, imm);
+ }
+ }
+
+ void lui(RegisterID rt, int imm)
+ {
+ emitInst(0x3c000000 | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void addiu(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x24000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void addu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void subu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000023 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void mult(RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000018 | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void div(RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000001a | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void mfhi(RegisterID rd)
+ {
+ emitInst(0x00000010 | (rd << OP_SH_RD));
+ }
+
+ void mflo(RegisterID rd)
+ {
+ emitInst(0x00000012 | (rd << OP_SH_RD));
+ }
+
+ void mul(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+#if WTF_MIPS_ISA_AT_LEAST(32)
+ emitInst(0x70000002 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+#else
+ mult(rs, rt);
+ mflo(rd);
+#endif
+ }
+
+ void andInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000024 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void andi(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x30000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void nor(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000027 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void orInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000025 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void ori(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x34000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void xorInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000026 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void xori(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x38000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void slt(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000002a | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void sltu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000002b | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void sltiu(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x2c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void sll(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000000 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void sllv(RegisterID rd, RegisterID rt, RegisterID rs)
+ {
+ emitInst(0x00000004 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+ }
+
+ void sra(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000003 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void srav(RegisterID rd, RegisterID rt, RegisterID rs)
+ {
+ emitInst(0x00000007 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+ }
+
+ void srl(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000002 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void srlv(RegisterID rd, RegisterID rt, RegisterID rs)
+ {
+ emitInst(0x00000006 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+ }
+
+ void lb(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x80000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lbu(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x90000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lw(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x8c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lwl(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x88000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lwr(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x98000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lh(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x84000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lhu(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x94000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void sb(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xa0000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void sh(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xa4000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void sw(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xac000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void jr(RegisterID rs)
+ {
+ emitInst(0x00000008 | (rs << OP_SH_RS));
+ }
+
+ void jalr(RegisterID rs)
+ {
+ emitInst(0x0000f809 | (rs << OP_SH_RS));
+ }
+
+ void jal()
+ {
+ emitInst(0x0c000000);
+ }
+
+ void bkpt()
+ {
+ int value = 512; /* BRK_BUG */
+ emitInst(0x0000000d | ((value & 0x3ff) << OP_SH_CODE));
+ }
+
+ void bgez(RegisterID rs, int imm)
+ {
+ emitInst(0x04010000 | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void bltz(RegisterID rs, int imm)
+ {
+ emitInst(0x04000000 | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void beq(RegisterID rs, RegisterID rt, int imm)
+ {
+ emitInst(0x10000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void bne(RegisterID rs, RegisterID rt, int imm)
+ {
+ emitInst(0x14000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void bc1t()
+ {
+ emitInst(0x45010000);
+ }
+
+ void bc1f()
+ {
+ emitInst(0x45000000);
+ }
+
+ void appendJump()
+ {
+ m_jumps.append(m_buffer.label());
+ }
+
+ void addd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200000 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void subd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200001 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void muld(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200002 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void divd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200003 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void lwc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xc4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ copDelayNop();
+ }
+
+ void ldc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xd4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void swc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xe4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void sdc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xf4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void mtc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44800000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void mthc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44e00000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void mfc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44000000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void sqrtd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200004 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void movd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200006 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void negd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200007 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void truncwd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x4620000d | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtdw(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46800021 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtds(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46000021 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtwd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200024 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtsd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200020 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void ceqd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200032 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cngtd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003f | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cnged(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003d | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cltd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003c | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cled(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003e | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cueqd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200033 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void coled(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200036 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void coltd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200034 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void culed(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200037 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cultd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200035 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ // General helpers
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ return m_buffer.label();
+ }
+
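+    // A watchpoint label reserves maxJumpReplacementSize() bytes after
+    // itself so that a jump can later be patched over it; label() below pads
+    // with nops until it is past the tail of the last watchpoint.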
+ AssemblerLabel labelForWatchpoint()
+ {
+ AssemblerLabel result = m_buffer.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = m_buffer.label();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ nop();
+ result = m_buffer.label();
+ }
+ return result;
+ }
+
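+    // Pad with break instructions until the requested alignment is reached;
+    // control flow that falls into the padding traps instead of executing
+    // stale bytes.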
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_buffer.isAligned(alignment))
+ bkpt();
+
+ return label();
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ // Assembler admin methods:
+
+ size_t codeSize() const
+ {
+ return m_buffer.codeSize();
+ }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ // Assembly helpers for moving data between fp and registers.
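+    // With MIPS32r2 and 64-bit FPRs the high half of a double register is
+    // accessed via mfhc1/mthc1; otherwise doubles occupy an even/odd pair
+    // and the odd register holds the other 32 bits.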
+ void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn)
+ {
+#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64
+ mfc1(rd1, rn);
+ mfhc1(rd2, rn);
+#else
+ mfc1(rd1, rn);
+ mfc1(rd2, FPRegisterID(rn + 1));
+#endif
+ }
+
+ void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2)
+ {
+#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64
+ mtc1(rn1, rd);
+ mthc1(rn2, rd);
+#else
+ mtc1(rn1, rd);
+ mtc1(rn2, FPRegisterID(rd + 1));
+#endif
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ // The return address is after a call and a delay slot instruction
+ return call.m_offset;
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
+
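+    // Overwrite the code at 'code' with a direct jump to 'to'. If the target
+    // shares its top four address bits with the delay slot, a single j (plus
+    // a delay-slot nop) suffices; otherwise the target is built in $t9 with
+    // lui/ori and reached through jr. Returns the number of bytes written so
+    // the caller can flush exactly that range.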
+ static size_t linkDirectJump(void* code, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code));
+ size_t ops = 0;
+ int32_t slotAddr = reinterpret_cast<int>(insn) + 4;
+ int32_t toAddr = reinterpret_cast<int>(to);
+
+ if ((slotAddr & 0xf0000000) != (toAddr & 0xf0000000)) {
+ // lui
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((toAddr >> 16) & 0xffff);
+ ++insn;
+ // ori
+ *insn = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (toAddr & 0xffff);
+ ++insn;
+ // jr
+ *insn = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ ++insn;
+ ops = 4 * sizeof(MIPSWord);
+ } else {
+ // j
+ *insn = 0x08000000 | ((toAddr & 0x0fffffff) >> 2);
+ ++insn;
+ ops = 2 * sizeof(MIPSWord);
+ }
+ // nop
+ *insn = 0x00000000;
+ return ops;
+ }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
+ {
+ ASSERT(to.isSet());
+ ASSERT(from.isSet());
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(m_buffer.data()) + from.m_offset);
+ MIPSWord* toPos = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(m_buffer.data()) + to.m_offset);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5)));
+ insn = insn - 6;
+ linkWithOffset(insn, toPos);
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5)));
+ insn = insn - 6;
+ linkWithOffset(insn, to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ linkCallInternal(insn, to);
+ }
+
+ static void linkPointer(void* code, AssemblerLabel from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ *insn = (*insn & 0xffff0000) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ *insn = (*insn & 0xffff0000) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 5)));
+ insn = insn - 6;
+ int flushSize = linkWithOffset(insn, to);
+
+ cacheFlush(insn, flushSize);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ void* start;
+ int size = linkCallInternal(from, to);
+ if (size == sizeof(MIPSWord))
+ start = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(from) - 2 * sizeof(MIPSWord));
+ else
+ start = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(from) - 4 * sizeof(MIPSWord));
+
+ cacheFlush(start, size);
+ }
+
+ static void repatchInt32(void* from, int32_t to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ *insn = (*insn & 0xffff0000) | ((to >> 16) & 0xffff);
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ *insn = (*insn & 0xffff0000) | (to & 0xffff);
+ insn--;
+ cacheFlush(insn, 2 * sizeof(MIPSWord));
+ }
+
+ static int32_t readInt32(void* from)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ int32_t result = (*insn & 0x0000ffff) << 16;
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ result |= *insn & 0x0000ffff;
+ return result;
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ repatchInt32(where, value);
+ }
+
+ static void repatchPointer(void* from, void* to)
+ {
+ repatchInt32(from, reinterpret_cast<int32_t>(to));
+ }
+
+ static void* readPointer(void* from)
+ {
+ return reinterpret_cast<void*>(readInt32(from));
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ insn -= 4;
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ int32_t result = (*insn & 0x0000ffff) << 16;
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ result |= *insn & 0x0000ffff;
+ return reinterpret_cast<void*>(result);
+ }
+
+ static void cacheFlush(void* code, size_t size)
+ {
+ intptr_t end = reinterpret_cast<intptr_t>(code) + size;
+ __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return sizeof(MIPSWord) * 4;
+ }
+
+ static void revertJumpToMove(void* instructionStart, RegisterID rt, int imm)
+ {
+ MIPSWord* insn = static_cast<MIPSWord*>(instructionStart);
+ size_t codeSize = 2 * sizeof(MIPSWord);
+
+ // lui
+ *insn = 0x3c000000 | (rt << OP_SH_RT) | ((imm >> 16) & 0xffff);
+ ++insn;
+ // ori
+ *insn = 0x34000000 | (rt << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff);
+ ++insn;
+        // If the following instruction is jr $t9, replace it with a nop.
+ if (*insn == 0x03200008) {
+ *insn = 0x00000000;
+ codeSize += sizeof(MIPSWord);
+ }
+        cacheFlush(instructionStart, codeSize);
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 3));
+ ASSERT(!(bitwise_cast<uintptr_t>(to) & 3));
+ size_t ops = linkDirectJump(instructionStart, to);
+ cacheFlush(instructionStart, ops);
+ }
+
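+    // The two helpers below toggle a patchable lui/addu/{lw,addiu} sequence
+    // between loading from the computed address and merely computing it, by
+    // rewriting only the opcode bits of the third instruction.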
+ static void replaceWithLoad(void* instructionStart)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(instructionStart);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ insn++;
+ ASSERT((*insn & 0xfc0007ff) == 0x00000021); // addu
+ insn++;
+ *insn = 0x8c000000 | ((*insn) & 0x3ffffff); // lw
+ cacheFlush(insn, 4);
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(instructionStart);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ insn++;
+ ASSERT((*insn & 0xfc0007ff) == 0x00000021); // addu
+ insn++;
+ *insn = 0x24000000 | ((*insn) & 0x3ffffff); // addiu
+ cacheFlush(insn, 4);
+ }
+
+    /* Rewrite every recorded jump after the buffer has moved from oldBase to newBase. */
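+    // Two shapes are handled: an absolute j, retargeted in place (or
+    // expanded to lui/ori/jr through $t9 if the new target leaves its 256MB
+    // segment), and a lui/ori pair, both halves of which are rewritten.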
+ void relocateJumps(void* oldBase, void* newBase)
+ {
+ // Check each jump
+ for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+ int pos = iter->m_offset;
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(newBase) + pos);
+ insn = insn + 2;
+ // Need to make sure we have 5 valid instructions after pos
+ if ((unsigned)pos >= m_buffer.codeSize() - 5 * sizeof(MIPSWord))
+ continue;
+
+ if ((*insn & 0xfc000000) == 0x08000000) { // j
+ int offset = *insn & 0x03ffffff;
+ int oldInsnAddress = (int)insn - (int)newBase + (int)oldBase;
+ int topFourBits = (oldInsnAddress + 4) >> 28;
+ int oldTargetAddress = (topFourBits << 28) | (offset << 2);
+ int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase;
+ int newInsnAddress = (int)insn;
+ if (((newInsnAddress + 4) >> 28) == (newTargetAddress >> 28))
+ *insn = 0x08000000 | ((newTargetAddress >> 2) & 0x3ffffff);
+ else {
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ /* jr */
+ *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ }
+ } else if ((*insn & 0xffe00000) == 0x3c000000) { // lui
+ int high = (*insn & 0xffff) << 16;
+ int low = *(insn + 1) & 0xffff;
+ int oldTargetAddress = high | low;
+ int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase;
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ }
+ }
+ }
+
+private:
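+    // Link the conditional branch at 'insn' to 'to'. When the displacement
+    // fits in the branch's signed 16-bit offset the branch is patched
+    // directly; otherwise the condition is inverted to branch around an
+    // absolute j or a lui/ori/jr sequence, as diagrammed below. Returns the
+    // number of bytes modified.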
+ static int linkWithOffset(MIPSWord* insn, void* to)
+ {
+ ASSERT((*insn & 0xfc000000) == 0x10000000 // beq
+ || (*insn & 0xfc000000) == 0x14000000 // bne
+ || (*insn & 0xffff0000) == 0x45010000 // bc1t
+ || (*insn & 0xffff0000) == 0x45000000); // bc1f
+ intptr_t diff = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(insn) - 4) >> 2;
+
+ if (diff < -32768 || diff > 32767 || *(insn + 2) != 0x10000003) {
+ /*
+ Convert the sequence:
+ beq $2, $3, target
+ nop
+ b 1f
+ nop
+ nop
+ nop
+ 1:
+
+ to the new sequence if possible:
+ bne $2, $3, 1f
+ nop
+ j target
+ nop
+ nop
+ nop
+ 1:
+
+ OR to the new sequence:
+ bne $2, $3, 1f
+ nop
+ lui $25, target >> 16
+ ori $25, $25, target & 0xffff
+ jr $25
+ nop
+ 1:
+
+ Note: beq/bne/bc1t/bc1f are converted to bne/beq/bc1f/bc1t.
+ */
+
+ if (*(insn + 2) == 0x10000003) {
+ if ((*insn & 0xfc000000) == 0x10000000) // beq
+ *insn = (*insn & 0x03ff0000) | 0x14000005; // bne
+ else if ((*insn & 0xfc000000) == 0x14000000) // bne
+ *insn = (*insn & 0x03ff0000) | 0x10000005; // beq
+ else if ((*insn & 0xffff0000) == 0x45010000) // bc1t
+ *insn = 0x45000005; // bc1f
+ else if ((*insn & 0xffff0000) == 0x45000000) // bc1f
+ *insn = 0x45010005; // bc1t
+ else
+ ASSERT(0);
+ }
+
+ insn = insn + 2;
+ if ((reinterpret_cast<intptr_t>(insn) + 4) >> 28
+ == reinterpret_cast<intptr_t>(to) >> 28) {
+ *insn = 0x08000000 | ((reinterpret_cast<intptr_t>(to) >> 2) & 0x3ffffff);
+ *(insn + 1) = 0;
+ return 4 * sizeof(MIPSWord);
+ }
+
+ intptr_t newTargetAddress = reinterpret_cast<intptr_t>(to);
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ /* jr */
+ *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ return 5 * sizeof(MIPSWord);
+ }
+
+ *insn = (*insn & 0xffff0000) | (diff & 0xffff);
+ return sizeof(MIPSWord);
+ }
+
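+    // Relink the call sequence ending at 'from'. A jal whose target shares
+    // its 256MB segment is retargeted in place; a far target is built in $t9
+    // with lui/ori and called through jalr; a plain lui/ori pair has both
+    // halves repatched. Returns the number of bytes modified, which
+    // relinkCall uses to size its cache flush.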
+ static int linkCallInternal(void* from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ insn = insn - 4;
+
+ if ((*(insn + 2) & 0xfc000000) == 0x0c000000) { // jal
+ if ((reinterpret_cast<intptr_t>(from) - 4) >> 28
+ == reinterpret_cast<intptr_t>(to) >> 28) {
+ *(insn + 2) = 0x0c000000 | ((reinterpret_cast<intptr_t>(to) >> 2) & 0x3ffffff);
+ return sizeof(MIPSWord);
+ }
+
+ /* lui $25, (to >> 16) & 0xffff */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ /* ori $25, $25, to & 0xffff */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ /* jalr $25 */
+ *(insn + 2) = 0x0000f809 | (MIPSRegisters::t9 << OP_SH_RS);
+ return 3 * sizeof(MIPSWord);
+ }
+
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ ASSERT((*(insn + 1) & 0xfc000000) == 0x34000000); // ori
+
+ /* lui */
+ *insn = (*insn & 0xffff0000) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = (*(insn + 1) & 0xffff0000) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ return 2 * sizeof(MIPSWord);
+ }
+
+ AssemblerBuffer m_buffer;
+ Jumps m_jumps;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#endif // MIPSAssembler_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.cpp b/Source/JavaScriptCore/assembler/MacroAssembler.cpp
new file mode 100644
index 000000000..2cff056d2
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssembler.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MacroAssembler.h"
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+const double MacroAssembler::twoToThe32 = (double)0x100000000ull;
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.h b/Source/JavaScriptCore/assembler/MacroAssembler.h
new file mode 100644
index 000000000..fd4c5bbf5
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssembler.h
@@ -0,0 +1,1607 @@
+/*
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssembler_h
+#define MacroAssembler_h
+
+#if ENABLE(ASSEMBLER)
+
+#if CPU(ARM_THUMB2)
+#include "MacroAssemblerARMv7.h"
+namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
+
+#elif CPU(ARM64)
+#include "MacroAssemblerARM64.h"
+namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; };
+
+#elif CPU(ARM_TRADITIONAL)
+#include "MacroAssemblerARM.h"
+namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
+
+#elif CPU(MIPS)
+#include "MacroAssemblerMIPS.h"
+namespace JSC { typedef MacroAssemblerMIPS MacroAssemblerBase; };
+
+#elif CPU(X86)
+#include "MacroAssemblerX86.h"
+namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
+
+#elif CPU(X86_64)
+#include "MacroAssemblerX86_64.h"
+namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
+
+#elif CPU(SH4)
+#include "MacroAssemblerSH4.h"
+namespace JSC { typedef MacroAssemblerSH4 MacroAssemblerBase; };
+
+#else
+#error "The MacroAssembler is not supported on this platform."
+#endif
+
+namespace JSC {
+
+class MacroAssembler : public MacroAssemblerBase {
+public:
+
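+    // This class layers platform-agnostic conveniences over the per-CPU
+    // base: register indexing helpers, *Ptr operations that map onto 32- or
+    // 64-bit forms, backwards branches, and constant blinding for untrusted
+    // Imm32/Imm64/ImmPtr immediates.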
+ static RegisterID nextRegister(RegisterID reg)
+ {
+ return static_cast<RegisterID>(reg + 1);
+ }
+
+ static FPRegisterID nextFPRegister(FPRegisterID reg)
+ {
+ return static_cast<FPRegisterID>(reg + 1);
+ }
+
+ static unsigned numberOfRegisters()
+ {
+ return lastRegister() - firstRegister() + 1;
+ }
+
+ static unsigned registerIndex(RegisterID reg)
+ {
+ return reg - firstRegister();
+ }
+
+ static unsigned numberOfFPRegisters()
+ {
+ return lastFPRegister() - firstFPRegister() + 1;
+ }
+
+ static unsigned fpRegisterIndex(FPRegisterID reg)
+ {
+ return reg - firstFPRegister();
+ }
+
+ static unsigned registerIndex(FPRegisterID reg)
+ {
+ return fpRegisterIndex(reg) + numberOfRegisters();
+ }
+
+ static unsigned totalNumberOfRegisters()
+ {
+ return numberOfRegisters() + numberOfFPRegisters();
+ }
+
+ using MacroAssemblerBase::pop;
+ using MacroAssemblerBase::jump;
+ using MacroAssemblerBase::branch32;
+ using MacroAssemblerBase::move;
+ using MacroAssemblerBase::add32;
+ using MacroAssemblerBase::and32;
+ using MacroAssemblerBase::branchAdd32;
+ using MacroAssemblerBase::branchMul32;
+#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64)
+ using MacroAssemblerBase::branchPtr;
+#endif
+ using MacroAssemblerBase::branchSub32;
+ using MacroAssemblerBase::lshift32;
+ using MacroAssemblerBase::or32;
+ using MacroAssemblerBase::rshift32;
+ using MacroAssemblerBase::store32;
+ using MacroAssemblerBase::sub32;
+ using MacroAssemblerBase::urshift32;
+ using MacroAssemblerBase::xor32;
+
+ static bool isPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value == static_cast<int32_t>(value);
+ }
+
+    static const double twoToThe32; // 2^32 as a double; used by integer <-> double conversion code.
+
+ // Utilities used by the DFG JIT.
+#if ENABLE(DFG_JIT)
+ using MacroAssemblerBase::invert;
+
+ static DoubleCondition invert(DoubleCondition cond)
+ {
+ switch (cond) {
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return DoubleEqual; // make compiler happy
+ }
+ }
+
+ static bool isInvertible(ResultCondition cond)
+ {
+ switch (cond) {
+ case Zero:
+ case NonZero:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static ResultCondition invert(ResultCondition cond)
+ {
+ switch (cond) {
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return Zero; // Make compiler happy for release builds.
+ }
+ }
+#endif
+
+    // Platform-agnostic convenience functions,
+ // described in terms of other macro assembly methods.
+ void pop()
+ {
+ addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
+ }
+
+ void peek(RegisterID dest, int index = 0)
+ {
+ loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+ }
+
+ Address addressForPoke(int index)
+ {
+ return Address(stackPointerRegister, (index * sizeof(void*)));
+ }
+
+ void poke(RegisterID src, int index = 0)
+ {
+ storePtr(src, addressForPoke(index));
+ }
+
+ void poke(TrustedImm32 value, int index = 0)
+ {
+ store32(value, addressForPoke(index));
+ }
+
+ void poke(TrustedImmPtr imm, int index = 0)
+ {
+ storePtr(imm, addressForPoke(index));
+ }
+
+#if !CPU(ARM64)
+ void pushToSave(RegisterID src)
+ {
+ push(src);
+ }
+ void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
+ {
+ push(imm);
+ }
+ void popToRestore(RegisterID dest)
+ {
+ pop(dest);
+ }
+ void pushToSave(FPRegisterID src)
+ {
+ subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
+ storeDouble(src, stackPointerRegister);
+ }
+ void popToRestore(FPRegisterID dest)
+ {
+ loadDouble(stackPointerRegister, dest);
+ addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
+ }
+
+ static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
+#endif // !CPU(ARM64)
+
+#if CPU(X86_64) || CPU(ARM64)
+ void peek64(RegisterID dest, int index = 0)
+ {
+ load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+ }
+
+ void poke(TrustedImm64 value, int index = 0)
+ {
+ store64(value, addressForPoke(index));
+ }
+
+ void poke64(RegisterID src, int index = 0)
+ {
+ store64(src, addressForPoke(index));
+ }
+#endif
+
+#if CPU(MIPS)
+ void poke(FPRegisterID src, int index = 0)
+ {
+ ASSERT(!(index & 1));
+ storeDouble(src, addressForPoke(index));
+ }
+#endif
+
+ // Immediate shifts only have 5 controllable bits
+ // so we'll consider them safe for now.
+ TrustedImm32 trustedImm32ForShift(Imm32 imm)
+ {
+ return TrustedImm32(imm.asTrustedImm32().m_value & 31);
+ }
+
+    // Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
+ void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
+ {
+ branchPtr(cond, op1, imm).linkTo(target, this);
+ }
+ void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
+ {
+ branchPtr(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
+ {
+ branch32(cond, op1, op2).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
+ {
+ branch32(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
+ {
+ branch32(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
+ {
+ branch32(cond, left, right).linkTo(target, this);
+ }
+
+ Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
+ {
+ return branch32(commute(cond), right, left);
+ }
+
+ Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
+ {
+ return branch32(commute(cond), right, left);
+ }
+
+ void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
+ {
+ branchTestPtr(cond, reg).linkTo(target, this);
+ }
+
+#if !CPU(ARM_THUMB2) && !CPU(ARM64)
+ PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+ {
+ return PatchableJump(branchPtr(cond, left, right));
+ }
+
+ PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
+ }
+
+ PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
+ }
+
+#if !CPU(ARM_TRADITIONAL)
+ PatchableJump patchableJump()
+ {
+ return PatchableJump(jump());
+ }
+
+ PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return PatchableJump(branchTest32(cond, reg, mask));
+ }
+
+ PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+ {
+ return PatchableJump(branch32(cond, reg, imm));
+ }
+
+ PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
+ {
+ return PatchableJump(branch32(cond, address, imm));
+ }
+#endif
+#endif
+
+ void jump(Label target)
+ {
+ jump().linkTo(target, this);
+ }
+
+ // Commute a relational condition, returns a new condition that will produce
+ // the same results given the same inputs but with their positions exchanged.
+ static RelationalCondition commute(RelationalCondition condition)
+ {
+ switch (condition) {
+ case Above:
+ return Below;
+ case AboveOrEqual:
+ return BelowOrEqual;
+ case Below:
+ return Above;
+ case BelowOrEqual:
+ return AboveOrEqual;
+ case GreaterThan:
+ return LessThan;
+ case GreaterThanOrEqual:
+ return LessThanOrEqual;
+ case LessThan:
+ return GreaterThan;
+ case LessThanOrEqual:
+ return GreaterThanOrEqual;
+ default:
+ break;
+ }
+
+ ASSERT(condition == Equal || condition == NotEqual);
+ return condition;
+ }
+
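+    // Blinding is probabilistic: on average only one in BlindingModulus
+    // otherwise-eligible constants is actually blinded, which keeps the code
+    // size and speed cost low while still making untrusted immediates
+    // unreliable as JIT-spray payloads.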
+ static const unsigned BlindingModulus = 64;
+ bool shouldConsiderBlinding()
+ {
+ return !(random() & (BlindingModulus - 1));
+ }
+
+ // Ptr methods
+    // On 32-bit platforms (e.g. x86), these methods directly map onto their 32-bit equivalents.
+ // FIXME: should this use a test for 32-bitness instead of this specific exception?
+#if !CPU(X86_64) && !CPU(ARM64)
+ void addPtr(Address src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(AbsoluteAddress src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(RegisterID src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add32(imm, srcDest);
+ }
+
+ void addPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ add32(TrustedImm32(imm), dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add32(imm, src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ add32(imm, address);
+ }
+
+ void andPtr(RegisterID src, RegisterID dest)
+ {
+ and32(src, dest);
+ }
+
+ void andPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ and32(imm, srcDest);
+ }
+
+ void andPtr(TrustedImmPtr imm, RegisterID srcDest)
+ {
+ and32(TrustedImm32(imm), srcDest);
+ }
+
+ void lshiftPtr(Imm32 imm, RegisterID srcDest)
+ {
+ lshift32(trustedImm32ForShift(imm), srcDest);
+ }
+
+ void rshiftPtr(Imm32 imm, RegisterID srcDest)
+ {
+ rshift32(trustedImm32ForShift(imm), srcDest);
+ }
+
+ void urshiftPtr(Imm32 imm, RegisterID srcDest)
+ {
+ urshift32(trustedImm32ForShift(imm), srcDest);
+ }
+
+ void negPtr(RegisterID dest)
+ {
+ neg32(dest);
+ }
+
+ void orPtr(RegisterID src, RegisterID dest)
+ {
+ or32(src, dest);
+ }
+
+ void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ or32(op1, op2, dest);
+ }
+
+ void orPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ or32(TrustedImm32(imm), dest);
+ }
+
+ void orPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ or32(imm, dest);
+ }
+
+ void subPtr(RegisterID src, RegisterID dest)
+ {
+ sub32(src, dest);
+ }
+
+ void subPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ }
+
+ void subPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ sub32(TrustedImm32(imm), dest);
+ }
+
+ void xorPtr(RegisterID src, RegisterID dest)
+ {
+ xor32(src, dest);
+ }
+
+ void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ xor32(imm, srcDest);
+ }
+
+
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void loadPtr(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void loadPtr(const void* address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load32WithAddressOffsetPatch(address, dest);
+ }
+
+ DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load32WithCompactAddressOffsetPatch(address, dest);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ move(Imm32(imm.asTrustedImmPtr()), dest);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ compare32(cond, left, right, dest);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ compare32(cond, left, right, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(RegisterID src, BaseIndex address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(RegisterID src, void* address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+ {
+ store32(TrustedImm32(imm), address);
+ }
+
+ void storePtr(ImmPtr imm, Address address)
+ {
+ store32(Imm32(imm.asTrustedImmPtr()), address);
+ }
+
+ void storePtr(TrustedImmPtr imm, void* address)
+ {
+ store32(TrustedImm32(imm), address);
+ }
+
+ void storePtr(TrustedImm32 imm, ImplicitAddress address)
+ {
+ store32(imm, address);
+ }
+
+ void storePtr(TrustedImmPtr imm, BaseIndex address)
+ {
+ store32(TrustedImm32(imm), address);
+ }
+
+ DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ return store32WithAddressOffsetPatch(src, address);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+ {
+ return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub32(cond, src, dest);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ return branchTest32(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, address, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, address, mask);
+ }
+
+ Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd32(cond, src, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub32(cond, imm, dest);
+ }
+ using MacroAssemblerBase::branchTest8;
+ Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
+ }
+
+#else // !CPU(X86_64) && !CPU(ARM64)
+
+ void addPtr(RegisterID src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(Address src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add64(imm, srcDest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add64(imm, src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, Address address)
+ {
+ add64(imm, address);
+ }
+
+ void addPtr(AbsoluteAddress src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ add64(TrustedImm64(imm), dest);
+ }
+
+ void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ add64(imm, address);
+ }
+
+ void andPtr(RegisterID src, RegisterID dest)
+ {
+ and64(src, dest);
+ }
+
+ void andPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ and64(imm, srcDest);
+ }
+
+ void andPtr(TrustedImmPtr imm, RegisterID srcDest)
+ {
+ and64(imm, srcDest);
+ }
+
+ void lshiftPtr(Imm32 imm, RegisterID srcDest)
+ {
+ lshift64(trustedImm32ForShift(imm), srcDest);
+ }
+
+ void rshiftPtr(Imm32 imm, RegisterID srcDest)
+ {
+ rshift64(trustedImm32ForShift(imm), srcDest);
+ }
+
+ void urshiftPtr(Imm32 imm, RegisterID srcDest)
+ {
+ urshift64(trustedImm32ForShift(imm), srcDest);
+ }
+
+ void negPtr(RegisterID dest)
+ {
+ neg64(dest);
+ }
+
+ void orPtr(RegisterID src, RegisterID dest)
+ {
+ or64(src, dest);
+ }
+
+ void orPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ or64(imm, dest);
+ }
+
+ void orPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ or64(TrustedImm64(imm), dest);
+ }
+
+ void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ or64(op1, op2, dest);
+ }
+
+ void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ or64(imm, src, dest);
+ }
+
+ void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
+ {
+ rotateRight64(imm, srcDst);
+ }
+
+ void subPtr(RegisterID src, RegisterID dest)
+ {
+ sub64(src, dest);
+ }
+
+ void subPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ sub64(imm, dest);
+ }
+
+ void subPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ sub64(TrustedImm64(imm), dest);
+ }
+
+ void xorPtr(RegisterID src, RegisterID dest)
+ {
+ xor64(src, dest);
+ }
+
+ void xorPtr(RegisterID src, Address dest)
+ {
+ xor64(src, dest);
+ }
+
+ void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ xor64(imm, srcDest);
+ }
+
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load64(address, dest);
+ }
+
+ void loadPtr(BaseIndex address, RegisterID dest)
+ {
+ load64(address, dest);
+ }
+
+ void loadPtr(const void* address, RegisterID dest)
+ {
+ load64(address, dest);
+ }
+
+ DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load64WithAddressOffsetPatch(address, dest);
+ }
+
+ DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load64WithCompactAddressOffsetPatch(address, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store64(src, address);
+ }
+
+ void storePtr(RegisterID src, BaseIndex address)
+ {
+ store64(src, address);
+ }
+
+ void storePtr(RegisterID src, void* address)
+ {
+ store64(src, address);
+ }
+
+ void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+ {
+ store64(TrustedImm64(imm), address);
+ }
+
+ void storePtr(TrustedImmPtr imm, BaseIndex address)
+ {
+ store64(TrustedImm64(imm), address);
+ }
+
+ DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ return store64WithAddressOffsetPatch(src, address);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ compare64(cond, left, right, dest);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ compare64(cond, left, right, dest);
+ }
+
+ void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+ {
+ test64(cond, reg, mask, dest);
+ }
+
+ void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+ {
+ test64(cond, reg, mask, dest);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+ {
+ return branch64(cond, left, TrustedImm64(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+ {
+ return branch64(cond, left, TrustedImm64(right));
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ return branchTest64(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, address, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
+ {
+ return branchTest64(cond, address, reg);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, address, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, address, mask);
+ }
+
+ Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchAdd64(cond, imm, dest);
+ }
+
+ Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd64(cond, src, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub64(cond, imm, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub64(cond, src, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+ {
+ return branchSub64(cond, src1, src2, dest);
+ }
+
+ using MacroAssemblerBase::and64;
+ using MacroAssemblerBase::convertInt32ToDouble;
+ using MacroAssemblerBase::store64;
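+    // Doubles that are non-finite, that change bit pattern when multiplied
+    // by 1.0, or that are not multiples of 1/8 are blinded probabilistically;
+    // well-formed values are blinded exactly when their magnitude exceeds
+    // 0xff.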
+ bool shouldBlindDouble(double value)
+ {
+ // Don't trust NaN or +/-Infinity
+ if (!std::isfinite(value))
+ return shouldConsiderBlinding();
+
+ // Try to force normalisation, and check that there's no change
+ // in the bit pattern
+ if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
+ return shouldConsiderBlinding();
+
+ value = fabs(value);
+ // Only allow a limited set of fractional components
+ double scaledValue = value * 8;
+ if (scaledValue / 8 != value)
+ return shouldConsiderBlinding();
+ double frac = scaledValue - floor(scaledValue);
+ if (frac != 0.0)
+ return shouldConsiderBlinding();
+
+ return value > 0xff;
+ }
+
+ bool shouldBlindPointerForSpecificArch(uintptr_t value)
+ {
+ if (sizeof(void*) == 4)
+ return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
+ return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
+ }
+
+ bool shouldBlind(ImmPtr imm)
+ {
+ if (!canBlind())
+ return false;
+
+#if ENABLE(FORCED_JIT_BLINDING)
+ UNUSED_PARAM(imm);
+        // Debug builds always blind all constants, if only so we know
+        // whether we've broken blinding during patch development.
+ return true;
+#endif
+
+ // First off we'll special case common, "safe" values to avoid hurting
+ // performance too much
+ uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+ switch (value) {
+ case 0xffff:
+ case 0xffffff:
+ case 0xffffffffL:
+ case 0xffffffffffL:
+ case 0xffffffffffffL:
+ case 0xffffffffffffffL:
+ case 0xffffffffffffffffL:
+ return false;
+ default: {
+ if (value <= 0xff)
+ return false;
+ if (~value <= 0xff)
+ return false;
+ }
+ }
+
+ if (!shouldConsiderBlinding())
+ return false;
+
+ return shouldBlindPointerForSpecificArch(value);
+ }
+
+ struct RotatedImmPtr {
+ RotatedImmPtr(uintptr_t v1, uint8_t v2)
+ : value(v1)
+ , rotation(v2)
+ {
+ }
+ TrustedImmPtr value;
+ TrustedImm32 rotation;
+ };
+
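+    // Blind a pointer by rotating it left by a random amount; the rotated
+    // value is meaningless on its own, and loadRotationBlindedConstant
+    // restores it with the matching right rotation.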
+ RotatedImmPtr rotationBlindConstant(ImmPtr imm)
+ {
+ uint8_t rotation = random() % (sizeof(void*) * 8);
+ uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+ value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
+ return RotatedImmPtr(value, rotation);
+ }
+
+ void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
+ {
+ move(constant.value, dest);
+ rotateRightPtr(constant.rotation, dest);
+ }
+
+ bool shouldBlind(Imm64 imm)
+ {
+#if ENABLE(FORCED_JIT_BLINDING)
+ UNUSED_PARAM(imm);
+        // Debug builds always blind all constants, if only so we know
+        // whether we've broken blinding during patch development.
+ return true;
+#endif
+
+ // First off we'll special case common, "safe" values to avoid hurting
+ // performance too much
+ uint64_t value = imm.asTrustedImm64().m_value;
+ switch (value) {
+ case 0xffff:
+ case 0xffffff:
+ case 0xffffffffL:
+ case 0xffffffffffL:
+ case 0xffffffffffffL:
+ case 0xffffffffffffffL:
+ case 0xffffffffffffffffL:
+ return false;
+ default: {
+ if (value <= 0xff)
+ return false;
+ if (~value <= 0xff)
+ return false;
+
+ JSValue jsValue = JSValue::decode(value);
+ if (jsValue.isInt32())
+ return shouldBlind(Imm32(jsValue.asInt32()));
+ if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
+ return false;
+
+ if (!shouldBlindDouble(bitwise_cast<double>(value)))
+ return false;
+ }
+ }
+
+ if (!shouldConsiderBlinding())
+ return false;
+
+ return shouldBlindForSpecificArch(value);
+ }
+
+ struct RotatedImm64 {
+ RotatedImm64(uint64_t v1, uint8_t v2)
+ : value(v1)
+ , rotation(v2)
+ {
+ }
+ TrustedImm64 value;
+ TrustedImm32 rotation;
+ };
+
+ RotatedImm64 rotationBlindConstant(Imm64 imm)
+ {
+ uint8_t rotation = random() % (sizeof(int64_t) * 8);
+ uint64_t value = imm.asTrustedImm64().m_value;
+ value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
+ return RotatedImm64(value, rotation);
+ }
+
+ void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
+ {
+ move(constant.value, dest);
+ rotateRight64(constant.rotation, dest);
+ }
+
+ void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
+ {
+ if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
+ convertInt32ToDouble(scratchRegister, dest);
+ } else
+ convertInt32ToDouble(imm.asTrustedImm32(), dest);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ if (shouldBlind(imm))
+ loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+ else
+ move(imm.asTrustedImmPtr(), dest);
+ }
+
+ void move(Imm64 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm))
+ loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+ else
+ move(imm.asTrustedImm64(), dest);
+ }
+
+ void and64(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = andBlindedConstant(imm);
+ and64(key.value1, dest);
+ and64(key.value2, dest);
+ } else
+ and64(imm.asTrustedImm32(), dest);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+ {
+ if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
+ return branchPtr(cond, left, scratchRegister);
+ }
+ return branchPtr(cond, left, right.asTrustedImmPtr());
+ }
+
+ void storePtr(ImmPtr imm, Address dest)
+ {
+ if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+ storePtr(scratchRegister, dest);
+ } else
+ storePtr(imm.asTrustedImmPtr(), dest);
+ }
+
+ void store64(Imm64 imm, Address dest)
+ {
+ if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+ store64(scratchRegister, dest);
+ } else
+ store64(imm.asTrustedImm64(), dest);
+ }
+
+#endif // !CPU(X86_64) && !CPU(ARM64)
+
+ bool shouldBlind(Imm32 imm)
+ {
+#if ENABLE(FORCED_JIT_BLINDING)
+ UNUSED_PARAM(imm);
+        // Debug builds always blind all constants, if only so we know
+        // whether we've broken blinding during patch development.
+ return true;
+#else // ENABLE(FORCED_JIT_BLINDING)
+
+ // First off we'll special case common, "safe" values to avoid hurting
+ // performance too much
+ uint32_t value = imm.asTrustedImm32().m_value;
+ switch (value) {
+ case 0xffff:
+ case 0xffffff:
+ case 0xffffffff:
+ return false;
+ default:
+ if (value <= 0xff)
+ return false;
+ if (~value <= 0xff)
+ return false;
+ }
+
+ if (!shouldConsiderBlinding())
+ return false;
+
+ return shouldBlindForSpecificArch(value);
+#endif // ENABLE(FORCED_JIT_BLINDING)
+ }
+
+ struct BlindedImm32 {
+ BlindedImm32(int32_t v1, int32_t v2)
+ : value1(v1)
+ , value2(v2)
+ {
+ }
+ TrustedImm32 value1;
+ TrustedImm32 value2;
+ };
+
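+    // Choose a random key no wider than the constant being blinded, so that
+    // both halves of a blinded pair stay in the same size class as the
+    // original value.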
+ uint32_t keyForConstant(uint32_t value, uint32_t& mask)
+ {
+ uint32_t key = random();
+ if (value <= 0xff)
+ mask = 0xff;
+ else if (value <= 0xffff)
+ mask = 0xffff;
+ else if (value <= 0xffffff)
+ mask = 0xffffff;
+ else
+ mask = 0xffffffff;
+ return key & mask;
+ }
+
+ uint32_t keyForConstant(uint32_t value)
+ {
+ uint32_t mask = 0;
+ return keyForConstant(value, mask);
+ }
+
+ BlindedImm32 xorBlindConstant(Imm32 imm)
+ {
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t key = keyForConstant(baseValue);
+ return BlindedImm32(baseValue ^ key, key);
+ }
+
+ BlindedImm32 additionBlindedConstant(Imm32 imm)
+ {
+ // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
+        static const uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };
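+        // maskTable is indexed by the low two bits of the immediate: offsets
+        // that are 0 mod 4 get a key that is also 0 mod 4, and even offsets
+        // get an even key, so each partial sum preserves the original
+        // alignment.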
+
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
+ if (key > baseValue)
+ key = key - baseValue;
+ return BlindedImm32(baseValue - key, key);
+ }
+
+ BlindedImm32 andBlindedConstant(Imm32 imm)
+ {
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t mask = 0;
+ uint32_t key = keyForConstant(baseValue, mask);
+ ASSERT((baseValue & mask) == baseValue);
+ return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
+ }
+
+ BlindedImm32 orBlindedConstant(Imm32 imm)
+ {
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t mask = 0;
+ uint32_t key = keyForConstant(baseValue, mask);
+ ASSERT((baseValue & mask) == baseValue);
+ return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
+ }
+
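+    // Materialize a blinded constant: load value ^ key, then xor the key
+    // back in. Neither emitted immediate equals the untrusted constant.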
+ void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
+ {
+ move(constant.value1, dest);
+ xor32(constant.value2, dest);
+ }
+
+ void add32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ add32(key.value1, dest);
+ add32(key.value2, dest);
+ } else
+ add32(imm.asTrustedImm32(), dest);
+ }
+
+ void addPtr(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ addPtr(key.value1, dest);
+ addPtr(key.value2, dest);
+ } else
+ addPtr(imm.asTrustedImm32(), dest);
+ }
+
+ void and32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = andBlindedConstant(imm);
+ and32(key.value1, dest);
+ and32(key.value2, dest);
+ } else
+ and32(imm.asTrustedImm32(), dest);
+ }
+
+ void andPtr(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = andBlindedConstant(imm);
+ andPtr(key.value1, dest);
+ andPtr(key.value2, dest);
+ } else
+ andPtr(imm.asTrustedImm32(), dest);
+ }
+
+ void and32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ if (src == dest)
+ return and32(imm.asTrustedImm32(), dest);
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ and32(src, dest);
+ } else
+ and32(imm.asTrustedImm32(), src, dest);
+ }
+
+ void move(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm))
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ else
+ move(imm.asTrustedImm32(), dest);
+ }
+
+ void or32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ if (src == dest)
+ return or32(imm, dest);
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ or32(src, dest);
+ } else
+ or32(imm.asTrustedImm32(), src, dest);
+ }
+
+ void or32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = orBlindedConstant(imm);
+ or32(key.value1, dest);
+ or32(key.value2, dest);
+ } else
+ or32(imm.asTrustedImm32(), dest);
+ }
+
+ void poke(Imm32 value, int index = 0)
+ {
+ store32(value, addressForPoke(index));
+ }
+
+ void poke(ImmPtr value, int index = 0)
+ {
+ storePtr(value, addressForPoke(index));
+ }
+
+#if CPU(X86_64) || CPU(ARM64)
+ void poke(Imm64 value, int index = 0)
+ {
+ store64(value, addressForPoke(index));
+ }
+#endif // CPU(X86_64) || CPU(ARM64)
+
+ void store32(Imm32 imm, Address dest)
+ {
+ if (shouldBlind(imm)) {
+#if CPU(X86) || CPU(X86_64)
+ BlindedImm32 blind = xorBlindConstant(imm);
+ store32(blind.value1, dest);
+ xor32(blind.value2, dest);
+#else // CPU(X86) || CPU(X86_64)
+ if (haveScratchRegisterForBlinding()) {
+ loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
+ store32(scratchRegisterForBlinding(), dest);
+ } else {
+ // If we don't have a scratch register available for use, we'll just
+ // place a random number of nops.
+ uint32_t nopCount = random() & 3;
+ while (nopCount--)
+ nop();
+ store32(imm.asTrustedImm32(), dest);
+ }
+#endif // CPU(X86) || CPU(X86_64)
+ } else
+ store32(imm.asTrustedImm32(), dest);
+ }
+
+ void sub32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ sub32(key.value1, dest);
+ sub32(key.value2, dest);
+ } else
+ sub32(imm.asTrustedImm32(), dest);
+ }
+
+ void subPtr(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ subPtr(key.value1, dest);
+ subPtr(key.value2, dest);
+ } else
+ subPtr(imm.asTrustedImm32(), dest);
+ }
+
+ void xor32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 blind = xorBlindConstant(imm);
+ xor32(blind.value1, src, dest);
+ xor32(blind.value2, dest);
+ } else
+ xor32(imm.asTrustedImm32(), src, dest);
+ }
+
+ void xor32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 blind = xorBlindConstant(imm);
+ xor32(blind.value1, dest);
+ xor32(blind.value2, dest);
+ } else
+ xor32(imm.asTrustedImm32(), dest);
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
+ {
+ if (shouldBlind(right)) {
+ if (haveScratchRegisterForBlinding()) {
+ loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
+ return branch32(cond, left, scratchRegisterForBlinding());
+ }
+ // If we don't have a scratch register available for use, we'll just
+ // place a random number of nops.
+ uint32_t nopCount = random() & 3;
+ while (nopCount--)
+ nop();
+ return branch32(cond, left, right.asTrustedImm32());
+ }
+
+ return branch32(cond, left, right.asTrustedImm32());
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
+ {
+ if (src == dest)
+ ASSERT(haveScratchRegisterForBlinding());
+
+ if (shouldBlind(imm)) {
+ if (src == dest) {
+ move(src, scratchRegisterForBlinding());
+ src = scratchRegisterForBlinding();
+ }
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ return branchAdd32(cond, src, dest);
+ }
+ return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src == dest)
+ ASSERT(haveScratchRegisterForBlinding());
+
+ if (shouldBlind(imm)) {
+ if (src == dest) {
+ move(src, scratchRegisterForBlinding());
+ src = scratchRegisterForBlinding();
+ }
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ return branchMul32(cond, src, dest);
+ }
+ return branchMul32(cond, imm.asTrustedImm32(), src, dest);
+ }
+
+    // branchSub32 takes a scratch register because 32-bit platforms make use
+    // of this with src == dest, and on x86-32 we don't have a platform
+    // scratch register available.
+ Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
+ {
+ if (shouldBlind(imm)) {
+ ASSERT(scratch != dest);
+ ASSERT(scratch != src);
+ loadXorBlindedConstant(xorBlindConstant(imm), scratch);
+ return branchSub32(cond, src, scratch, dest);
+ }
+ return branchSub32(cond, src, imm.asTrustedImm32(), dest);
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ lshift32(trustedImm32ForShift(imm), dest);
+ }
+
+ void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
+ {
+ lshift32(src, trustedImm32ForShift(amount), dest);
+ }
+
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ rshift32(trustedImm32ForShift(imm), dest);
+ }
+
+ void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
+ {
+ rshift32(src, trustedImm32ForShift(amount), dest);
+ }
+
+ void urshift32(Imm32 imm, RegisterID dest)
+ {
+ urshift32(trustedImm32ForShift(imm), dest);
+ }
+
+ void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
+ {
+ urshift32(src, trustedImm32ForShift(amount), dest);
+ }
+};
+
+} // namespace JSC
+
+#else // ENABLE(ASSEMBLER)
+
+// If there is no assembler for this platform, at least allow code to make references to
+// some of the things it would otherwise define, albeit without giving that code any way
+// of doing anything useful.
+class MacroAssembler {
+private:
+ MacroAssembler() { }
+
+public:
+
+ enum RegisterID { NoRegister };
+ enum FPRegisterID { NoFPRegister };
+};
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssembler_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp
new file mode 100644
index 000000000..b0a9bf074
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2013, 2014 Apple Inc.
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "MacroAssemblerARM.h"
+
+#if OS(LINUX)
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <elf.h>
+#include <asm/hwcap.h>
+#endif
+
+namespace JSC {
+
+static bool isVFPPresent()
+{
+#if OS(LINUX)
+ int fd = open("/proc/self/auxv", O_RDONLY);
+ if (fd != -1) {
+ Elf32_auxv_t aux;
+ while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
+ if (aux.a_type == AT_HWCAP) {
+ close(fd);
+ return aux.a_un.a_val & HWCAP_VFP;
+ }
+ }
+ close(fd);
+ }
+#endif // OS(LINUX)
+
+#if (COMPILER(GCC_OR_CLANG) && defined(__VFP_FP__))
+ return true;
+#else
+ return false;
+#endif
+}
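+
+// A rough equivalent using getauxval(3), available on Linux with glibc 2.16
+// and newer (a sketch for reference only; the manual auxv parse above also
+// works with older C libraries):
+//
+//     #include <sys/auxv.h>
+//
+//     static bool isVFPPresentViaGetauxval()
+//     {
+//         return getauxval(AT_HWCAP) & HWCAP_VFP;
+//     }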
+
+const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent();
+
+#if CPU(ARMV5_OR_LOWER)
+/* On ARMv5 and below, natural alignment is required. */
+void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+{
+ ARMWord op2;
+
+ ASSERT(address.scale >= 0 && address.scale <= 3);
+ op2 = m_assembler.lsl(address.index, static_cast<int>(address.scale));
+
+ if (address.offset >= 0 && address.offset + 0x2 <= 0xff) {
+ m_assembler.add(ARMRegisters::S0, address.base, op2);
+ m_assembler.halfDtrUp(ARMAssembler::LoadUint16, dest, ARMRegisters::S0, ARMAssembler::getOp2Half(address.offset));
+ m_assembler.halfDtrUp(ARMAssembler::LoadUint16, ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Half(address.offset + 0x2));
+ } else if (address.offset < 0 && address.offset >= -0xff) {
+ m_assembler.add(ARMRegisters::S0, address.base, op2);
+ m_assembler.halfDtrDown(ARMAssembler::LoadUint16, dest, ARMRegisters::S0, ARMAssembler::getOp2Half(-address.offset));
+ m_assembler.halfDtrDown(ARMAssembler::LoadUint16, ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Half(-address.offset - 0x2));
+ } else {
+ m_assembler.moveImm(address.offset, ARMRegisters::S0);
+ m_assembler.add(ARMRegisters::S0, ARMRegisters::S0, op2);
+ m_assembler.halfDtrUpRegister(ARMAssembler::LoadUint16, dest, address.base, ARMRegisters::S0);
+ m_assembler.add(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::Op2Immediate | 0x2);
+ m_assembler.halfDtrUpRegister(ARMAssembler::LoadUint16, ARMRegisters::S0, address.base, ARMRegisters::S0);
+ }
+ m_assembler.orr(dest, dest, m_assembler.lsl(ARMRegisters::S0, 16));
+}
+#endif // CPU(ARMV5_OR_LOWER)
+
+#if ENABLE(MASM_PROBE)
+
+#define INDENT printIndent(indentation)
+
+void MacroAssemblerARM::printCPURegisters(CPUState& cpu, int indentation)
+{
+ #define PRINT_GPREGISTER(_type, _regName) { \
+ int32_t value = reinterpret_cast<int32_t>(cpu._regName); \
+        INDENT, dataLogF("%5s: 0x%08x %d\n", #_regName, value, value); \
+ }
+ FOR_EACH_CPU_GPREGISTER(PRINT_GPREGISTER)
+ FOR_EACH_CPU_SPECIAL_REGISTER(PRINT_GPREGISTER)
+ #undef PRINT_GPREGISTER
+
+ #define PRINT_FPREGISTER(_type, _regName) { \
+ uint64_t* u = reinterpret_cast<uint64_t*>(&cpu._regName); \
+ double* d = reinterpret_cast<double*>(&cpu._regName); \
+ INDENT, dataLogF("%5s: 0x%016llx %.13g\n", #_regName, *u, *d); \
+ }
+ FOR_EACH_CPU_FPREGISTER(PRINT_FPREGISTER)
+ #undef PRINT_FPREGISTER
+}
+
+#undef INDENT
+
+void MacroAssemblerARM::printRegister(MacroAssemblerARM::CPUState& cpu, RegisterID regID)
+{
+ const char* name = CPUState::registerName(regID);
+ union {
+ void* voidPtr;
+ intptr_t intptrValue;
+ } u;
+ u.voidPtr = cpu.registerValue(regID);
+ dataLogF("%s:<%p %ld>", name, u.voidPtr, u.intptrValue);
+}
+
+void MacroAssemblerARM::printRegister(MacroAssemblerARM::CPUState& cpu, FPRegisterID regID)
+{
+ const char* name = CPUState::registerName(regID);
+ union {
+ double doubleValue;
+ uint64_t uint64Value;
+ } u;
+ u.doubleValue = cpu.registerValue(regID);
+ dataLogF("%s:<0x%016llx %.13g>", name, u.uint64Value, u.doubleValue);
+}
+
+extern "C" void ctiMasmProbeTrampoline();
+
+// For details on "What code is emitted for the probe?" and "What values are in
+// the saved registers?", see comment for MacroAssemblerX86Common::probe() in
+// MacroAssemblerX86Common.cpp.
+
+void MacroAssemblerARM::probe(MacroAssemblerARM::ProbeFunction function, void* arg1, void* arg2)
+{
+ push(RegisterID::sp);
+ push(RegisterID::lr);
+ push(RegisterID::ip);
+ push(RegisterID::S0);
+    // The following instructions use RegisterID::S0, so they must come after we push S0 above.
+ push(trustedImm32FromPtr(arg2));
+ push(trustedImm32FromPtr(arg1));
+ push(trustedImm32FromPtr(function));
+
+ move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::S0);
+ m_assembler.blx(RegisterID::S0);
+}
+#endif // ENABLE(MASM_PROBE)
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
new file mode 100644
index 000000000..6cda896a3
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -0,0 +1,1523 @@
+/*
+ * Copyright (C) 2008, 2013, 2014 Apple Inc.
+ * Copyright (C) 2009, 2010 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARM_h
+#define MacroAssemblerARM_h
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "ARMAssembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler, MacroAssemblerARM> {
+ static const int DoubleConditionMask = 0x0f;
+ static const int DoubleConditionBitSpecial = 0x10;
+ COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
+public:
+ typedef ARMRegisters::FPRegisterID FPRegisterID;
+
+ enum RelationalCondition {
+ Equal = ARMAssembler::EQ,
+ NotEqual = ARMAssembler::NE,
+ Above = ARMAssembler::HI,
+ AboveOrEqual = ARMAssembler::CS,
+ Below = ARMAssembler::CC,
+ BelowOrEqual = ARMAssembler::LS,
+ GreaterThan = ARMAssembler::GT,
+ GreaterThanOrEqual = ARMAssembler::GE,
+ LessThan = ARMAssembler::LT,
+ LessThanOrEqual = ARMAssembler::LE
+ };
+
+ enum ResultCondition {
+ Overflow = ARMAssembler::VS,
+ Signed = ARMAssembler::MI,
+ PositiveOrZero = ARMAssembler::PL,
+ Zero = ARMAssembler::EQ,
+ NonZero = ARMAssembler::NE
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = ARMAssembler::EQ,
+ DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
+ DoubleGreaterThan = ARMAssembler::GT,
+ DoubleGreaterThanOrEqual = ARMAssembler::GE,
+ DoubleLessThan = ARMAssembler::CC,
+ DoubleLessThanOrEqual = ARMAssembler::LS,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
+ DoubleNotEqualOrUnordered = ARMAssembler::NE,
+ DoubleGreaterThanOrUnordered = ARMAssembler::HI,
+ DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
+ DoubleLessThanOrUnordered = ARMAssembler::LT,
+ DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
+ };
+
+ static const RegisterID stackPointerRegister = ARMRegisters::sp;
+ static const RegisterID framePointerRegister = ARMRegisters::fp;
+ static const RegisterID linkRegister = ARMRegisters::lr;
+
+ static const Scale ScalePtr = TimesFour;
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.adds(dest, dest, src);
+ }
+
+ void add32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.adds(dest, op1, op2);
+ }
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ load32(address, ARMRegisters::S1);
+ add32(imm, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.adds(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1);
+ m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S1, ARMRegisters::S1, 0);
+ add32(ARMRegisters::S1, dest);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, ARMRegisters::S1);
+ add32(ARMRegisters::S1, dest);
+ }
+
+ void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.adds(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.bitAnds(dest, dest, src);
+ }
+
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.bitAnds(dest, op1, op2);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
+ if (w & ARMAssembler::Op2InvertedImmediate)
+ m_assembler.bics(dest, dest, w & ~ARMAssembler::Op2InvertedImmediate);
+ else
+ m_assembler.bitAnds(dest, dest, w);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
+ if (w & ARMAssembler::Op2InvertedImmediate)
+ m_assembler.bics(dest, src, w & ~ARMAssembler::Op2InvertedImmediate);
+ else
+ m_assembler.bitAnds(dest, src, w);
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ load32(src, ARMRegisters::S1);
+ and32(ARMRegisters::S1, dest);
+ }
+
+ void lshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ lshift32(dest, shiftAmount, dest);
+ }
+
+ void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ ARMWord w = ARMAssembler::getOp2Byte(0x1f);
+ m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);
+
+ m_assembler.movs(dest, m_assembler.lslRegister(src, ARMRegisters::S0));
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.movs(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.movs(dest, m_assembler.lsl(src, imm.m_value & 0x1f));
+ }
+
+ void mul32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op2 == dest) {
+ if (op1 == dest) {
+ move(op2, ARMRegisters::S0);
+ op2 = ARMRegisters::S0;
+ } else {
+ // Swap the operands.
+ RegisterID tmp = op1;
+ op1 = op2;
+ op2 = tmp;
+ }
+ }
+ m_assembler.muls(dest, op1, op2);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ mul32(src, dest, dest);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, ARMRegisters::S0);
+ m_assembler.muls(dest, src, ARMRegisters::S0);
+ }
+
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.rsbs(srcDest, srcDest, ARMAssembler::getOp2Byte(0));
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orrs(dest, dest, src);
+ }
+
+ void or32(RegisterID src, AbsoluteAddress dest)
+ {
+ move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
+ load32(Address(ARMRegisters::S0), ARMRegisters::S1);
+ or32(src, ARMRegisters::S1);
+ store32(ARMRegisters::S1, ARMRegisters::S0);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.orrs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.orrs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.orrs(dest, op1, op2);
+ }
+
+ void rshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ rshift32(dest, shiftAmount, dest);
+ }
+
+ void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ ARMWord w = ARMAssembler::getOp2Byte(0x1f);
+ m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);
+
+ m_assembler.movs(dest, m_assembler.asrRegister(src, ARMRegisters::S0));
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ rshift32(dest, imm, dest);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f));
+ }
+
+ void urshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ urshift32(dest, shiftAmount, dest);
+ }
+
+ void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ ARMWord w = ARMAssembler::getOp2Byte(0x1f);
+ m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);
+
+ m_assembler.movs(dest, m_assembler.lsrRegister(src, ARMRegisters::S0));
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.movs(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subs(dest, dest, src);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.subs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void sub32(TrustedImm32 imm, Address address)
+ {
+ load32(address, ARMRegisters::S1);
+ sub32(imm, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, ARMRegisters::S1);
+ sub32(ARMRegisters::S1, dest);
+ }
+
+ void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.subs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.eors(dest, dest, src);
+ }
+
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.eors(dest, op1, op2);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.mvns(dest, dest);
+ else
+ m_assembler.eors(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.mvns(dest, src);
+ else
+ m_assembler.eors(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void countLeadingZeros32(RegisterID src, RegisterID dest)
+ {
+#if WTF_ARM_ARCH_AT_LEAST(5)
+ m_assembler.clz(dest, src);
+#else
+ UNUSED_PARAM(src);
+ UNUSED_PARAM(dest);
+ RELEASE_ASSERT_NOT_REACHED();
+#endif
+ }
+
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.offset);
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void load8(const void* address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address), ARMRegisters::S0);
+ m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, ARMRegisters::S0, 0);
+ }
+
+ void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.dataTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.offset);
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.baseIndexTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.dataTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.offset);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+#if CPU(ARMV5_OR_LOWER)
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
+#else
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+#endif
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(address, dest);
+ }
+
+ void abortWithReason(AbortReason reason)
+ {
+ move(TrustedImm32(reason), ARMRegisters::S0);
+ breakpoint();
+ }
+
+ void abortWithReason(AbortReason reason, intptr_t misc)
+ {
+ move(TrustedImm32(misc), ARMRegisters::S1);
+ abortWithReason(reason);
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result(this);
+ ASSERT(address.offset >= 0 && address.offset <= 255);
+ m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, address.base, address.offset);
+ return result;
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabel32 dataLabel(this);
+ m_assembler.ldrUniqueImmediate(ARMRegisters::S0, 0);
+ m_assembler.dtrUpRegister(ARMAssembler::LoadUint32, dest, address.base, ARMRegisters::S0);
+ return dataLabel;
+ }
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value >= -4095 && value <= 4095;
+ }
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabelCompact dataLabel(this);
+ ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+ if (address.offset >= 0)
+ m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, address.base, address.offset);
+ else
+ m_assembler.dtrDown(ARMAssembler::LoadUint32, dest, address.base, address.offset);
+ return dataLabel;
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ DataLabel32 dataLabel(this);
+ m_assembler.ldrUniqueImmediate(ARMRegisters::S0, 0);
+ m_assembler.dtrUpRegister(ARMAssembler::StoreUint32, src, address.base, ARMRegisters::S0);
+ return dataLabel;
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+ m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void store8(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.dtrUp(ARMAssembler::StoreUint8, src, address.base, address.offset);
+ }
+
+ void store8(RegisterID src, const void* address)
+ {
+ move(TrustedImmPtr(address), ARMRegisters::S0);
+ m_assembler.dtrUp(ARMAssembler::StoreUint8, src, ARMRegisters::S0, 0);
+ }
+
+ void store8(TrustedImm32 imm, ImplicitAddress address)
+ {
+ move(imm, ARMRegisters::S1);
+ store8(ARMRegisters::S1, address);
+ }
+
+ void store8(TrustedImm32 imm, const void* address)
+ {
+ move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
+ move(imm, ARMRegisters::S1);
+ m_assembler.dtrUp(ARMAssembler::StoreUint8, ARMRegisters::S1, ARMRegisters::S0, 0);
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+ m_assembler.baseIndexTransfer16(ARMAssembler::StoreUint16, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.dataTransfer32(ARMAssembler::StoreUint32, src, address.base, address.offset);
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ move(imm, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address);
+ }
+
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ move(imm, ARMRegisters::S1);
+ m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, ARMRegisters::S1, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void store32(RegisterID src, const void* address)
+ {
+ m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+ m_assembler.dtrUp(ARMAssembler::StoreUint32, src, ARMRegisters::S0, 0);
+ }
+
+ void store32(TrustedImm32 imm, const void* address)
+ {
+ m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+ m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
+ m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S1, ARMRegisters::S0, 0);
+ }
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.pop(dest);
+ }
+
+ void popPair(RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.pop(dest1);
+ m_assembler.pop(dest2);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.push(src);
+ }
+
+ void push(Address address)
+ {
+ load32(address, ARMRegisters::S1);
+ push(ARMRegisters::S1);
+ }
+
+ void push(TrustedImm32 imm)
+ {
+ move(imm, ARMRegisters::S0);
+ push(ARMRegisters::S0);
+ }
+
+ void pushPair(RegisterID src1, RegisterID src2)
+ {
+ m_assembler.push(src2);
+ m_assembler.push(src1);
+ }
+
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.moveImm(imm.m_value, dest);
+ }
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.mov(dest, src);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ move(TrustedImm32(imm), dest);
+ }
+
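+    // Swap two registers without a temporary using the XOR trick; note that
+    // this requires reg1 != reg2 (XOR-swapping a register with itself would
+    // zero it).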
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ xor32(reg1, reg2);
+ xor32(reg2, reg1);
+ xor32(reg1, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ load8(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ load8(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ move(TrustedImmPtr(left.m_ptr), ARMRegisters::S1);
+ load8(Address(ARMRegisters::S1), ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
+ {
+ m_assembler.cmp(left, right);
+ return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right, int useConstantPool = 0)
+ {
+ internalCompare32(left, right);
+ return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ load32(right, ARMRegisters::S1);
+ return branch32(cond, left, ARMRegisters::S1);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ load32(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ load32(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ load32(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ load32WithUnalignedHalfWords(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load8(address, ARMRegisters::S1);
+ return branchTest32(cond, ARMRegisters::S1, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load8(address, ARMRegisters::S1);
+ return branchTest32(cond, ARMRegisters::S1, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
+ load8(Address(ARMRegisters::S1), ARMRegisters::S1);
+ return branchTest32(cond, ARMRegisters::S1, mask);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ m_assembler.tst(reg, mask);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
+ if (w & ARMAssembler::Op2InvertedImmediate)
+ m_assembler.bics(ARMRegisters::S0, reg, w & ~ARMAssembler::Op2InvertedImmediate);
+ else
+ m_assembler.tst(reg, w);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load32(address, ARMRegisters::S1);
+ return branchTest32(cond, ARMRegisters::S1, mask);
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load32(address, ARMRegisters::S1);
+ return branchTest32(cond, ARMRegisters::S1, mask);
+ }
+
+ Jump jump()
+ {
+ return Jump(m_assembler.jmp());
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.bx(target);
+ }
+
+ void jump(Address address)
+ {
+ load32(address, ARMRegisters::pc);
+ }
+
+ void jump(AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), ARMRegisters::S0);
+ load32(Address(ARMRegisters::S0, 0), ARMRegisters::pc);
+ }
+
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.vmov(dest1, dest2, src);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID)
+ {
+ m_assembler.vmov(dest, src1, src2);
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
+ || (cond == NonZero) || (cond == PositiveOrZero));
+ add32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
+ || (cond == NonZero) || (cond == PositiveOrZero));
+ add32(op1, op2, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
+ || (cond == NonZero) || (cond == PositiveOrZero));
+ add32(imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
+ || (cond == NonZero) || (cond == PositiveOrZero));
+ add32(src, imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
+ || (cond == NonZero) || (cond == PositiveOrZero));
+ add32(imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ load32(src, ARMRegisters::S0);
+ return branchAdd32(cond, dest, ARMRegisters::S0, dest);
+ }
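+
+    // mull32 computes the full 64-bit product (high word into S1, low word
+    // into dest), then compares the high word with the sign extension of the
+    // low word; they differ exactly when the product overflows 32 bits, so a
+    // following NonZero branch detects the overflow.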
+ void mull32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op2 == dest) {
+ if (op1 == dest) {
+ move(op2, ARMRegisters::S0);
+ op2 = ARMRegisters::S0;
+ } else {
+ // Swap the operands.
+ RegisterID tmp = op1;
+ op1 = op2;
+ op2 = tmp;
+ }
+ }
+ m_assembler.mull(ARMRegisters::S1, dest, op1, op2);
+ m_assembler.cmp(ARMRegisters::S1, m_assembler.asr(dest, 31));
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ mull32(src1, src2, dest);
+ cond = NonZero;
+        } else
+            mul32(src1, src2, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchMul32(cond, src, dest, dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ move(imm, ARMRegisters::S0);
+ mull32(ARMRegisters::S0, src, dest);
+ cond = NonZero;
+        } else
+            mul32(imm, src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(src, imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ m_assembler.subs(dest, op1, op2);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ neg32(srcDest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+ or32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ PatchableJump patchableJump()
+ {
+ return PatchableJump(m_assembler.jmp(ARMAssembler::AL, 1));
+ }
+
+ PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+ {
+ internalCompare32(reg, imm);
+ Jump jump(m_assembler.loadBranchTarget(ARMRegisters::S1, ARMCondition(cond), true));
+ m_assembler.bx(ARMRegisters::S1, ARMCondition(cond));
+ return PatchableJump(jump);
+ }
+
+ void breakpoint()
+ {
+ m_assembler.bkpt(0);
+ }
+
+ Call nearCall()
+ {
+ m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
+ return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
+ }
+
+ Call call(RegisterID target)
+ {
+ return Call(m_assembler.blx(target), Call::None);
+ }
+
+ void call(Address address)
+ {
+ call32(address.base, address.offset);
+ }
+
+ void ret()
+ {
+ m_assembler.bx(linkRegister);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmp(left, right);
+ m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
+ m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
+ m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
+ m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
+ }
+
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ load8(left, ARMRegisters::S1);
+ compare32(cond, ARMRegisters::S1, right, dest);
+ }
+
+ void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.tst(reg, reg);
+ else
+ m_assembler.tst(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
+ m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
+ m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
+ }
+
+ void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ load32(address, ARMRegisters::S1);
+ test32(cond, ARMRegisters::S1, mask, dest);
+ }
+
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ load8(address, ARMRegisters::S1);
+ test32(cond, ARMRegisters::S1, mask, dest);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.add(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, ARMRegisters::S1);
+ add32(imm, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address.m_ptr);
+ }
+
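+    // 64-bit add of a 32-bit immediate, performed in two 32-bit halves: the
+    // low word is adjusted with adds/subs, and the resulting carry/borrow is
+    // then folded into the high word with adc/sbc.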
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ ARMWord tmp;
+
+ move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
+ m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S0, ARMRegisters::S1, 0);
+
+ if ((tmp = ARMAssembler::getOp2(imm.m_value)) != ARMAssembler::InvalidImmediate)
+ m_assembler.adds(ARMRegisters::S0, ARMRegisters::S0, tmp);
+ else if ((tmp = ARMAssembler::getOp2(-imm.m_value)) != ARMAssembler::InvalidImmediate)
+ m_assembler.subs(ARMRegisters::S0, ARMRegisters::S0, tmp);
+ else {
+ m_assembler.adds(ARMRegisters::S0, ARMRegisters::S0, m_assembler.getImm(imm.m_value, ARMRegisters::S1));
+ move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
+ }
+ m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S0, ARMRegisters::S1, 0);
+
+ m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S0, ARMRegisters::S1, sizeof(ARMWord));
+ if (imm.m_value >= 0)
+ m_assembler.adc(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+ else
+ m_assembler.sbc(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+ m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S0, ARMRegisters::S1, sizeof(ARMWord));
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, ARMRegisters::S1);
+ sub32(imm, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address.m_ptr);
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+ m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, ARMRegisters::S0, 0);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ load32(left.m_ptr, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ void relativeTableJump(RegisterID index, int scale)
+ {
+ ASSERT(scale >= 0 && scale <= 31);
+ m_assembler.add(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(index, scale));
+
+ // NOP the default prefetching
+ m_assembler.mov(ARMRegisters::r0, ARMRegisters::r0);
+ }
+
+ Call call()
+ {
+ ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
+ m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
+ return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
+ }
+
+ Call tailRecursiveCall()
+ {
+ return Call::fromTailJump(jump());
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ return Call::fromTailJump(oldJump);
+ }
+
+ DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+ {
+ DataLabelPtr dataLabel(this);
+ m_assembler.ldrUniqueImmediate(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
+ return dataLabel;
+ }
+
+ DataLabel32 moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
+ {
+ DataLabel32 dataLabel(this);
+ m_assembler.ldrUniqueImmediate(dest, static_cast<ARMWord>(initialValue.m_value));
+ return dataLabel;
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
+ dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
+ Jump jump = branch32(cond, left, ARMRegisters::S1, true);
+ return jump;
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ load32(left, ARMRegisters::S1);
+ ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
+ dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
+ Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
+ return jump;
+ }
+
+ Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ load32(left, ARMRegisters::S1);
+ ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
+ dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
+ Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
+ return jump;
+ }
+
+ DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address);
+ return dataLabel;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+ {
+ return storePtrWithPatch(TrustedImmPtr(0), address);
+ }
+
+ // Floating point operators
+ static bool supportsFloatingPoint()
+ {
+ return s_isVFPPresent;
+ }
+
+ static bool supportsFloatingPointTruncate()
+ {
+ return false;
+ }
+
+ static bool supportsFloatingPointSqrt()
+ {
+ return s_isVFPPresent;
+ }
+ static bool supportsFloatingPointAbs() { return false; }
+
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ m_assembler.baseIndexTransferFloat(ARMAssembler::LoadFloat, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ m_assembler.dataTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.offset);
+ }
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ m_assembler.baseIndexTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ {
+ move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
+ m_assembler.doubleDtrUp(ARMAssembler::LoadDouble, dest, ARMRegisters::S0, 0);
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ m_assembler.baseIndexTransferFloat(ARMAssembler::StoreFloat, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.offset);
+ }
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ m_assembler.baseIndexTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void storeDouble(FPRegisterID src, TrustedImmPtr address)
+ {
+ move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
+ m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, ARMRegisters::S0, 0);
+ }
+
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.vmov_f64(dest, src);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vadd_f64(dest, dest, src);
+ }
+
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vadd_f64(dest, op1, op2);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, ARMRegisters::SD0);
+ addDouble(ARMRegisters::SD0, dest);
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ loadDouble(TrustedImmPtr(address.m_ptr), ARMRegisters::SD0);
+ addDouble(ARMRegisters::SD0, dest);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vdiv_f64(dest, dest, src);
+ }
+
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vdiv_f64(dest, op1, op2);
+ }
+
+ void divDouble(Address src, FPRegisterID dest)
+ {
+ RELEASE_ASSERT_NOT_REACHED(); // Untested
+ loadDouble(src, ARMRegisters::SD0);
+ divDouble(ARMRegisters::SD0, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsub_f64(dest, dest, src);
+ }
+
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vsub_f64(dest, op1, op2);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, ARMRegisters::SD0);
+ subDouble(ARMRegisters::SD0, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmul_f64(dest, dest, src);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, ARMRegisters::SD0);
+ mulDouble(ARMRegisters::SD0, dest);
+ }
+
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vmul_f64(dest, op1, op2);
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsqrt_f64(dest, src);
+ }
+
+ void absDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vabs_f64(dest, src);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vneg_f64(dest, src);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmov_vfp32(dest << 1, src);
+ m_assembler.vcvt_f64_s32(dest, dest << 1);
+ }
+
+ void convertInt32ToDouble(Address src, FPRegisterID dest)
+ {
+ load32(src, ARMRegisters::S1);
+ convertInt32ToDouble(ARMRegisters::S1, dest);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
+ move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1);
+ load32(Address(ARMRegisters::S1), ARMRegisters::S1);
+ convertInt32ToDouble(ARMRegisters::S1, dest);
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.vcvt_f64_f32(dst, src);
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.vcvt_f32_f64(dst, src);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ m_assembler.vcmp_f64(left, right);
+ m_assembler.vmrs_apsr();
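+        // For the "special" conditions, a compare of S0 with itself is
+        // conditionally executed when V is set, i.e. when the FP compare was
+        // unordered; cmp x, x sets Z and clears N and V, which flips the
+        // outcome of the EQ/NE test below for NaN operands.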
+ if (cond & DoubleConditionBitSpecial)
+ m_assembler.cmp(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
+ return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
+ }
+
+    // Truncates 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, INT_MIN).
+ enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ truncateDoubleToInt32(src, dest);
+
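+        // VCVT saturates out-of-range doubles to INT_MIN (0x80000000) or
+        // INT_MAX (0x7fffffff). Adding 1 and clearing bit 0 maps exactly
+        // those two values to 0x80000000, so the compare below catches any
+        // potentially failed truncation.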
+ m_assembler.add(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1));
+ m_assembler.bic(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1));
+
+ ARMWord w = ARMAssembler::getOp2(0x80000000);
+ ASSERT(w != ARMAssembler::InvalidImmediate);
+ m_assembler.cmp(ARMRegisters::S0, w);
+ return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE));
+ }
+
+ Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ truncateDoubleToUint32(src, dest);
+
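+        // The unsigned saturation values are 0 and 0xffffffff; adding 1
+        // (wrapping) and clearing bit 0 maps exactly those two values to 0,
+        // which the compare below detects.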
+ m_assembler.add(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1));
+ m_assembler.bic(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1));
+
+ m_assembler.cmp(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+ return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE));
+ }
+
+ // Result is undefined if the value is outside of the integer range.
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.vcvt_s32_f64(ARMRegisters::SD0 << 1, src);
+ m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
+ }
+
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.vcvt_u32_f64(ARMRegisters::SD0 << 1, src);
+ m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
+ }
+
+    // Converts 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
+ {
+ m_assembler.vcvt_s32_f64(ARMRegisters::SD0 << 1, src);
+ m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
+
+ // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+ m_assembler.vcvt_f64_s32(ARMRegisters::SD0, ARMRegisters::SD0 << 1);
+ failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));
+
+        // If the result is zero, it might have been -0.0, and 0.0 compares equal to -0.0.
+ if (negZeroCheck)
+ failureCases.append(branchTest32(Zero, dest));
+ }
+
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.mov(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+ convertInt32ToDouble(ARMRegisters::S0, scratch);
+ return branchDouble(DoubleNotEqual, reg, scratch);
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.mov(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+ convertInt32ToDouble(ARMRegisters::S0, scratch);
+ return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+ }
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ ASSERT((static_cast<uint32_t>(cond & 0x0fffffff)) == 0 && static_cast<uint32_t>(cond) < static_cast<uint32_t>(ARMAssembler::AL));
+ return static_cast<RelationalCondition>(cond ^ 0x10000000);
+ }
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ void memoryFence()
+ {
+ m_assembler.dmbSY();
+ }
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation())));
+ }
+
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ ARMAssembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+        return ARMAssembler::maxJumpReplacementSize();
+ }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+ static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+
+ static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ return label.labelAtOffset(0);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
+ {
+ ARMAssembler::revertBranchPtrWithPatch(instructionStart.dataLocation(), reg, reinterpret_cast<uintptr_t>(initialValue) & 0xffff);
+ }
+
+ static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+#if ENABLE(MASM_PROBE)
+ // Methods required by the MASM_PROBE mechanism as defined in
+ // AbstractMacroAssembler.h.
+ static void printCPURegisters(CPUState&, int indentation = 0);
+ static void printRegister(CPUState&, RegisterID);
+ static void printRegister(CPUState&, FPRegisterID);
+ void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
+#endif // ENABLE(MASM_PROBE)
+
+protected:
+ ARMAssembler::Condition ARMCondition(RelationalCondition cond)
+ {
+ return static_cast<ARMAssembler::Condition>(cond);
+ }
+
+ ARMAssembler::Condition ARMCondition(ResultCondition cond)
+ {
+ return static_cast<ARMAssembler::Condition>(cond);
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ m_assembler.ensureSpace(insnSpace, constSpace);
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_assembler.sizeOfConstantPool();
+ }
+
+ void call32(RegisterID base, int32_t offset)
+ {
+ load32(Address(base, offset), ARMRegisters::S1);
+ m_assembler.blx(ARMRegisters::S1);
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ void internalCompare32(RegisterID left, TrustedImm32 right)
+ {
+ ARMWord tmp = (static_cast<unsigned>(right.m_value) == 0x80000000) ? ARMAssembler::InvalidImmediate : m_assembler.getOp2(-right.m_value);
+ if (tmp != ARMAssembler::InvalidImmediate)
+ m_assembler.cmn(left, tmp);
+ else
+ m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
+ }
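+
+    // Illustrative note (not part of the original file): comparing against a
+    // negative immediate is emitted as CMN (compare-negative) whenever the
+    // negated value fits an ARM operand-2 encoding; 0x80000000 is excluded
+    // because its negation overflows. E.g.
+    //   internalCompare32(reg, TrustedImm32(-1)); // cmn reg, #1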
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ ARMAssembler::linkCall(code, call.m_label, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+#if ENABLE(MASM_PROBE)
+ inline TrustedImm32 trustedImm32FromPtr(void* ptr)
+ {
+ return TrustedImm32(TrustedImmPtr(ptr));
+ }
+
+ inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function)
+ {
+ return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+ }
+
+ inline TrustedImm32 trustedImm32FromPtr(void (*function)())
+ {
+ return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+ }
+#endif
+
+ static const bool s_isVFPPresent;
+};
+
+}
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#endif // MacroAssemblerARM_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
new file mode 100644
index 000000000..c82585952
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
@@ -0,0 +1,2949 @@
+/*
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARM64_h
+#define MacroAssemblerARM64_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "ARM64Assembler.h"
+#include "AbstractMacroAssembler.h"
+#include <wtf/MathExtras.h>
+
+namespace JSC {
+
+class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler, MacroAssemblerARM64> {
+ static const RegisterID dataTempRegister = ARM64Registers::ip0;
+ static const RegisterID memoryTempRegister = ARM64Registers::ip1;
+ static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
+ static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
+ static const intptr_t maskHalfWord0 = 0xffffl;
+ static const intptr_t maskHalfWord1 = 0xffff0000l;
+ static const intptr_t maskUpperWord = 0xffffffff00000000l;
+
+    // 4 instructions: 3 to load the function pointer, plus blr.
+ static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;
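+
+    // Illustrative sketch (not part of the original file) of the patchable
+    // call sequence this offset assumes; the exact emission may differ:
+    //   movz x16, #bits[0:15]
+    //   movk x16, #bits[16:31], lsl #16
+    //   movk x16, #bits[32:47], lsl #32
+    //   blr  x16   ; the call label points past this instruction, so the
+    //              ; movz sits at label - 16 bytes.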
+
+public:
+ MacroAssemblerARM64()
+ : m_dataMemoryTempRegister(this, dataTempRegister)
+ , m_cachedMemoryTempRegister(this, memoryTempRegister)
+ , m_makeJumpPatchable(false)
+ {
+ }
+
+ typedef ARM64Assembler::LinkRecord LinkRecord;
+ typedef ARM64Assembler::JumpType JumpType;
+ typedef ARM64Assembler::JumpLinkType JumpLinkType;
+ typedef ARM64Assembler::Condition Condition;
+
+ static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
+ static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
+ void* unlinkedCode() { return m_assembler.unlinkedCode(); }
+ static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); }
+ static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); }
+ static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); }
+ static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
+ static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARM64Assembler::link(record, from, to); }
+
+ static const Scale ScalePtr = TimesEight;
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
+ return !(value & ~0x3ff8);
+ }
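+
+    // Illustrative note (not part of the original file): the mask above admits
+    // exactly the 8-byte-aligned offsets in [0, 0x3ff8]:
+    //   isCompactPtrAlignedAddressOffset(0x3ff8); // true
+    //   isCompactPtrAlignedAddressOffset(0x3ffc); // false - not 8-byte aligned
+    //   isCompactPtrAlignedAddressOffset(0x4000); // false - out of range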
+
+ enum RelationalCondition {
+ Equal = ARM64Assembler::ConditionEQ,
+ NotEqual = ARM64Assembler::ConditionNE,
+ Above = ARM64Assembler::ConditionHI,
+ AboveOrEqual = ARM64Assembler::ConditionHS,
+ Below = ARM64Assembler::ConditionLO,
+ BelowOrEqual = ARM64Assembler::ConditionLS,
+ GreaterThan = ARM64Assembler::ConditionGT,
+ GreaterThanOrEqual = ARM64Assembler::ConditionGE,
+ LessThan = ARM64Assembler::ConditionLT,
+ LessThanOrEqual = ARM64Assembler::ConditionLE
+ };
+
+ enum ResultCondition {
+ Overflow = ARM64Assembler::ConditionVS,
+ Signed = ARM64Assembler::ConditionMI,
+ PositiveOrZero = ARM64Assembler::ConditionPL,
+ Zero = ARM64Assembler::ConditionEQ,
+ NonZero = ARM64Assembler::ConditionNE
+ };
+
+ enum ZeroCondition {
+ IsZero = ARM64Assembler::ConditionEQ,
+ IsNonZero = ARM64Assembler::ConditionNE
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = ARM64Assembler::ConditionEQ,
+        DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! Checked for and handled specially in branchDouble().
+ DoubleGreaterThan = ARM64Assembler::ConditionGT,
+ DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
+ DoubleLessThan = ARM64Assembler::ConditionLO,
+ DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
+ // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! Checked for and handled specially in branchDouble().
+ DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
+ DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
+ DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
+ DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
+ DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE,
+ };
+
+ static const RegisterID stackPointerRegister = ARM64Registers::sp;
+ static const RegisterID framePointerRegister = ARM64Registers::fp;
+ static const RegisterID linkRegister = ARM64Registers::lr;
+
+ // FIXME: Get reasonable implementations for these
+ static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
+ static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
+
+ // Integer operations:
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.add<32>(dest, dest, src);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, dest, dest);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (isUInt12(imm.m_value))
+ m_assembler.add<32>(dest, src, UInt12(imm.m_value));
+ else if (isUInt12(-imm.m_value))
+ m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
+ else {
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.add<32>(dest, src, dataTempRegister);
+ }
+ }
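+
+    // Illustrative note (not part of the original file): the three paths above
+    // correspond roughly to the following emissions (x16 names ip0, the
+    // dataTempRegister; details may vary):
+    //   add32(TrustedImm32(42), src, dest);      // add wDest, wSrc, #42
+    //   add32(TrustedImm32(-42), src, dest);     // sub wDest, wSrc, #42
+    //   add32(TrustedImm32(0x12345), src, dest); // mov x16, #0x12345; add wDest, wSrc, w16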
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ load32(address, getCachedDataTempRegisterIDAndInvalidate());
+
+ if (isUInt12(imm.m_value))
+ m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+ else if (isUInt12(-imm.m_value))
+ m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+ else {
+ move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+
+ if (isUInt12(imm.m_value)) {
+ m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+ store32(dataTempRegister, address.m_ptr);
+ return;
+ }
+
+ if (isUInt12(-imm.m_value)) {
+ m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+ store32(dataTempRegister, address.m_ptr);
+ return;
+ }
+
+ move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, getCachedDataTempRegisterIDAndInvalidate());
+ add32(dataTempRegister, dest);
+ }
+
+ void add64(RegisterID src, RegisterID dest)
+ {
+ if (src == ARM64Registers::sp)
+ m_assembler.add<64>(dest, src, dest);
+ else
+ m_assembler.add<64>(dest, dest, src);
+ }
+
+ void add64(TrustedImm32 imm, RegisterID dest)
+ {
+ if (isUInt12(imm.m_value)) {
+ m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
+ return;
+ }
+ if (isUInt12(-imm.m_value)) {
+ m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
+ return;
+ }
+
+ signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(dest, dest, dataTempRegister);
+ }
+
+ void add64(TrustedImm64 imm, RegisterID dest)
+ {
+ intptr_t immediate = imm.m_value;
+
+ if (isUInt12(immediate)) {
+ m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
+ return;
+ }
+ if (isUInt12(-immediate)) {
+ m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(dest, dest, dataTempRegister);
+ }
+
+ void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (isUInt12(imm.m_value)) {
+ m_assembler.add<64>(dest, src, UInt12(imm.m_value));
+ return;
+ }
+ if (isUInt12(-imm.m_value)) {
+ m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
+ return;
+ }
+
+ signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(dest, src, dataTempRegister);
+ }
+
+ void add64(TrustedImm32 imm, Address address)
+ {
+ load64(address, getCachedDataTempRegisterIDAndInvalidate());
+
+ if (isUInt12(imm.m_value))
+ m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+ else if (isUInt12(-imm.m_value))
+ m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+ else {
+ signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
+ }
+
+ store64(dataTempRegister, address);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+
+ if (isUInt12(imm.m_value)) {
+ m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+ store64(dataTempRegister, address.m_ptr);
+ return;
+ }
+
+ if (isUInt12(-imm.m_value)) {
+ m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+ store64(dataTempRegister, address.m_ptr);
+ return;
+ }
+
+ signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
+ store64(dataTempRegister, address.m_ptr);
+ }
+
+ void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add64(imm, srcDest);
+ }
+
+ void add64(Address src, RegisterID dest)
+ {
+ load64(src, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(dest, dest, dataTempRegister);
+ }
+
+ void add64(AbsoluteAddress src, RegisterID dest)
+ {
+ load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(dest, dest, dataTempRegister);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ and32(dest, src, dest);
+ }
+
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.and_<32>(dest, op1, op2);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ and32(imm, dest, dest);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.and_<32>(dest, src, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.and_<32>(dest, src, dataTempRegister);
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ and32(dataTempRegister, dest);
+ }
+
+ void and64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.and_<64>(dest, dest, src);
+ }
+
+ void and64(TrustedImm32 imm, RegisterID dest)
+ {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
+
+ if (logicalImm.isValid()) {
+ m_assembler.and_<64>(dest, dest, logicalImm);
+ return;
+ }
+
+ signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.and_<64>(dest, dest, dataTempRegister);
+ }
+
+ void and64(TrustedImmPtr imm, RegisterID dest)
+ {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(reinterpret_cast<uint64_t>(imm.m_value));
+
+ if (logicalImm.isValid()) {
+ m_assembler.and_<64>(dest, dest, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.and_<64>(dest, dest, dataTempRegister);
+ }
+
+ void countLeadingZeros32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.clz<32>(dest, src);
+ }
+
+ void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.lsl<32>(dest, src, shiftAmount);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
+ }
+
+ void lshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ lshift32(dest, shiftAmount, dest);
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ lshift32(dest, imm, dest);
+ }
+
+ void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.lsl<64>(dest, src, shiftAmount);
+ }
+
+ void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f);
+ }
+
+ void lshift64(RegisterID shiftAmount, RegisterID dest)
+ {
+ lshift64(dest, shiftAmount, dest);
+ }
+
+ void lshift64(TrustedImm32 imm, RegisterID dest)
+ {
+ lshift64(dest, imm, dest);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mul<32>(dest, dest, src);
+ }
+
+ void mul64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mul<64>(dest, dest, src);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.mul<32>(dest, src, dataTempRegister);
+ }
+
+ void neg32(RegisterID dest)
+ {
+ m_assembler.neg<32>(dest, dest);
+ }
+
+ void neg64(RegisterID dest)
+ {
+ m_assembler.neg<64>(dest, dest);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ or32(dest, src, dest);
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.orr<32>(dest, op1, op2);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ or32(imm, dest, dest);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.orr<32>(dest, src, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.orr<32>(dest, src, dataTempRegister);
+ }
+
+ void or32(RegisterID src, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void or32(TrustedImm32 imm, Address address)
+ {
+ load32(address, getCachedDataTempRegisterIDAndInvalidate());
+ or32(imm, dataTempRegister, dataTempRegister);
+ store32(dataTempRegister, address);
+ }
+
+ void or64(RegisterID src, RegisterID dest)
+ {
+ or64(dest, src, dest);
+ }
+
+ void or64(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.orr<64>(dest, op1, op2);
+ }
+
+ void or64(TrustedImm32 imm, RegisterID dest)
+ {
+ or64(imm, dest, dest);
+ }
+
+ void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
+
+ if (logicalImm.isValid()) {
+ m_assembler.orr<64>(dest, src, logicalImm);
+ return;
+ }
+
+ signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.orr<64>(dest, src, dataTempRegister);
+ }
+
+ void or64(TrustedImm64 imm, RegisterID dest)
+ {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
+
+ if (logicalImm.isValid()) {
+ m_assembler.orr<64>(dest, dest, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.orr<64>(dest, dest, dataTempRegister);
+ }
+
+ void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
+ {
+ m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
+ }
+
+ void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.asr<32>(dest, src, shiftAmount);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
+ }
+
+ void rshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ rshift32(dest, shiftAmount, dest);
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ rshift32(dest, imm, dest);
+ }
+
+ void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.asr<64>(dest, src, shiftAmount);
+ }
+
+ void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
+ }
+
+ void rshift64(RegisterID shiftAmount, RegisterID dest)
+ {
+ rshift64(dest, shiftAmount, dest);
+ }
+
+ void rshift64(TrustedImm32 imm, RegisterID dest)
+ {
+ rshift64(dest, imm, dest);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sub<32>(dest, dest, src);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (isUInt12(imm.m_value)) {
+ m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
+ return;
+ }
+ if (isUInt12(-imm.m_value)) {
+ m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.sub<32>(dest, dest, dataTempRegister);
+ }
+
+ void sub32(TrustedImm32 imm, Address address)
+ {
+ load32(address, getCachedDataTempRegisterIDAndInvalidate());
+
+ if (isUInt12(imm.m_value))
+ m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+ else if (isUInt12(-imm.m_value))
+ m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+ else {
+ move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+
+ if (isUInt12(imm.m_value)) {
+ m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+ store32(dataTempRegister, address.m_ptr);
+ return;
+ }
+
+ if (isUInt12(-imm.m_value)) {
+ m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+ store32(dataTempRegister, address.m_ptr);
+ return;
+ }
+
+ move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, getCachedDataTempRegisterIDAndInvalidate());
+ sub32(dataTempRegister, dest);
+ }
+
+ void sub64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sub<64>(dest, dest, src);
+ }
+
+ void sub64(TrustedImm32 imm, RegisterID dest)
+ {
+ if (isUInt12(imm.m_value)) {
+ m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
+ return;
+ }
+ if (isUInt12(-imm.m_value)) {
+ m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
+ return;
+ }
+
+ signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.sub<64>(dest, dest, dataTempRegister);
+ }
+
+ void sub64(TrustedImm64 imm, RegisterID dest)
+ {
+ intptr_t immediate = imm.m_value;
+
+ if (isUInt12(immediate)) {
+ m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
+ return;
+ }
+ if (isUInt12(-immediate)) {
+ m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.sub<64>(dest, dest, dataTempRegister);
+ }
+
+ void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.lsr<32>(dest, src, shiftAmount);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
+ }
+
+ void urshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ urshift32(dest, shiftAmount, dest);
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ urshift32(dest, imm, dest);
+ }
+
+ void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.lsr<64>(dest, src, shiftAmount);
+ }
+
+ void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+        m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
+ }
+
+ void urshift64(RegisterID shiftAmount, RegisterID dest)
+ {
+ urshift64(dest, shiftAmount, dest);
+ }
+
+ void urshift64(TrustedImm32 imm, RegisterID dest)
+ {
+ urshift64(dest, imm, dest);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ xor32(dest, src, dest);
+ }
+
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.eor<32>(dest, op1, op2);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID dest)
+ {
+ xor32(imm, dest, dest);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.mvn<32>(dest, src);
+ else {
+ LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.eor<32>(dest, src, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.eor<32>(dest, src, dataTempRegister);
+ }
+ }
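+
+    // Illustrative note (not part of the original file): all-ones has no ARM64
+    // logical-immediate encoding, hence the MVN special case above:
+    //   xor32(TrustedImm32(-1), src, dest);   // mvn wDest, wSrc
+    //   xor32(TrustedImm32(0xff), src, dest); // eor wDest, wSrc, #0xff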
+
+ void xor64(RegisterID src, Address address)
+ {
+ load64(address, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
+ store64(dataTempRegister, address);
+ }
+
+ void xor64(RegisterID src, RegisterID dest)
+ {
+ xor64(dest, src, dest);
+ }
+
+ void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.eor<64>(dest, op1, op2);
+ }
+
+ void xor64(TrustedImm32 imm, RegisterID dest)
+ {
+ xor64(imm, dest, dest);
+ }
+
+ void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.mvn<64>(dest, src);
+ else {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
+
+ if (logicalImm.isValid()) {
+ m_assembler.eor<64>(dest, src, logicalImm);
+ return;
+ }
+
+ signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.eor<64>(dest, src, dataTempRegister);
+ }
+ }
+
+
+ // Memory access operations:
+
+ void load64(ImplicitAddress address, RegisterID dest)
+ {
+ if (tryLoadWithOffset<64>(dest, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
+ }
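+
+    // Illustrative note (not part of the original file): tryLoadWithOffset
+    // succeeds when the offset fits LDR's scaled unsigned 12-bit or signed
+    // 9-bit immediate forms; otherwise the offset goes through ip1, the
+    // memoryTempRegister. For 64-bit loads, roughly:
+    //   load64(Address(base, 32760), dest); // single ldr, imm12 = 32760 / 8
+    //   load64(Address(base, 32768), dest); // offset materialized in ip1 first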
+
+ void load64(BaseIndex address, RegisterID dest)
+ {
+ if (!address.offset && (!address.scale || address.scale == 3)) {
+ m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
+ }
+
+ void load64(const void* address, RegisterID dest)
+ {
+ load<64>(address, dest);
+ }
+
+ DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabel32 label(this);
+ signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
+ return label;
+ }
+
+ DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+ DataLabelCompact label(this);
+ m_assembler.ldr<64>(dest, address.base, address.offset);
+ return label;
+ }
+
+ void abortWithReason(AbortReason reason)
+ {
+ move(TrustedImm32(reason), dataTempRegister);
+ breakpoint();
+ }
+
+ void abortWithReason(AbortReason reason, intptr_t misc)
+ {
+ move(TrustedImm64(misc), memoryTempRegister);
+ abortWithReason(reason);
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result(this);
+ ASSERT(!(address.offset & ~0xff8));
+ m_assembler.ldr<64>(dest, address.base, address.offset);
+ return result;
+ }
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ if (tryLoadWithOffset<32>(dest, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ if (!address.offset && (!address.scale || address.scale == 2)) {
+ m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ load<32>(address, dest);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabel32 label(this);
+ signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
+ return label;
+ }
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+ DataLabelCompact label(this);
+ m_assembler.ldr<32>(dest, address.base, address.offset);
+ return label;
+ }
+
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ if (tryLoadWithOffset<16>(dest, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.ldrh(dest, address.base, memoryTempRegister);
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ if (!address.offset && (!address.scale || address.scale == 1)) {
+ m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.ldrh(dest, address.base, memoryTempRegister);
+ }
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(address, dest);
+ }
+
+ void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+ {
+ if (!address.offset && (!address.scale || address.scale == 1)) {
+ m_assembler.ldrsh<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
+ }
+
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ if (tryLoadWithOffset<8>(dest, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.ldrb(dest, address.base, memoryTempRegister);
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ if (!address.offset && !address.scale) {
+ m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.ldrb(dest, address.base, memoryTempRegister);
+ }
+
+ void load8(const void* address, RegisterID dest)
+ {
+ moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
+ m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
+ if (dest == memoryTempRegister)
+ m_cachedMemoryTempRegister.invalidate();
+ }
+
+ void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+ {
+ if (!address.offset && !address.scale) {
+ m_assembler.ldrsb<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
+ }
+
+ void store64(RegisterID src, ImplicitAddress address)
+ {
+ if (tryStoreWithOffset<64>(src, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.str<64>(src, address.base, memoryTempRegister);
+ }
+
+ void store64(RegisterID src, BaseIndex address)
+ {
+ if (!address.offset && (!address.scale || address.scale == 3)) {
+ m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.str<64>(src, address.base, memoryTempRegister);
+ }
+
+ void store64(RegisterID src, const void* address)
+ {
+ store<64>(src, address);
+ }
+
+ void store64(TrustedImm64 imm, ImplicitAddress address)
+ {
+ if (!imm.m_value) {
+ store64(ARM64Registers::zr, address);
+ return;
+ }
+
+ moveToCachedReg(imm, m_dataMemoryTempRegister);
+ store64(dataTempRegister, address);
+ }
+
+ void store64(TrustedImm64 imm, BaseIndex address)
+ {
+ if (!imm.m_value) {
+ store64(ARM64Registers::zr, address);
+ return;
+ }
+
+ moveToCachedReg(imm, m_dataMemoryTempRegister);
+ store64(dataTempRegister, address);
+ }
+
+ DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ DataLabel32 label(this);
+ signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
+ return label;
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ if (tryStoreWithOffset<32>(src, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.str<32>(src, address.base, memoryTempRegister);
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ if (!address.offset && (!address.scale || address.scale == 2)) {
+ m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.str<32>(src, address.base, memoryTempRegister);
+ }
+
+ void store32(RegisterID src, const void* address)
+ {
+ store<32>(src, address);
+ }
+
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ if (!imm.m_value) {
+ store32(ARM64Registers::zr, address);
+ return;
+ }
+
+ moveToCachedReg(imm, m_dataMemoryTempRegister);
+ store32(dataTempRegister, address);
+ }
+
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ if (!imm.m_value) {
+ store32(ARM64Registers::zr, address);
+ return;
+ }
+
+ moveToCachedReg(imm, m_dataMemoryTempRegister);
+ store32(dataTempRegister, address);
+ }
+
+ void store32(TrustedImm32 imm, const void* address)
+ {
+ if (!imm.m_value) {
+ store32(ARM64Registers::zr, address);
+ return;
+ }
+
+ moveToCachedReg(imm, m_dataMemoryTempRegister);
+ store32(dataTempRegister, address);
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ DataLabel32 label(this);
+ signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
+ return label;
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+ if (!address.offset && (!address.scale || address.scale == 1)) {
+ m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.strh(src, address.base, memoryTempRegister);
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+ if (!address.offset && !address.scale) {
+ m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.strb(src, address.base, memoryTempRegister);
+ }
+
+ void store8(RegisterID src, void* address)
+ {
+ move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.strb(src, memoryTempRegister, 0);
+ }
+
+ void store8(RegisterID src, ImplicitAddress address)
+ {
+ if (tryStoreWithOffset<8>(src, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.str<8>(src, address.base, memoryTempRegister);
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ if (!imm.m_value) {
+ store8(ARM64Registers::zr, address);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ store8(dataTempRegister, address);
+ }
+
+ void store8(TrustedImm32 imm, ImplicitAddress address)
+ {
+ if (!imm.m_value) {
+ store8(ARM64Registers::zr, address);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ store8(dataTempRegister, address);
+ }
+
+ // Floating-point operations:
+
+ static bool supportsFloatingPoint() { return true; }
+ static bool supportsFloatingPointTruncate() { return true; }
+ static bool supportsFloatingPointSqrt() { return true; }
+ static bool supportsFloatingPointAbs() { return true; }
+
+ enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+
+ void absDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fabs<64>(dest, src);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ addDouble(dest, src, dest);
+ }
+
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.fadd<64>(dest, op1, op2);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ addDouble(fpTempRegister, dest);
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
+ addDouble(fpTempRegister, dest);
+ }
+
+ void ceilDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.frintp<64>(dest, src);
+ }
+
+ void floorDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.frintm<64>(dest, src);
+ }
+
+    // Convert 'src' to an integer, placing the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
+ {
+ m_assembler.fcvtns<32, 64>(dest, src);
+
+        // Convert the integer result back to a double & compare to the original value - if not equal or unordered (NaN) then jump.
+ m_assembler.scvtf<64, 32>(fpTempRegister, dest);
+ failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
+
+ // Test for negative zero.
+ if (negZeroCheck) {
+ Jump valueIsNonZero = branchTest32(NonZero, dest);
+ RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate();
+ m_assembler.fmov<64>(scratch, src);
+ failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero));
+ valueIsNonZero.link(this);
+ }
+ }
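+
+    // Illustrative note (not part of the original file): fcvtns(-0.0) yields
+    // the integer 0 and the round-trip compare passes, because 0.0 == -0.0
+    // under IEEE 754. The zero result is therefore disambiguated by moving the
+    // raw double bits to a GPR and testing bit 63, the sign bit: -0.0 has the
+    // bit pattern 0x8000000000000000.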
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ m_assembler.fcmp<64>(left, right);
+
+ if (cond == DoubleNotEqual) {
+ // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
+ Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+ Jump result = makeBranch(ARM64Assembler::ConditionNE);
+ unordered.link(this);
+ return result;
+ }
+ if (cond == DoubleEqualOrUnordered) {
+ Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+ Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
+ unordered.link(this);
+ // We get here if either unordered or equal.
+ Jump result = jump();
+ notEqual.link(this);
+ return result;
+ }
+ return makeBranch(cond);
+ }
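+
+    // Illustrative note (not part of the original file): fcmp sets NZCV to
+    //   less -> 1000, equal -> 0110, greater -> 0010, unordered -> 0011,
+    // so VS (V set) isolates the unordered case, while NE fires for both
+    // "not equal" and "unordered", which is why DoubleNotEqual and
+    // DoubleEqualOrUnordered need the fix-ups above.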
+
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
+ {
+ m_assembler.fcmp_0<64>(reg);
+ Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+ Jump result = makeBranch(ARM64Assembler::ConditionNE);
+ unordered.link(this);
+ return result;
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
+ {
+ m_assembler.fcmp_0<64>(reg);
+ Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+ Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
+ unordered.link(this);
+ // We get here if either unordered or equal.
+ Jump result = jump();
+ notEqual.link(this);
+ return result;
+ }
+
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+        // Truncate to a 64-bit integer in dataTempRegister, then copy the low 32 bits to dest.
+ m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
+ zeroExtend32ToPtr(dataTempRegister, dest);
+        // Check that the low 32 bits sign-extend back to the full 64-bit value.
+ m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
+ return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
+ }
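+
+    // Illustrative note (not part of the original file): truncating to 64 bits
+    // first lets out-of-range detection reuse an integer compare, e.g. for
+    // 2147483648.0 the 64-bit truncation is 0x0000000080000000, whose
+    // sign-extended low word is 0xffffffff80000000; the values differ, so the
+    // BranchIfTruncateFailed branch is taken.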
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fcvt<32, 64>(dest, src);
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fcvt<64, 32>(dest, src);
+ }
+
+ void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
+ {
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ convertInt32ToDouble(dataTempRegister, dest);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.scvtf<64, 32>(dest, src);
+ }
+
+ void convertInt32ToDouble(Address address, FPRegisterID dest)
+ {
+ load32(address, getCachedDataTempRegisterIDAndInvalidate());
+ convertInt32ToDouble(dataTempRegister, dest);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+ convertInt32ToDouble(dataTempRegister, dest);
+ }
+
+ void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.scvtf<64, 64>(dest, src);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ divDouble(dest, src, dest);
+ }
+
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.fdiv<64>(dest, op1, op2);
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ if (tryLoadWithOffset<64>(dest, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
+ }
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ if (!address.offset && (!address.scale || address.scale == 3)) {
+ m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
+ }
+
+ void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ {
+ moveToCachedReg(address, m_cachedMemoryTempRegister);
+ m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
+ }
+
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ if (!address.offset && (!address.scale || address.scale == 2)) {
+ m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
+ }
+
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fmov<64>(dest, src);
+ }
+
+ void moveDoubleTo64(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.fmov<64>(dest, src);
+ }
+
+ void move64ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fmov<64>(dest, src);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ mulDouble(dest, src, dest);
+ }
+
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.fmul<64>(dest, op1, op2);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ mulDouble(fpTempRegister, dest);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fneg<64>(dest, src);
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fsqrt<64>(dest, src);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ if (tryStoreWithOffset<64>(src, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.str<64>(src, address.base, memoryTempRegister);
+ }
+
+ void storeDouble(FPRegisterID src, TrustedImmPtr address)
+ {
+ moveToCachedReg(address, m_cachedMemoryTempRegister);
+ m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
+ }
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ if (!address.offset && (!address.scale || address.scale == 3)) {
+ m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.str<64>(src, address.base, memoryTempRegister);
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ if (!address.offset && (!address.scale || address.scale == 2)) {
+ m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ return;
+ }
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.str<32>(src, address.base, memoryTempRegister);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ subDouble(dest, src, dest);
+ }
+
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.fsub<64>(dest, op1, op2);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ subDouble(fpTempRegister, dest);
+ }
+
+ // Result is undefined if the value is outside of the integer range.
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.fcvtzs<32, 64>(dest, src);
+ }
+
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.fcvtzu<32, 64>(dest, src);
+ }
+
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+    // containing machine-word-sized units of data. Push and pop
+    // operations add and remove a single register-sized unit of data
+ // to or from the stack. These operations are not supported on
+ // ARM64. Peek and poke operations read or write values on the
+ // stack, without moving the current stack position. Additionally,
+ // there are popToRestore and pushToSave operations, which are
+ // designed just for quick-and-dirty saving and restoring of
+ // temporary values. These operations don't claim to have any
+ // ABI compatibility.
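+    //
+    // Illustrative usage (not part of the original file): since ARM64 requires
+    // sp to stay 16-byte aligned, registers are saved and restored in pairs,
+    // e.g. with a MacroAssemblerARM64 'masm':
+    //   masm.pushPair(ARM64Registers::x19, ARM64Registers::x20);
+    //   ...
+    //   masm.popPair(ARM64Registers::x19, ARM64Registers::x20);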
+
+ void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
+ {
+ CRASH();
+ }
+
+ void push(RegisterID) NO_RETURN_DUE_TO_CRASH
+ {
+ CRASH();
+ }
+
+ void push(Address) NO_RETURN_DUE_TO_CRASH
+ {
+ CRASH();
+ }
+
+ void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
+ {
+ CRASH();
+ }
+
+ void popPair(RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16));
+ }
+
+ void pushPair(RegisterID src1, RegisterID src2)
+ {
+ m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16));
+ }
+
+ void popToRestore(RegisterID dest)
+ {
+ m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
+ }
+
+ void pushToSave(RegisterID src)
+ {
+ m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
+ }
+
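+    // Descriptive note (not in the original file): the next method reserves a
+    // 16-byte slot by pushing dataTempRegister twice, writes the immediate
+    // into the low 8 bytes through that same register, then reloads the
+    // register's saved value from the high 8 bytes, so the immediate ends up
+    // at [sp] while every register keeps its original value.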
+ void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
+ {
+ RegisterID reg = dataTempRegister;
+ pushPair(reg, reg);
+ move(imm, reg);
+ store64(reg, stackPointerRegister);
+ load64(Address(stackPointerRegister, 8), reg);
+ }
+
+ void pushToSave(Address address)
+ {
+ load32(address, getCachedDataTempRegisterIDAndInvalidate());
+ pushToSave(dataTempRegister);
+ }
+
+ void pushToSave(TrustedImm32 imm)
+ {
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ pushToSave(dataTempRegister);
+ }
+
+ void popToRestore(FPRegisterID dest)
+ {
+ loadDouble(stackPointerRegister, dest);
+ add64(TrustedImm32(16), stackPointerRegister);
+ }
+
+ void pushToSave(FPRegisterID src)
+ {
+ sub64(TrustedImm32(16), stackPointerRegister);
+ storeDouble(src, stackPointerRegister);
+ }
+
+ static ptrdiff_t pushToSaveByteOffset() { return 16; }
+
+ // Register move operations:
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.mov<64>(dest, src);
+ }
+
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+ moveInternal<TrustedImm32, int32_t>(imm, dest);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
+ }
+
+ void move(TrustedImm64 imm, RegisterID dest)
+ {
+ moveInternal<TrustedImm64, int64_t>(imm, dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ move(reg1, getCachedDataTempRegisterIDAndInvalidate());
+ move(reg2, reg1);
+ move(dataTempRegister, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sxtw(dest, src);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.uxtw(dest, src);
+ }
+
+
+ // Forwards / external control flow operations:
+ //
+    // These jump and conditional branch operations return a Jump
+    // object which may be linked at a later point, allowing forward jumps
+    // or jumps that will require external linkage (after the code has been
+    // relocated).
+    //
+    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively; for unsigned comparisons the names b, a, be, and ae are
+    // used (representing the names 'below' and 'above').
+    //
+    // Operands to the comparison are provided in the expected order, e.g.
+    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+    // treated as a signed 32-bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
+
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmp<32>(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ if (isUInt12(right.m_value))
+ m_assembler.cmp<32>(left, UInt12(right.m_value));
+ else if (isUInt12(-right.m_value))
+ m_assembler.cmn<32>(left, UInt12(-right.m_value));
+ else {
+ moveToCachedReg(right, m_dataMemoryTempRegister);
+ m_assembler.cmp<32>(left, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch32(cond, left, memoryTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch32(cond, memoryTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch32(cond, memoryTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch32(cond, memoryTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch32(cond, memoryTempRegister, right);
+ }
+
+ Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ if (right == ARM64Registers::sp) {
+ if (cond == Equal && left != ARM64Registers::sp) {
+                // CMP can only use SP for the left argument; since we are testing for equality, the order
+                // does not matter here.
+ std::swap(left, right);
+ } else {
+ move(right, getCachedDataTempRegisterIDAndInvalidate());
+ right = dataTempRegister;
+ }
+ }
+ m_assembler.cmp<64>(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
+ {
+ intptr_t immediate = right.m_value;
+ if (isUInt12(immediate))
+ m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
+ else if (isUInt12(-immediate))
+ m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
+ else {
+ moveToCachedReg(right, m_dataMemoryTempRegister);
+ m_assembler.cmp<64>(left, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch64(RelationalCondition cond, RegisterID left, Address right)
+ {
+ load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch64(cond, left, memoryTempRegister);
+ }
+
+ Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+ return branch64(cond, dataTempRegister, right);
+ }
+
+ Jump branch64(RelationalCondition cond, Address left, RegisterID right)
+ {
+ load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch64(cond, memoryTempRegister, right);
+ }
+
+ Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
+ {
+ load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch64(cond, memoryTempRegister, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+ {
+ load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch64(cond, memoryTempRegister, right);
+ }
+
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ ASSERT(!(0xffffff00 & right.m_value));
+ load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch32(cond, memoryTempRegister, right);
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(0xffffff00 & right.m_value));
+ load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch32(cond, memoryTempRegister, right);
+ }
+
+ Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ ASSERT(!(0xffffff00 & right.m_value));
+ load8(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch32(cond, memoryTempRegister, right);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ m_assembler.tst<32>(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.tst<32>(reg, reg);
+ else {
+ bool testedWithImmediate = false;
+ if ((cond == Zero) || (cond == NonZero)) {
+ LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.tst<32>(reg, logicalImm);
+ testedWithImmediate = true;
+ }
+ }
+ if (!testedWithImmediate) {
+ move(mask, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.tst<32>(reg, dataTempRegister);
+ }
+ }
+ }
+
+ Jump branch(ResultCondition cond)
+ {
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1) {
+ if ((cond == Zero) || (cond == NonZero))
+ return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
+ m_assembler.tst<32>(reg, reg);
+ } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
+ return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
+ else {
+ if ((cond == Zero) || (cond == NonZero)) {
+ LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.tst<32>(reg, logicalImm);
+ return Jump(makeBranch(cond));
+ }
+ }
+
+ move(mask, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.tst<32>(reg, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branchTest32(cond, memoryTempRegister, mask);
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branchTest32(cond, memoryTempRegister, mask);
+ }
+
+ Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ m_assembler.tst<64>(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1) {
+ if ((cond == Zero) || (cond == NonZero))
+ return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
+ m_assembler.tst<64>(reg, reg);
+ } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
+ return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
+ else {
+ if ((cond == Zero) || (cond == NonZero)) {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.tst<64>(reg, logicalImm);
+ return Jump(makeBranch(cond));
+ }
+ }
+
+ signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.tst<64>(reg, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
+ {
+ load64(address, getCachedDataTempRegisterIDAndInvalidate());
+ return branchTest64(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load64(address, getCachedDataTempRegisterIDAndInvalidate());
+ return branchTest64(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load64(address, getCachedDataTempRegisterIDAndInvalidate());
+ return branchTest64(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+ return branchTest64(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load8(address, getCachedDataTempRegisterIDAndInvalidate());
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load8(address, getCachedDataTempRegisterIDAndInvalidate());
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
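+        // ARM64 supports unaligned halfword accesses to normal memory, so this
+        // can simply defer to the aligned branch32 implementation.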
+ return branch32(cond, left, right);
+ }
+
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
+
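+    // Illustrative usage sketch (masm, op1, op2, dest are hypothetical names,
+    // not part of this class):
+    //
+    //     Jump overflowed = masm.branchAdd32(Overflow, op1, op2, dest);
+    //     ... fast path ...
+    //     overflowed.link(&masm); // bind the overflow case to the slow path here
+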
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.add<32, S>(dest, op1, op2);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+ {
+ if (isUInt12(imm.m_value)) {
+ m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
+ return Jump(makeBranch(cond));
+ }
+ if (isUInt12(-imm.m_value)) {
+ m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
+ return Jump(makeBranch(cond));
+ }
+
+ signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+ return branchAdd32(cond, op1, dataTempRegister, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ load32(src, getCachedDataTempRegisterIDAndInvalidate());
+ return branchAdd32(cond, dest, dataTempRegister, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd32(cond, dest, src, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchAdd32(cond, dest, imm, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+
+ if (isUInt12(imm.m_value)) {
+ m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+ store32(dataTempRegister, address.m_ptr);
+ } else if (isUInt12(-imm.m_value)) {
+ m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+ store32(dataTempRegister, address.m_ptr);
+ } else {
+ move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.add<64, S>(dest, op1, op2);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+ {
+ if (isUInt12(imm.m_value)) {
+ m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
+ return Jump(makeBranch(cond));
+ }
+ if (isUInt12(-imm.m_value)) {
+ m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
+ return Jump(makeBranch(cond));
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ return branchAdd64(cond, op1, dataTempRegister, dest);
+ }
+
+ Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd64(cond, dest, src, dest);
+ }
+
+ Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchAdd64(cond, dest, imm, dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ ASSERT(cond != Signed);
+
+ if (cond != Overflow) {
+ m_assembler.mul<32>(dest, src1, src2);
+ return branchTest32(cond, dest);
+ }
+
+        // This is a signed multiply of two 32-bit values, producing a 64-bit result.
+ m_assembler.smull(dest, src1, src2);
+ // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister.
+ m_assembler.asr<64>(getCachedDataTempRegisterIDAndInvalidate(), dest, 32);
+ // Splat bit 31 of the result to bits 31..0 of memoryTempRegister.
+ m_assembler.asr<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 31);
+ // After a mul32 the top 32 bits of the register should be clear.
+ zeroExtend32ToPtr(dest, dest);
+ // Check that bits 31..63 of the original result were all equal.
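+        // (Worked example: 0x7fffffff * 2 = 0x00000000fffffffe; bits 63..32 are
+        // zero but the bit-31 splat is 0xffffffff, so the values differ and the
+        // overflow branch below is taken.)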
+ return branch32(NotEqual, memoryTempRegister, dataTempRegister);
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchMul32(cond, dest, src, dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ return branchMul32(cond, dataTempRegister, src, dest);
+ }
+
+ Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ ASSERT(cond != Signed);
+
+        // This is a signed multiply of two 64-bit values, producing a 64-bit result.
+ m_assembler.mul<64>(dest, src1, src2);
+
+ if (cond != Overflow)
+ return branchTest64(cond, dest);
+
+ // Compute bits 127..64 of the result into dataTempRegister.
+ m_assembler.smulh(getCachedDataTempRegisterIDAndInvalidate(), src1, src2);
+ // Splat bit 63 of the result to bits 63..0 of memoryTempRegister.
+ m_assembler.asr<64>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 63);
+        // Check that bits 63..127 of the original result were all equal.
+ return branch64(NotEqual, memoryTempRegister, dataTempRegister);
+ }
+
+ Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchMul64(cond, dest, src, dest);
+ }
+
+ Jump branchNeg32(ResultCondition cond, RegisterID dest)
+ {
+ m_assembler.neg<32, S>(dest, dest);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
+ {
+ m_assembler.neg<64, S>(srcDest, srcDest);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID dest)
+ {
+ m_assembler.neg<32, S>(dest, dest);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.sub<32, S>(dest, op1, op2);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+ {
+ if (isUInt12(imm.m_value)) {
+ m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
+ return Jump(makeBranch(cond));
+ }
+ if (isUInt12(-imm.m_value)) {
+ m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
+ return Jump(makeBranch(cond));
+ }
+
+ signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+ return branchSub32(cond, op1, dataTempRegister, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub32(cond, dest, src, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub32(cond, dest, imm, dest);
+ }
+
+ Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.sub<64, S>(dest, op1, op2);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+ {
+ if (isUInt12(imm.m_value)) {
+ m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
+ return Jump(makeBranch(cond));
+ }
+ if (isUInt12(-imm.m_value)) {
+ m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
+ return Jump(makeBranch(cond));
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ return branchSub64(cond, op1, dataTempRegister, dest);
+ }
+
+ Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub64(cond, dest, src, dest);
+ }
+
+ Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub64(cond, dest, imm, dest);
+ }
+
+
+ // Jumps, calls, returns
+
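+    // call() materializes the target pointer with a fixed-width move so the
+    // sequence has a constant size and layout; linkCall() and repatchCall()
+    // below rely on REPATCH_OFFSET_CALL_TO_POINTER to locate the pointer and
+    // rewrite it in place.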
+ ALWAYS_INLINE Call call()
+ {
+ AssemblerLabel pointerLabel = m_assembler.label();
+ moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
+ invalidateAllTempRegisters();
+ m_assembler.blr(dataTempRegister);
+ AssemblerLabel callLabel = m_assembler.label();
+ ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
+ return Call(callLabel, Call::Linkable);
+ }
+
+ ALWAYS_INLINE Call call(RegisterID target)
+ {
+ invalidateAllTempRegisters();
+ m_assembler.blr(target);
+ return Call(m_assembler.label(), Call::None);
+ }
+
+ ALWAYS_INLINE Call call(Address address)
+ {
+ load64(address, getCachedDataTempRegisterIDAndInvalidate());
+ return call(dataTempRegister);
+ }
+
+ ALWAYS_INLINE Jump jump()
+ {
+ AssemblerLabel label = m_assembler.label();
+ m_assembler.b();
+ return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.br(target);
+ }
+
+ void jump(Address address)
+ {
+ load64(address, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.br(dataTempRegister);
+ }
+
+ void jump(AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
+ load64(Address(dataTempRegister), dataTempRegister);
+ m_assembler.br(dataTempRegister);
+ }
+
+ ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ return tailRecursiveCall();
+ }
+
+ ALWAYS_INLINE Call nearCall()
+ {
+ m_assembler.bl();
+ return Call(m_assembler.label(), Call::LinkableNear);
+ }
+
+ ALWAYS_INLINE void ret()
+ {
+ m_assembler.ret();
+ }
+
+ ALWAYS_INLINE Call tailRecursiveCall()
+ {
+ // Like a normal call, but don't link.
+ AssemblerLabel pointerLabel = m_assembler.label();
+ moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.br(dataTempRegister);
+ AssemblerLabel callLabel = m_assembler.label();
+ ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
+ return Call(callLabel, Call::Linkable);
+ }
+
+
+    // Comparison operations
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmp<32>(left, right);
+ m_assembler.cset<32>(dest, ARM64Condition(cond));
+ }
+
+ void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
+ {
+ load32(left, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.cmp<32>(dataTempRegister, right);
+ m_assembler.cset<32>(dest, ARM64Condition(cond));
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ move(right, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.cmp<32>(left, dataTempRegister);
+ m_assembler.cset<32>(dest, ARM64Condition(cond));
+ }
+
+ void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmp<64>(left, right);
+ m_assembler.cset<32>(dest, ARM64Condition(cond));
+ }
+
+ void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.cmp<64>(left, dataTempRegister);
+ m_assembler.cset<32>(dest, ARM64Condition(cond));
+ }
+
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
+ move(right, getCachedDataTempRegisterIDAndInvalidate());
+ compare32(cond, memoryTempRegister, dataTempRegister, dest);
+ }
+
+ void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.tst<32>(src, src);
+ else {
+ signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.tst<32>(src, dataTempRegister);
+ }
+ m_assembler.cset<32>(dest, ARM64Condition(cond));
+ }
+
+ void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ load32(address, getCachedDataTempRegisterIDAndInvalidate());
+ test32(cond, dataTempRegister, mask, dest);
+ }
+
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ load8(address, getCachedDataTempRegisterIDAndInvalidate());
+ test32(cond, dataTempRegister, mask, dest);
+ }
+
+ void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.tst<64>(op1, op2);
+ m_assembler.cset<32>(dest, ARM64Condition(cond));
+ }
+
+ void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.tst<64>(src, src);
+ else {
+ signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.tst<64>(src, dataTempRegister);
+ }
+ m_assembler.cset<32>(dest, ARM64Condition(cond));
+ }
+
+
+ // Patchable operations
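+    //
+    // These variants force fixed-size instruction sequences, via
+    // m_makeJumpPatchable and moveWithFixedWidth(), so that the emitted jumps
+    // and constants can later be repatched in place.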
+
+ ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
+ {
+ DataLabel32 label(this);
+ moveWithFixedWidth(imm, dest);
+ return label;
+ }
+
+ ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
+ {
+ DataLabelPtr label(this);
+ moveWithFixedWidth(imm, dest);
+ return label;
+ }
+
+ ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ dataLabel = DataLabelPtr(this);
+ moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
+ return branch64(cond, left, dataTempRegister);
+ }
+
+ ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ dataLabel = DataLabelPtr(this);
+ moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
+ return branch64(cond, left, dataTempRegister);
+ }
+
+ ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ dataLabel = DataLabel32(this);
+ moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch64(cond, left, TrustedImm64(right));
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branchTest32(cond, reg, mask);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32(cond, reg, imm);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch64(cond, reg, imm);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch64(cond, left, right);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableJump()
+ {
+ m_makeJumpPatchable = true;
+ Jump result = jump();
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr label(this);
+ moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
+ store64(dataTempRegister, address);
+ return label;
+ }
+
+ ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+ {
+ return storePtrWithPatch(TrustedImmPtr(0), address);
+ }
+
+ static void reemitInitialMoveWithPatch(void* address, void* value)
+ {
+ ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
+ }
+
+ // Miscellaneous operations:
+
+ void breakpoint(uint16_t imm = 0)
+ {
+ m_assembler.brk(imm);
+ }
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ void memoryFence()
+ {
+ m_assembler.dmbSY();
+ }
+
+
+ // Misc helper functions.
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
+ }
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
+ }
+
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return ARM64Assembler::maxJumpReplacementSize();
+ }
+
+ RegisterID scratchRegisterForBlinding()
+ {
+ // We *do not* have a scratch register for blinding.
+ RELEASE_ASSERT_NOT_REACHED();
+ return getCachedDataTempRegisterIDAndInvalidate();
+ }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+ static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ return label.labelAtOffset(0);
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+ {
+ reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+ static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+protected:
+ ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
+ {
+ m_assembler.b_cond(cond);
+ AssemblerLabel label = m_assembler.label();
+ m_assembler.nop();
+ return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond);
+ }
+ ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); }
+ ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); }
+ ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); }
+
+ template <int dataSize>
+ ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg)
+ {
+ if (cond == IsZero)
+ m_assembler.cbz<dataSize>(reg);
+ else
+ m_assembler.cbnz<dataSize>(reg);
+ AssemblerLabel label = m_assembler.label();
+ m_assembler.nop();
+ return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg);
+ }
+
+ ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond)
+ {
+ ASSERT(bit < 64);
+ bit &= 0x3f;
+ if (cond == IsZero)
+ m_assembler.tbz(reg, bit);
+ else
+ m_assembler.tbnz(reg, bit);
+ AssemblerLabel label = m_assembler.label();
+ m_assembler.nop();
+ return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg);
+ }
+
+ ARM64Assembler::Condition ARM64Condition(RelationalCondition cond)
+ {
+ return static_cast<ARM64Assembler::Condition>(cond);
+ }
+
+ ARM64Assembler::Condition ARM64Condition(ResultCondition cond)
+ {
+ return static_cast<ARM64Assembler::Condition>(cond);
+ }
+
+ ARM64Assembler::Condition ARM64Condition(DoubleCondition cond)
+ {
+ return static_cast<ARM64Assembler::Condition>(cond);
+ }
+
+private:
+ ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister.registerIDInvalidate(); }
+ ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister.registerIDInvalidate(); }
+
+ ALWAYS_INLINE bool isInIntRange(intptr_t value)
+ {
+ return value == ((value << 32) >> 32);
+ }
+
+ template<typename ImmediateType, typename rawType>
+ void moveInternal(ImmediateType imm, RegisterID dest)
+ {
+ const int dataSize = sizeof(rawType) * 8;
+ const int numberHalfWords = dataSize / 16;
+ rawType value = bitwise_cast<rawType>(imm.m_value);
+ uint16_t halfword[numberHalfWords];
+
+ // Handle 0 and ~0 here to simplify code below
+ if (!value) {
+ m_assembler.movz<dataSize>(dest, 0);
+ return;
+ }
+ if (!~value) {
+ m_assembler.movn<dataSize>(dest, 0);
+ return;
+ }
+
+ LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value));
+
+ if (logicalImm.isValid()) {
+ m_assembler.movi<dataSize>(dest, logicalImm);
+ return;
+ }
+
+ // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly.
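+        // For example, 0xffffffffffff1234 has three 0xffff halfwords, so the
+        // movn path wins and a single movn(dest, 0xedcb) reconstructs it, while
+        // 0x0000123400000000 has three zero halfwords and becomes a single
+        // movz(dest, 0x1234, lsl #32).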
+ int zeroOrNegateVote = 0;
+ for (int i = 0; i < numberHalfWords; ++i) {
+ halfword[i] = getHalfword(value, i);
+ if (!halfword[i])
+ zeroOrNegateVote++;
+ else if (halfword[i] == 0xffff)
+ zeroOrNegateVote--;
+ }
+
+ bool needToClearRegister = true;
+ if (zeroOrNegateVote >= 0) {
+ for (int i = 0; i < numberHalfWords; i++) {
+ if (halfword[i]) {
+ if (needToClearRegister) {
+ m_assembler.movz<dataSize>(dest, halfword[i], 16*i);
+ needToClearRegister = false;
+ } else
+ m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
+ }
+ }
+ } else {
+ for (int i = 0; i < numberHalfWords; i++) {
+ if (halfword[i] != 0xffff) {
+ if (needToClearRegister) {
+ m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i);
+ needToClearRegister = false;
+ } else
+ m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
+ }
+ }
+ }
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ m_assembler.ldr<datasize>(rt, rn, pimm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
+ {
+ m_assembler.ldur<datasize>(rt, rn, simm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ m_assembler.str<datasize>(rt, rn, pimm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
+ {
+ m_assembler.stur<datasize>(rt, rn, simm);
+ }
+
+ void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
+ {
+ int32_t value = imm.m_value;
+ m_assembler.movz<32>(dest, getHalfword(value, 0));
+ m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
+ }
+
+ void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest)
+ {
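+        // Emits a fixed three-instruction movz/movk/movk sequence covering the
+        // low 48 bits of the pointer; the constant width is what allows the
+        // patching code (setPointer()) to rewrite it later.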
+ intptr_t value = reinterpret_cast<intptr_t>(imm.m_value);
+ m_assembler.movz<64>(dest, getHalfword(value, 0));
+ m_assembler.movk<64>(dest, getHalfword(value, 1), 16);
+ m_assembler.movk<64>(dest, getHalfword(value, 2), 32);
+ }
+
+ void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest)
+ {
+ if (value >= 0) {
+ m_assembler.movz<32>(dest, getHalfword(value, 0));
+ m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
+ } else {
+ m_assembler.movn<32>(dest, ~getHalfword(value, 0));
+ m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
+ }
+ }
+
+ void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void load(const void* address, RegisterID dest)
+ {
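+        // If memoryTempRegister still holds a known address from an earlier
+        // absolute-address access, try addressing relative to it: a ldur/ldr
+        // with an immediate offset, or a single movk to fix up the low
+        // halfword, instead of re-materializing the full 64-bit pointer.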
+ intptr_t currentRegisterContents;
+ if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
+ intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
+ intptr_t addressDelta = addressAsInt - currentRegisterContents;
+
+ if (dest == memoryTempRegister)
+ m_cachedMemoryTempRegister.invalidate();
+
+ if (isInIntRange(addressDelta)) {
+ if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
+ m_assembler.ldur<datasize>(dest, memoryTempRegister, addressDelta);
+ return;
+ }
+
+ if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
+ m_assembler.ldr<datasize>(dest, memoryTempRegister, addressDelta);
+ return;
+ }
+ }
+
+ if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
+ m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
+ m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+ m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
+ return;
+ }
+ }
+
+ move(TrustedImmPtr(address), memoryTempRegister);
+ if (dest == memoryTempRegister)
+ m_cachedMemoryTempRegister.invalidate();
+ else
+ m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+ m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void store(RegisterID src, const void* address)
+ {
+ intptr_t currentRegisterContents;
+ if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
+ intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
+ intptr_t addressDelta = addressAsInt - currentRegisterContents;
+
+ if (isInIntRange(addressDelta)) {
+ if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
+ m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta);
+ return;
+ }
+
+ if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
+ m_assembler.str<datasize>(src, memoryTempRegister, addressDelta);
+ return;
+ }
+ }
+
+ if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
+ m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
+ m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+ m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
+ return;
+ }
+ }
+
+ move(TrustedImmPtr(address), memoryTempRegister);
+ m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+ m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
+ }
+
+ template <int dataSize>
+ ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest)
+ {
+ intptr_t currentRegisterContents;
+ if (dest.value(currentRegisterContents)) {
+ if (currentRegisterContents == immediate)
+ return true;
+
+ LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate));
+
+ if (logicalImm.isValid()) {
+ m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm);
+ dest.setValue(immediate);
+ return true;
+ }
+
+ if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) {
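+                // Only the low 32 bits differ, so at most two movk instructions
+                // (one per changed halfword) bring the cached register up to
+                // date; e.g. going from 0x123400005678 to 0x12340000abcd needs
+                // a single movk of halfword 0.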
+ if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1))
+ m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16);
+
+ if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0))
+ m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0);
+
+ dest.setValue(immediate);
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest)
+ {
+ if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest))
+ return;
+
+ moveInternal<TrustedImm32, int32_t>(imm, dest.registerIDNoInvalidate());
+ dest.setValue(imm.m_value);
+ }
+
+ void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest)
+ {
+ if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest))
+ return;
+
+ moveInternal<TrustedImmPtr, intptr_t>(imm, dest.registerIDNoInvalidate());
+ dest.setValue(imm.asIntptr());
+ }
+
+ void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest)
+ {
+ if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest))
+ return;
+
+ moveInternal<TrustedImm64, int64_t>(imm, dest.registerIDNoInvalidate());
+ dest.setValue(imm.m_value);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
+ {
+ if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+ loadUnscaledImmediate<datasize>(rt, rn, offset);
+ return true;
+ }
+ if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+ loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
+ return true;
+ }
+ return false;
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
+ {
+ if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+ m_assembler.ldur<datasize>(rt, rn, offset);
+ return true;
+ }
+ if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+ m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
+ return true;
+ }
+ return false;
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
+ {
+ if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+ storeUnscaledImmediate<datasize>(rt, rn, offset);
+ return true;
+ }
+ if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+ storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
+ return true;
+ }
+ return false;
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
+ {
+ if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+ m_assembler.stur<datasize>(rt, rn, offset);
+ return true;
+ }
+ if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+ m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
+ return true;
+ }
+ return false;
+ }
+
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ if (call.isFlagSet(Call::Near))
+ ARM64Assembler::linkCall(code, call.m_label, function.value());
+ else
+ ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
+ }
+
+ CachedTempRegister m_dataMemoryTempRegister;
+ CachedTempRegister m_cachedMemoryTempRegister;
+ bool m_makeJumpPatchable;
+};
+
+// Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+ m_assembler.ldrb(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+ m_assembler.ldrh(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
+{
+ m_assembler.ldurb(rt, rn, simm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
+{
+ m_assembler.ldurh(rt, rn, simm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+ m_assembler.strb(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+ m_assembler.strh(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
+{
+ m_assembler.sturb(rt, rn, simm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
+{
+ m_assembler.sturh(rt, rn, simm);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerARM64_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp
new file mode 100644
index 000000000..6651fff06
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+#include "MacroAssemblerARMv7.h"
+
+namespace JSC {
+
+#if ENABLE(MASM_PROBE)
+
+#define INDENT printIndent(indentation)
+
+void MacroAssemblerARMv7::printCPURegisters(CPUState& cpu, int indentation)
+{
+ #define PRINT_GPREGISTER(_type, _regName) { \
+ int32_t value = reinterpret_cast<int32_t>(cpu._regName); \
+        INDENT, dataLogF("%5s: 0x%08x %d\n", #_regName, value, value); \
+ }
+ FOR_EACH_CPU_GPREGISTER(PRINT_GPREGISTER)
+ FOR_EACH_CPU_SPECIAL_REGISTER(PRINT_GPREGISTER)
+ #undef PRINT_GPREGISTER
+
+ #define PRINT_FPREGISTER(_type, _regName) { \
+ uint64_t* u = reinterpret_cast<uint64_t*>(&cpu._regName); \
+ double* d = reinterpret_cast<double*>(&cpu._regName); \
+ INDENT, dataLogF("%5s: 0x%016llx %.13g\n", #_regName, *u, *d); \
+ }
+ FOR_EACH_CPU_FPREGISTER(PRINT_FPREGISTER)
+ #undef PRINT_FPREGISTER
+}
+
+#undef INDENT
+
+void MacroAssemblerARMv7::printRegister(MacroAssemblerARMv7::CPUState& cpu, RegisterID regID)
+{
+ const char* name = CPUState::registerName(regID);
+ union {
+ void* voidPtr;
+ intptr_t intptrValue;
+ } u;
+ u.voidPtr = cpu.registerValue(regID);
+ dataLogF("%s:<%p %ld>", name, u.voidPtr, u.intptrValue);
+}
+
+void MacroAssemblerARMv7::printRegister(MacroAssemblerARMv7::CPUState& cpu, FPRegisterID regID)
+{
+ const char* name = CPUState::registerName(regID);
+ union {
+ double doubleValue;
+ uint64_t uint64Value;
+ } u;
+ u.doubleValue = cpu.registerValue(regID);
+ dataLogF("%s:<0x%016llx %.13g>", name, u.uint64Value, u.doubleValue);
+}
+
+extern "C" void ctiMasmProbeTrampoline();
+
+// For details on "What code is emitted for the probe?" and "What values are in
+// the saved registers?", see comment for MacroAssemblerX86Common::probe() in
+// MacroAssemblerX86Common.cpp.
+
+void MacroAssemblerARMv7::probe(MacroAssemblerARMv7::ProbeFunction function, void* arg1, void* arg2)
+{
+ push(RegisterID::lr);
+ push(RegisterID::lr);
+ add32(TrustedImm32(8), RegisterID::sp, RegisterID::lr);
+ store32(RegisterID::lr, ArmAddress(RegisterID::sp, 4));
+ push(RegisterID::ip);
+ push(RegisterID::r0);
+ // The following uses RegisterID::ip. So, they must come after we push ip above.
+ push(trustedImm32FromPtr(arg2));
+ push(trustedImm32FromPtr(arg1));
+ push(trustedImm32FromPtr(function));
+
+ move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::ip);
+ m_assembler.blx(RegisterID::ip);
+}
+#endif // ENABLE(MASM_PROBE)
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
new file mode 100644
index 000000000..2e71e61d8
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -0,0 +1,2047 @@
+/*
+ * Copyright (C) 2009, 2010, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARMv7_h
+#define MacroAssemblerARMv7_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "ARMv7Assembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler, MacroAssemblerARMv7> {
+ static const RegisterID dataTempRegister = ARMRegisters::ip;
+ static const RegisterID addressTempRegister = ARMRegisters::r6;
+
+ static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
+ inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
+
+public:
+ MacroAssemblerARMv7()
+ : m_makeJumpPatchable(false)
+ {
+ }
+
+ typedef ARMv7Assembler::LinkRecord LinkRecord;
+ typedef ARMv7Assembler::JumpType JumpType;
+ typedef ARMv7Assembler::JumpLinkType JumpLinkType;
+ typedef ARMv7Assembler::Condition Condition;
+
+ static const ARMv7Assembler::Condition DefaultCondition = ARMv7Assembler::ConditionInvalid;
+ static const ARMv7Assembler::JumpType DefaultJump = ARMv7Assembler::JumpNoConditionFixedSize;
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value >= -255 && value <= 255;
+ }
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
+ void* unlinkedCode() { return m_assembler.unlinkedCode(); }
+ static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); }
+ static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); }
+ static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); }
+ static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
+ static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARMv7Assembler::link(record, from, to); }
+
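+    // ArmAddress describes either base+offset or base+index<<scale addressing;
+    // illustratively, ArmAddress(base, 4) means [base, #4] and
+    // ArmAddress(base, index, TimesFour) means [base, index, lsl #2].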
+ struct ArmAddress {
+ enum AddressType {
+ HasOffset,
+ HasIndex,
+ } type;
+ RegisterID base;
+ union {
+ int32_t offset;
+ struct {
+ RegisterID index;
+ Scale scale;
+ };
+ } u;
+
+ explicit ArmAddress(RegisterID base, int32_t offset = 0)
+ : type(HasOffset)
+ , base(base)
+ {
+ u.offset = offset;
+ }
+
+ explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
+ : type(HasIndex)
+ , base(base)
+ {
+ u.index = index;
+ u.scale = scale;
+ }
+ };
+
+public:
+ static const Scale ScalePtr = TimesFour;
+
+ enum RelationalCondition {
+ Equal = ARMv7Assembler::ConditionEQ,
+ NotEqual = ARMv7Assembler::ConditionNE,
+ Above = ARMv7Assembler::ConditionHI,
+ AboveOrEqual = ARMv7Assembler::ConditionHS,
+ Below = ARMv7Assembler::ConditionLO,
+ BelowOrEqual = ARMv7Assembler::ConditionLS,
+ GreaterThan = ARMv7Assembler::ConditionGT,
+ GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+ LessThan = ARMv7Assembler::ConditionLT,
+ LessThanOrEqual = ARMv7Assembler::ConditionLE
+ };
+
+ enum ResultCondition {
+ Overflow = ARMv7Assembler::ConditionVS,
+ Signed = ARMv7Assembler::ConditionMI,
+ PositiveOrZero = ARMv7Assembler::ConditionPL,
+ Zero = ARMv7Assembler::ConditionEQ,
+ NonZero = ARMv7Assembler::ConditionNE
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = ARMv7Assembler::ConditionEQ,
+ DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
+ DoubleGreaterThan = ARMv7Assembler::ConditionGT,
+ DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+ DoubleLessThan = ARMv7Assembler::ConditionLO,
+ DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
+ DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
+ DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
+ DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
+ DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
+ DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
+ };
+
+ static const RegisterID stackPointerRegister = ARMRegisters::sp;
+ static const RegisterID framePointerRegister = ARMRegisters::fp;
+ static const RegisterID linkRegister = ARMRegisters::lr;
+
+ // Integer arithmetic operations:
+ //
+    // Operations are typically two operand - operation(source, srcDst).
+    // For many operations the source may be a TrustedImm32, and the srcDst
+    // operand may often be a memory location (explicitly described using an
+    // Address object).
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.add(dest, dest, src);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, dest, dest);
+ }
+
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ load32(src.m_ptr, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+
+        // For adds with the stack pointer as destination, src must first be
+        // moved to sp to avoid an unpredictable instruction encoding.
+ if (dest == ARMRegisters::sp && src != dest) {
+ move(src, ARMRegisters::sp);
+ src = ARMRegisters::sp;
+ }
+
+ if (armImm.isValid())
+ m_assembler.add(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.add(dest, src, dataTempRegister);
+ }
+ }
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ load32(address, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add32(imm, srcDest);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+
+ m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
+ else {
+ move(imm, addressTempRegister);
+ m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+ }
+ m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
+
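+        // The low word was added with add_S, so the carry flag feeds this adc;
+        // (imm.m_value >> 31) supplies the sign extension of the 32-bit
+        // immediate, completing a 64-bit add of a signed 32-bit value.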
+ m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
+ m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
+ m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
+ }
+
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.ARM_and(dest, op1, op2);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.ARM_and(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.ARM_and(dest, src, dataTempRegister);
+ }
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ and32(dest, src, dest);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ and32(imm, dest, dest);
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ and32(dataTempRegister, dest);
+ }
+
+ void countLeadingZeros32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.clz(dest, src);
+ }
+
+ void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ // Clamp the shift to the range 0..31
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+ ASSERT(armImm.isValid());
+ m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
+
+ m_assembler.lsl(dest, src, dataTempRegister);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.lsl(dest, src, imm.m_value & 0x1f);
+ }
+
+ void lshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ lshift32(dest, shiftAmount, dest);
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ lshift32(dest, imm, dest);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
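+        // smull produces the full signed 64-bit product; the high half lands in
+        // dataTempRegister and is simply discarded here.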
+ m_assembler.smull(dest, dataTempRegister, dest, src);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, dataTempRegister);
+ m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
+ }
+
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.neg(srcDest, srcDest);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orr(dest, dest, src);
+ }
+
+ void or32(RegisterID src, AbsoluteAddress dest)
+ {
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+ load32(addressTempRegister, dataTempRegister);
+ or32(src, dataTempRegister);
+ store32(dataTempRegister, addressTempRegister);
+ }
+
+ void or32(TrustedImm32 imm, Address address)
+ {
+ load32(address, dataTempRegister);
+ or32(imm, dataTempRegister, dataTempRegister);
+ store32(dataTempRegister, address);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ or32(imm, dest, dest);
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.orr(dest, op1, op2);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.orr(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.orr(dest, src, dataTempRegister);
+ }
+ }
+
+ void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ // Clamp the shift to the range 0..31
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+ ASSERT(armImm.isValid());
+ m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
+
+ m_assembler.asr(dest, src, dataTempRegister);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.asr(dest, src, imm.m_value & 0x1f);
+ }
+
+ void rshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ rshift32(dest, shiftAmount, dest);
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ rshift32(dest, imm, dest);
+ }
+
+ void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ // Clamp the shift to the range 0..31
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+ ASSERT(armImm.isValid());
+ m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
+
+ m_assembler.lsr(dest, src, dataTempRegister);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.lsr(dest, src, imm.m_value & 0x1f);
+ }
+
+ void urshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ urshift32(dest, shiftAmount, dest);
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ urshift32(dest, imm, dest);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sub(dest, dest, src);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.sub(dest, dest, dataTempRegister);
+ }
+ }
+
+ void sub32(TrustedImm32 imm, Address address)
+ {
+ load32(address, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ sub32(dataTempRegister, dest);
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.eor(dest, op1, op2);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value == -1) {
+ m_assembler.mvn(dest, src);
+ return;
+ }
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.eor(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.eor(dest, src, dataTempRegister);
+ }
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ xor32(dest, src, dest);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.mvn(dest, dest);
+ else
+ xor32(imm, dest, dest);
+ }
+
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+    // store(source, address). The source for a store may be a TrustedImm32. Address
+    // operand objects for loads and stores will be implicitly constructed if a
+    // register is passed.
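To make the shapes above concrete, a hypothetical caller would drive the public overloads like this (the masm instance and register choices are assumptions made for illustration, not part of this patch):

    // Load a 32-bit field at offset 8, then store it back scaled-indexed.
    masm.load32(MacroAssemblerARMv7::Address(ARMRegisters::r0, 8), ARMRegisters::r1);
    masm.store32(ARMRegisters::r1,
        MacroAssemblerARMv7::BaseIndex(ARMRegisters::r0, ARMRegisters::r2,
                                       MacroAssemblerARMv7::TimesFour));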
+
+private:
+ void load32(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldr(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldr(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void load16(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldrh(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void load16SignedExtendTo32(ArmAddress address, RegisterID dest)
+ {
+ ASSERT(address.type == ArmAddress::HasIndex);
+ m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
+ }
+
+ void load8(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldrb(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void load8SignedExtendTo32(ArmAddress address, RegisterID dest)
+ {
+ ASSERT(address.type == ArmAddress::HasIndex);
+ m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
+ }
+
+protected:
+ void store32(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.str(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.str(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.str(src, address.base, address.u.offset, true, false);
+ }
+ }
+
+private:
+ void store8(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.strb(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.strb(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.strb(src, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void store16(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.strh(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.strh(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.strh(src, address.base, address.u.offset, true, false);
+ }
+ }
+
+public:
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(setupArmAddress(address), dest);
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void abortWithReason(AbortReason reason)
+ {
+ move(TrustedImm32(reason), dataTempRegister);
+ breakpoint();
+ }
+
+ void abortWithReason(AbortReason reason, intptr_t misc)
+ {
+ move(TrustedImm32(misc), addressTempRegister);
+ abortWithReason(reason);
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result(this);
+ ASSERT(address.offset >= 0 && address.offset <= 255);
+ m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset);
+ return result;
+ }
+
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ load8(setupArmAddress(address), dest);
+ }
+
+ void load8SignedExtendTo32(ImplicitAddress, RegisterID)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ load8(setupArmAddress(address), dest);
+ }
+
+ void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+ {
+ load8SignedExtendTo32(setupArmAddress(address), dest);
+ }
+
+ void load8(const void* address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address), dest);
+ load8(dest, dest);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
+ load32(ArmAddress(address.base, dataTempRegister), dest);
+ return label;
+ }
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+
+ RegisterID base = address.base;
+
+ DataLabelCompact label(this);
+ ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+
+ m_assembler.ldr(dest, base, address.offset, true, false);
+ return label;
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
+ }
+
+ void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+ {
+ load16SignedExtendTo32(setupArmAddress(address), dest);
+ }
+
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
+ if (armImm.isValid())
+ m_assembler.ldrh(dest, address.base, armImm);
+ else {
+ move(TrustedImm32(address.offset), dataTempRegister);
+ m_assembler.ldrh(dest, address.base, dataTempRegister);
+ }
+ }
+
+ void load16SignedExtendTo32(ImplicitAddress, RegisterID)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
+ store32(src, ArmAddress(address.base, dataTempRegister));
+ return label;
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, setupArmAddress(address));
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ store32(src, setupArmAddress(address));
+ }
+
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, setupArmAddress(address));
+ }
+
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, setupArmAddress(address));
+ }
+
+ void store32(RegisterID src, const void* address)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void store32(TrustedImm32 imm, const void* address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, address);
+ }
+
+ void store8(RegisterID src, Address address)
+ {
+ store8(src, setupArmAddress(address));
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+ store8(src, setupArmAddress(address));
+ }
+
+ void store8(RegisterID src, void* address)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ store8(src, ArmAddress(addressTempRegister, 0));
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ move(imm, dataTempRegister);
+ store8(dataTempRegister, address);
+ }
+
+ void store8(TrustedImm32 imm, Address address)
+ {
+ move(imm, dataTempRegister);
+ store8(dataTempRegister, address);
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+ store16(src, setupArmAddress(address));
+ }
+
+ // Possibly clobbers src, but not on this architecture.
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.vmov(dest1, dest2, src);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ UNUSED_PARAM(scratch);
+ m_assembler.vmov(dest, src1, src2);
+ }
+
+ static bool shouldBlindForSpecificArch(uint32_t value)
+ {
+ ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);
+
+ // Couldn't be encoded as an immediate, so assume it's untrusted.
+ if (!immediate.isValid())
+ return true;
+
+ // If we can encode the immediate, we have less than 16 attacker
+ // controlled bits.
+ if (immediate.isEncodedImm())
+ return false;
+
+ // Don't let any more than 12 bits of an instruction word
+ // be controlled by an attacker.
+ return !immediate.isUInt12();
+ }
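The heuristic keys off how many bits of a constant survive into the instruction stream. Assumed outcomes for a few representative values, following the Thumb-2 modified-immediate rules (an 8-bit value optionally rotated, or a byte-replication pattern); a sketch, not exhaustive:

    // shouldBlindForSpecificArch(0x000000ff) == false  // plain 8-bit immediate
    // shouldBlindForSpecificArch(0x00ff00ff) == false  // 0x00XY00XY replication pattern
    // shouldBlindForSpecificArch(0x12345678) == true   // no encoding; needs movw/movt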
+
+ // Floating-point operations:
+
+ static bool supportsFloatingPoint() { return true; }
+ static bool supportsFloatingPointTruncate() { return true; }
+ static bool supportsFloatingPointSqrt() { return true; }
+ static bool supportsFloatingPointAbs() { return true; }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+        // ARM VFP addresses can be offset by an 8-bit immediate scaled by 4, plus a
+        // separate add/subtract bit: word-aligned offsets in the range +/-(255 * 4).
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.vldr(dest, base, offset);
+ }
+
+ void loadFloat(ImplicitAddress address, FPRegisterID dest)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+        // ARM VFP addresses can be offset by an 8-bit immediate scaled by 4, plus a
+        // separate add/subtract bit: word-aligned offsets in the range +/-(255 * 4).
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
+ }
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ loadDouble(Address(addressTempRegister, address.offset), dest);
+ }
+
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ loadFloat(Address(addressTempRegister, address.offset), dest);
+ }
+
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.vmov(dest, src);
+ }
+
+ void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ {
+ move(address, addressTempRegister);
+ m_assembler.vldr(dest, addressTempRegister, 0);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+        // ARM VFP addresses can be offset by an 8-bit immediate scaled by 4, plus a
+        // separate add/subtract bit: word-aligned offsets in the range +/-(255 * 4).
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.vstr(src, base, offset);
+ }
+
+ void storeFloat(FPRegisterID src, ImplicitAddress address)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+        // ARM VFP addresses can be offset by an 8-bit immediate scaled by 4, plus a
+        // separate add/subtract bit: word-aligned offsets in the range +/-(255 * 4).
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
+ }
+
+ void storeDouble(FPRegisterID src, TrustedImmPtr address)
+ {
+ move(address, addressTempRegister);
+ storeDouble(src, addressTempRegister);
+ }
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ storeDouble(src, Address(addressTempRegister, address.offset));
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ storeFloat(src, Address(addressTempRegister, address.offset));
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vadd(dest, dest, src);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ addDouble(fpTempRegister, dest);
+ }
+
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vadd(dest, op1, op2);
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
+ m_assembler.vadd(dest, dest, fpTempRegister);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vdiv(dest, dest, src);
+ }
+
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vdiv(dest, op1, op2);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsub(dest, dest, src);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ subDouble(fpTempRegister, dest);
+ }
+
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vsub(dest, op1, op2);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmul(dest, dest, src);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ mulDouble(fpTempRegister, dest);
+ }
+
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vmul(dest, op1, op2);
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsqrt(dest, src);
+ }
+
+ void absDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vabs(dest, src);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vneg(dest, src);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmov(fpTempRegister, src, src);
+ m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
+ }
+
+ void convertInt32ToDouble(Address address, FPRegisterID dest)
+ {
+        // FIXME: load directly into the FPR!
+ load32(address, dataTempRegister);
+ m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
+ m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+        // FIXME: load directly into the FPR!
+ load32(address.m_ptr, dataTempRegister);
+ m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
+ m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ m_assembler.vcmp(left, right);
+ m_assembler.vmrs();
+
+ if (cond == DoubleNotEqual) {
+ // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump result = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ return result;
+ }
+ if (cond == DoubleEqualOrUnordered) {
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ // We get here if either unordered or equal.
+ Jump result = jump();
+ notEqual.link(this);
+ return result;
+ }
+ return makeBranch(cond);
+ }
+
+ enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ // Convert into dest.
+ m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
+
+ // Calculate 2x dest. If the value potentially underflowed, it will have
+ // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
+ // overflow the result will be equal to -2.
+ Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
+ Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));
+
+ // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
+ underflow.link(this);
+ if (branchType == BranchIfTruncateSuccessful)
+ return noOverflow;
+
+ // We'll reach the current point in the code on failure, so plant a
+ // jump here & link the success case.
+ Jump failure = jump();
+ noOverflow.link(this);
+ return failure;
+ }
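The doubling trick works because VCVT saturates out-of-range conversions: underflow clamps to INT32_MIN and overflow to INT32_MAX, and doubling in 32-bit two's complement makes both saturated values stand out. A worked check of the arithmetic, in plain C++ for illustration:

    #include <cstdint>

    // branchAdd32(Zero, dest, dest, tmp) computes tmp = dest + dest mod 2^32:
    //   saturated underflow: 0x80000000 + 0x80000000 wraps to 0x00000000 (Zero taken)
    //   saturated overflow:  0x7fffffff + 0x7fffffff wraps to 0xfffffffe == -2
    int32_t doubled(int32_t v)
    {
        return static_cast<int32_t>(uint32_t(v) + uint32_t(v)); // wraps, as on ARM
    }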
+
+ // Result is undefined if the value is outside of the integer range.
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
+ }
+
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
+ }
+
+    // Convert 'src' to an integer, and place the result in 'dest'.
+    // If the result is not representable as a 32-bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
+ {
+ m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
+
+ // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+ m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle());
+ failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
+
+ // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+ if (negZeroCheck)
+ failureCases.append(branchTest32(Zero, dest));
+ }
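The extra zero test is needed because the round-trip comparison is blind to the sign of zero. A sketch of the blind spot:

    #include <cstdint>

    // int32_t(-0.0) == 0, double(0) == +0.0, and (-0.0 == +0.0) is true under
    // IEEE-754 ordered comparison, so the vcvt round trip alone cannot see -0.0;
    // only branchTest32(Zero, dest) routes that case to the failure list.
    bool roundTripLooksExact(double d) { return double(int32_t(d)) == d; } // true for -0.0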
+
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
+ {
+ m_assembler.vcmpz(reg);
+ m_assembler.vmrs();
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump result = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ return result;
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
+ {
+ m_assembler.vcmpz(reg);
+ m_assembler.vmrs();
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ // We get here if either unordered or equal.
+ Jump result = jump();
+ notEqual.link(this);
+ return result;
+ }
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.pop(dest);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.push(src);
+ }
+
+ void push(Address address)
+ {
+ load32(address, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ void push(TrustedImm32 imm)
+ {
+ move(imm, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ void popPair(RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.pop(1 << dest1 | 1 << dest2);
+ }
+
+ void pushPair(RegisterID src1, RegisterID src2)
+ {
+ m_assembler.push(1 << src1 | 1 << src2);
+ }
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+ uint32_t value = imm.m_value;
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
+
+ if (armImm.isValid())
+ m_assembler.mov(dest, armImm);
+ else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
+ m_assembler.mvn(dest, armImm);
+ else {
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
+ if (value & 0xffff0000)
+ m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
+ }
+ }
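move() picks the cheapest materialization: a one-instruction MOV when the value encodes as a Thumb-2 immediate, MVN when its complement does, and a MOVW (optionally followed by MOVT) otherwise. Assumed selections for a few constants, for illustration:

    // move(TrustedImm32(0x000000ff), r) -> mov  r, #0xff        (encoded immediate)
    // move(TrustedImm32(0xffffff00), r) -> mvn  r, #0xff        (~value encodes)
    // move(TrustedImm32(0x00001234), r) -> movw r, #0x1234      (high half zero: no movt)
    // move(TrustedImm32(0x12345678), r) -> movw r, #0x5678 ; movt r, #0x1234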
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.mov(dest, src);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ move(TrustedImm32(imm), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ move(reg1, dataTempRegister);
+ move(reg2, reg1);
+ move(dataTempRegister, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ return static_cast<RelationalCondition>(cond ^ 1);
+ }
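The XOR works because ARM condition codes are laid out as adjacent inverse pairs (EQ/NE = 0/1, HS/LO = 2/3, ..., GE/LT = 10/11, GT/LE = 12/13), so flipping bit 0 negates any condition; the RelationalCondition values here mirror those encodings directly. For example:

    static_assert((0x0 ^ 1) == 0x1, "EQ (0000) inverts to NE (0001)");
    static_assert((0xa ^ 1) == 0xb, "GE (1010) inverts to LT (1011)");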
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ void memoryFence()
+ {
+ m_assembler.dmbSY();
+ }
+
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return ARMv7Assembler::maxJumpReplacementSize();
+ }
+
+ // Forwards / external control flow operations:
+ //
+    // This set of jump and conditional branch operations returns a Jump
+    // object which may be linked at a later point, allowing forward jumps
+    // or jumps that will require external linkage (after the code has been
+    // relocated).
+    //
+    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively; for unsigned comparisons the names b, a, be, and ae are
+    // used (representing the names 'below' and 'above').
+    //
+    // Operands to the comparison are provided in the expected order, e.g.
+    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+    // treated as a signed 32-bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
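A hypothetical use of these helpers (the masm instance and reg are assumptions made for illustration):

    // Branch to a slow path when reg, treated as signed, exceeds 5.
    MacroAssemblerARMv7::Jump slow = masm.branch32(
        MacroAssemblerARMv7::GreaterThan, reg, MacroAssemblerARMv7::TrustedImm32(5));
    // ... fast path code ...
    slow.link(&masm); // bind the branch target to the current output position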
+private:
+
+ // Should we be using TEQ for equal/not-equal?
+ void compare32(RegisterID left, TrustedImm32 right)
+ {
+ int32_t imm = right.m_value;
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+ if (armImm.isValid())
+ m_assembler.cmp(left, armImm);
+ else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
+ m_assembler.cmn(left, armImm);
+ else {
+ move(TrustedImm32(imm), dataTempRegister);
+ m_assembler.cmp(left, dataTempRegister);
+ }
+ }
+
+ void test32(RegisterID reg, TrustedImm32 mask)
+ {
+ int32_t imm = mask.m_value;
+
+ if (imm == -1)
+ m_assembler.tst(reg, reg);
+ else {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+ if (armImm.isValid()) {
+ if (reg == ARMRegisters::sp) {
+ move(reg, addressTempRegister);
+ m_assembler.tst(addressTempRegister, armImm);
+ } else
+ m_assembler.tst(reg, armImm);
+ } else {
+ move(mask, dataTempRegister);
+ if (reg == ARMRegisters::sp) {
+ move(reg, addressTempRegister);
+ m_assembler.tst(addressTempRegister, dataTempRegister);
+ } else
+ m_assembler.tst(reg, dataTempRegister);
+ }
+ }
+ }
+
+public:
+ void test32(ResultCondition, RegisterID reg, TrustedImm32 mask)
+ {
+ test32(reg, mask);
+ }
+
+ Jump branch(ResultCondition cond)
+ {
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmp(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ compare32(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ load32(right, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ load32(left, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32WithUnalignedHalfWords(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left.m_ptr, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+ {
+ load32(left, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ compare32(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ ASSERT(!(0xffffff00 & right.m_value));
+        // use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
+ load8(left, addressTempRegister);
+ return branch8(cond, addressTempRegister, right);
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(0xffffff00 & right.m_value));
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load8(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch8(RelationalCondition cond, AbsoluteAddress address, TrustedImm32 right)
+ {
+ // Use addressTempRegister instead of dataTempRegister, since branch32 uses dataTempRegister.
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+ load8(Address(addressTempRegister), addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ m_assembler.tst(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ test32(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ load32(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ load32(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ load8(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ load8(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+ load8(Address(addressTempRegister), addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.bx(target);
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(Address address)
+ {
+ load32(address, dataTempRegister);
+ m_assembler.bx(dataTempRegister);
+ }
+
+ void jump(AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), dataTempRegister);
+ load32(Address(dataTempRegister), dataTempRegister);
+ m_assembler.bx(dataTempRegister);
+ }
+
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
+
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.add_S(dest, op1, op2);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dest, op1, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.add_S(dest, op1, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd32(cond, dest, src, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ return branchAdd32(cond, dest, dataTempRegister, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchAdd32(cond, dest, imm, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+        // Move the address into addressTempRegister,
+        // and load the value at that address into dataTempRegister.
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+ m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+
+ // Do the add.
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // If the operand does not fit into an immediate then load it temporarily
+ // into addressTempRegister; since we're overwriting addressTempRegister
+        // we'll need to reload it with the address afterwards.
+ move(imm, addressTempRegister);
+ m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+ }
+
+ // Store the result.
+ m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ m_assembler.smull(dest, dataTempRegister, src1, src2);
+
+ if (cond == Overflow) {
+ m_assembler.asr(addressTempRegister, dest, 31);
+ return branch32(NotEqual, addressTempRegister, dataTempRegister);
+ }
+
+ return branchTest32(cond, dest);
+ }
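For the Overflow case, smull produces the full 64-bit product (low word in dest, high word in dataTempRegister); the product fits in 32 bits exactly when the high word equals the sign extension of the low word, which is what the asr #31 and compare test. The same check in plain C++, a sketch rather than part of the patch:

    #include <cstdint>

    bool mulOverflows32(int32_t a, int32_t b)
    {
        int64_t product = int64_t(a) * int64_t(b);
        int32_t lo = int32_t(uint64_t(product));       // smull's 'dest'
        int32_t hi = int32_t(uint64_t(product) >> 32); // smull's dataTempRegister
        return hi != (lo >> 31);                       // asr #31: sign extension of lo
    }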
+
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchMul32(cond, src, dest, dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, dataTempRegister);
+ return branchMul32(cond, dataTempRegister, src, dest);
+ }
+
+ Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+ {
+ ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
+ m_assembler.sub_S(srcDest, zero, srcDest);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ m_assembler.orr_S(dest, dest, src);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.sub_S(dest, op1, op2);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub_S(dest, op1, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.sub_S(dest, op1, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub32(cond, dest, src, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub32(cond, dest, imm, dest);
+ }
+
+ void relativeTableJump(RegisterID index, int scale)
+ {
+ ASSERT(scale >= 0 && scale <= 31);
+
+        // dataTempRegister will point just past the jump if the index register contains zero
+ move(ARMRegisters::pc, dataTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));
+
+ ShiftTypeAndAmount shift(SRType_LSL, scale);
+ m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
+ jump(dataTempRegister);
+ }
+
+ // Miscellaneous operations:
+
+ void breakpoint(uint8_t imm = 0)
+ {
+ m_assembler.bkpt(imm);
+ }
+
+ ALWAYS_INLINE Call nearCall()
+ {
+ moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
+ }
+
+ ALWAYS_INLINE Call call()
+ {
+ moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
+ }
+
+ ALWAYS_INLINE Call call(RegisterID target)
+ {
+ return Call(m_assembler.blx(target), Call::None);
+ }
+
+ ALWAYS_INLINE Call call(Address address)
+ {
+ load32(address, dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::None);
+ }
+
+ ALWAYS_INLINE void ret()
+ {
+ m_assembler.bx(linkRegister);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmp(left, right);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
+ {
+ load32(left, dataTempRegister);
+ compare32(cond, dataTempRegister, right, dest);
+ }
+
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ load8(left, addressTempRegister);
+ compare32(cond, addressTempRegister, right, dest);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ compare32(left, right);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ // FIXME:
+    // The mask should be optional... perhaps the argument order should be
+ // dest-src, operations always have a dest? ... possibly not true, considering
+ // asm ops like test, or pseudo ops like pop().
+ void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ load32(address, dataTempRegister);
+ test32(dataTempRegister, mask);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ load8(address, dataTempRegister);
+ test32(dataTempRegister, mask);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
+ {
+ padBeforePatch();
+ moveFixedWidthEncoding(imm, dst);
+ return DataLabel32(this);
+ }
+
+ ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
+ {
+ padBeforePatch();
+ moveFixedWidthEncoding(TrustedImm32(imm), dst);
+ return DataLabelPtr(this);
+ }
+
+ ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ load32(left, addressTempRegister);
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, addressTempRegister, dataTempRegister);
+ }
+
+ ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ load32(left, addressTempRegister);
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, addressTempRegister, dataTempRegister);
+ }
+
+ PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32(cond, left, TrustedImm32(right));
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branchTest32(cond, reg, mask);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32(cond, reg, imm);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableJump()
+ {
+ padBeforePatch();
+ m_makeJumpPatchable = true;
+ Jump result = jump();
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
+ store32(dataTempRegister, address);
+ return label;
+ }
+ ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
+
+
+ ALWAYS_INLINE Call tailRecursiveCall()
+ {
+ // Like a normal call, but don't link.
+ moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+ return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
+ }
+
+ ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ return tailRecursiveCall();
+ }
+
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
+ }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+ static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ const unsigned twoWordOpSize = 4;
+ return label.labelAtOffset(-twoWordOpSize * 2);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
+ {
+#if OS(LINUX)
+ ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue));
+#else
+ UNUSED_PARAM(rd);
+ ARMv7Assembler::revertJumpTo_movT3(instructionStart.dataLocation(), dataTempRegister, ARMThumbImmediate::makeUInt16(reinterpret_cast<uintptr_t>(initialValue) & 0xffff));
+#endif
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+ static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+#if ENABLE(MASM_PROBE)
+ // Methods required by the MASM_PROBE mechanism as defined in
+ // AbstractMacroAssembler.h.
+ static void printCPURegisters(CPUState&, int indentation = 0);
+ static void printRegister(CPUState&, RegisterID);
+ static void printRegister(CPUState&, FPRegisterID);
+ void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
+#endif // ENABLE(MASM_PROBE)
+
+protected:
+ ALWAYS_INLINE Jump jump()
+ {
+ m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
+ moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+ return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
+ }
+
+ ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
+ {
+ m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
+ m_assembler.it(cond, true, true);
+ moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+ return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
+ }
+ ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
+ ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
+ ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
+
+ ArmAddress setupArmAddress(BaseIndex address)
+ {
+ if (address.offset) {
+ ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+ if (imm.isValid())
+ m_assembler.add(addressTempRegister, address.base, imm);
+ else {
+ move(TrustedImm32(address.offset), addressTempRegister);
+ m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+ }
+
+ return ArmAddress(addressTempRegister, address.index, address.scale);
+ } else
+ return ArmAddress(address.base, address.index, address.scale);
+ }
+
+ ArmAddress setupArmAddress(Address address)
+ {
+ if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+ return ArmAddress(address.base, address.offset);
+
+ move(TrustedImm32(address.offset), addressTempRegister);
+ return ArmAddress(address.base, addressTempRegister);
+ }
+
+ ArmAddress setupArmAddress(ImplicitAddress address)
+ {
+ if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+ return ArmAddress(address.base, address.offset);
+
+ move(TrustedImm32(address.offset), addressTempRegister);
+ return ArmAddress(address.base, addressTempRegister);
+ }
+
+ RegisterID makeBaseIndexBase(BaseIndex address)
+ {
+ if (!address.offset)
+ return address.base;
+
+ ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+ if (imm.isValid())
+ m_assembler.add(addressTempRegister, address.base, imm);
+ else {
+ move(TrustedImm32(address.offset), addressTempRegister);
+ m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+ }
+
+ return addressTempRegister;
+ }
+
+ void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
+ {
+ uint32_t value = imm.m_value;
+ m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
+ m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
+ }
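Unlike move(), this always emits the full movT3 + movt pair, so every patchable site has the same two-instruction shape regardless of the value; the repatching machinery relies on that fixed layout to rewrite the embedded constant in place. Assumed encodings, for comparison:

    // move(TrustedImm32(5), r)                   -> mov  r, #5              (one instruction)
    // moveFixedWidthEncoding(TrustedImm32(5), r) -> movw r, #5 ; movt r, #0 (always two)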
+
+ ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+
+ ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+
+ ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ ARMv7Assembler::linkCall(code, call.m_label, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+#if ENABLE(MASM_PROBE)
+ inline TrustedImm32 trustedImm32FromPtr(void* ptr)
+ {
+ return TrustedImm32(TrustedImmPtr(ptr));
+ }
+
+ inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function)
+ {
+ return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+ }
+
+ inline TrustedImm32 trustedImm32FromPtr(void (*function)())
+ {
+ return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+ }
+#endif
+
+ bool m_makeJumpPatchable;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerARMv7_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
new file mode 100644
index 000000000..b4d6c0bfb
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -0,0 +1,467 @@
+/*
+ * Copyright (C) 2009, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerCodeRef_h
+#define MacroAssemblerCodeRef_h
+
+#include "Disassembler.h"
+#include "ExecutableAllocator.h"
+#include "LLIntData.h"
+#include <wtf/DataLog.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/PrintStream.h>
+#include <wtf/RefPtr.h>
+
+// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
+// instruction address on the platform (for example, check any alignment requirements).
+#if CPU(ARM_THUMB2) && ENABLE(JIT)
+// ARM instructions must be 16-bit aligned. Thumb2 code pointers to be loaded
+// into the processor are decorated with the bottom bit set, while traditional ARM
+// has the lower bit clear. Since we don't know which kind of pointer we have, we
+// check for both decorated and undecorated null.
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+ ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1)
+#define ASSERT_VALID_CODE_OFFSET(offset) \
+ ASSERT(!(offset & 1)) // Must be multiple of 2.
+#else
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+ ASSERT(ptr)
+#define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
+#endif
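A few concrete pointer values under the Thumb-2 assertion above, for illustration:

    // ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1) rejects both nulls:
    //   0x00000000 & ~1 == 0          -> assertion fires (undecorated null)
    //   0x00000001 & ~1 == 0          -> assertion fires (decorated null)
    //   0x00008001 & ~1 == 0x00008000 -> passes (Thumb entry point at 0x8000)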
+
+#if CPU(X86) && OS(WINDOWS)
+#define CALLING_CONVENTION_IS_STDCALL 1
+#ifndef CDECL
+#if COMPILER(MSVC)
+#define CDECL __cdecl
+#else
+#define CDECL __attribute__ ((__cdecl))
+#endif // COMPILER(MSVC)
+#endif // CDECL
+#else
+#define CALLING_CONVENTION_IS_STDCALL 0
+#endif
+
+#if CPU(X86)
+#define HAS_FASTCALL_CALLING_CONVENTION 1
+#ifndef FASTCALL
+#if COMPILER(MSVC)
+#define FASTCALL __fastcall
+#else
+#define FASTCALL __attribute__ ((fastcall))
+#endif // COMPILER(MSVC)
+#endif // FASTCALL
+#else
+#define HAS_FASTCALL_CALLING_CONVENTION 0
+#endif // CPU(X86)
+
+namespace JSC {
+
+// FunctionPtr:
+//
+// FunctionPtr should be used to wrap pointers to C/C++ functions in JSC
+// (particularly, the stub functions).
+class FunctionPtr {
+public:
+ FunctionPtr()
+ : m_value(0)
+ {
+ }
+
+ template<typename returnType>
+ FunctionPtr(returnType(*value)())
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1>
+ FunctionPtr(returnType(*value)(argType1))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2>
+ FunctionPtr(returnType(*value)(argType1, argType2))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3>
+ FunctionPtr(returnType(*value)(argType1, argType2, argType3))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+ FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5>
+ FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5, typename argType6>
+ FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5, argType6))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+// MSVC doesn't seem to treat functions with different calling conventions as
+// different types; these methods are already defined for fastcall, below.
+#if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS)
+
+ template<typename returnType>
+ FunctionPtr(returnType (CDECL *value)())
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1>
+ FunctionPtr(returnType (CDECL *value)(argType1))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2>
+ FunctionPtr(returnType (CDECL *value)(argType1, argType2))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3>
+ FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+ FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3, argType4))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+#endif
+
+#if HAS_FASTCALL_CALLING_CONVENTION
+
+ template<typename returnType>
+ FunctionPtr(returnType (FASTCALL *value)())
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1>
+ FunctionPtr(returnType (FASTCALL *value)(argType1))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2>
+ FunctionPtr(returnType (FASTCALL *value)(argType1, argType2))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3>
+ FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+ FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3, argType4))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+#endif
+
+ template<typename FunctionType>
+ explicit FunctionPtr(FunctionType* value)
+    // Using a C-style cast here to avoid compiler error on RVTC:
+ // Error: #694: reinterpret_cast cannot cast away const or other type qualifiers
+ // (I guess on RVTC function pointers have a different constness to GCC/MSVC?)
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* value() const { return m_value; }
+ void* executableAddress() const { return m_value; }
+
+
+private:
+ void* m_value;
+};
+
+// ReturnAddressPtr:
+//
+// ReturnAddressPtr should be used to wrap return addresses generated by processor
+// 'call' instructions executed in JIT code. We use return addresses to look up
+// exception and optimization information, and to repatch the call instruction
+// that is the source of the return address.
+class ReturnAddressPtr {
+public:
+ ReturnAddressPtr()
+ : m_value(0)
+ {
+ }
+
+ explicit ReturnAddressPtr(void* value)
+ : m_value(value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ explicit ReturnAddressPtr(FunctionPtr function)
+ : m_value(function.value())
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* value() const { return m_value; }
+
+ void dump(PrintStream& out) const
+ {
+ out.print(RawPointer(m_value));
+ }
+
+private:
+ void* m_value;
+};
+
+// MacroAssemblerCodePtr:
+//
+// MacroAssemblerCodePtr should be used to wrap pointers to JIT generated code.
+class MacroAssemblerCodePtr {
+public:
+ MacroAssemblerCodePtr()
+ : m_value(0)
+ {
+ }
+
+ explicit MacroAssemblerCodePtr(void* value)
+#if CPU(ARM_THUMB2)
+ // Decorate the pointer as a thumb code pointer.
+ : m_value(reinterpret_cast<char*>(value) + 1)
+#else
+ : m_value(value)
+#endif
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ static MacroAssemblerCodePtr createFromExecutableAddress(void* value)
+ {
+ ASSERT_VALID_CODE_POINTER(value);
+ MacroAssemblerCodePtr result;
+ result.m_value = value;
+ return result;
+ }
+
+ static MacroAssemblerCodePtr createLLIntCodePtr(OpcodeID codeId)
+ {
+ return createFromExecutableAddress(LLInt::getCodePtr(codeId));
+ }
+
+ explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
+ : m_value(ra.value())
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* executableAddress() const { return m_value; }
+#if CPU(ARM_THUMB2)
+ // To use this pointer as a data address remove the decoration.
+ void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
+#else
+ void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
+#endif
+
+ explicit operator bool() const { return m_value; }
+
+ bool operator==(const MacroAssemblerCodePtr& other) const
+ {
+ return m_value == other.m_value;
+ }
+
+ void dumpWithName(const char* name, PrintStream& out) const
+ {
+ if (executableAddress() == dataLocation()) {
+ out.print(name, "(", RawPointer(executableAddress()), ")");
+ return;
+ }
+ out.print(name, "(executable = ", RawPointer(executableAddress()), ", dataLocation = ", RawPointer(dataLocation()), ")");
+ }
+
+ void dump(PrintStream& out) const
+ {
+ dumpWithName("CodePtr", out);
+ }
+
+ enum EmptyValueTag { EmptyValue };
+ enum DeletedValueTag { DeletedValue };
+
+ MacroAssemblerCodePtr(EmptyValueTag)
+ : m_value(emptyValue())
+ {
+ }
+
+ MacroAssemblerCodePtr(DeletedValueTag)
+ : m_value(deletedValue())
+ {
+ }
+
+ bool isEmptyValue() const { return m_value == emptyValue(); }
+ bool isDeletedValue() const { return m_value == deletedValue(); }
+
+ unsigned hash() const { return PtrHash<void*>::hash(m_value); }
+
+private:
+ static void* emptyValue() { return bitwise_cast<void*>(static_cast<intptr_t>(1)); }
+ static void* deletedValue() { return bitwise_cast<void*>(static_cast<intptr_t>(2)); }
+
+ void* m_value;
+};
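+
+// Illustrative note ('buffer' is a hypothetical pointer to generated code):
+// on CPU(ARM_THUMB2) the constructor sets the low bit of the address, so the
+// executable and data views of the same code differ:
+//
+//     MacroAssemblerCodePtr ptr(buffer);
+//     ptr.executableAddress(); // buffer + 1, Thumb-decorated
+//     ptr.dataLocation();      // buffer, the undecorated address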
+
+struct MacroAssemblerCodePtrHash {
+ static unsigned hash(const MacroAssemblerCodePtr& ptr) { return ptr.hash(); }
+ static bool equal(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
+ {
+ return a == b;
+ }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+// MacroAssemblerCodeRef:
+//
+// A reference to a section of JIT generated code. A CodeRef consists of a
+// pointer to the code, and a ref pointer to the pool from within which it
+// was allocated.
+class MacroAssemblerCodeRef {
+private:
+ // This is private because it's dangerous enough that we want uses of it
+ // to be easy to find - hence the static create method below.
+ explicit MacroAssemblerCodeRef(MacroAssemblerCodePtr codePtr)
+ : m_codePtr(codePtr)
+ {
+ ASSERT(m_codePtr);
+ }
+
+public:
+ MacroAssemblerCodeRef()
+ {
+ }
+
+ MacroAssemblerCodeRef(PassRefPtr<ExecutableMemoryHandle> executableMemory)
+ : m_codePtr(executableMemory->start())
+ , m_executableMemory(executableMemory)
+ {
+ ASSERT(m_executableMemory->isManaged());
+ ASSERT(m_executableMemory->start());
+ ASSERT(m_codePtr);
+ }
+
+ // Use this only when you know that the codePtr refers to code that is
+ // already being kept alive through some other means. Typically this means
+ // that codePtr is immortal.
+ static MacroAssemblerCodeRef createSelfManagedCodeRef(MacroAssemblerCodePtr codePtr)
+ {
+ return MacroAssemblerCodeRef(codePtr);
+ }
+
+ // Helper for creating self-managed code refs from LLInt.
+ static MacroAssemblerCodeRef createLLIntCodeRef(OpcodeID codeId)
+ {
+ return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(codeId)));
+ }
+
+ ExecutableMemoryHandle* executableMemory() const
+ {
+ return m_executableMemory.get();
+ }
+
+ MacroAssemblerCodePtr code() const
+ {
+ return m_codePtr;
+ }
+
+ size_t size() const
+ {
+ if (!m_executableMemory)
+ return 0;
+ return m_executableMemory->sizeInBytes();
+ }
+
+ bool tryToDisassemble(const char* prefix) const
+ {
+ return JSC::tryToDisassemble(m_codePtr, size(), prefix, WTF::dataFile());
+ }
+
+ explicit operator bool() const { return !!m_codePtr; }
+
+ void dump(PrintStream& out) const
+ {
+ m_codePtr.dumpWithName("CodeRef", out);
+ }
+
+private:
+ MacroAssemblerCodePtr m_codePtr;
+ RefPtr<ExecutableMemoryHandle> m_executableMemory;
+};
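+
+// Sketch of the two ways a CodeRef is obtained (illustrative; 'handle' and
+// 'opcodeID' are hypothetical): from freshly allocated executable memory,
+// which the ref keeps alive, or from immortal code such as LLInt stubs:
+//
+//     MacroAssemblerCodeRef owned(handle); // handle: ExecutableMemoryHandle
+//     MacroAssemblerCodeRef immortal =
+//         MacroAssemblerCodeRef::createLLIntCodeRef(opcodeID);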
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::MacroAssemblerCodePtr> {
+ typedef JSC::MacroAssemblerCodePtrHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::MacroAssemblerCodePtr> : public CustomHashTraits<JSC::MacroAssemblerCodePtr> { };
+
+} // namespace WTF
+
+#endif // MacroAssemblerCodeRef_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
new file mode 100644
index 000000000..1a9312829
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
@@ -0,0 +1,2822 @@
+/*
+ * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerMIPS_h
+#define MacroAssemblerMIPS_h
+
+#if ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#include "AbstractMacroAssembler.h"
+#include "MIPSAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerMIPS : public AbstractMacroAssembler<MIPSAssembler, MacroAssemblerMIPS> {
+public:
+ typedef MIPSRegisters::FPRegisterID FPRegisterID;
+
+ MacroAssemblerMIPS()
+ : m_fixedWidth(false)
+ {
+ }
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value >= -2147483647 - 1 && value <= 2147483647;
+ }
+
+ static const Scale ScalePtr = TimesFour;
+
+ // For storing immediate number
+ static const RegisterID immTempRegister = MIPSRegisters::t0;
+ // For storing data loaded from the memory
+ static const RegisterID dataTempRegister = MIPSRegisters::t1;
+ // For storing address base
+ static const RegisterID addrTempRegister = MIPSRegisters::t2;
+ // For storing compare result
+ static const RegisterID cmpTempRegister = MIPSRegisters::t3;
+
+ // FP temp register
+ static const FPRegisterID fpTempRegister = MIPSRegisters::f16;
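+
+ // Note: the macro operations below use these temporaries freely, so
+ // client code must not keep live values in t0-t3 or f16 across any
+ // single macro operation defined in this class.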
+
+ static const int MaximumCompactPtrAlignedAddressOffset = 0x7FFFFFFF;
+
+ enum RelationalCondition {
+ Equal,
+ NotEqual,
+ Above,
+ AboveOrEqual,
+ Below,
+ BelowOrEqual,
+ GreaterThan,
+ GreaterThanOrEqual,
+ LessThan,
+ LessThanOrEqual
+ };
+
+ enum ResultCondition {
+ Overflow,
+ Signed,
+ PositiveOrZero,
+ Zero,
+ NonZero
+ };
+
+ enum DoubleCondition {
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered
+ };
+
+ static const RegisterID stackPointerRegister = MIPSRegisters::sp;
+ static const RegisterID framePointerRegister = MIPSRegisters::fp;
+ static const RegisterID returnAddressRegister = MIPSRegisters::ra;
+
+ // Integer arithmetic operations:
+ //
+ // Operations are typically two operand - operation(source, srcDst).
+ // For many operations the source may be a TrustedImm32, and the srcDst
+ // operand may often be a memory location (explicitly described using an
+ // Address object).
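+ //
+ // For example (illustrative; regT0/regT1 name arbitrary registers), both
+ // of the following emit a single addu/addiu when the immediate fits in
+ // 16 bits:
+ //     add32(TrustedImm32(8), regT0);    // regT0 += 8
+ //     add32(regT0, regT1, regT0);       // regT0 = regT0 + regT1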
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addu(dest, dest, src);
+ }
+
+ void add32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.addu(dest, op1, op2);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, dest, dest);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value >= -32768 && imm.m_value <= 32767
+ && !m_fixedWidth) {
+ /*
+ addiu dest, src, imm
+ */
+ m_assembler.addiu(dest, src, imm.m_value);
+ } else {
+ /*
+ li immTemp, imm
+ addu dest, src, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.addu(dest, src, immTempRegister);
+ }
+ }
+
+ void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, src, dest);
+ }
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ lw dataTemp, offset(base)
+ li immTemp, imm
+ addu dataTemp, dataTemp, immTemp
+ sw dataTemp, offset(base)
+ */
+ m_assembler.lw(dataTempRegister, address.base, address.offset);
+ if (imm.m_value >= -32768 && imm.m_value <= 32767
+ && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, address.base, address.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dataTemp, (offset & 0xffff)(addrTemp)
+ li immTemp, imm
+ addu dataTemp, dataTemp, immTemp
+ sw dataTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dataTempRegister, addrTempRegister, address.offset);
+
+ if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, address.offset);
+ }
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ load32(src.m_ptr, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(RegisterID src, Address dest)
+ {
+ if (dest.offset >= -32768 && dest.offset <= 32767 && !m_fixedWidth) {
+ /*
+ lw dataTemp, offset(base)
+ addu dataTemp, dataTemp, src
+ sw dataTemp, offset(base)
+ */
+ m_assembler.lw(dataTempRegister, dest.base, dest.offset);
+ m_assembler.addu(dataTempRegister, dataTempRegister, src);
+ m_assembler.sw(dataTempRegister, dest.base, dest.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dataTemp, (offset & 0xffff)(addrTemp)
+ addu dataTemp, dataTemp, src
+ sw dataTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (dest.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, dest.base);
+ m_assembler.lw(dataTempRegister, addrTempRegister, dest.offset);
+ m_assembler.addu(dataTempRegister, dataTempRegister, src);
+ m_assembler.sw(dataTempRegister, addrTempRegister, dest.offset);
+ }
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ /*
+ li addrTemp, address
+ li immTemp, imm
+ lw cmpTemp, 0(addrTemp)
+ addu dataTemp, cmpTemp, immTemp
+ sw dataTemp, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address.m_ptr), addrTempRegister);
+ m_assembler.lw(cmpTempRegister, addrTempRegister, 0);
+ if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, cmpTempRegister, imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.addu(dataTempRegister, cmpTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, 0);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ /*
+ add32(imm, address)
+ sltu immTemp, dataTemp, cmpTemp # set carry-in bit
+ lw dataTemp, 4(addrTemp)
+ addiu dataTemp, imm.m_value >> 31 ? -1 : 0
+ addu dataTemp, dataTemp, immTemp
+ sw dataTemp, 4(addrTemp)
+ */
+ add32(imm, address);
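+ // The sltu below computes the carry out of the low word: after add32,
+ // dataTemp holds the low-word sum and cmpTemp the original low word, and
+ // an unsigned sum is smaller than its operand exactly when it wrapped.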
+ m_assembler.sltu(immTempRegister, dataTempRegister, cmpTempRegister);
+ m_assembler.lw(dataTempRegister, addrTempRegister, 4);
+ if (imm.m_value >> 31)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -1);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+ m_assembler.sw(dataTempRegister, addrTempRegister, 4);
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ and32(dataTempRegister, dest);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andInsn(dest, dest, src);
+ }
+
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.andInsn(dest, op1, op2);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth)
+ m_assembler.andi(dest, dest, imm.m_value);
+ else {
+ /*
+ li immTemp, imm
+ and dest, dest, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.andInsn(dest, dest, immTempRegister);
+ }
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth)
+ m_assembler.andi(dest, src, imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.andInsn(dest, src, immTempRegister);
+ }
+ }
+
+ void lshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.sllv(dest, dest, shiftAmount);
+ }
+
+ void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.sllv(dest, src, shiftAmount);
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ m_assembler.sllv(dest, dest, immTempRegister);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ m_assembler.sllv(dest, src, immTempRegister);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mul(dest, dest, src);
+ }
+
+ void mul32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.mul(dest, op1, op2);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (imm.m_value == 1 && !m_fixedWidth)
+ move(src, dest);
+ else {
+ /*
+ li dataTemp, imm
+ mul dest, src, dataTemp
+ */
+ move(imm, dataTempRegister);
+ m_assembler.mul(dest, src, dataTempRegister);
+ }
+ }
+
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.subu(srcDest, MIPSRegisters::zero, srcDest);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orInsn(dest, dest, src);
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.orInsn(dest, op1, op2);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ return;
+
+ if (imm.m_value > 0 && imm.m_value < 65535
+ && !m_fixedWidth) {
+ m_assembler.ori(dest, dest, imm.m_value);
+ return;
+ }
+
+ /*
+ li dataTemp, imm
+ or dest, dest, dataTemp
+ */
+ move(imm, dataTempRegister);
+ m_assembler.orInsn(dest, dest, dataTempRegister);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ return;
+
+ if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth) {
+ m_assembler.ori(dest, src, imm.m_value);
+ return;
+ }
+
+ /*
+ li dataTemp, imm
+ or dest, src, dataTemp
+ */
+ move(imm, dataTempRegister);
+ m_assembler.orInsn(dest, src, dataTempRegister);
+ }
+
+ void or32(RegisterID src, AbsoluteAddress dest)
+ {
+ load32(dest.m_ptr, dataTempRegister);
+ m_assembler.orInsn(dataTempRegister, dataTempRegister, src);
+ store32(dataTempRegister, dest.m_ptr);
+ }
+
+ void rshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.srav(dest, dest, shiftAmount);
+ }
+
+ void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.srav(dest, src, shiftAmount);
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.sra(dest, dest, imm.m_value);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.sra(dest, src, imm.m_value);
+ }
+
+ void urshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.srlv(dest, dest, shiftAmount);
+ }
+
+ void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.srlv(dest, src, shiftAmount);
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.srl(dest, dest, imm.m_value);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.srl(dest, src, imm.m_value);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subu(dest, dest, src);
+ }
+
+ void sub32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.subu(dest, op1, op2);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth) {
+ /*
+ addiu dest, dest, -imm
+ */
+ m_assembler.addiu(dest, dest, -imm.m_value);
+ } else {
+ /*
+ li immTemp, imm
+ subu dest, dest, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.subu(dest, dest, immTempRegister);
+ }
+ }
+
+ void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth) {
+ /*
+ addiu dest, src, -imm
+ */
+ m_assembler.addiu(dest, src, -imm.m_value);
+ } else {
+ /*
+ li immTemp, imm
+ subu dest, src, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.subu(dest, src, immTempRegister);
+ }
+ }
+
+ void sub32(TrustedImm32 imm, Address address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ lw dataTemp, offset(base)
+ li immTemp, imm
+ subu dataTemp, dataTemp, immTemp
+ sw dataTemp, offset(base)
+ */
+ m_assembler.lw(dataTempRegister, address.base, address.offset);
+ if (imm.m_value >= -32767 && imm.m_value <= 32768 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, address.base, address.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dataTemp, (offset & 0xffff)(addrTemp)
+ li immTemp, imm
+ subu dataTemp, dataTemp, immTemp
+ sw dataTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dataTempRegister, addrTempRegister, address.offset);
+
+ if (imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, address.offset);
+ }
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ sub32(dataTempRegister, dest);
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ /*
+ li addrTemp, address
+ li immTemp, imm
+ lw dataTemp, 0(addrTemp)
+ subu dataTemp, dataTemp, immTemp
+ sw dataTemp, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address.m_ptr), addrTempRegister);
+ m_assembler.lw(dataTempRegister, addrTempRegister, 0);
+
+ if (imm.m_value >= -32767 && imm.m_value <= 32768 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, 0);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorInsn(dest, dest, src);
+ }
+
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.xorInsn(dest, op1, op2);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == -1) {
+ m_assembler.nor(dest, dest, MIPSRegisters::zero);
+ return;
+ }
+
+ /*
+ li immTemp, imm
+ xor dest, dest, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.xorInsn(dest, dest, immTempRegister);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value == -1) {
+ m_assembler.nor(dest, src, MIPSRegisters::zero);
+ return;
+ }
+
+ /*
+ li immTemp, imm
+ xor dest, src, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.xorInsn(dest, src, immTempRegister);
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.sqrtd(dst, src);
+ }
+
+ void absDouble(FPRegisterID, FPRegisterID)
+ {
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result(this);
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ return result;
+ }
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+ // store(source, address). The source for a store may be a TrustedImm32.
+ // Address operand objects to loads and stores will be implicitly
+ // constructed if a register is passed.
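+ //
+ // For example (illustrative; regT0/regT1 name arbitrary registers):
+ //     load32(Address(regT0, 4), regT1);  // regT1 = *(int32_t*)(regT0 + 4)
+ //     store32(regT1, Address(regT0, 8)); // *(int32_t*)(regT0 + 8) = regT1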
+
+ /* Need to use zero-extended load byte for load8. */
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.lbu(dest, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lbu dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lbu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lbu dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lbu(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lbu dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lbu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ ALWAYS_INLINE void load8(AbsoluteAddress address, RegisterID dest)
+ {
+ load8(address.m_ptr, dest);
+ }
+
+ void load8(const void* address, RegisterID dest)
+ {
+ /*
+ li addrTemp, address
+ lbu dest, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.lbu(dest, addrTempRegister, 0);
+ }
+
+ void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lb dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lb(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lb dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lb(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.lw(dest, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lw dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lw dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(address, dest);
+ }
+
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32764
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ (Big-Endian)
+ lwl dest, address.offset(addrTemp)
+ lwr dest, address.offset+3(addrTemp)
+ (Little-Endian)
+ lwl dest, address.offset+3(addrTemp)
+ lwr dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+#if CPU(BIG_ENDIAN)
+ m_assembler.lwl(dest, addrTempRegister, address.offset);
+ m_assembler.lwr(dest, addrTempRegister, address.offset + 3);
+#else
+ m_assembler.lwl(dest, addrTempRegister, address.offset + 3);
+ m_assembler.lwr(dest, addrTempRegister, address.offset);
+#endif
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, address.offset >> 16
+ ori immTemp, immTemp, address.offset & 0xffff
+ addu addrTemp, addrTemp, immTemp
+ (Big-Endian)
+ lwl dest, 0(addrTemp)
+ lwr dest, 3(addrTemp)
+ (Little-Endian)
+ lwl dest, 3(addrTemp)
+ lwr dest, 0(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, address.offset >> 16);
+ m_assembler.ori(immTempRegister, immTempRegister, address.offset);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+#if CPU(BIG_ENDIAN)
+ m_assembler.lwl(dest, addrTempRegister, 0);
+ m_assembler.lwr(dest, addrTempRegister, 3);
+#else
+ m_assembler.lwl(dest, addrTempRegister, 3);
+ m_assembler.lwr(dest, addrTempRegister, 0);
+#endif
+ }
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ /*
+ li addrTemp, address
+ lw dest, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.lw(dest, addrTempRegister, 0);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ m_fixedWidth = true;
+ /*
+ lui addrTemp, address.offset >> 16
+ ori addrTemp, addrTemp, address.offset & 0xffff
+ addu addrTemp, addrTemp, address.base
+ lw dest, 0(addrTemp)
+ */
+ DataLabel32 dataLabel(this);
+ move(TrustedImm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, 0);
+ m_fixedWidth = false;
+ return dataLabel;
+ }
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabelCompact dataLabel(this);
+ load32WithAddressOffsetPatch(address, dest);
+ return dataLabel;
+ }
+
+ /* Need to use zero-extended load half-word for load16. */
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.lhu(dest, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lhu dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lhu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ /* Need to use zero-extended load half-word for load16. */
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lhu dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lhu(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lhu dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lhu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lh dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lh(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lh dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lh(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ m_fixedWidth = true;
+ /*
+ lui addrTemp, address.offset >> 16
+ ori addrTemp, addrTemp, address.offset & 0xffff
+ addu addrTemp, addrTemp, address.base
+ sw src, 0(addrTemp)
+ */
+ DataLabel32 dataLabel(this);
+ move(TrustedImm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sw(src, addrTempRegister, 0);
+ m_fixedWidth = false;
+ return dataLabel;
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sb src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sb(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sb src, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sb(src, addrTempRegister, address.offset);
+ }
+ }
+
+ void store8(RegisterID src, void* address)
+ {
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sb(src, addrTempRegister, 0);
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ /*
+ li immTemp, imm
+ li addrTemp, address
+ sb immTemp, 0(addrTemp)
+ */
+ if (!imm.m_value && !m_fixedWidth) {
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sb(MIPSRegisters::zero, addrTempRegister, 0);
+ } else {
+ move(imm, immTempRegister);
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sb(immTempRegister, addrTempRegister, 0);
+ }
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sh src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sh(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sh src, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sh(src, addrTempRegister, address.offset);
+ }
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.sw(src, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ sw src, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sw(src, addrTempRegister, address.offset);
+ }
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sw src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sw(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sw src, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sw(src, addrTempRegister, address.offset);
+ }
+ }
+
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ if (!imm.m_value)
+ m_assembler.sw(MIPSRegisters::zero, address.base, address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, address.base, address.offset);
+ }
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ sw immTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ if (!imm.m_value && !m_fixedWidth)
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister, address.offset);
+ }
+ }
+ }
+
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767 && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sw immTemp, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ if (!imm.m_value)
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister, address.offset);
+ }
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sw immTemp, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ if (!imm.m_value && !m_fixedWidth)
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister, address.offset);
+ }
+ }
+ }
+
+ void store32(RegisterID src, const void* address)
+ {
+ /*
+ li addrTemp, address
+ sw src, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sw(src, addrTempRegister, 0);
+ }
+
+ void store32(TrustedImm32 imm, const void* address)
+ {
+ /*
+ li immTemp, imm
+ li addrTemp, address
+ sw immTemp, 0(addrTemp)
+ */
+ if (!imm.m_value && !m_fixedWidth) {
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, 0);
+ } else {
+ move(imm, immTempRegister);
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister, 0);
+ }
+ }
+
+ // Floating-point operations:
+
+ static bool supportsFloatingPoint()
+ {
+#if WTF_MIPS_DOUBLE_FLOAT
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ static bool supportsFloatingPointTruncate()
+ {
+#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2)
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ static bool supportsFloatingPointSqrt()
+ {
+#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2)
+ return true;
+#else
+ return false;
+#endif
+ }
+ static bool supportsFloatingPointAbs() { return false; }
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
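+ //
+ // For example, push(regT0) (regT0 naming an arbitrary register) emits
+ //     addiu sp, sp, -4
+ //     sw regT0, 0(sp)
+ // and pop(regT0) emits the mirror-image lw followed by the sp adjustment.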
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.lw(dest, MIPSRegisters::sp, 0);
+ m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 4);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -4);
+ m_assembler.sw(src, MIPSRegisters::sp, 0);
+ }
+
+ void push(Address address)
+ {
+ load32(address, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ void push(TrustedImm32 imm)
+ {
+ move(imm, immTempRegister);
+ push(immTempRegister);
+ }
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (m_fixedWidth) {
+ m_assembler.lui(dest, imm.m_value >> 16);
+ m_assembler.ori(dest, dest, imm.m_value);
+ } else
+ m_assembler.li(dest, imm.m_value);
+ }
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest || m_fixedWidth)
+ m_assembler.move(dest, src);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ move(TrustedImm32(imm), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ move(reg1, immTempRegister);
+ move(reg2, reg1);
+ move(immTempRegister, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest || m_fixedWidth)
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest || m_fixedWidth)
+ move(src, dest);
+ }
+
+ // Forwards / external control flow operations:
+ //
+ // These jump and conditional branch operations return a Jump object which
+ // may be linked at a later point, allowing forward jumps, or jumps that
+ // will require external linkage (after the code has been relocated).
+ //
+ // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+ // respectively; for unsigned comparisons the names b, a, be, and ae are
+ // used (representing the names 'below' and 'above').
+ //
+ // Operands to the comparison are provided in the expected order, e.g.
+ // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+ // treated as a signed 32bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
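+ //
+ // For example (illustrative; regT0 names an arbitrary register), a
+ // forward branch is created unlinked and bound once its target is known:
+ //     Jump done = branch32(LessThan, regT0, TrustedImm32(5));
+ //     // ... emit the not-taken path ...
+ //     done.link(this);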
+
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ // Make sure the immediate value is unsigned 8 bits.
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ load8(left, dataTempRegister);
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ // Make sure the immediate value is unsigned 8 bits.
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ load8(left, dataTempRegister);
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ // Make sure the immediate value is unsigned 8 bits.
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ load8(left, dataTempRegister);
+ move(right, immTempRegister);
+ compare32(cond, dataTempRegister, immTempRegister, dest);
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ load8(left, dataTempRegister);
+ // The load8() above may clobber immTempRegister, so the move() of the
+ // immediate must come after it.
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ if (cond == Equal)
+ return branchEqual(left, right);
+ if (cond == NotEqual)
+ return branchNotEqual(left, right);
+ if (cond == Above) {
+ m_assembler.sltu(cmpTempRegister, right, left);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == AboveOrEqual) {
+ m_assembler.sltu(cmpTempRegister, left, right);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Below) {
+ m_assembler.sltu(cmpTempRegister, left, right);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == BelowOrEqual) {
+ m_assembler.sltu(cmpTempRegister, right, left);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == GreaterThan) {
+ m_assembler.slt(cmpTempRegister, right, left);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == GreaterThanOrEqual) {
+ m_assembler.slt(cmpTempRegister, left, right);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == LessThan) {
+ m_assembler.slt(cmpTempRegister, left, right);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == LessThanOrEqual) {
+ m_assembler.slt(cmpTempRegister, right, left);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+
+ return Jump();
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ move(right, immTempRegister);
+ return branch32(cond, left, immTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ load32(right, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ load32(left, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ load32(left, dataTempRegister);
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ load32(left, dataTempRegister);
+ // The load32() above may clobber immTempRegister, so the move() of the
+ // immediate must come after it.
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ load32WithUnalignedHalfWords(left, dataTempRegister);
+ // The load32WithUnalignedHalfWords() above may clobber immTempRegister,
+ // so the move() of the immediate must come after it.
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ m_assembler.andInsn(cmpTempRegister, reg, mask);
+ if (cond == Zero)
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ if (mask.m_value == -1 && !m_fixedWidth) {
+ if (cond == Zero)
+ return branchEqual(reg, MIPSRegisters::zero);
+ return branchNotEqual(reg, MIPSRegisters::zero);
+ }
+ move(mask, immTempRegister);
+ return branchTest32(cond, reg, immTempRegister);
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load32(address, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load32(address, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load8(address, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load8(address, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ move(TrustedImmPtr(address.m_ptr), dataTempRegister);
+ load8(Address(dataTempRegister), dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump jump()
+ {
+ return branchEqual(MIPSRegisters::zero, MIPSRegisters::zero);
+ }
+
+ void jump(RegisterID target)
+ {
+ move(target, MIPSRegisters::t9);
+ m_assembler.jr(MIPSRegisters::t9);
+ m_assembler.nop();
+ }
+
+ void jump(Address address)
+ {
+ m_fixedWidth = true;
+ load32(address, MIPSRegisters::t9);
+ m_assembler.jr(MIPSRegisters::t9);
+ m_assembler.nop();
+ m_fixedWidth = false;
+ }
+
+ void jump(AbsoluteAddress address)
+ {
+ m_fixedWidth = true;
+ load32(address.m_ptr, MIPSRegisters::t9);
+ m_assembler.jr(MIPSRegisters::t9);
+ m_assembler.nop();
+ m_fixedWidth = false;
+ }
+
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.vmov(dest1, dest2, src);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ UNUSED_PARAM(scratch);
+ m_assembler.vmov(dest, src1, src2);
+ }
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
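+ //
+ // For example (illustrative): branchAdd32(Overflow, regT1, regT0) adds
+ // regT1 into regT0 and returns a Jump taken only if the signed addition
+ // overflowed, using the xor-based sign tests spelled out in the
+ // instruction sequences below.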
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dest, dataTemp
+ xor cmpTemp, dataTemp, src
+ bltz cmpTemp, No_overflow # diff sign bit -> no overflow
+ addu dest, dataTemp, src
+ xor cmpTemp, dest, dataTemp
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ move(dest, dataTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, src);
+ m_assembler.bltz(cmpTempRegister, 10);
+ m_assembler.addu(dest, dataTempRegister, src);
+ m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ add32(src, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == PositiveOrZero) {
+ add32(src, dest);
+ // Check if dest is not negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ add32(src, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ add32(src, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dataTemp, op1
+ xor cmpTemp, dataTemp, op2
+ bltz cmpTemp, No_overflow # diff sign bit -> no overflow
+ addu dest, dataTemp, op2
+ xor cmpTemp, dest, dataTemp
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ move(op1, dataTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, op2);
+ m_assembler.bltz(cmpTempRegister, 10);
+ m_assembler.addu(dest, dataTempRegister, op2);
+ m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ add32(op1, op2, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == PositiveOrZero) {
+ add32(op1, op2, dest);
+ // Check if dest is not negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ add32(op1, op2, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ add32(op1, op2, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ return branchAdd32(cond, immTempRegister, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ move(src, dest);
+ return branchAdd32(cond, immTempRegister, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dataTemp, dest
+ xori cmpTemp, dataTemp, imm
+ bltz cmpTemp, No_overflow # diff sign bit -> no overflow
+ addiu dataTemp, dataTemp, imm
+ move dest, dataTemp
+ xori cmpTemp, dataTemp, imm
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth) {
+ load32(dest.m_ptr, dataTempRegister);
+ m_assembler.xori(cmpTempRegister, dataTempRegister, imm.m_value);
+ m_assembler.bltz(cmpTempRegister, 10);
+ m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
+ store32(dataTempRegister, dest.m_ptr);
+ m_assembler.xori(cmpTempRegister, dataTempRegister, imm.m_value);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ } else {
+ load32(dest.m_ptr, dataTempRegister);
+ move(imm, immTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+ m_assembler.bltz(cmpTempRegister, 10);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+ store32(dataTempRegister, dest.m_ptr);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ }
+ return jump();
+ }
+ move(imm, immTempRegister);
+ load32(dest.m_ptr, dataTempRegister);
+ add32(immTempRegister, dataTempRegister);
+ store32(dataTempRegister, dest.m_ptr);
+ if (cond == Signed) {
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == PositiveOrZero) {
+ // Check if dest is not negative.
+ m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero)
+ return branchEqual(dataTempRegister, MIPSRegisters::zero);
+ if (cond == NonZero)
+ return branchNotEqual(dataTempRegister, MIPSRegisters::zero);
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ mult src1, src2
+ mfhi dataTemp
+ mflo dest
+ sra addrTemp, dest, 31
+ beq dataTemp, addrTemp, No_overflow # all sign bits (bit 63 to bit 31) are the same -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
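+ // mult produces a 64-bit product split across hi and lo. The product fits
+ // in 32 bits exactly when hi equals the sign-extension of lo's bit 31,
+ // computed here with sra by 31. For example, 0x10000 * 0x10000 yields
+ // hi = 1, lo = 0; sra gives 0, so hi != 0 and the overflow branch is taken.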
+ m_assembler.mult(src1, src2);
+ m_assembler.mfhi(dataTempRegister);
+ m_assembler.mflo(dest);
+ m_assembler.sra(addrTempRegister, dest, 31);
+ m_assembler.beq(dataTempRegister, addrTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ mul32(src1, src2, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ mul32(src1, src2, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ mul32(src1, src2, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ mult src, dest
+ mfhi dataTemp
+ mflo dest
+ sra addrTemp, dest, 31
+ beq dataTemp, addrTemp, No_overflow # all sign bits (bit 63 to bit 31) are the same -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ m_assembler.mult(src, dest);
+ m_assembler.mfhi(dataTempRegister);
+ m_assembler.mflo(dest);
+ m_assembler.sra(addrTempRegister, dest, 31);
+ m_assembler.beq(dataTempRegister, addrTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ mul32(src, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ mul32(src, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ mul32(src, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ return branchMul32(cond, immTempRegister, src, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dest, dataTemp
+ xor cmpTemp, dataTemp, src
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ subu dest, dataTemp, src
+ xor cmpTemp, dest, dataTemp
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
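+ // Subtraction overflows only when the operands have different signs and
+ // the result's sign differs from the minuend's. For example, INT_MIN - 1:
+ // the operand signs differ, and the result 0x7fffffff differs in sign from
+ // INT_MIN, so the second bgez falls through to the overflow branch.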
+ move(dest, dataTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, src);
+ m_assembler.bgez(cmpTempRegister, 10);
+ m_assembler.subu(dest, dataTempRegister, src);
+ m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ sub32(src, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ sub32(src, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ sub32(src, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ return branchSub32(cond, immTempRegister, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ return branchSub32(cond, src, immTempRegister, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dataTemp, op1
+ xor cmpTemp, dataTemp, op2
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ subu dest, dataTemp, op2
+ xor cmpTemp, dest, dataTemp
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ move(op1, dataTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, op2);
+ m_assembler.bgez(cmpTempRegister, 10);
+ m_assembler.subu(dest, dataTempRegister, op2);
+ m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ sub32(op1, op2, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ sub32(op1, op2, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ sub32(op1, op2, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+ {
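+ // Negation is implemented as a multiply by -1 so that branchMul32's
+ // overflow check also catches the single overflowing input, INT_MIN,
+ // whose negation (2^31) is not representable in 32 bits.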
+ m_assembler.li(dataTempRegister, -1);
+ return branchMul32(cond, dataTempRegister, srcDest);
+ }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Signed) {
+ or32(src, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ or32(src, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ or32(src, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ // Miscellaneous operations:
+
+ void breakpoint()
+ {
+ m_assembler.bkpt();
+ }
+
+ Call nearCall()
+ {
+ /* We need two words for relaxation. */
+ m_assembler.nop();
+ m_assembler.nop();
+ m_assembler.jal();
+ m_assembler.nop();
+ return Call(m_assembler.label(), Call::LinkableNear);
+ }
+
+ Call call()
+ {
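+ // Emit a lui/ori pair as a placeholder for the callee's 32-bit address;
+ // linkCall() patches the two 16-bit halves later. Calling through t9
+ // follows the o32 PIC convention that t9 holds the callee's address on
+ // entry.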
+ m_assembler.lui(MIPSRegisters::t9, 0);
+ m_assembler.ori(MIPSRegisters::t9, MIPSRegisters::t9, 0);
+ m_assembler.jalr(MIPSRegisters::t9);
+ m_assembler.nop();
+ return Call(m_assembler.label(), Call::Linkable);
+ }
+
+ Call call(RegisterID target)
+ {
+ move(target, MIPSRegisters::t9);
+ m_assembler.jalr(MIPSRegisters::t9);
+ m_assembler.nop();
+ return Call(m_assembler.label(), Call::None);
+ }
+
+ Call call(Address address)
+ {
+ m_fixedWidth = true;
+ load32(address, MIPSRegisters::t9);
+ m_assembler.jalr(MIPSRegisters::t9);
+ m_assembler.nop();
+ m_fixedWidth = false;
+ return Call(m_assembler.label(), Call::None);
+ }
+
+ void ret()
+ {
+ m_assembler.jr(MIPSRegisters::ra);
+ m_assembler.nop();
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
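+ // MIPS has no condition flags; relational results are materialized with
+ // slt/sltu (set-on-less-than), and the remaining conditions are derived by
+ // swapping operands or inverting the 0/1 result with xori 1. For example,
+ // GreaterThanOrEqual computes slt dest, left, right and then flips the bit.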
+ if (cond == Equal) {
+ m_assembler.xorInsn(dest, left, right);
+ m_assembler.sltiu(dest, dest, 1);
+ } else if (cond == NotEqual) {
+ m_assembler.xorInsn(dest, left, right);
+ m_assembler.sltu(dest, MIPSRegisters::zero, dest);
+ } else if (cond == Above)
+ m_assembler.sltu(dest, right, left);
+ else if (cond == AboveOrEqual) {
+ m_assembler.sltu(dest, left, right);
+ m_assembler.xori(dest, dest, 1);
+ } else if (cond == Below)
+ m_assembler.sltu(dest, left, right);
+ else if (cond == BelowOrEqual) {
+ m_assembler.sltu(dest, right, left);
+ m_assembler.xori(dest, dest, 1);
+ } else if (cond == GreaterThan)
+ m_assembler.slt(dest, right, left);
+ else if (cond == GreaterThanOrEqual) {
+ m_assembler.slt(dest, left, right);
+ m_assembler.xori(dest, dest, 1);
+ } else if (cond == LessThan)
+ m_assembler.slt(dest, left, right);
+ else if (cond == LessThanOrEqual) {
+ m_assembler.slt(dest, right, left);
+ m_assembler.xori(dest, dest, 1);
+ }
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ move(right, immTempRegister);
+ compare32(cond, left, immTempRegister, dest);
+ }
+
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ load8(address, dataTempRegister);
+ if (mask.m_value == -1 && !m_fixedWidth) {
+ if (cond == Zero)
+ m_assembler.sltiu(dest, dataTempRegister, 1);
+ else
+ m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister);
+ } else {
+ move(mask, immTempRegister);
+ m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+ if (cond == Zero)
+ m_assembler.sltiu(dest, cmpTempRegister, 1);
+ else
+ m_assembler.sltu(dest, MIPSRegisters::zero, cmpTempRegister);
+ }
+ }
+
+ void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ load32(address, dataTempRegister);
+ if (mask.m_value == -1 && !m_fixedWidth) {
+ if (cond == Zero)
+ m_assembler.sltiu(dest, dataTempRegister, 1);
+ else
+ m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister);
+ } else {
+ move(mask, immTempRegister);
+ m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+ if (cond == Zero)
+ m_assembler.sltiu(dest, cmpTempRegister, 1);
+ else
+ m_assembler.sltu(dest, MIPSRegisters::zero, cmpTempRegister);
+ }
+ }
+
+ DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
+ {
+ m_fixedWidth = true;
+ DataLabel32 label(this);
+ move(imm, dest);
+ m_fixedWidth = false;
+ return label;
+ }
+
+ DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+ {
+ m_fixedWidth = true;
+ DataLabelPtr label(this);
+ move(initialValue, dest);
+ m_fixedWidth = false;
+ return label;
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ m_fixedWidth = true;
+ dataLabel = moveWithPatch(initialRightValue, immTempRegister);
+ Jump temp = branch32(cond, left, immTempRegister);
+ m_fixedWidth = false;
+ return temp;
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ m_fixedWidth = true;
+ load32(left, dataTempRegister);
+ dataLabel = moveWithPatch(initialRightValue, immTempRegister);
+ Jump temp = branch32(cond, dataTempRegister, immTempRegister);
+ m_fixedWidth = false;
+ return temp;
+ }
+
+ Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ m_fixedWidth = true;
+ load32(left, dataTempRegister);
+ dataLabel = moveWithPatch(initialRightValue, immTempRegister);
+ Jump temp = branch32(cond, dataTempRegister, immTempRegister);
+ m_fixedWidth = false;
+ return temp;
+ }
+
+ DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ m_fixedWidth = true;
+ DataLabelPtr dataLabel = moveWithPatch(initialValue, dataTempRegister);
+ store32(dataTempRegister, address);
+ m_fixedWidth = false;
+ return dataLabel;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+ {
+ return storePtrWithPatch(TrustedImmPtr(0), address);
+ }
+
+ Call tailRecursiveCall()
+ {
+ // Like a normal call, but don't update the return address register.
+ m_fixedWidth = true;
+ move(TrustedImm32(0), MIPSRegisters::t9);
+ m_assembler.jr(MIPSRegisters::t9);
+ m_assembler.nop();
+ m_fixedWidth = false;
+ return Call(m_assembler.label(), Call::Linkable);
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ return tailRecursiveCall();
+ }
+
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lwc1 dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lwc1(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lwc1 dest, (address.offset & 0xffff)(addrTemp)
+ */
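+ // Adding 0x8000 before taking the high half compensates for the
+ // sign-extension of the 16-bit displacement in the load. For example, for
+ // offset 0x18000: lui loads (0x18000 + 0x8000) >> 16 = 2, i.e. 0x20000,
+ // and the low half 0x8000 sign-extends to -0x8000, giving 0x18000 again.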
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lwc1(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+#if WTF_MIPS_ISA(1)
+ /*
+ li addrTemp, address.offset
+ addu addrTemp, addrTemp, base
+ lwc1 dest, 0(addrTemp)
+ lwc1 dest+1, 4(addrTemp)
+ */
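+ // MIPS I has no ldc1, so a double is loaded as two 32-bit words into the
+ // even/odd single-precision register pair, low word first (this assumes a
+ // little-endian layout of the two words).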
+ move(TrustedImm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lwc1(dest, addrTempRegister, 0);
+ m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4);
+#else
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ m_assembler.ldc1(dest, address.base, address.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ ldc1 dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.ldc1(dest, addrTempRegister, address.offset);
+ }
+#endif
+ }
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+#if WTF_MIPS_ISA(1)
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lwc1 dest, address.offset(addrTemp)
+ lwc1 dest+1, (address.offset+4)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lwc1(dest, addrTempRegister, address.offset);
+ m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, address.offset + 4);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lwc1 dest, (address.offset & 0xffff)(addrTemp)
+ lwc1 dest+1, ((address.offset & 0xffff) + 4)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lwc1(dest, addrTempRegister, address.offset);
+ m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, address.offset + 4);
+ }
+#else
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ ldc1 dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.ldc1(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ ldc1 dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.ldc1(dest, addrTempRegister, address.offset);
+ }
+#endif
+ }
+
+ void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ {
+#if WTF_MIPS_ISA(1)
+ /*
+ li addrTemp, address
+ lwc1 dest, 0(addrTemp)
+ lwc1 dest+1, 4(addrTemp)
+ */
+ move(address, addrTempRegister);
+ m_assembler.lwc1(dest, addrTempRegister, 0);
+ m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4);
+#else
+ /*
+ li addrTemp, address
+ ldc1 dest, 0(addrTemp)
+ */
+ move(address, addrTempRegister);
+ m_assembler.ldc1(dest, addrTempRegister, 0);
+#endif
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ swc1 src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.swc1(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ swc1 src, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.swc1(src, addrTempRegister, address.offset);
+ }
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+#if WTF_MIPS_ISA(1)
+ /*
+ li addrTemp, address.offset
+ addu addrTemp, addrTemp, base
+ swc1 src, 0(addrTemp)
+ swc1 src+1, 4(addrTemp)
+ */
+ move(TrustedImm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.swc1(src, addrTempRegister, 0);
+ m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4);
+#else
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.sdc1(src, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ sdc1 src, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sdc1(src, addrTempRegister, address.offset);
+ }
+#endif
+ }
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+#if WTF_MIPS_ISA(1)
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ swc1 src, address.offset(addrTemp)
+ swc1 src+1, (address.offset + 4)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.swc1(src, addrTempRegister, address.offset);
+ m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, address.offset + 4);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ swc1 src, (address.offset & 0xffff)(addrTemp)
+ swc1 src+1, ((address.offset & 0xffff) + 4)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.swc1(src, addrTempRegister, address.offset);
+ m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, address.offset + 4);
+ }
+#else
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sdc1 src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sdc1(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sdc1 src, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sdc1(src, addrTempRegister, address.offset);
+ }
+#endif
+ }
+
+ void storeDouble(FPRegisterID src, TrustedImmPtr address)
+ {
+#if WTF_MIPS_ISA(1)
+ move(address, addrTempRegister);
+ m_assembler.swc1(src, addrTempRegister, 0);
+ m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4);
+#else
+ move(address, addrTempRegister);
+ m_assembler.sdc1(src, addrTempRegister, 0);
+#endif
+ }
+
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ if (src != dest || m_fixedWidth)
+ m_assembler.movd(dest, src);
+ }
+
+ void swapDouble(FPRegisterID fr1, FPRegisterID fr2)
+ {
+ moveDouble(fr1, fpTempRegister);
+ moveDouble(fr2, fr1);
+ moveDouble(fpTempRegister, fr2);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.addd(dest, dest, src);
+ }
+
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.addd(dest, op1, op2);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ m_assembler.addd(dest, dest, fpTempRegister);
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
+ m_assembler.addd(dest, dest, fpTempRegister);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.subd(dest, dest, src);
+ }
+
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.subd(dest, op1, op2);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ m_assembler.subd(dest, dest, fpTempRegister);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.muld(dest, dest, src);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ m_assembler.muld(dest, dest, fpTempRegister);
+ }
+
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.muld(dest, op1, op2);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.divd(dest, dest, src);
+ }
+
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.divd(dest, op1, op2);
+ }
+
+ void divDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ m_assembler.divd(dest, dest, fpTempRegister);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.negd(dest, src);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.mtc1(src, fpTempRegister);
+ m_assembler.cvtdw(dest, fpTempRegister);
+ }
+
+ void convertInt32ToDouble(Address src, FPRegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ m_assembler.mtc1(dataTempRegister, fpTempRegister);
+ m_assembler.cvtdw(dest, fpTempRegister);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
+ load32(src.m_ptr, dataTempRegister);
+ m_assembler.mtc1(dataTempRegister, fpTempRegister);
+ m_assembler.cvtdw(dest, fpTempRegister);
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.cvtds(dst, src);
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.cvtsd(dst, src);
+ }
+
+ void insertRelaxationWords()
+ {
+ /* We need four words for relaxation. */
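+ // A MIPS conditional branch encodes a signed 16-bit word offset (roughly
+ // +/-128KB of reach). The branch-over plus three nops below reserve space
+ // so the link stage can rewrite an out-of-range branch in place as a long
+ // jump (e.g. a lui/ori/jr sequence).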
+ m_assembler.beq(MIPSRegisters::zero, MIPSRegisters::zero, 3); // Jump over the nops.
+ m_assembler.nop();
+ m_assembler.nop();
+ m_assembler.nop();
+ }
+
+ Jump branchTrue()
+ {
+ m_assembler.appendJump();
+ m_assembler.bc1t();
+ m_assembler.nop();
+ insertRelaxationWords();
+ return Jump(m_assembler.label());
+ }
+
+ Jump branchFalse()
+ {
+ m_assembler.appendJump();
+ m_assembler.bc1f();
+ m_assembler.nop();
+ insertRelaxationWords();
+ return Jump(m_assembler.label());
+ }
+
+ Jump branchEqual(RegisterID rs, RegisterID rt)
+ {
+ m_assembler.nop();
+ m_assembler.nop();
+ m_assembler.appendJump();
+ m_assembler.beq(rs, rt, 0);
+ m_assembler.nop();
+ insertRelaxationWords();
+ return Jump(m_assembler.label());
+ }
+
+ Jump branchNotEqual(RegisterID rs, RegisterID rt)
+ {
+ m_assembler.nop();
+ m_assembler.nop();
+ m_assembler.appendJump();
+ m_assembler.bne(rs, rt, 0);
+ m_assembler.nop();
+ insertRelaxationWords();
+ return Jump(m_assembler.label());
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ if (cond == DoubleEqual) {
+ m_assembler.ceqd(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleNotEqual) {
+ m_assembler.cueqd(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleGreaterThan) {
+ m_assembler.cngtd(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleGreaterThanOrEqual) {
+ m_assembler.cnged(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleLessThan) {
+ m_assembler.cltd(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleLessThanOrEqual) {
+ m_assembler.cled(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleEqualOrUnordered) {
+ m_assembler.cueqd(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleNotEqualOrUnordered) {
+ m_assembler.ceqd(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleGreaterThanOrUnordered) {
+ m_assembler.coled(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleGreaterThanOrEqualOrUnordered) {
+ m_assembler.coltd(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleLessThanOrUnordered) {
+ m_assembler.cultd(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleLessThanOrEqualOrUnordered) {
+ m_assembler.culed(left, right);
+ return branchTrue();
+ }
+ ASSERT(0);
+
+ return Jump();
+ }
+
+ // Truncates 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, INT_MAX, 0x7fffffff).
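+ // trunc.w.d writes 0x7fffffff for NaN and for values outside the int32
+ // range, so comparing against INT_MAX detects failure, at the cost of a
+ // spurious failure when the true result is exactly INT_MAX.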
+ enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ m_assembler.truncwd(fpTempRegister, src);
+ m_assembler.mfc1(dest, fpTempRegister);
+ return branch32(branchType == BranchIfTruncateFailed ? Equal : NotEqual, dest, TrustedImm32(0x7fffffff));
+ }
+
+ Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ m_assembler.truncwd(fpTempRegister, src);
+ m_assembler.mfc1(dest, fpTempRegister);
+ return branch32(branchType == BranchIfTruncateFailed ? Equal : NotEqual, dest, TrustedImm32(0));
+ }
+
+ // Result is undefined if the value is outside of the integer range.
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.truncwd(fpTempRegister, src);
+ m_assembler.mfc1(dest, fpTempRegister);
+ }
+
+ // Result is undefined if src > 2^31
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.truncwd(fpTempRegister, src);
+ m_assembler.mfc1(dest, fpTempRegister);
+ }
+
+ // Converts 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, 0).
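+ // The strategy: convert, convert the result back to double, and compare
+ // with the original; inexact or out-of-range inputs fail the equality, and
+ // NaN compares unordered. -0.0 escapes this net because it converts to 0
+ // and 0.0 == -0.0 in IEEE comparisons, hence the explicit zero check.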
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
+ {
+ m_assembler.cvtwd(fpTempRegister, src);
+ m_assembler.mfc1(dest, fpTempRegister);
+
+ // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+ if (negZeroCheck)
+ failureCases.append(branch32(Equal, dest, MIPSRegisters::zero));
+
+ // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+ convertInt32ToDouble(dest, fpTemp);
+ failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fpTemp, src));
+ }
+
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.vmov(scratch, MIPSRegisters::zero, MIPSRegisters::zero);
+ return branchDouble(DoubleNotEqual, reg, scratch);
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.vmov(scratch, MIPSRegisters::zero, MIPSRegisters::zero);
+ return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+ }
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ RelationalCondition r;
+ if (cond == Equal)
+ r = NotEqual;
+ else if (cond == NotEqual)
+ r = Equal;
+ else if (cond == Above)
+ r = BelowOrEqual;
+ else if (cond == AboveOrEqual)
+ r = Below;
+ else if (cond == Below)
+ r = AboveOrEqual;
+ else if (cond == BelowOrEqual)
+ r = Above;
+ else if (cond == GreaterThan)
+ r = LessThanOrEqual;
+ else if (cond == GreaterThanOrEqual)
+ r = LessThan;
+ else if (cond == LessThan)
+ r = GreaterThanOrEqual;
+ else if (cond == LessThanOrEqual)
+ r = GreaterThan;
+ return r;
+ }
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ void memoryFence()
+ {
+ m_assembler.sync();
+ }
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(reinterpret_cast<void(*)()>(MIPSAssembler::readCallTarget(call.dataLocation())));
+ }
+
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ MIPSAssembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return MIPSAssembler::maxJumpReplacementSize();
+ }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ return label.labelAtOffset(0);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+ {
+ MIPSAssembler::revertJumpToMove(instructionStart.dataLocation(), immTempRegister, reinterpret_cast<int>(initialValue) & 0xffff);
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+
+private:
+ // If m_fixedWidth is true, we will generate a fixed number of instructions.
+ // Otherwise, we can emit any number of instructions.
+ bool m_fixedWidth;
+
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ MIPSAssembler::linkCall(code, call.m_label, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ MIPSAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ MIPSAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+};
+
+}
+
+#endif // ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#endif // MacroAssemblerMIPS_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h b/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h
new file mode 100644
index 000000000..6857c60a4
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h
@@ -0,0 +1,2629 @@
+/*
+ * Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
+ * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
+ * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef MacroAssemblerSH4_h
+#define MacroAssemblerSH4_h
+
+#if ENABLE(ASSEMBLER) && CPU(SH4)
+
+#include "SH4Assembler.h"
+#include "AbstractMacroAssembler.h"
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+class MacroAssemblerSH4 : public AbstractMacroAssembler<SH4Assembler, MacroAssemblerSH4> {
+public:
+ typedef SH4Assembler::FPRegisterID FPRegisterID;
+
+ static const Scale ScalePtr = TimesFour;
+ static const FPRegisterID fscratch = SH4Registers::dr10;
+ static const RegisterID stackPointerRegister = SH4Registers::sp;
+ static const RegisterID framePointerRegister = SH4Registers::fp;
+ static const RegisterID linkRegister = SH4Registers::pr;
+ static const RegisterID scratchReg3 = SH4Registers::r13;
+
+ static const int MaximumCompactPtrAlignedAddressOffset = 60;
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return (value >= 0) && (value <= MaximumCompactPtrAlignedAddressOffset) && (!(value & 3));
+ }
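+ // The SH-4 "mov.l @(disp,Rm),Rn" form encodes a 4-bit displacement scaled
+ // by 4, so compact loads reach word-aligned offsets 0..60, matching
+ // MaximumCompactPtrAlignedAddressOffset above.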
+
+ enum RelationalCondition {
+ Equal = SH4Assembler::EQ,
+ NotEqual = SH4Assembler::NE,
+ Above = SH4Assembler::HI,
+ AboveOrEqual = SH4Assembler::HS,
+ Below = SH4Assembler::LI,
+ BelowOrEqual = SH4Assembler::LS,
+ GreaterThan = SH4Assembler::GT,
+ GreaterThanOrEqual = SH4Assembler::GE,
+ LessThan = SH4Assembler::LT,
+ LessThanOrEqual = SH4Assembler::LE
+ };
+
+ enum ResultCondition {
+ Overflow = SH4Assembler::OF,
+ Signed = SH4Assembler::SI,
+ PositiveOrZero = SH4Assembler::NS,
+ Zero = SH4Assembler::EQ,
+ NonZero = SH4Assembler::NE
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = SH4Assembler::EQ,
+ DoubleNotEqual = SH4Assembler::NE,
+ DoubleGreaterThan = SH4Assembler::GT,
+ DoubleGreaterThanOrEqual = SH4Assembler::GE,
+ DoubleLessThan = SH4Assembler::LT,
+ DoubleLessThanOrEqual = SH4Assembler::LE,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = SH4Assembler::EQU,
+ DoubleNotEqualOrUnordered = SH4Assembler::NEU,
+ DoubleGreaterThanOrUnordered = SH4Assembler::GTU,
+ DoubleGreaterThanOrEqualOrUnordered = SH4Assembler::GEU,
+ DoubleLessThanOrUnordered = SH4Assembler::LTU,
+ DoubleLessThanOrEqualOrUnordered = SH4Assembler::LEU,
+ };
+
+ RegisterID claimScratch()
+ {
+ return m_assembler.claimScratch();
+ }
+
+ void releaseScratch(RegisterID reg)
+ {
+ m_assembler.releaseScratch(reg);
+ }
+
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ switch (cond) {
+ case Equal:
+ return NotEqual;
+ case NotEqual:
+ return Equal;
+ case Above:
+ return BelowOrEqual;
+ case AboveOrEqual:
+ return Below;
+ case Below:
+ return AboveOrEqual;
+ case BelowOrEqual:
+ return Above;
+ case GreaterThan:
+ return LessThanOrEqual;
+ case GreaterThanOrEqual:
+ return LessThan;
+ case LessThan:
+ return GreaterThanOrEqual;
+ case LessThanOrEqual:
+ return GreaterThan;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ // Integer arithmetic operations
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addlRegReg(src, dest);
+ }
+
+ void add32(RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest)
+ add32(src2, dest);
+ else {
+ move(src2, dest);
+ add32(src1, dest);
+ }
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value)
+ return;
+
+ if (m_assembler.isImmediate(imm.m_value)) {
+ m_assembler.addlImm8r(imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ m_assembler.addlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ add32(imm, dest);
+ }
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ if (!imm.m_value)
+ return;
+
+ RegisterID scr = claimScratch();
+ load32(address, scr);
+ add32(imm, scr);
+ store32(scr, address);
+ releaseScratch(scr);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src, scr);
+ m_assembler.addlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src.m_ptr, scr);
+ m_assembler.addlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andlRegReg(src, dest);
+ }
+
+ void and32(RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest)
+ and32(src2, dest);
+ else {
+ move(src2, dest);
+ and32(src1, dest);
+ }
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src, scr);
+ and32(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value) {
+ m_assembler.movImm8(0, dest);
+ return;
+ }
+
+ if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
+ m_assembler.andlImm8r(imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ m_assembler.andlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src != dest) {
+ move(imm, dest);
+ and32(src, dest);
+ return;
+ }
+
+ and32(imm, dest);
+ }
+
+ void lshift32(RegisterID shiftamount, RegisterID dest)
+ {
+ RegisterID shiftTmp = claimScratch();
+ m_assembler.loadConstant(0x1f, shiftTmp);
+ m_assembler.andlRegReg(shiftamount, shiftTmp);
+ m_assembler.shldRegReg(dest, shiftTmp);
+ releaseScratch(shiftTmp);
+ }
+
+ void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ move(src, dest);
+ lshift32(shiftAmount, dest);
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ int immMasked = imm.m_value & 0x1f;
+ if (!immMasked)
+ return;
+
+ if ((immMasked == 1) || (immMasked == 2) || (immMasked == 8) || (immMasked == 16)) {
+ m_assembler.shllImm8r(immMasked, dest);
+ return;
+ }
+
+ RegisterID shiftTmp = claimScratch();
+ m_assembler.loadConstant(immMasked, shiftTmp);
+ m_assembler.shldRegReg(dest, shiftTmp);
+ releaseScratch(shiftTmp);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
+ {
+ move(src, dest);
+ lshift32(shiftamount, dest);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ mul32(src, dest, dest);
+ }
+
+ void mul32(RegisterID src1, RegisterID src2, RegisterID dest)
+ {
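+ // SH-4's mul.l leaves the low 32 bits of the product in the MACL system
+ // register; sts macl then copies it into the destination.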
+ m_assembler.imullRegReg(src1, src2);
+ m_assembler.stsmacl(dest);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src == dest) {
+ RegisterID immval = claimScratch();
+ move(imm, immval);
+ mul32(immval, dest);
+ releaseScratch(immval);
+ } else {
+ move(imm, dest);
+ mul32(src, dest);
+ }
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orlRegReg(src, dest);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
+ m_assembler.orlImm8r(imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ m_assembler.orlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ move(op1, dest);
+ else if (op1 == dest)
+ or32(op2, dest);
+ else {
+ move(op2, dest);
+ or32(op1, dest);
+ }
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src != dest) {
+ move(imm, dest);
+ or32(src, dest);
+ return;
+ }
+
+ or32(imm, dest);
+ }
+
+ void or32(RegisterID src, AbsoluteAddress address)
+ {
+ RegisterID destptr = claimScratch();
+ move(TrustedImmPtr(address.m_ptr), destptr);
+ RegisterID destval = claimScratch();
+ m_assembler.movlMemReg(destptr, destval);
+ m_assembler.orlRegReg(src, destval);
+ m_assembler.movlRegMem(destval, destptr);
+ releaseScratch(destval);
+ releaseScratch(destptr);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src != dest) {
+ move(imm, dest);
+ xor32(src, dest);
+ return;
+ }
+
+ xor32(imm, dest);
+ }
+
+ void rshift32(RegisterID shiftamount, RegisterID dest)
+ {
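+ // SH-4's shad/shld shift left for a positive count and right for a
+ // negative one, so an arithmetic right shift is a shad by the negated,
+ // masked amount.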
+ RegisterID shiftTmp = claimScratch();
+ m_assembler.loadConstant(0x1f, shiftTmp);
+ m_assembler.andlRegReg(shiftamount, shiftTmp);
+ m_assembler.neg(shiftTmp, shiftTmp);
+ m_assembler.shadRegReg(dest, shiftTmp);
+ releaseScratch(shiftTmp);
+ }
+
+ void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ move(src, dest);
+ rshift32(shiftAmount, dest);
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ int immMasked = imm.m_value & 0x1f;
+ if (!immMasked)
+ return;
+
+ if (immMasked == 1) {
+ m_assembler.sharImm8r(immMasked, dest);
+ return;
+ }
+
+ RegisterID shiftTmp = claimScratch();
+ m_assembler.loadConstant(-immMasked, shiftTmp);
+ m_assembler.shadRegReg(dest, shiftTmp);
+ releaseScratch(shiftTmp);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(src, dest);
+ rshift32(imm, dest);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sublRegReg(src, dest);
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ if (!imm.m_value)
+ return;
+
+ RegisterID result = claimScratch();
+ RegisterID scratchReg = claimScratch();
+
+ move(TrustedImmPtr(address.m_ptr), scratchReg);
+ m_assembler.movlMemReg(scratchReg, result);
+
+ if (m_assembler.isImmediate(-imm.m_value))
+ m_assembler.addlImm8r(-imm.m_value, result);
+ else {
+ m_assembler.loadConstant(imm.m_value, scratchReg3);
+ m_assembler.sublRegReg(scratchReg3, result);
+ }
+
+ store32(result, scratchReg);
+ releaseScratch(result);
+ releaseScratch(scratchReg);
+ }
+
+ void sub32(TrustedImm32 imm, Address address)
+ {
+ add32(TrustedImm32(-imm.m_value), address);
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ if (!imm.m_value)
+ return;
+
+ RegisterID result = claimScratch();
+ RegisterID scratchReg = claimScratch();
+
+ move(TrustedImmPtr(address.m_ptr), scratchReg);
+ m_assembler.movlMemReg(scratchReg, result);
+
+ if (m_assembler.isImmediate(imm.m_value))
+ m_assembler.addlImm8r(imm.m_value, result);
+ else {
+ m_assembler.loadConstant(imm.m_value, scratchReg3);
+ m_assembler.addlRegReg(scratchReg3, result);
+ }
+
+ store32(result, scratchReg);
+ releaseScratch(result);
+ releaseScratch(scratchReg);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ RegisterID scr1 = claimScratch();
+ RegisterID scr2 = claimScratch();
+
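+ // Adds in two 32-bit halves: clrt clears T, addc adds the low words with
+ // the carry-out left in T, and movt captures that carry as the high-word
+ // addend; for a negative imm, -1 is added as the sign extension. E.g.
+ // 0x1'00000000 + (-1): low 0 + 0xffffffff = 0xffffffff, carry 0, high
+ // addend 0 - 1 = -1, high 1 - 1 = 0, giving 0x0'ffffffff.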
+ // Add the low 32-bit word first.
+ move(TrustedImmPtr(address.m_ptr), scratchReg3);
+ m_assembler.movlMemReg(scratchReg3, scr1); // scr1 = low word of the int64 at address
+ m_assembler.loadConstant(imm.m_value, scr2);
+ m_assembler.clrt();
+ m_assembler.addclRegReg(scr1, scr2);
+ m_assembler.movlRegMem(scr2, scratchReg3); // Store the low-word result back.
+
+ // Then add the high 32-bit word, with carry.
+ m_assembler.addlImm8r(4, scratchReg3);
+ m_assembler.movlMemReg(scratchReg3, scr1); // scr1 = high word of the int64 at address
+ m_assembler.movt(scr2);
+ if (imm.m_value < 0)
+ m_assembler.addlImm8r(-1, scr2); // Sign-extend imm into the high word if needed.
+ m_assembler.addvlRegReg(scr2, scr1);
+ m_assembler.movlRegMem(scr1, scratchReg3); // Store the high-word result to (address + 4).
+
+ releaseScratch(scr2);
+ releaseScratch(scr1);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value)
+ return;
+
+ if (m_assembler.isImmediate(-imm.m_value)) {
+ m_assembler.addlImm8r(-imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ m_assembler.sublRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src, scr);
+ m_assembler.sublRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorlRegReg(src, dest);
+ }
+
+ void xor32(RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest)
+ xor32(src2, dest);
+ else {
+ move(src2, dest);
+ xor32(src1, dest);
+ }
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID srcDest)
+ {
+ if (imm.m_value == -1) {
+ m_assembler.notlReg(srcDest, srcDest);
+ return;
+ }
+
+ if ((srcDest != SH4Registers::r0) || (imm.m_value > 255) || (imm.m_value < 0)) {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ m_assembler.xorlRegReg(scr, srcDest);
+ releaseScratch(scr);
+ return;
+ }
+
+ m_assembler.xorlImm8r(imm.m_value, srcDest);
+ }
+
+ void compare32(int imm, RegisterID dst, RelationalCondition cond)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && (dst == SH4Registers::r0) && m_assembler.isImmediate(imm)) {
+ m_assembler.cmpEqImmR0(imm, dst);
+ return;
+ }
+
+ if (((cond == Equal) || (cond == NotEqual)) && !imm) {
+ m_assembler.testlRegReg(dst, dst);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm, scr);
+ m_assembler.cmplRegReg(scr, dst, SH4Condition(cond));
+ releaseScratch(scr);
+ }
+
+ void compare32(int offset, RegisterID base, RegisterID left, RelationalCondition cond)
+ {
+ RegisterID scr = claimScratch();
+ if (!offset) {
+ m_assembler.movlMemReg(base, scr);
+ m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
+ releaseScratch(scr);
+ return;
+ }
+
+ if ((offset < 0) || (offset >= 64)) {
+ m_assembler.loadConstant(offset, scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
+ releaseScratch(scr);
+ return;
+ }
+
+ m_assembler.movlMemReg(offset >> 2, base, scr);
+ m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
+ releaseScratch(scr);
+ }
+
+ void testImm(int imm, int offset, RegisterID base)
+ {
+ RegisterID scr = claimScratch();
+ load32(base, offset, scr);
+
+ RegisterID scr1 = claimScratch();
+ move(TrustedImm32(imm), scr1);
+
+ m_assembler.testlRegReg(scr, scr1);
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ void testlImm(int imm, RegisterID dst)
+ {
+ if ((dst == SH4Registers::r0) && (imm <= 255) && (imm >= 0)) {
+ m_assembler.testlImm8r(imm, dst);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm, scr);
+ m_assembler.testlRegReg(scr, dst);
+ releaseScratch(scr);
+ }
+
+ void compare32(RegisterID right, int offset, RegisterID base, RelationalCondition cond)
+ {
+ if (!offset) {
+ RegisterID scr = claimScratch();
+ m_assembler.movlMemReg(base, scr);
+ m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+ releaseScratch(scr);
+ return;
+ }
+
+ if ((offset < 0) || (offset >= 64)) {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(offset, scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+ releaseScratch(scr);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.movlMemReg(offset >> 2, base, scr);
+ m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+ releaseScratch(scr);
+ }
+
+ void compare32(int imm, int offset, RegisterID base, RelationalCondition cond)
+ {
+ RegisterID scr = claimScratch();
+ load32(base, offset, scr);
+
+ RegisterID scr1 = claimScratch();
+ move(TrustedImm32(imm), scr1);
+
+ m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+
+ releaseScratch(scr1);
+ releaseScratch(scr);
+ }
+
+ // Memory access operation
+
+ ALWAYS_INLINE void loadEffectiveAddress(BaseIndex address, RegisterID dest, int extraoffset = 0)
+ {
+ if (dest == address.base) {
+ RegisterID scaledIndex = claimScratch();
+ move(address.index, scaledIndex);
+ lshift32(TrustedImm32(address.scale), scaledIndex);
+ add32(scaledIndex, dest);
+ releaseScratch(scaledIndex);
+ } else {
+ move(address.index, dest);
+ lshift32(TrustedImm32(address.scale), dest);
+ add32(address.base, dest);
+ }
+
+ add32(TrustedImm32(address.offset + extraoffset), dest);
+ }
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ load32(address.base, address.offset, dest);
+ }
+
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ load8(address.base, address.offset, dest);
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ load8(scr, address.offset, dest);
+ releaseScratch(scr);
+ }
+
+ void load8(AbsoluteAddress address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address.m_ptr), dest);
+ m_assembler.movbMemReg(dest, dest);
+ m_assembler.extub(dest, dest);
+ }
+
+ void load8(const void* address, RegisterID dest)
+ {
+ load8(AbsoluteAddress(address), dest);
+ }
+
+ void load8PostInc(RegisterID base, RegisterID dest)
+ {
+ m_assembler.movbMemRegIn(base, dest);
+ m_assembler.extub(dest, dest);
+ }
+
+ void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ load8SignedExtendTo32(scr, address.offset, dest);
+ releaseScratch(scr);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ load32(scr, address.offset, dest);
+ releaseScratch(scr);
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address), dest);
+ m_assembler.movlMemReg(dest, dest);
+ }
+
+ void load32(RegisterID base, int offset, RegisterID dest)
+ {
+ if (!offset) {
+ m_assembler.movlMemReg(base, dest);
+ return;
+ }
+
+ if ((offset >= 0) && (offset < 64)) {
+ m_assembler.movlMemReg(offset >> 2, base, dest);
+ return;
+ }
+
+ RegisterID scr = (dest == base) ? claimScratch() : dest;
+
+ m_assembler.loadConstant(offset, scr);
+ if (base == SH4Registers::r0)
+ m_assembler.movlR0mr(scr, dest);
+ else {
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlMemReg(scr, dest);
+ }
+
+ if (dest == base)
+ releaseScratch(scr);
+ }
+
+ void load8SignedExtendTo32(RegisterID base, int offset, RegisterID dest)
+ {
+ if (!offset) {
+ m_assembler.movbMemReg(base, dest);
+ return;
+ }
+
+ if ((offset > 0) && (offset <= 15) && (dest == SH4Registers::r0)) {
+ m_assembler.movbMemReg(offset, base, dest);
+ return;
+ }
+
+ RegisterID scr = (dest == base) ? claimScratch() : dest;
+
+ m_assembler.loadConstant(offset, scr);
+ if (base == SH4Registers::r0)
+ m_assembler.movbR0mr(scr, dest);
+ else {
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movbMemReg(scr, dest);
+ }
+
+ if (dest == base)
+ releaseScratch(scr);
+ }
+
+ void load8(RegisterID base, int offset, RegisterID dest)
+ {
+ load8SignedExtendTo32(base, offset, dest);
+ m_assembler.extub(dest, dest);
+ }
+
+ void load32(RegisterID src, RegisterID dst)
+ {
+ m_assembler.movlMemReg(src, dst);
+ }
+
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ if (!address.offset) {
+ m_assembler.movwMemReg(address.base, dest);
+ m_assembler.extuw(dest, dest);
+ return;
+ }
+
+ if ((address.offset > 0) && (address.offset <= 30) && (dest == SH4Registers::r0)) {
+ m_assembler.movwMemReg(address.offset >> 1, address.base, dest);
+ m_assembler.extuw(dest, dest);
+ return;
+ }
+
+ RegisterID scr = (dest == address.base) ? claimScratch() : dest;
+
+ m_assembler.loadConstant(address.offset, scr);
+ if (address.base == SH4Registers::r0)
+ m_assembler.movwR0mr(scr, dest);
+ else {
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movwMemReg(scr, dest);
+ }
+ m_assembler.extuw(dest, dest);
+
+ if (dest == address.base)
+ releaseScratch(scr);
+ }
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ loadEffectiveAddress(address, scr);
+
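+ // SH-4 requires naturally aligned 16-bit accesses, so assemble the
+ // halfword from two byte loads; on a little-endian target the first byte
+ // is the low half and the second is shifted into bits 8..15.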
+ RegisterID scr1 = claimScratch();
+ load8PostInc(scr, scr1);
+ load8(scr, dest);
+ m_assembler.shllImm8r(8, dest);
+ or32(scr1, dest);
+
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ void load16(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movwMemReg(src, dest);
+ m_assembler.extuw(dest, dest);
+ }
+
+ void load16SignedExtendTo32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movwMemReg(src, dest);
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ load16SignedExtendTo32(address, dest);
+ m_assembler.extuw(dest, dest);
+ }
+
+ void load16PostInc(RegisterID base, RegisterID dest)
+ {
+ m_assembler.movwMemRegIn(base, dest);
+ m_assembler.extuw(dest, dest);
+ }
+
+ void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(TrustedImm32(address.offset), scr);
+
+ if (address.base == SH4Registers::r0)
+ m_assembler.movwR0mr(scr, dest);
+ else {
+ add32(address.base, scr);
+ load16SignedExtendTo32(scr, dest);
+ }
+
+ releaseScratch(scr);
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(TrustedImm32(address.offset), scr);
+
+ if (address.base == SH4Registers::r0)
+ m_assembler.movbRegMemr0(src, scr);
+ else {
+ add32(address.base, scr);
+ m_assembler.movbRegMem(src, scr);
+ }
+
+ releaseScratch(scr);
+ }
+
+ void store8(RegisterID src, void* address)
+ {
+ RegisterID destptr = claimScratch();
+ move(TrustedImmPtr(address), destptr);
+ m_assembler.movbRegMem(src, destptr);
+ releaseScratch(destptr);
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ ASSERT((imm.m_value >= -128) && (imm.m_value <= 127));
+ RegisterID dstptr = claimScratch();
+ move(TrustedImmPtr(address), dstptr);
+ RegisterID srcval = claimScratch();
+ move(imm, srcval);
+ m_assembler.movbRegMem(srcval, dstptr);
+ releaseScratch(dstptr);
+ releaseScratch(srcval);
+ }
+
+ void store8(TrustedImm32 imm, Address address)
+ {
+ ASSERT((imm.m_value >= -128) && (imm.m_value <= 127));
+ RegisterID dstptr = claimScratch();
+ move(address.base, dstptr);
+ add32(TrustedImm32(address.offset), dstptr);
+ RegisterID srcval = claimScratch();
+ move(imm, srcval);
+ m_assembler.movbRegMem(srcval, dstptr);
+ releaseScratch(dstptr);
+ releaseScratch(srcval);
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(TrustedImm32(address.offset), scr);
+
+ if (address.base == SH4Registers::r0)
+ m_assembler.movwRegMemr0(src, scr);
+ else {
+ add32(address.base, scr);
+ m_assembler.movwRegMem(src, scr);
+ }
+
+ releaseScratch(scr);
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ if (!address.offset) {
+ m_assembler.movlRegMem(src, address.base);
+ return;
+ }
+
+ if ((address.offset >= 0) && (address.offset < 64)) {
+ m_assembler.movlRegMem(src, address.offset >> 2, address.base);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(address.offset, scr);
+ if (address.base == SH4Registers::r0)
+ m_assembler.movlRegMemr0(src, scr);
+ else {
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlRegMem(src, scr);
+ }
+ releaseScratch(scr);
+ }
+
+ void store32(RegisterID src, RegisterID dst)
+ {
+ m_assembler.movlRegMem(src, dst);
+ }
+
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ store32(scr, address);
+ releaseScratch(scr);
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ store32(src, Address(scr, address.offset));
+
+ releaseScratch(scr);
+ }
+
+ void store32(TrustedImm32 imm, void* address)
+ {
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ move(TrustedImmPtr(address), scr1);
+ m_assembler.movlRegMem(scr, scr1);
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ void store32(RegisterID src, void* address)
+ {
+ RegisterID scr = claimScratch();
+ move(TrustedImmPtr(address), scr);
+ m_assembler.movlRegMem(src, scr);
+ releaseScratch(scr);
+ }
+
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ RegisterID destptr = claimScratch();
+
+ loadEffectiveAddress(address, destptr);
+
+ RegisterID srcval = claimScratch();
+ move(imm, srcval);
+ m_assembler.movlRegMem(srcval, destptr);
+ releaseScratch(srcval);
+ releaseScratch(destptr);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ DataLabel32 label(this);
+ m_assembler.loadConstantUnReusable(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlMemReg(scr, dest);
+ releaseScratch(scr);
+ return label;
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ RegisterID scr = claimScratch();
+ DataLabel32 label(this);
+ m_assembler.loadConstantUnReusable(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlRegMem(src, scr);
+ releaseScratch(scr);
+ return label;
+ }
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabelCompact dataLabel(this);
+ ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+ m_assembler.movlMemRegCompact(address.offset >> 2, address.base, dest);
+ return dataLabel;
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result(this);
+
+ RegisterID scr = claimScratch();
+ m_assembler.movImm8(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlMemReg(scr, dest);
+ releaseScratch(scr);
+
+ return result;
+ }
+
+ // Floating-point operations
+
+ static bool supportsFloatingPoint() { return true; }
+ static bool supportsFloatingPointTruncate() { return true; }
+ static bool supportsFloatingPointSqrt() { return true; }
+ static bool supportsFloatingPointAbs() { return true; }
+
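+    // On SH4 a double occupies an adjacent pair of single-precision registers,
+    // and transfers between FP and general registers go through FPUL; hence the
+    // (FPRegisterID)(src + 1) arithmetic in the double helpers below.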
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.fldsfpul((FPRegisterID)(src + 1));
+ m_assembler.stsfpulReg(dest1);
+ m_assembler.fldsfpul(src);
+ m_assembler.stsfpulReg(dest2);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID)
+ {
+ m_assembler.ldsrmfpul(src1);
+ m_assembler.fstsfpul((FPRegisterID)(dest + 1));
+ m_assembler.ldsrmfpul(src2);
+ m_assembler.fstsfpul(dest);
+ }
+
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ if (src != dest) {
+ m_assembler.fmovsRegReg((FPRegisterID)(src + 1), (FPRegisterID)(dest + 1));
+ m_assembler.fmovsRegReg(src, dest);
+ }
+ }
+
+ void swapDouble(FPRegisterID fr1, FPRegisterID fr2)
+ {
+ if (fr1 != fr2) {
+ m_assembler.fldsfpul((FPRegisterID)(fr1 + 1));
+ m_assembler.fmovsRegReg((FPRegisterID)(fr2 + 1), (FPRegisterID)(fr1 + 1));
+ m_assembler.fstsfpul((FPRegisterID)(fr2 + 1));
+ m_assembler.fldsfpul(fr1);
+ m_assembler.fmovsRegReg(fr2, fr1);
+ m_assembler.fstsfpul(fr2);
+ }
+ }
+
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ loadEffectiveAddress(address, scr);
+
+ m_assembler.fmovsReadrm(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ loadEffectiveAddress(address, scr);
+
+ m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
+ m_assembler.fmovsReadrm(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ m_assembler.loadConstant(address.offset, scr);
+ if (address.base == SH4Registers::r0) {
+ m_assembler.fmovsReadr0r(scr, (FPRegisterID)(dest + 1));
+ m_assembler.addlImm8r(4, scr);
+ m_assembler.fmovsReadr0r(scr, dest);
+ releaseScratch(scr);
+ return;
+ }
+
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
+ m_assembler.fmovsReadrm(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ move(address, scr);
+ m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
+ m_assembler.fmovsReadrm(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+ loadEffectiveAddress(address, scr);
+ m_assembler.fmovsWriterm(src, scr);
+ releaseScratch(scr);
+ }
+
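+    // Doubles are stored with two pre-decrement writes starting at offset + 8,
+    // so the two halves land at [base + offset + 4] and [base + offset].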
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(address.offset + 8, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.fmovsWriterndec(src, scr);
+ m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr);
+ releaseScratch(scr);
+ }
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ loadEffectiveAddress(address, scr, 8);
+
+ m_assembler.fmovsWriterndec(src, scr);
+ m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr);
+
+ releaseScratch(scr);
+ }
+
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ if (op1 == dest)
+ addDouble(op2, dest);
+ else {
+ moveDouble(op2, dest);
+ addDouble(op1, dest);
+ }
+ }
+
+ void storeDouble(FPRegisterID src, TrustedImmPtr address)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address.m_value)) + 8, scr);
+ m_assembler.fmovsWriterndec(src, scr);
+ m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr);
+ releaseScratch(scr);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.daddRegReg(src, dest);
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ loadDouble(TrustedImmPtr(address.m_ptr), fscratch);
+ addDouble(fscratch, dest);
+ }
+
+ void addDouble(Address address, FPRegisterID dest)
+ {
+ loadDouble(address, fscratch);
+ addDouble(fscratch, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.dsubRegReg(src, dest);
+ }
+
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ if (op2 == dest) {
+ moveDouble(op1, fscratch);
+ subDouble(op2, fscratch);
+ moveDouble(fscratch, dest);
+ } else {
+ moveDouble(op1, dest);
+ subDouble(op2, dest);
+ }
+ }
+
+ void subDouble(Address address, FPRegisterID dest)
+ {
+ loadDouble(address, fscratch);
+ subDouble(fscratch, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.dmulRegReg(src, dest);
+ }
+
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ if (op1 == dest)
+ mulDouble(op2, dest);
+ else {
+ moveDouble(op2, dest);
+ mulDouble(op1, dest);
+ }
+ }
+
+ void mulDouble(Address address, FPRegisterID dest)
+ {
+ loadDouble(address, fscratch);
+ mulDouble(fscratch, dest);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.ddivRegReg(src, dest);
+ }
+
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ if (op2 == dest) {
+ moveDouble(op1, fscratch);
+ divDouble(op2, fscratch);
+ moveDouble(fscratch, dest);
+ } else {
+ moveDouble(op1, dest);
+ divDouble(op2, dest);
+ }
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ moveDouble(src, dest);
+ m_assembler.dneg(dest);
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.fldsfpul(src);
+ m_assembler.dcnvsd(dst);
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.dcnvds(src);
+ m_assembler.fstsfpul(dst);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.ldsrmfpul(src);
+ m_assembler.floatfpulDreg(dest);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src.m_ptr, scr);
+ convertInt32ToDouble(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void convertInt32ToDouble(Address src, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src, scr);
+ convertInt32ToDouble(scr, dest);
+ releaseScratch(scr);
+ }
+
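+    // Unaligned 32-bit load: takes the fast path when the address is 4-byte
+    // aligned, assembles the word from two halfwords when it is only 2-byte
+    // aligned, and from byte + halfword + byte when the address is odd.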
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+        Jump misaligned;
+ JumpList end;
+
+ loadEffectiveAddress(address, scr);
+
+ RegisterID scr1 = claimScratch();
+ if (dest != SH4Registers::r0)
+ move(SH4Registers::r0, scr1);
+
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 58, sizeof(uint32_t));
+ move(scr, SH4Registers::r0);
+ m_assembler.testlImm8r(0x3, SH4Registers::r0);
+        misaligned = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
+
+ if (dest != SH4Registers::r0)
+ move(scr1, SH4Registers::r0);
+
+ load32(scr, dest);
+ end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
+ m_assembler.nop();
+        misaligned.link(this);
+ m_assembler.testlImm8r(0x1, SH4Registers::r0);
+
+ if (dest != SH4Registers::r0)
+ move(scr1, SH4Registers::r0);
+
+        misaligned = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
+ load16PostInc(scr, scr1);
+ load16(scr, dest);
+ m_assembler.shllImm8r(16, dest);
+ or32(scr1, dest);
+ end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
+ m_assembler.nop();
+        misaligned.link(this);
+ load8PostInc(scr, scr1);
+ load16PostInc(scr, dest);
+ m_assembler.shllImm8r(8, dest);
+ or32(dest, scr1);
+ load8(scr, dest);
+ m_assembler.shllImm8r(8, dest);
+ m_assembler.shllImm8r(16, dest);
+ or32(scr1, dest);
+ end.link(this);
+
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ RegisterID scr = scratchReg3;
+ load32WithUnalignedHalfWords(left, scr);
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testlRegReg(scr, scr);
+ else
+ compare32(right.m_value, scr, cond);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.movImm8(0, scratchReg3);
+ convertInt32ToDouble(scratchReg3, scratch);
+ return branchDouble(DoubleNotEqual, reg, scratch);
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.movImm8(0, scratchReg3);
+ convertInt32ToDouble(scratchReg3, scratch);
+ return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+ }
+
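+    // IEEE comparison of a value with itself fails only for NaN, so the
+    // unordered-aware cases below first compare each operand against itself
+    // to route NaN operands correctly.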
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ if (cond == DoubleEqual) {
+ m_assembler.dcmppeq(right, left);
+ return branchTrue();
+ }
+
+ if (cond == DoubleNotEqual) {
+ JumpList end;
+ m_assembler.dcmppeq(left, left);
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(right, right);
+ end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(right, left);
+            Jump result = branchFalse();
+            end.link(this);
+            return result;
+ }
+
+ if (cond == DoubleGreaterThan) {
+ m_assembler.dcmppgt(right, left);
+ return branchTrue();
+ }
+
+ if (cond == DoubleGreaterThanOrEqual) {
+ JumpList end;
+ m_assembler.dcmppeq(left, left);
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(right, right);
+ end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+ m_assembler.dcmppgt(left, right);
+            Jump result = branchFalse();
+            end.link(this);
+            return result;
+ }
+
+ if (cond == DoubleLessThan) {
+ m_assembler.dcmppgt(left, right);
+ return branchTrue();
+ }
+
+ if (cond == DoubleLessThanOrEqual) {
+ JumpList end;
+ m_assembler.dcmppeq(left, left);
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(right, right);
+ end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+ m_assembler.dcmppgt(right, left);
+            Jump result = branchFalse();
+            end.link(this);
+            return result;
+ }
+
+ if (cond == DoubleEqualOrUnordered) {
+ JumpList takeBranch;
+ m_assembler.dcmppeq(left, left);
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(right, right);
+ takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(left, right);
+ m_assembler.branch(BF_OPCODE, 2);
+ takeBranch.link(this);
+ return Jump(m_assembler.extraInstrForBranch(scratchReg3));
+ }
+
+ if (cond == DoubleGreaterThanOrUnordered) {
+ JumpList takeBranch;
+ m_assembler.dcmppeq(left, left);
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(right, right);
+ takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+ m_assembler.dcmppgt(right, left);
+ m_assembler.branch(BF_OPCODE, 2);
+ takeBranch.link(this);
+ return Jump(m_assembler.extraInstrForBranch(scratchReg3));
+ }
+
+ if (cond == DoubleGreaterThanOrEqualOrUnordered) {
+ m_assembler.dcmppgt(left, right);
+ return branchFalse();
+ }
+
+ if (cond == DoubleLessThanOrUnordered) {
+ JumpList takeBranch;
+ m_assembler.dcmppeq(left, left);
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(right, right);
+ takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+ m_assembler.dcmppgt(left, right);
+ m_assembler.branch(BF_OPCODE, 2);
+ takeBranch.link(this);
+ return Jump(m_assembler.extraInstrForBranch(scratchReg3));
+ }
+
+ if (cond == DoubleLessThanOrEqualOrUnordered) {
+ m_assembler.dcmppgt(right, left);
+ return branchFalse();
+ }
+
+ ASSERT(cond == DoubleNotEqualOrUnordered);
+ m_assembler.dcmppeq(right, left);
+ return branchFalse();
+ }
+
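+    // T-bit branches are emitted as a short inverted BF/BT that skips a far
+    // jump sequence (constant load into scratchReg3 + braf), keeping targets
+    // beyond the short branch displacement range reachable.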
+ Jump branchTrue()
+ {
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
+ m_assembler.branch(BF_OPCODE, 2);
+ return Jump(m_assembler.extraInstrForBranch(scratchReg3));
+ }
+
+ Jump branchFalse()
+ {
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
+ m_assembler.branch(BT_OPCODE, 2);
+ return Jump(m_assembler.extraInstrForBranch(scratchReg3));
+ }
+
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ RegisterID scr = claimScratch();
+ move(left.index, scr);
+ lshift32(TrustedImm32(left.scale), scr);
+ add32(left.base, scr);
+ load32(scr, left.offset, scr);
+ compare32(right.m_value, scr, cond);
+ releaseScratch(scr);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ moveDouble(src, dest);
+ m_assembler.dsqrt(dest);
+ }
+
+ void absDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ moveDouble(src, dest);
+ m_assembler.dabs(dest);
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ RegisterID addressTempRegister = claimScratch();
+ load8(address, addressTempRegister);
+ Jump jmp = branchTest32(cond, addressTempRegister, mask);
+ releaseScratch(addressTempRegister);
+ return jmp;
+ }
+
+ Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ RegisterID addressTempRegister = claimScratch();
+ load8(address, addressTempRegister);
+ Jump jmp = branchTest32(cond, addressTempRegister, mask);
+ releaseScratch(addressTempRegister);
+ return jmp;
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ RegisterID addressTempRegister = claimScratch();
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+ load8(Address(addressTempRegister), addressTempRegister);
+ Jump jmp = branchTest32(cond, addressTempRegister, mask);
+ releaseScratch(addressTempRegister);
+ return jmp;
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ RegisterID addressTempRegister = claimScratch();
+ load8(left, addressTempRegister);
+ Jump jmp = branch32(cond, addressTempRegister, right);
+ releaseScratch(addressTempRegister);
+ return jmp;
+ }
+
+ Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ RegisterID addressTempRegister = claimScratch();
+ load8(left, addressTempRegister);
+ Jump jmp = branch32(cond, addressTempRegister, right);
+ releaseScratch(addressTempRegister);
+ return jmp;
+ }
+
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ RegisterID addressTempRegister = claimScratch();
+ load8(left, addressTempRegister);
+ compare32(cond, addressTempRegister, right, dest);
+ releaseScratch(addressTempRegister);
+ }
+
+ enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ Jump result;
+ truncateDoubleToInt32(src, dest);
+ RegisterID intscr = claimScratch();
+ m_assembler.loadConstant(0x7fffffff, intscr);
+ m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 12, sizeof(uint32_t));
+ if (branchType == BranchIfTruncateFailed) {
+ m_assembler.branch(BT_OPCODE, 2);
+ m_assembler.addlImm8r(1, intscr);
+ m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
+ result = branchTrue();
+ } else {
+ Jump out = Jump(m_assembler.je(), SH4Assembler::JumpNear);
+ m_assembler.addlImm8r(1, intscr);
+ m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
+ result = branchFalse();
+ out.link(this);
+ }
+ releaseScratch(intscr);
+ return result;
+ }
+
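+    // SH4 ftrc only truncates to signed int32, so unsigned truncation biases
+    // the input by -2^31, truncates, then adds 2^31 back; saturated results
+    // (0x7fffffff / 0x80000000) indicate truncation failure.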
+ Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ Jump result;
+ RegisterID intscr = claimScratch();
+ m_assembler.loadConstant(0x80000000, intscr);
+ convertInt32ToDouble(intscr, fscratch);
+ addDouble(src, fscratch);
+ truncateDoubleToInt32(fscratch, dest);
+ m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 16, sizeof(uint32_t));
+ if (branchType == BranchIfTruncateFailed) {
+ m_assembler.branch(BT_OPCODE, 4);
+ m_assembler.addlImm8r(-1, intscr);
+ m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
+ m_assembler.addlImm8r(1, intscr);
+ m_assembler.sublRegReg(intscr, dest);
+ result = branchTrue();
+ } else {
+ Jump out = Jump(m_assembler.je(), SH4Assembler::JumpNear);
+ m_assembler.addlImm8r(-1, intscr);
+ m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
+ m_assembler.addlImm8r(1, intscr);
+ m_assembler.sublRegReg(intscr, dest);
+ result = branchFalse();
+ out.link(this);
+ }
+ releaseScratch(intscr);
+ return result;
+ }
+
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.ftrcdrmfpul(src);
+ m_assembler.stsfpulReg(dest);
+ }
+
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ RegisterID intscr = claimScratch();
+ m_assembler.loadConstant(0x80000000, intscr);
+ convertInt32ToDouble(intscr, fscratch);
+ addDouble(src, fscratch);
+ m_assembler.ftrcdrmfpul(fscratch);
+ m_assembler.stsfpulReg(dest);
+ m_assembler.sublRegReg(intscr, dest);
+ releaseScratch(intscr);
+ }
+
+ // Stack manipulation operations
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.popReg(dest);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.pushReg(src);
+ }
+
+ void push(TrustedImm32 imm)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ push(scr);
+ releaseScratch(scr);
+ }
+
+ // Register move operations
+
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.loadConstant(imm.m_value, dest);
+ }
+
+ DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+ {
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize, sizeof(uint32_t));
+ DataLabelPtr dataLabel(this);
+ m_assembler.loadConstantUnReusable(reinterpret_cast<uint32_t>(initialValue.m_value), dest);
+ return dataLabel;
+ }
+
+ DataLabel32 moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
+ {
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize, sizeof(uint32_t));
+ DataLabel32 dataLabel(this);
+ m_assembler.loadConstantUnReusable(static_cast<uint32_t>(initialValue.m_value), dest);
+ return dataLabel;
+ }
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.movlRegReg(src, dest);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ m_assembler.loadConstant(imm.asIntptr(), dest);
+ }
+
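+    // Swaps two registers in place with the three-XOR trick, avoiding a scratch.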
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ if (reg1 != reg2) {
+ xor32(reg1, reg2);
+ xor32(reg2, reg1);
+ xor32(reg1, reg2);
+ }
+ }
+
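+    // compare32 materializes the T bit into dest with movt; NotEqual has no
+    // direct SH4 comparison, so it is synthesized by branching over a mov #1.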
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmplRegReg(right, left, SH4Condition(cond));
+ if (cond != NotEqual) {
+ m_assembler.movt(dest);
+ return;
+ }
+
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
+ m_assembler.movImm8(0, dest);
+ m_assembler.branch(BT_OPCODE, 0);
+ m_assembler.movImm8(1, dest);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ if (left != dest) {
+ move(right, dest);
+ compare32(cond, left, dest, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ move(right, scr);
+ compare32(cond, left, scr, dest);
+ releaseScratch(scr);
+ }
+
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ load8(address, dest);
+ if (mask.m_value == -1)
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+ else
+ testlImm(mask.m_value, dest);
+ if (cond != NonZero) {
+ m_assembler.movt(dest);
+ return;
+ }
+
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
+ m_assembler.movImm8(0, dest);
+ m_assembler.branch(BT_OPCODE, 0);
+ m_assembler.movImm8(1, dest);
+ }
+
+ void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ load32(address, dest);
+ if (mask.m_value == -1)
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+ else
+ testlImm(mask.m_value, dest);
+ if (cond != NonZero) {
+ m_assembler.movt(dest);
+ return;
+ }
+
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
+ m_assembler.movImm8(0, dest);
+ m_assembler.branch(BT_OPCODE, 0);
+ m_assembler.movImm8(1, dest);
+ }
+
+ void loadPtrLinkReg(ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ load32(address, scr);
+ m_assembler.ldspr(scr);
+ releaseScratch(scr);
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmplRegReg(right, left, SH4Condition(cond));
+        /* A conceptual 'BT label' is emitted as a short inverted branch over
+           a far jump:
+               BT label    =>    BF off
+               nop               LDR reg
+               nop               braf @reg
+               nop               nop
+        */
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testlRegReg(left, left);
+ else
+ compare32(right.m_value, left, cond);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ compare32(right.offset, right.base, left, cond);
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ compare32(right, left.offset, left.base, cond);
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ compare32(right.m_value, left.offset, left.base, cond);
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ RegisterID scr = claimScratch();
+
+ load32(left.m_ptr, scr);
+ m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+ releaseScratch(scr);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ RegisterID addressTempRegister = claimScratch();
+
+ move(TrustedImmPtr(left.m_ptr), addressTempRegister);
+ m_assembler.movlMemReg(addressTempRegister, addressTempRegister);
+ compare32(right.m_value, addressTempRegister, cond);
+ releaseScratch(addressTempRegister);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ RegisterID lefttmp = claimScratch();
+
+ loadEffectiveAddress(left, lefttmp);
+
+ load8(lefttmp, lefttmp);
+ RegisterID righttmp = claimScratch();
+ m_assembler.loadConstant(right.m_value, righttmp);
+
+ Jump result = branch32(cond, lefttmp, righttmp);
+ releaseScratch(lefttmp);
+ releaseScratch(righttmp);
+ return result;
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ m_assembler.testlRegReg(reg, mask);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ if (mask.m_value == -1)
+ m_assembler.testlRegReg(reg, reg);
+ else
+ testlImm(mask.m_value, reg);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ if (mask.m_value == -1)
+ compare32(0, address.offset, address.base, static_cast<RelationalCondition>(cond));
+ else
+ testImm(mask.m_value, address.offset, address.base);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ load32(scr, address.offset, scr);
+
+ if (mask.m_value == -1)
+ m_assembler.testlRegReg(scr, scr);
+ else
+ testlImm(mask.m_value, scr);
+
+ releaseScratch(scr);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump jump()
+ {
+ return Jump(m_assembler.jmp());
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.jmpReg(target);
+ }
+
+ void jump(Address address)
+ {
+ RegisterID scr = claimScratch();
+ load32(address, scr);
+ m_assembler.jmpReg(scr);
+ releaseScratch(scr);
+ }
+
+ void jump(AbsoluteAddress address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(TrustedImmPtr(address.m_ptr), scr);
+ m_assembler.movlMemReg(scr, scr);
+ m_assembler.jmpReg(scr);
+ releaseScratch(scr);
+ }
+
+ // Arithmetic control flow operations
+
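+    // SH4 has no sticky overflow flag: addv/subv set the T bit on signed
+    // overflow, and multiply overflow is detected by comparing MACH against
+    // the sign extension of the low 32 bits of the product.
+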
+ Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Overflow)
+ return branchMul32(cond, TrustedImm32(-1), srcDest, srcDest);
+
+ neg32(srcDest);
+
+ if (cond == Signed) {
+ m_assembler.cmppz(srcDest);
+ return branchFalse();
+ }
+
+ compare32(0, srcDest, Equal);
+ return (cond == NonZero) ? branchFalse() : branchTrue();
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Overflow) {
+ m_assembler.addvlRegReg(src, dest);
+ return branchTrue();
+ }
+
+ m_assembler.addlRegReg(src, dest);
+
+ if ((cond == Signed) || (cond == PositiveOrZero)) {
+ m_assembler.cmppz(dest);
+ return (cond == Signed) ? branchFalse() : branchTrue();
+ }
+
+ compare32(0, dest, Equal);
+ return (cond == NonZero) ? branchFalse() : branchTrue();
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Overflow) {
+ if (src1 == dest)
+ m_assembler.addvlRegReg(src2, dest);
+ else {
+ move(src2, dest);
+ m_assembler.addvlRegReg(src1, dest);
+ }
+ return branchTrue();
+ }
+
+ add32(src1, src2, dest);
+
+ if ((cond == Signed) || (cond == PositiveOrZero)) {
+ m_assembler.cmppz(dest);
+ return (cond == Signed) ? branchFalse() : branchTrue();
+ }
+
+ compare32(0, dest, Equal);
+ return (cond == NonZero) ? branchFalse() : branchTrue();
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+
+ RegisterID immval = claimScratch();
+ move(imm, immval);
+ Jump result = branchAdd32(cond, immval, dest);
+ releaseScratch(immval);
+ return result;
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+
+ move(src, dest);
+
+ if (cond == Overflow) {
+ move(imm, scratchReg3);
+ m_assembler.addvlRegReg(scratchReg3, dest);
+ return branchTrue();
+ }
+
+ add32(imm, dest);
+
+ if ((cond == Signed) || (cond == PositiveOrZero)) {
+ m_assembler.cmppz(dest);
+ return (cond == Signed) ? branchFalse() : branchTrue();
+ }
+
+ compare32(0, dest, Equal);
+ return (cond == NonZero) ? branchFalse() : branchTrue();
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+ bool result;
+
+ move(imm, scratchReg3);
+ RegisterID destptr = claimScratch();
+ RegisterID destval = claimScratch();
+ move(TrustedImmPtr(dest.m_ptr), destptr);
+ m_assembler.movlMemReg(destptr, destval);
+ if (cond == Overflow) {
+ m_assembler.addvlRegReg(scratchReg3, destval);
+ result = true;
+ } else {
+ m_assembler.addlRegReg(scratchReg3, destval);
+ if ((cond == Signed) || (cond == PositiveOrZero)) {
+ m_assembler.cmppz(destval);
+ result = (cond == PositiveOrZero);
+ } else {
+ m_assembler.testlRegReg(destval, destval);
+ result = (cond != NonZero);
+ }
+ }
+ m_assembler.movlRegMem(destval, destptr);
+ releaseScratch(destval);
+ releaseScratch(destptr);
+ return result ? branchTrue() : branchFalse();
+ }
+
+ Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Overflow) {
+ RegisterID srcVal = claimScratch();
+ load32(src, srcVal);
+ m_assembler.addvlRegReg(srcVal, dest);
+ releaseScratch(srcVal);
+ return branchTrue();
+ }
+
+ add32(src, dest);
+
+ if ((cond == Signed) || (cond == PositiveOrZero)) {
+ m_assembler.cmppz(dest);
+ return (cond == Signed) ? branchFalse() : branchTrue();
+ }
+
+ compare32(0, dest, Equal);
+ return (cond == NonZero) ? branchFalse() : branchTrue();
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Overflow) {
+ RegisterID scrsign = claimScratch();
+ RegisterID msbres = claimScratch();
+ m_assembler.dmulslRegReg(src, dest);
+ m_assembler.stsmacl(dest);
+ m_assembler.cmppz(dest);
+ m_assembler.movt(scrsign);
+ m_assembler.addlImm8r(-1, scrsign);
+ m_assembler.stsmach(msbres);
+ m_assembler.cmplRegReg(msbres, scrsign, SH4Condition(Equal));
+ releaseScratch(msbres);
+ releaseScratch(scrsign);
+ return branchFalse();
+ }
+
+ mul32(src, dest);
+
+ if (cond == Signed) {
+ m_assembler.cmppz(dest);
+ return branchFalse();
+ }
+
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+ return (cond == NonZero) ? branchFalse() : branchTrue();
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Overflow) {
+ RegisterID scrsign = claimScratch();
+ RegisterID msbres = claimScratch();
+ m_assembler.dmulslRegReg(src1, src2);
+ m_assembler.stsmacl(dest);
+ m_assembler.cmppz(dest);
+ m_assembler.movt(scrsign);
+ m_assembler.addlImm8r(-1, scrsign);
+ m_assembler.stsmach(msbres);
+ m_assembler.cmplRegReg(msbres, scrsign, SH4Condition(Equal));
+ releaseScratch(msbres);
+ releaseScratch(scrsign);
+ return branchFalse();
+ }
+
+ mul32(src1, src2, dest);
+
+ if (cond == Signed) {
+ m_assembler.cmppz(dest);
+ return branchFalse();
+ }
+
+ compare32(0, dest, Equal);
+ return (cond == NonZero) ? branchFalse() : branchTrue();
+ }
+
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (src == dest) {
+ move(imm, scratchReg3);
+ return branchMul32(cond, scratchReg3, dest);
+ }
+
+ move(imm, dest);
+ return branchMul32(cond, src, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Overflow) {
+ m_assembler.subvlRegReg(src, dest);
+ return branchTrue();
+ }
+
+ sub32(src, dest);
+
+ if (cond == Signed) {
+ m_assembler.cmppz(dest);
+ return branchFalse();
+ }
+
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+ return (cond == NonZero) ? branchFalse() : branchTrue();
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ RegisterID immval = claimScratch();
+ move(imm, immval);
+ Jump result = branchSub32(cond, immval, dest);
+ releaseScratch(immval);
+ return result;
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ move(src, dest);
+ return branchSub32(cond, imm, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (src2 != dest) {
+ move(src1, dest);
+ return branchSub32(cond, src2, dest);
+ }
+
+ if (cond == Overflow) {
+ RegisterID tmpval = claimScratch();
+ move(src1, tmpval);
+ m_assembler.subvlRegReg(src2, tmpval);
+ move(tmpval, dest);
+ releaseScratch(tmpval);
+ return branchTrue();
+ }
+
+ RegisterID tmpval = claimScratch();
+ move(src1, tmpval);
+ sub32(src2, tmpval);
+ move(tmpval, dest);
+ releaseScratch(tmpval);
+
+ if (cond == Signed) {
+ m_assembler.cmppz(dest);
+ return branchFalse();
+ }
+
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+ return (cond == NonZero) ? branchFalse() : branchTrue();
+ }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ or32(src, dest);
+
+ if (cond == Signed) {
+ m_assembler.cmppz(dest);
+ return branchFalse();
+ }
+
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+ return (cond == NonZero) ? branchFalse() : branchTrue();
+ }
+
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
+ {
+ truncateDoubleToInt32(src, dest);
+ convertInt32ToDouble(dest, fscratch);
+ failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fscratch, src));
+
+ if (negZeroCheck)
+ failureCases.append(branch32(Equal, dest, TrustedImm32(0)));
+ }
+
+ void neg32(RegisterID dst)
+ {
+ m_assembler.neg(dst, dst);
+ }
+
+ void urshift32(RegisterID shiftamount, RegisterID dest)
+ {
+ RegisterID shiftTmp = claimScratch();
+ m_assembler.loadConstant(0x1f, shiftTmp);
+ m_assembler.andlRegReg(shiftamount, shiftTmp);
+ m_assembler.neg(shiftTmp, shiftTmp);
+ m_assembler.shldRegReg(dest, shiftTmp);
+ releaseScratch(shiftTmp);
+ }
+
+ void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ move(src, dest);
+ urshift32(shiftAmount, dest);
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ int immMasked = imm.m_value & 0x1f;
+ if (!immMasked)
+ return;
+
+ if ((immMasked == 1) || (immMasked == 2) || (immMasked == 8) || (immMasked == 16)) {
+ m_assembler.shlrImm8r(immMasked, dest);
+ return;
+ }
+
+ RegisterID shiftTmp = claimScratch();
+ m_assembler.loadConstant(-immMasked, shiftTmp);
+ m_assembler.shldRegReg(dest, shiftTmp);
+ releaseScratch(shiftTmp);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
+ {
+ move(src, dest);
+ urshift32(shiftamount, dest);
+ }
+
+ Call call()
+ {
+ return Call(m_assembler.call(), Call::Linkable);
+ }
+
+ Call nearCall()
+ {
+ return Call(m_assembler.call(), Call::LinkableNear);
+ }
+
+ Call call(RegisterID target)
+ {
+ return Call(m_assembler.call(target), Call::None);
+ }
+
+ void call(Address address)
+ {
+ RegisterID target = claimScratch();
+ load32(address.base, address.offset, target);
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2);
+ m_assembler.branch(JSR_OPCODE, target);
+ m_assembler.nop();
+ releaseScratch(target);
+ }
+
+ void breakpoint()
+ {
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2);
+ m_assembler.bkpt();
+ m_assembler.nop();
+ }
+
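+    // Illustrative use of the patchable compare-and-branch (names below are
+    // hypothetical):
+    //     DataLabelPtr ptrLabel;
+    //     Jump miss = jit.branchPtrWithPatch(NotEqual, regT0, ptrLabel);
+    //     // After linking, a RepatchBuffer can rewrite the pointer constant
+    //     // that ptrLabel marks, retargeting the comparison.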
+ Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ RegisterID dataTempRegister = claimScratch();
+
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 10, 2 * sizeof(uint32_t));
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ m_assembler.cmplRegReg(dataTempRegister, left, SH4Condition(cond));
+ releaseScratch(dataTempRegister);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ RegisterID scr = claimScratch();
+
+ m_assembler.loadConstant(left.offset, scr);
+ m_assembler.addlRegReg(left.base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ RegisterID scr1 = claimScratch();
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 10, 2 * sizeof(uint32_t));
+ dataLabel = moveWithPatch(initialRightValue, scr1);
+ m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+ releaseScratch(scr);
+ releaseScratch(scr1);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ RegisterID scr = claimScratch();
+
+ m_assembler.loadConstant(left.offset, scr);
+ m_assembler.addlRegReg(left.base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ RegisterID scr1 = claimScratch();
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 10, 2 * sizeof(uint32_t));
+ dataLabel = moveWithPatch(initialRightValue, scr1);
+ m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+ releaseScratch(scr);
+ releaseScratch(scr1);
+
+ return (cond == NotEqual) ? branchFalse() : branchTrue();
+ }
+
+ void ret()
+ {
+ m_assembler.ret();
+ m_assembler.nop();
+ }
+
+ DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ DataLabelPtr label = moveWithPatch(initialValue, scr);
+ store32(scr, address);
+ releaseScratch(scr);
+ return label;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
+
+ int sizeOfConstantPool()
+ {
+ return m_assembler.sizeOfConstantPool();
+ }
+
+ Call tailRecursiveCall()
+ {
+ RegisterID scr = claimScratch();
+
+ m_assembler.loadConstantUnReusable(0x0, scr, true);
+        Jump tailJump = Jump(m_assembler.jmp(scr));
+        releaseScratch(scr);
+
+        return Call::fromTailJump(tailJump);
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ return tailRecursiveCall();
+ }
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ void memoryFence()
+ {
+ m_assembler.synco();
+ }
+
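+    // The abort reason (and optional misc payload) is parked in fixed registers
+    // before trapping, so both values are visible in a debugger at the
+    // breakpoint.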
+ void abortWithReason(AbortReason reason)
+ {
+ move(TrustedImm32(reason), SH4Registers::r0);
+ breakpoint();
+ }
+
+ void abortWithReason(AbortReason reason, intptr_t misc)
+ {
+ move(TrustedImm32(misc), SH4Registers::r1);
+ abortWithReason(reason);
+ }
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call.dataLocation())));
+ }
+
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ SH4Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return SH4Assembler::maxJumpReplacementSize();
+ }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+
+ static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ return label.labelAtOffset(0);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
+ {
+ SH4Assembler::revertJumpReplacementToBranchPtrWithPatch(instructionStart.dataLocation(), rd, reinterpret_cast<int>(initialValue));
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+ static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+protected:
+ SH4Assembler::Condition SH4Condition(RelationalCondition cond)
+ {
+ return static_cast<SH4Assembler::Condition>(cond);
+ }
+
+ SH4Assembler::Condition SH4Condition(ResultCondition cond)
+ {
+ return static_cast<SH4Assembler::Condition>(cond);
+ }
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ SH4Assembler::linkCall(code, call.m_label, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerSH4_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
new file mode 100644
index 000000000..bdd9e57ba
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerX86_h
+#define MacroAssemblerX86_h
+
+#if ENABLE(ASSEMBLER) && CPU(X86)
+
+#include "MacroAssemblerX86Common.h"
+
+namespace JSC {
+
+class MacroAssemblerX86 : public MacroAssemblerX86Common {
+public:
+ static const Scale ScalePtr = TimesFour;
+
+ using MacroAssemblerX86Common::add32;
+ using MacroAssemblerX86Common::and32;
+ using MacroAssemblerX86Common::branchAdd32;
+ using MacroAssemblerX86Common::branchSub32;
+ using MacroAssemblerX86Common::sub32;
+ using MacroAssemblerX86Common::or32;
+ using MacroAssemblerX86Common::load32;
+ using MacroAssemblerX86Common::load8;
+ using MacroAssemblerX86Common::store32;
+ using MacroAssemblerX86Common::store8;
+ using MacroAssemblerX86Common::branch32;
+ using MacroAssemblerX86Common::call;
+ using MacroAssemblerX86Common::jump;
+ using MacroAssemblerX86Common::addDouble;
+ using MacroAssemblerX86Common::loadDouble;
+ using MacroAssemblerX86Common::storeDouble;
+ using MacroAssemblerX86Common::convertInt32ToDouble;
+ using MacroAssemblerX86Common::branch8;
+ using MacroAssemblerX86Common::branchTest8;
+
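+    // Three-operand add via LEA: computes src + imm into dest without
+    // clobbering src (LEA also leaves the flags untouched).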
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.leal_mr(imm.m_value, src, dest);
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.addl_im(imm.m_value, address.m_ptr);
+ }
+
+ void add32(AbsoluteAddress address, RegisterID dest)
+ {
+ m_assembler.addl_mr(address.m_ptr, dest);
+ }
+
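+    // 64-bit add on 32-bit x86: addl the low word, then adcl the sign
+    // extension of imm (imm.m_value >> 31) into the high word with carry.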
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.addl_im(imm.m_value, address.m_ptr);
+ m_assembler.adcl_im(imm.m_value >> 31, reinterpret_cast<const char*>(address.m_ptr) + sizeof(int32_t));
+ }
+
+ void and32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.andl_im(imm.m_value, address.m_ptr);
+ }
+
+ void or32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.orl_im(imm.m_value, address.m_ptr);
+ }
+
+ void or32(RegisterID reg, AbsoluteAddress address)
+ {
+ m_assembler.orl_rm(reg, address.m_ptr);
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.subl_im(imm.m_value, address.m_ptr);
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address, dest);
+ }
+
+ void load8(const void* address, RegisterID dest)
+ {
+ m_assembler.movzbl_mr(address, dest);
+ }
+
+ void abortWithReason(AbortReason reason)
+ {
+ move(TrustedImm32(reason), X86Registers::eax);
+ breakpoint();
+ }
+
+ void abortWithReason(AbortReason reason, intptr_t misc)
+ {
+ move(TrustedImm32(misc), X86Registers::edx);
+ abortWithReason(reason);
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
+ m_assembler.movl_mr(address.offset, address.base, dest);
+ return result;
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ m_assembler.addsd_mr(address.m_ptr, dest);
+ }
+
+ void storeDouble(FPRegisterID src, TrustedImmPtr address)
+ {
+ ASSERT(isSSE2Present());
+ ASSERT(address.m_value);
+ m_assembler.movsd_rm(src, address.m_value);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
+ m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
+ }
+
+ void store32(TrustedImm32 imm, void* address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address);
+ }
+
+ void store32(RegisterID src, void* address)
+ {
+ m_assembler.movl_rm(src, address);
+ }
+
+ void store8(RegisterID src, void* address)
+ {
+ m_assembler.movb_rm(src, address);
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address);
+ }
+
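+    // Assembles the high 32 bits of the double in dest2 from two 16-bit PEXTRW
+    // extractions, and moves the low 32 bits into dest1 via MOVD, avoiding a
+    // round trip through memory.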
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.pextrw_irr(3, src, dest1);
+ m_assembler.pextrw_irr(2, src, dest2);
+ lshift32(TrustedImm32(16), dest1);
+ or32(dest1, dest2);
+ movePackedToInt32(src, dest1);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ moveInt32ToPacked(src1, dest);
+ moveInt32ToPacked(src2, scratch);
+ lshiftPacked(TrustedImm32(32), scratch);
+ orPacked(scratch, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ m_assembler.addl_im(imm.m_value, dest.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ m_assembler.subl_im(imm.m_value, dest.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ m_assembler.cmpl_rm(right, left.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ m_assembler.cmpl_im(right.m_value, left.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Call call()
+ {
+ return Call(m_assembler.call(), Call::Linkable);
+ }
+
+    // The operand is a memory location containing the address to jump to.
+ void jump(AbsoluteAddress address)
+ {
+ m_assembler.jmp_m(address.m_ptr);
+ }
+
+ Call tailRecursiveCall()
+ {
+ return Call::fromTailJump(jump());
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ return Call::fromTailJump(oldJump);
+ }
+
+ DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_i32r(initialValue.asIntptr(), dest);
+ return DataLabelPtr(this);
+ }
+
+ Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ m_assembler.cmpb_im(right.m_value, left.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.m_ptr);
+ else
+ m_assembler.testb_im(mask.m_value, address.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ padBeforePatch();
+ m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
+ dataLabel = DataLabelPtr(this);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ padBeforePatch();
+ m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
+ dataLabel = DataLabelPtr(this);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ padBeforePatch();
+ m_assembler.cmpl_im_force32(initialRightValue.m_value, left.offset, left.base);
+ dataLabel = DataLabel32(this);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ padBeforePatch();
+ m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base);
+ return DataLabelPtr(this);
+ }
+
+ static bool supportsFloatingPoint() { return isSSE2Present(); }
+ static bool supportsFloatingPointTruncate() { return isSSE2Present(); }
+ static bool supportsFloatingPointSqrt() { return isSSE2Present(); }
+ static bool supportsFloatingPointAbs() { return isSSE2Present(); }
+
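+    // The rel32 displacement of the call sits immediately before the location
+    // that call.dataLocation() points at, so the target is that displacement
+    // plus the address following the call instruction.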
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ intptr_t offset = reinterpret_cast<int32_t*>(call.dataLocation())[-1];
+ return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(call.dataLocation()) + offset));
+ }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
+ static bool canJumpReplacePatchableBranch32WithPatch() { return true; }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ const int immediateBytes = 4;
+ const int totalBytes = opcodeBytes + modRMBytes + immediateBytes;
+ ASSERT(totalBytes >= maxJumpReplacementSize());
+ return label.labelAtOffset(-totalBytes);
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
+ {
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ const int offsetBytes = 0;
+ const int immediateBytes = 4;
+ const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes;
+ ASSERT(totalBytes >= maxJumpReplacementSize());
+ return label.labelAtOffset(-totalBytes);
+ }
+
+ static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
+ {
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ const int offsetBytes = 0;
+ const int immediateBytes = 4;
+ const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes;
+ ASSERT(totalBytes >= maxJumpReplacementSize());
+ return label.labelAtOffset(-totalBytes);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
+ {
+ X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), reg);
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address address, void* initialValue)
+ {
+ ASSERT(!address.offset);
+ X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), 0, address.base);
+ }
+
+ static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address address, int32_t initialValue)
+ {
+ ASSERT(!address.offset);
+ X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), initialValue, 0, address.base);
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ X86Assembler::linkCall(code, call.m_label, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp
new file mode 100644
index 000000000..0108ef4c0
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
+#include "MacroAssemblerX86Common.h"
+
+namespace JSC {
+
+#if ENABLE(MASM_PROBE)
+
+#define INDENT printIndent(indentation)
+
+void MacroAssemblerX86Common::printCPURegisters(MacroAssemblerX86Common::CPUState& cpu, int indentation)
+{
+#if CPU(X86)
+ #define PRINT_GPREGISTER(_type, _regName) { \
+ int32_t value = reinterpret_cast<int32_t>(cpu._regName); \
+        INDENT, dataLogF("%6s: 0x%08x %d\n", #_regName, value, value); \
+ }
+#elif CPU(X86_64)
+ #define PRINT_GPREGISTER(_type, _regName) { \
+ int64_t value = reinterpret_cast<int64_t>(cpu._regName); \
+        INDENT, dataLogF("%6s: 0x%016llx %lld\n", #_regName, value, value); \
+ }
+#endif
+ FOR_EACH_CPU_GPREGISTER(PRINT_GPREGISTER)
+ FOR_EACH_CPU_SPECIAL_REGISTER(PRINT_GPREGISTER)
+ #undef PRINT_GPREGISTER
+
+ #define PRINT_FPREGISTER(_type, _regName) { \
+ uint64_t* u = reinterpret_cast<uint64_t*>(&cpu._regName); \
+ double* d = reinterpret_cast<double*>(&cpu._regName); \
+ INDENT, dataLogF("%6s: 0x%016llx %.13g\n", #_regName, *u, *d); \
+ }
+ FOR_EACH_CPU_FPREGISTER(PRINT_FPREGISTER)
+ #undef PRINT_FPREGISTER
+}
+
+#undef INDENT
+
+void MacroAssemblerX86Common::printRegister(MacroAssemblerX86Common::CPUState& cpu, RegisterID regID)
+{
+ const char* name = CPUState::registerName(regID);
+ union {
+ void* voidPtr;
+ intptr_t intptrValue;
+ } u;
+ u.voidPtr = cpu.registerValue(regID);
+ dataLogF("%s:<%p %ld>", name, u.voidPtr, u.intptrValue);
+}
+
+void MacroAssemblerX86Common::printRegister(MacroAssemblerX86Common::CPUState& cpu, FPRegisterID regID)
+{
+ const char* name = CPUState::registerName(regID);
+ union {
+ double doubleValue;
+ uint64_t uint64Value;
+ } u;
+ u.doubleValue = cpu.registerValue(regID);
+ dataLogF("%s:<0x%016llx %.13g>", name, u.uint64Value, u.doubleValue);
+}
+
+extern "C" void ctiMasmProbeTrampoline();
+
+// What code is emitted for the probe?
+// ==================================
+// We want to keep the size of the emitted probe invocation code as compact as
+// possible to minimize the perturbation to the JIT generated code. However,
+// we also need to preserve the CPU registers and set up the ProbeContext to be
+// passed to the user probe function.
+//
+// Hence, we do only the minimum here to preserve a scratch register (i.e. rax
+// in this case) and the stack pointer (i.e. rsp), and pass the probe arguments.
+// We'll let the ctiMasmProbeTrampoline handle the rest of the probe invocation
+// work i.e. saving the CPUState (and setting up the ProbeContext), calling the
+// user probe function, and restoring the CPUState before returning to JIT
+// generated code.
+//
+// What registers need to be saved?
+// ===============================
+// The registers are saved for 2 reasons:
+// 1. To preserve their state in the JITted code. This means that all registers
+ //    that are not callee saved need to be saved. We also need to save the
+// condition code registers because the probe can be inserted between a test
+// and a branch.
+// 2. To allow the probe to inspect the values of the registers for debugging
+// purposes. This means all registers need to be saved.
+//
+// In summary, save everything. But for reasons stated above, we should do the
+// minimum here and let ctiMasmProbeTrampoline do the heavy lifting to save the
+// full set.
+//
+// What values are in the saved registers?
+// ======================================
+// Conceptually, the saved registers should contain values as if the probe
+// is not present in the JIT generated code. Hence, they should contain values
+// that are expected at the start of the instruction immediately following the
+// probe.
+//
+// Specifically, the saved stack pointer register will point to the stack
+// position before we push the ProbeContext frame. The saved rip will point to
+// the address of the instruction immediately following the probe.
+
+void MacroAssemblerX86Common::probe(MacroAssemblerX86Common::ProbeFunction function, void* arg1, void* arg2)
+{
+ push(RegisterID::esp);
+ push(RegisterID::eax);
+ move(TrustedImmPtr(arg2), RegisterID::eax);
+ push(RegisterID::eax);
+ move(TrustedImmPtr(arg1), RegisterID::eax);
+ push(RegisterID::eax);
+ move(TrustedImmPtr(reinterpret_cast<void*>(function)), RegisterID::eax);
+ push(RegisterID::eax);
+ move(TrustedImmPtr(reinterpret_cast<void*>(ctiMasmProbeTrampoline)), RegisterID::eax);
+ call(RegisterID::eax);
+}
+
+#endif // ENABLE(MASM_PROBE)
+
+#if CPU(X86) && !OS(MAC_OS_X)
+MacroAssemblerX86Common::SSE2CheckState MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
new file mode 100644
index 000000000..b6ae6fc6f
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -0,0 +1,1620 @@
+/*
+ * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerX86Common_h
+#define MacroAssemblerX86Common_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "X86Assembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler, MacroAssemblerX86Common> {
+public:
+#if CPU(X86_64)
+ static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
+#endif
+
+protected:
+ static const int DoubleConditionBitInvert = 0x10;
+ static const int DoubleConditionBitSpecial = 0x20;
+ static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
+
+public:
+ typedef X86Assembler::XMMRegisterID XMMRegisterID;
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value >= -128 && value <= 127;
+ }
+
+ enum RelationalCondition {
+ Equal = X86Assembler::ConditionE,
+ NotEqual = X86Assembler::ConditionNE,
+ Above = X86Assembler::ConditionA,
+ AboveOrEqual = X86Assembler::ConditionAE,
+ Below = X86Assembler::ConditionB,
+ BelowOrEqual = X86Assembler::ConditionBE,
+ GreaterThan = X86Assembler::ConditionG,
+ GreaterThanOrEqual = X86Assembler::ConditionGE,
+ LessThan = X86Assembler::ConditionL,
+ LessThanOrEqual = X86Assembler::ConditionLE
+ };
+
+ enum ResultCondition {
+ Overflow = X86Assembler::ConditionO,
+ Signed = X86Assembler::ConditionS,
+ PositiveOrZero = X86Assembler::ConditionNS,
+ Zero = X86Assembler::ConditionE,
+ NonZero = X86Assembler::ConditionNE
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
+ DoubleNotEqual = X86Assembler::ConditionNE,
+ DoubleGreaterThan = X86Assembler::ConditionA,
+ DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
+ DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
+ DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = X86Assembler::ConditionE,
+ DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
+ DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
+ DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
+ DoubleLessThanOrUnordered = X86Assembler::ConditionB,
+ DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
+ };
+ COMPILE_ASSERT(
+ !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
+ DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
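+
+    // Worked example of the encoding above: DoubleLessThan is ConditionA
+    // ('above') with DoubleConditionBitInvert set. branchDouble() swaps the
+    // ucomisd operand order when the invert bit is set, so testing 'right
+    // above left' implements 'left < right'; an unordered (NaN) comparison
+    // sets ZF/PF/CF, which the 'above' family treats as false, giving the
+    // ordered semantics documented above.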
+
+ static const RegisterID stackPointerRegister = X86Registers::esp;
+ static const RegisterID framePointerRegister = X86Registers::ebp;
+
+ static bool canBlind() { return true; }
+ static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
+ static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
+
+ // Integer arithmetic operations:
+ //
+ // Operations are typically two operand - operation(source, srcDst)
+    // For many operations the source may be a TrustedImm32, and the srcDst operand
+    // may often be a memory location (explicitly described using an Address
+ // object).
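+    //
+    // For example (a sketch; 'jit' is an assembler instance and the register
+    // choices are illustrative):
+    //
+    //     jit.add32(TrustedImm32(1), X86Registers::eax);                // eax += 1 (emits inc)
+    //     jit.sub32(X86Registers::ecx, Address(X86Registers::ebp, 8)); // [ebp + 8] -= ecx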
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addl_rr(src, dest);
+ }
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ m_assembler.addl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == 1)
+ m_assembler.inc_r(dest);
+ else
+ m_assembler.addl_ir(imm.m_value, dest);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ m_assembler.addl_mr(src.offset, src.base, dest);
+ }
+
+ void add32(RegisterID src, Address dest)
+ {
+ m_assembler.addl_rm(src, dest.offset, dest.base);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.leal_mr(imm.m_value, src, dest);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andl_rr(src, dest);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.andl_ir(imm.m_value, dest);
+ }
+
+ void and32(RegisterID src, Address dest)
+ {
+ m_assembler.andl_rm(src, dest.offset, dest.base);
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ m_assembler.andl_mr(src.offset, src.base, dest);
+ }
+
+ void and32(TrustedImm32 imm, Address address)
+ {
+ m_assembler.andl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ zeroExtend32ToPtr(op1, dest);
+ else if (op1 == dest)
+ and32(op2, dest);
+ else {
+ move(op2, dest);
+ and32(op1, dest);
+ }
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ and32(imm, dest);
+ }
+
+ void countLeadingZeros32(RegisterID src, RegisterID dst)
+ {
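+        // bsr yields the bit index of the most significant set bit, leaving dst
+        // undefined (and ZF set) when src is zero. For non-zero input the leading
+        // zero count is 31 - index, which equals index ^ 0x1f; for zero we return 32.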
+ m_assembler.bsr_rr(src, dst);
+ Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
+ move(TrustedImm32(32), dst);
+
+ Jump skipNonZeroCase = jump();
+ srcIsNonZero.link(this);
+ xor32(TrustedImm32(0x1f), dst);
+ skipNonZeroCase.link(this);
+ }
+
+ void lshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (shift_amount == X86Registers::ecx)
+ m_assembler.shll_CLr(dest);
+ else {
+            // On x86 we can only shift by ecx; if asked to shift by another register we'll
+            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If dest is ecx, then shift the swapped register!
+ swap(shift_amount, X86Registers::ecx);
+ m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+ swap(shift_amount, X86Registers::ecx);
+ }
+ }
+
+ void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (src != dest)
+ move(src, dest);
+ lshift32(shift_amount, dest);
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.shll_i8r(imm.m_value, dest);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ lshift32(imm, dest);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.imull_rr(src, dest);
+ }
+
+ void mul32(Address src, RegisterID dest)
+ {
+ m_assembler.imull_mr(src.offset, src.base, dest);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.imull_i32r(src, imm.m_value, dest);
+ }
+
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.negl_r(srcDest);
+ }
+
+ void neg32(Address srcDest)
+ {
+ m_assembler.negl_m(srcDest.offset, srcDest.base);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orl_rr(src, dest);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.orl_ir(imm.m_value, dest);
+ }
+
+ void or32(RegisterID src, Address dest)
+ {
+ m_assembler.orl_rm(src, dest.offset, dest.base);
+ }
+
+ void or32(Address src, RegisterID dest)
+ {
+ m_assembler.orl_mr(src.offset, src.base, dest);
+ }
+
+ void or32(TrustedImm32 imm, Address address)
+ {
+ m_assembler.orl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ zeroExtend32ToPtr(op1, dest);
+ else if (op1 == dest)
+ or32(op2, dest);
+ else {
+ move(op2, dest);
+ or32(op1, dest);
+ }
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ or32(imm, dest);
+ }
+
+ void rshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (shift_amount == X86Registers::ecx)
+ m_assembler.sarl_CLr(dest);
+ else {
+            // On x86 we can only shift by ecx; if asked to shift by another register we'll
+            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If dest is ecx, then shift the swapped register!
+ swap(shift_amount, X86Registers::ecx);
+ m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+ swap(shift_amount, X86Registers::ecx);
+ }
+ }
+
+ void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (src != dest)
+ move(src, dest);
+ rshift32(shift_amount, dest);
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.sarl_i8r(imm.m_value, dest);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ rshift32(imm, dest);
+ }
+
+ void urshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (shift_amount == X86Registers::ecx)
+ m_assembler.shrl_CLr(dest);
+ else {
+            // On x86 we can only shift by ecx; if asked to shift by another register we'll
+            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If dest is ecx, then shift the swapped register!
+ swap(shift_amount, X86Registers::ecx);
+ m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+ swap(shift_amount, X86Registers::ecx);
+ }
+ }
+
+ void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (src != dest)
+ move(src, dest);
+ urshift32(shift_amount, dest);
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.shrl_i8r(imm.m_value, dest);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ urshift32(imm, dest);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subl_rr(src, dest);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == 1)
+ m_assembler.dec_r(dest);
+ else
+ m_assembler.subl_ir(imm.m_value, dest);
+ }
+
+ void sub32(TrustedImm32 imm, Address address)
+ {
+ m_assembler.subl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ m_assembler.subl_mr(src.offset, src.base, dest);
+ }
+
+ void sub32(RegisterID src, Address dest)
+ {
+ m_assembler.subl_rm(src, dest.offset, dest.base);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorl_rr(src, dest);
+ }
+
+ void xor32(TrustedImm32 imm, Address dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.notl_m(dest.offset, dest.base);
+ else
+ m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.notl_r(dest);
+ else
+ m_assembler.xorl_ir(imm.m_value, dest);
+ }
+
+ void xor32(RegisterID src, Address dest)
+ {
+ m_assembler.xorl_rm(src, dest.offset, dest.base);
+ }
+
+ void xor32(Address src, RegisterID dest)
+ {
+ m_assembler.xorl_mr(src.offset, src.base, dest);
+ }
+
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ move(TrustedImm32(0), dest);
+ else if (op1 == dest)
+ xor32(op2, dest);
+ else {
+ move(op2, dest);
+ xor32(op1, dest);
+ }
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ xor32(imm, dest);
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.sqrtsd_rr(src, dst);
+ }
+
+ void absDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(src != dst);
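+        // -0.0 is a bit pattern with only the sign bit set; andnpd computes
+        // dst = ~dst & src, so this clears the sign bit, leaving |src| in dst.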
+ static const double negativeZeroConstant = -0.0;
+ loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
+ m_assembler.andnpd_rr(src, dst);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(src != dst);
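+        // xorpd against the sign-bit mask (-0.0) flips the sign bit, leaving -src in dst.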
+ static const double negativeZeroConstant = -0.0;
+ loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
+ m_assembler.xorpd_rr(src, dst);
+ }
+
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+    // store(source, address). The source for a store may be a TrustedImm32. Address
+    // operand objects to loads and stores will be implicitly constructed if a
+ // register is passed.
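+    //
+    // For example (a sketch; the register choices are illustrative):
+    //
+    //     jit.load32(Address(X86Registers::ebp, 16), X86Registers::eax);  // eax = [ebp + 16]
+    //     jit.store32(TrustedImm32(0), BaseIndex(X86Registers::ebx, X86Registers::ecx, TimesFour)); // [ebx + ecx*4] = 0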
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address.offset, address.base, dest);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(address, dest);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_mr_disp32(address.offset, address.base, dest);
+ return DataLabel32(this);
+ }
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_mr_disp8(address.offset, address.base, dest);
+ return DataLabelCompact(this);
+ }
+
+ static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+ {
+ ASSERT(isCompactPtrAlignedAddressOffset(value));
+ AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
+ }
+
+ DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_mr_disp8(address.offset, address.base, dest);
+ return DataLabelCompact(this);
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movzbl_mr(address.offset, address.base, dest);
+ }
+
+ void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movsbl_mr(address.offset, address.base, dest);
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load16(Address address, RegisterID dest)
+ {
+ m_assembler.movzwl_mr(address.offset, address.base, dest);
+ }
+
+ void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load16SignedExtendTo32(Address address, RegisterID dest)
+ {
+ m_assembler.movswl_mr(address.offset, address.base, dest);
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ padBeforePatch();
+ m_assembler.movl_rm_disp32(src, address.offset, address.base);
+ return DataLabel32(this);
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.movl_rm(src, address.offset, address.base);
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
+ }
+
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store8(TrustedImm32 imm, Address address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
+ }
+
+ void store8(TrustedImm32 imm, BaseIndex address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
+ }
+
+ static ALWAYS_INLINE RegisterID getUnusedRegister(BaseIndex address)
+ {
+ if (address.base != X86Registers::eax && address.index != X86Registers::eax)
+ return X86Registers::eax;
+
+ if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
+ return X86Registers::ebx;
+
+ ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
+ return X86Registers::ecx;
+ }
+
+ static ALWAYS_INLINE RegisterID getUnusedRegister(Address address)
+ {
+ if (address.base != X86Registers::eax)
+ return X86Registers::eax;
+
+ ASSERT(address.base != X86Registers::edx);
+ return X86Registers::edx;
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only store from the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (src >= 4) {
+ // Pick a temporary register.
+ RegisterID temp = getUnusedRegister(address);
+
+ // Swap to the temporary register to perform the store.
+ swap(src, temp);
+ m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
+ swap(src, temp);
+ return;
+ }
+#endif
+ m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store8(RegisterID src, Address address)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only store from the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (src >= 4) {
+ // Pick a temporary register.
+ RegisterID temp = getUnusedRegister(address);
+
+ // Swap to the temporary register to perform the store.
+ swap(src, temp);
+ m_assembler.movb_rm(temp, address.offset, address.base);
+ swap(src, temp);
+ return;
+ }
+#endif
+ m_assembler.movb_rm(src, address.offset, address.base);
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only store from the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (src >= 4) {
+ // Pick a temporary register.
+ RegisterID temp = getUnusedRegister(address);
+
+ // Swap to the temporary register to perform the store.
+ swap(src, temp);
+ m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
+ swap(src, temp);
+ return;
+ }
+#endif
+ m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+
+    // Floating-point operations:
+ //
+ // Presently only supports SSE, not x87 floating point.
+
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ if (src != dest)
+ m_assembler.movsd_rr(src, dest);
+ }
+
+ void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ {
+#if CPU(X86)
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address.m_value, dest);
+#else
+ move(address, scratchRegister);
+ loadDouble(scratchRegister, dest);
+#endif
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address.offset, address.base, dest);
+ }
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+
+    void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_rm(src, address.offset, address.base);
+ }
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtsd2ss_rr(src, dst);
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtss2sd_rr(src, dst);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.addsd_rr(src, dest);
+ }
+
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ if (op1 == dest)
+ addDouble(op2, dest);
+ else {
+ moveDouble(op2, dest);
+ addDouble(op1, dest);
+ }
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.addsd_mr(src.offset, src.base, dest);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.divsd_rr(src, dest);
+ }
+
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ // B := A / B is invalid.
+ ASSERT(op1 == dest || op2 != dest);
+
+ moveDouble(op1, dest);
+ divDouble(op2, dest);
+ }
+
+ void divDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.divsd_mr(src.offset, src.base, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.subsd_rr(src, dest);
+ }
+
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ // B := A - B is invalid.
+ ASSERT(op1 == dest || op2 != dest);
+
+ moveDouble(op1, dest);
+ subDouble(op2, dest);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.subsd_mr(src.offset, src.base, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.mulsd_rr(src, dest);
+ }
+
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ if (op1 == dest)
+ mulDouble(op2, dest);
+ else {
+ moveDouble(op2, dest);
+ mulDouble(op1, dest);
+ }
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.mulsd_mr(src.offset, src.base, dest);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtsi2sd_rr(src, dest);
+ }
+
+ void convertInt32ToDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ ASSERT(isSSE2Present());
+
+ if (cond & DoubleConditionBitInvert)
+ m_assembler.ucomisd_rr(left, right);
+ else
+ m_assembler.ucomisd_rr(right, left);
+
+ if (cond == DoubleEqual) {
+ if (left == right)
+ return Jump(m_assembler.jnp());
+ Jump isUnordered(m_assembler.jp());
+ Jump result = Jump(m_assembler.je());
+ isUnordered.link(this);
+ return result;
+ } else if (cond == DoubleNotEqualOrUnordered) {
+ if (left == right)
+ return Jump(m_assembler.jp());
+ Jump isUnordered(m_assembler.jp());
+ Jump isEqual(m_assembler.je());
+ isUnordered.link(this);
+ Jump result = jump();
+ isEqual.link(this);
+ return result;
+ }
+
+ ASSERT(!(cond & DoubleConditionBitSpecial));
+ return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
+ }
+
+    // Truncates 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, INT_MIN).
+ enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+ return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
+ }
+
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+ }
+
+#if CPU(X86_64)
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2siq_rr(src, dest);
+ }
+#endif
+
+    // Converts 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+
+ // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+#if CPU(X86_64)
+ if (negZeroCheck) {
+ Jump valueIsNonZero = branchTest32(NonZero, dest);
+ m_assembler.movmskpd_rr(src, scratchRegister);
+ failureCases.append(branchTest32(NonZero, scratchRegister, TrustedImm32(1)));
+ valueIsNonZero.link(this);
+ }
+#else
+ if (negZeroCheck)
+ failureCases.append(branchTest32(Zero, dest));
+#endif
+
+ // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+ convertInt32ToDouble(dest, fpTemp);
+ m_assembler.ucomisd_rr(fpTemp, src);
+ failureCases.append(m_assembler.jp());
+ failureCases.append(m_assembler.jne());
+ }
+
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.xorpd_rr(scratch, scratch);
+ return branchDouble(DoubleNotEqual, reg, scratch);
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.xorpd_rr(scratch, scratch);
+ return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+ }
+
+ void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.psllq_i8r(imm.m_value, reg);
+ }
+
+ void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.psrlq_i8r(imm.m_value, reg);
+ }
+
+ void orPacked(XMMRegisterID src, XMMRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.por_rr(src, dst);
+ }
+
+ void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movd_rr(src, dst);
+ }
+
+ void movePackedToInt32(XMMRegisterID src, RegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movd_rr(src, dst);
+ }
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
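+    //
+    // For example, a sketch of spilling a caller-saved register around a call
+    // ('target' is an illustrative register holding the callee address):
+    //
+    //     jit.push(X86Registers::eax); // spill
+    //     jit.call(target);
+    //     jit.pop(X86Registers::eax);  // reload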
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.pop_r(dest);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.push_r(src);
+ }
+
+ void push(Address address)
+ {
+ m_assembler.push_m(address.offset, address.base);
+ }
+
+ void push(TrustedImm32 imm)
+ {
+ m_assembler.push_i32(imm.m_value);
+ }
+
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+        // Note: on 64-bit the TrustedImm32 value is zero extended into the register; it
+        // may be useful to have a separate version that sign extends the value.
+ if (!imm.m_value)
+ m_assembler.xorl_rr(dest, dest);
+ else
+ m_assembler.movl_i32r(imm.m_value, dest);
+ }
+
+#if CPU(X86_64)
+ void move(RegisterID src, RegisterID dest)
+ {
+        // Note: on 64-bit this is a full register move; perhaps it would be
+ // useful to have separate move32 & movePtr, with move32 zero extending?
+ if (src != dest)
+ m_assembler.movq_rr(src, dest);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ m_assembler.movq_i64r(imm.asIntptr(), dest);
+ }
+
+ void move(TrustedImm64 imm, RegisterID dest)
+ {
+ m_assembler.movq_i64r(imm.m_value, dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ if (reg1 != reg2)
+ m_assembler.xchgq_rr(reg1, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movsxd_rr(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movl_rr(src, dest);
+ }
+#else
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.movl_rr(src, dest);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ m_assembler.movl_i32r(imm.asIntptr(), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ if (reg1 != reg2)
+ m_assembler.xchgl_rr(reg1, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+#endif
+
+
+ // Forwards / external control flow operations:
+ //
+ // This set of jump and conditional branch operations return a Jump
+    // object which may be linked at a later point, allowing forward jumps,
+ // or jumps that will require external linkage (after the code has been
+ // relocated).
+ //
+ // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively; for unsigned comparisons the names b, a, be, and ae are
+ // used (representing the names 'below' and 'above').
+ //
+    // Operands to the comparison are provided in the expected order, e.g.
+ // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+    // treated as a signed 32-bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
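+    //
+    // For example (a sketch; the resulting Jump must later be linked, here via
+    // link(), or through a LinkBuffer):
+    //
+    //     Jump slowCase = jit.branch32(GreaterThanOrEqual, X86Registers::eax, TrustedImm32(100));
+    //     // ... fast path ...
+    //     slowCase.link(&jit);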
+
+public:
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpl_rr(right, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testl_rr(left, left);
+ else
+ m_assembler.cmpl_ir(right.m_value, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ m_assembler.cmpl_mr(right.offset, right.base, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ m_assembler.cmpl_rm(right, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ m_assembler.cmpl_im(right.m_value, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ m_assembler.testl_rr(reg, mask);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ void test32(ResultCondition, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.testl_rr(reg, reg);
+ else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) { // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah.
+ if (mask.m_value == 0xff)
+ m_assembler.testb_rr(reg, reg);
+ else
+ m_assembler.testb_i8r(mask.m_value, reg);
+ } else
+ m_assembler.testl_i32r(mask.m_value, reg);
+ }
+
+ Jump branch(ResultCondition cond)
+ {
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ test32(cond, reg, mask);
+ return branch(cond);
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ generateTest32(address, mask);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
+ ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.offset, address.base);
+ else
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
+ ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump jump()
+ {
+ return Jump(m_assembler.jmp());
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.jmp_r(target);
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(Address address)
+ {
+ m_assembler.jmp_m(address.offset, address.base);
+ }
+
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
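+    //
+    // For example, a sketch of overflow-checked addition:
+    //
+    //     Jump overflowed = jit.branchAdd32(Overflow, X86Registers::edx, X86Registers::eax);
+    //     // Fall-through: eax holds the sum; 'overflowed' targets the slow path.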
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
+ {
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
+ {
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest)
+ return branchAdd32(cond, src2, dest);
+ move(src2, dest);
+ return branchAdd32(cond, src1, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(src, dest);
+ return branchAdd32(cond, imm, dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ mul32(src, dest);
+ if (cond != Overflow)
+ m_assembler.testl_rr(dest, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ mul32(src, dest);
+ if (cond != Overflow)
+ m_assembler.testl_rr(dest, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ mul32(imm, src, dest);
+ if (cond != Overflow)
+ m_assembler.testl_rr(dest, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest)
+ return branchMul32(cond, src2, dest);
+ move(src2, dest);
+ return branchMul32(cond, src1, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
+ {
+ sub32(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
+ {
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ // B := A - B is invalid.
+ ASSERT(src1 == dest || src2 != dest);
+
+ move(src1, dest);
+ return branchSub32(cond, src2, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+ {
+ move(src1, dest);
+ return branchSub32(cond, src2, dest);
+ }
+
+ Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+ {
+ neg32(srcDest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ or32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+
+ // Miscellaneous operations:
+
+ void breakpoint()
+ {
+ m_assembler.int3();
+ }
+
+ Call nearCall()
+ {
+ return Call(m_assembler.call(), Call::LinkableNear);
+ }
+
+ Call call(RegisterID target)
+ {
+ return Call(m_assembler.call(target), Call::None);
+ }
+
+ void call(Address address)
+ {
+ m_assembler.call_m(address.offset, address.base);
+ }
+
+ void ret()
+ {
+ m_assembler.ret();
+ }
+
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base);
+ set32(x86Condition(cond), dest);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmpl_rr(right, left);
+ set32(x86Condition(cond), dest);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testl_rr(left, left);
+ else
+ m_assembler.cmpl_ir(right.m_value, left);
+ set32(x86Condition(cond), dest);
+ }
+
+ // FIXME:
+ // The mask should be optional... perhaps the argument order should be
+ // dest-src, operations always have a dest? ... possibly not true, considering
+ // asm ops like test, or pseudo ops like pop().
+
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.offset, address.base);
+ else
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
+ set32(x86Condition(cond), dest);
+ }
+
+ void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ generateTest32(address, mask);
+ set32(x86Condition(cond), dest);
+ }
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ return static_cast<RelationalCondition>(cond ^ 1);
+ }
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ void memoryFence()
+ {
+ m_assembler.mfence();
+ }
+
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return X86Assembler::maxJumpReplacementSize();
+ }
+
+#if ENABLE(MASM_PROBE)
+ // Methods required by the MASM_PROBE mechanism as defined in
+ // AbstractMacroAssembler.h.
+ static void printCPURegisters(CPUState&, int indentation = 0);
+ static void printRegister(CPUState&, RegisterID);
+ static void printRegister(CPUState&, FPRegisterID);
+ void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
+#endif // ENABLE(MASM_PROBE)
+
+protected:
+ X86Assembler::Condition x86Condition(RelationalCondition cond)
+ {
+ return static_cast<X86Assembler::Condition>(cond);
+ }
+
+ X86Assembler::Condition x86Condition(ResultCondition cond)
+ {
+ return static_cast<X86Assembler::Condition>(cond);
+ }
+
+ void set32(X86Assembler::Condition cond, RegisterID dest)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only set the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (dest >= 4) {
+ m_assembler.xchgl_rr(dest, X86Registers::eax);
+ m_assembler.setCC_r(cond, X86Registers::eax);
+ m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
+ m_assembler.xchgl_rr(dest, X86Registers::eax);
+ return;
+ }
+#endif
+ m_assembler.setCC_r(cond, dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
+private:
+ // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
+ // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
+ friend class MacroAssemblerX86;
+
+ ALWAYS_INLINE void generateTest32(Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
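+        // When the mask confines the test to a single byte of the operand, narrow
+        // it to a one-byte testb at the corresponding offset; this encodes more
+        // compactly than a 32-bit testl with a four-byte immediate.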
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base);
+ else if (!(mask.m_value & ~0xff))
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
+ else if (!(mask.m_value & ~0xff00))
+ m_assembler.testb_im(mask.m_value >> 8, address.offset + 1, address.base);
+ else if (!(mask.m_value & ~0xff0000))
+ m_assembler.testb_im(mask.m_value >> 16, address.offset + 2, address.base);
+ else if (!(mask.m_value & ~0xff000000))
+ m_assembler.testb_im(mask.m_value >> 24, address.offset + 3, address.base);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ }
+
+#if CPU(X86)
+#if OS(MAC_OS_X)
+
+    // All X86 Macs are guaranteed to support at least SSE2.
+ static bool isSSE2Present()
+ {
+ return true;
+ }
+
+#else // OS(MAC_OS_X)
+
+ enum SSE2CheckState {
+ NotCheckedSSE2,
+ HasSSE2,
+ NoSSE2
+ };
+
+ static bool isSSE2Present()
+ {
+ if (s_sse2CheckState == NotCheckedSSE2) {
+ // Default the flags value to zero; if the compiler is
+ // not MSVC or GCC we will read this as SSE2 not present.
+ int flags = 0;
+#if COMPILER(MSVC)
+ _asm {
+ mov eax, 1 // cpuid function 1 gives us the standard feature set
+ cpuid;
+ mov flags, edx;
+ }
+#elif COMPILER(GCC_OR_CLANG)
+ asm (
+ "movl $0x1, %%eax;"
+ "pushl %%ebx;"
+ "cpuid;"
+ "popl %%ebx;"
+ "movl %%edx, %0;"
+ : "=g" (flags)
+ :
+ : "%eax", "%ecx", "%edx"
+ );
+#endif
+ static const int SSE2FeatureBit = 1 << 26;
+ s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
+ }
+ // Only check once.
+ ASSERT(s_sse2CheckState != NotCheckedSSE2);
+
+ return s_sse2CheckState == HasSSE2;
+ }
+
+ static SSE2CheckState s_sse2CheckState;
+
+#endif // OS(MAC_OS_X)
+#elif !defined(NDEBUG) // CPU(X86)
+
+ // On x86-64 we should never be checking for SSE2 in a non-debug build,
+    // but debug builds add this method to keep the asserts above happy.
+ static bool isSSE2Present()
+ {
+ return true;
+ }
+
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86Common_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
new file mode 100644
index 000000000..cee55f92e
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -0,0 +1,884 @@
+/*
+ * Copyright (C) 2008, 2012, 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerX86_64_h
+#define MacroAssemblerX86_64_h
+
+#if ENABLE(ASSEMBLER) && CPU(X86_64)
+
+#include "MacroAssemblerX86Common.h"
+
+#define REPTACH_OFFSET_CALL_R11 3
+
+inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; }
+
+namespace JSC {
+
+class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
+public:
+ static const Scale ScalePtr = TimesEight;
+
+ using MacroAssemblerX86Common::add32;
+ using MacroAssemblerX86Common::and32;
+ using MacroAssemblerX86Common::branchAdd32;
+ using MacroAssemblerX86Common::or32;
+ using MacroAssemblerX86Common::sub32;
+ using MacroAssemblerX86Common::load8;
+ using MacroAssemblerX86Common::load32;
+ using MacroAssemblerX86Common::store32;
+ using MacroAssemblerX86Common::store8;
+ using MacroAssemblerX86Common::call;
+ using MacroAssemblerX86Common::jump;
+ using MacroAssemblerX86Common::addDouble;
+ using MacroAssemblerX86Common::loadDouble;
+ using MacroAssemblerX86Common::convertInt32ToDouble;
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ add32(imm, Address(scratchRegister));
+ }
+
+ void and32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ and32(imm, Address(scratchRegister));
+ }
+
+ void add32(AbsoluteAddress address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ add32(Address(scratchRegister), dest);
+ }
+
+ void or32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ or32(imm, Address(scratchRegister));
+ }
+
+ void or32(RegisterID reg, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ or32(reg, Address(scratchRegister));
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ sub32(imm, Address(scratchRegister));
+ }
+
+ void load8(const void* address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address), dest);
+ load8(dest, dest);
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ if (dest == X86Registers::eax)
+ m_assembler.movl_mEAX(address);
+ else {
+ move(TrustedImmPtr(address), dest);
+ load32(dest, dest);
+ }
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ m_assembler.addsd_mr(0, scratchRegister, dest);
+ }
+
+ void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
+ {
+ move(imm, scratchRegister);
+ m_assembler.cvtsi2sd_rr(scratchRegister, dest);
+ }
+
+ void store32(TrustedImm32 imm, void* address)
+ {
+ move(TrustedImmPtr(address), scratchRegister);
+ store32(imm, scratchRegister);
+ }
+
+ void store32(RegisterID source, void* address)
+ {
+ if (source == X86Registers::eax)
+ m_assembler.movl_EAXm(address);
+ else {
+ move(TrustedImmPtr(address), scratchRegister);
+ store32(source, scratchRegister);
+ }
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ move(TrustedImmPtr(address), scratchRegister);
+ store8(imm, Address(scratchRegister));
+ }
+
+ void store8(RegisterID reg, void* address)
+ {
+ move(TrustedImmPtr(address), scratchRegister);
+ store8(reg, Address(scratchRegister));
+ }
+
+#if OS(WINDOWS)
+ Call callWithSlowPathReturnType()
+ {
+        // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
+        // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right:
+        // rdx should contain the first argument, r8 the second, and r9 the third.
+        // On return, rax contains a pointer to this stack space. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
+        // We then need to copy the 16-byte return value into rax and rdx, since the JIT expects the return value to be split between the two.
+        // It is assumed that the parameters have already been shifted when entering this method.
+        // Note: this implementation supports up to 3 parameters.
+
+        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
+        // On Win64 we need to copy the frame pointer to the stack manually, since MSVC may not maintain a frame pointer on 64-bit.
+        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp MAY be used as a frame pointer.
+ store64(X86Registers::ebp, Address(X86Registers::esp, -16));
+
+        // We also need to allocate the shadow space on the stack for the 4 parameter registers,
+        // 16 bytes for the return value, and 16 bytes for the saved frame pointer
+        // and the return address slot (not populated).
+ sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+
+ // The first parameter register should contain a pointer to the stack allocated space for the return value.
+ move(X86Registers::esp, X86Registers::ecx);
+ add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx);
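+
+        // Illustrative stack layout at this point, derived from the adjustments
+        // above (offsets are from the adjusted rsp; one slot = 8 bytes):
+        //   [rsp + 0x00 .. 0x1f]  shadow space for the 4 register parameters
+        //   [rsp + 0x20 .. 0x2f]  16-byte return value slot (rcx points here)
+        //   [rsp + 0x30]          copied caller frame pointer (rbp)
+        //   [rsp + 0x38]          return address slot (not populated)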
+
+ DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
+ Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
+
+ add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+
+ // Copy the return value into rax and rdx.
+ load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx);
+ load64(Address(X86Registers::eax), X86Registers::eax);
+
+        ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
+ return result;
+ }
+#endif
+
+ Call call()
+ {
+#if OS(WINDOWS)
+        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
+        // On Win64 we need to copy the frame pointer to the stack manually, since MSVC may not maintain a frame pointer on 64-bit.
+        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp MAY be used as a frame pointer.
+ store64(X86Registers::ebp, Address(X86Registers::esp, -16));
+
+        // On Windows we need to copy the arguments that don't fit in registers to the stack locations where the callee expects to find them.
+        // We don't know the number of arguments at this point, so arguments 5 and 6 are always copied.
+
+ // Copy argument 5
+ load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister);
+ store64(scratchRegister, Address(X86Registers::esp, -4 * static_cast<int32_t>(sizeof(int64_t))));
+
+ // Copy argument 6
+ load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister);
+ store64(scratchRegister, Address(X86Registers::esp, -3 * static_cast<int32_t>(sizeof(int64_t))));
+
+        // We also need to allocate the shadow space on the stack for the 4 parameter registers,
+        // 16 bytes for the saved frame pointer and the return address slot (not populated),
+        // and 16 bytes for two more parameters, since the call can have up to 6 parameters.
+ sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
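+
+        // Resulting (illustrative) layout, offsets from the adjusted rsp:
+        //   [rsp + 0x00 .. 0x1f] shadow space, [rsp + 0x20] argument 5,
+        //   [rsp + 0x28] argument 6, [rsp + 0x30] copied rbp,
+        //   [rsp + 0x38] return address slot (not populated).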
+#endif
+ DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
+ Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
+#if OS(WINDOWS)
+ add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+#endif
+        ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
+ return result;
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ jump(Address(scratchRegister));
+ }
+
+ Call tailRecursiveCall()
+ {
+ DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
+ Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
+        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
+ return Call::fromTailJump(newJump);
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
+ Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
+        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
+ return Call::fromTailJump(newJump);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
+ {
+ move(TrustedImmPtr(dest.m_ptr), scratchRegister);
+ add32(src, Address(scratchRegister));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ void add64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addq_rr(src, dest);
+ }
+
+ void add64(Address src, RegisterID dest)
+ {
+ m_assembler.addq_mr(src.offset, src.base, dest);
+ }
+
+ void add64(AbsoluteAddress src, RegisterID dest)
+ {
+ move(TrustedImmPtr(src.m_ptr), scratchRegister);
+ add64(Address(scratchRegister), dest);
+ }
+
+ void add64(TrustedImm32 imm, RegisterID srcDest)
+ {
+ if (imm.m_value == 1)
+ m_assembler.incq_r(srcDest);
+ else
+ m_assembler.addq_ir(imm.m_value, srcDest);
+ }
+
+ void add64(TrustedImm64 imm, RegisterID dest)
+ {
+ if (imm.m_value == 1)
+ m_assembler.incq_r(dest);
+ else {
+ move(imm, scratchRegister);
+ add64(scratchRegister, dest);
+ }
+ }
+
+ void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.leaq_mr(imm.m_value, src, dest);
+ }
+
+ void add64(TrustedImm32 imm, Address address)
+ {
+ if (imm.m_value == 1)
+ m_assembler.incq_m(address.offset, address.base);
+ else
+ m_assembler.addq_im(imm.m_value, address.offset, address.base);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ add64(imm, Address(scratchRegister));
+ }
+
+ void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
+ {
+ m_assembler.leaq_mr(imm.m_value, srcDest, srcDest);
+ }
+
+ void and64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andq_rr(src, dest);
+ }
+
+ void and64(TrustedImm32 imm, RegisterID srcDest)
+ {
+ m_assembler.andq_ir(imm.m_value, srcDest);
+ }
+
+ void and64(TrustedImmPtr imm, RegisterID srcDest)
+ {
+ move(imm, scratchRegister);
+ and64(scratchRegister, srcDest);
+ }
+
+ void lshift64(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.shlq_i8r(imm.m_value, dest);
+ }
+
+ void rshift64(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.sarq_i8r(imm.m_value, dest);
+ }
+
+ void urshift64(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.shrq_i8r(imm.m_value, dest);
+ }
+
+ void mul64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.imulq_rr(src, dest);
+ }
+
+ void neg64(RegisterID dest)
+ {
+ m_assembler.negq_r(dest);
+ }
+
+ void or64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orq_rr(src, dest);
+ }
+
+ void or64(TrustedImm64 imm, RegisterID dest)
+ {
+ move(imm, scratchRegister);
+ or64(scratchRegister, dest);
+ }
+
+ void or64(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.orq_ir(imm.m_value, dest);
+ }
+
+ void or64(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ move(op1, dest);
+ else if (op1 == dest)
+ or64(op2, dest);
+ else {
+ move(op2, dest);
+ or64(op1, dest);
+ }
+ }
+
+ void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ or64(imm, dest);
+ }
+
+ void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
+ {
+ m_assembler.rorq_i8r(imm.m_value, srcDst);
+ }
+
+ void sub64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subq_rr(src, dest);
+ }
+
+ void sub64(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == 1)
+ m_assembler.decq_r(dest);
+ else
+ m_assembler.subq_ir(imm.m_value, dest);
+ }
+
+ void sub64(TrustedImm64 imm, RegisterID dest)
+ {
+ if (imm.m_value == 1)
+ m_assembler.decq_r(dest);
+ else {
+ move(imm, scratchRegister);
+ sub64(scratchRegister, dest);
+ }
+ }
+
+ void xor64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorq_rr(src, dest);
+ }
+
+ void xor64(RegisterID src, Address dest)
+ {
+ m_assembler.xorq_rm(src, dest.offset, dest.base);
+ }
+
+ void xor64(TrustedImm32 imm, RegisterID srcDest)
+ {
+ m_assembler.xorq_ir(imm.m_value, srcDest);
+ }
+
+ void load64(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movq_mr(address.offset, address.base, dest);
+ }
+
+ void load64(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load64(const void* address, RegisterID dest)
+ {
+ if (dest == X86Registers::eax)
+ m_assembler.movq_mEAX(address);
+ else {
+ move(TrustedImmPtr(address), dest);
+ load64(dest, dest);
+ }
+ }
+
+ DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movq_mr_disp32(address.offset, address.base, dest);
+ return DataLabel32(this);
+ }
+
+ DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movq_mr_disp8(address.offset, address.base, dest);
+ return DataLabelCompact(this);
+ }
+
+ void store64(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.movq_rm(src, address.offset, address.base);
+ }
+
+ void store64(RegisterID src, BaseIndex address)
+ {
+ m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store64(RegisterID src, void* address)
+ {
+ if (src == X86Registers::eax)
+ m_assembler.movq_EAXm(address);
+ else {
+ move(TrustedImmPtr(address), scratchRegister);
+ store64(src, scratchRegister);
+ }
+ }
+
+ void store64(TrustedImm64 imm, ImplicitAddress address)
+ {
+ if (CAN_SIGN_EXTEND_32_64(imm.m_value))
+ m_assembler.movq_i32m(static_cast<int>(imm.m_value), address.offset, address.base);
+ else {
+ move(imm, scratchRegister);
+ store64(scratchRegister, address);
+ }
+ }
+
+ void store64(TrustedImm64 imm, BaseIndex address)
+ {
+ move(imm, scratchRegister);
+ m_assembler.movq_rm(scratchRegister, address.offset, address.base, address.index, address.scale);
+ }
+
+ DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ padBeforePatch();
+ m_assembler.movq_rm_disp32(src, address.offset, address.base);
+ return DataLabel32(this);
+ }
+
+ void move64ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.movq_rr(src, dest);
+ }
+
+ void moveDoubleTo64(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.movq_rr(src, dest);
+ }
+
+ void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testq_rr(left, left);
+ else
+ m_assembler.cmpq_ir(right.m_value, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
+ void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmpq_rr(right, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
+ Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpq_rr(right, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
+ m_assembler.testq_rr(left, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+ move(right, scratchRegister);
+ return branch64(cond, left, scratchRegister);
+ }
+
+ Jump branch64(RelationalCondition cond, RegisterID left, Address right)
+ {
+ m_assembler.cmpq_mr(right.offset, right.base, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ move(TrustedImmPtr(left.m_ptr), scratchRegister);
+ return branch64(cond, Address(scratchRegister), right);
+ }
+
+ Jump branch64(RelationalCondition cond, Address left, RegisterID right)
+ {
+ m_assembler.cmpq_rm(right, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
+ {
+ move(right, scratchRegister);
+ return branch64(cond, left, scratchRegister);
+ }
+
+ Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right)
+ {
+ m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right)
+ {
+ move(right, scratchRegister);
+ return branchPtr(cond, left, scratchRegister);
+ }
+
+ Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ m_assembler.testq_rr(reg, mask);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // If the mask fits in the low seven bits, the test can be performed with a byte-sized testb.
+ if (mask.m_value == -1)
+ m_assembler.testq_rr(reg, reg);
+ else if ((mask.m_value & ~0x7f) == 0)
+ m_assembler.testb_i8r(mask.m_value, reg);
+ else
+ m_assembler.testq_i32r(mask.m_value, reg);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.testq_rr(reg, reg);
+ else if ((mask.m_value & ~0x7f) == 0)
+ m_assembler.testb_i8r(mask.m_value, reg);
+ else
+ m_assembler.testq_i32r(mask.m_value, reg);
+ set32(x86Condition(cond), dest);
+ }
+
+ void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+ {
+ m_assembler.testq_rr(reg, mask);
+ set32(x86Condition(cond), dest);
+ }
+
+ Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load64(address.m_ptr, scratchRegister);
+ return branchTest64(cond, scratchRegister, mask);
+ }
+
+ Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpq_im(0, address.offset, address.base);
+ else
+ m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest64(ResultCondition cond, Address address, RegisterID reg)
+ {
+ m_assembler.testq_rm(reg, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ add64(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ add64(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ mul64(src, dest);
+ if (cond != Overflow)
+ m_assembler.testq_rr(dest, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ sub64(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ sub64(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+ {
+ move(src1, dest);
+ return branchSub64(cond, src2, dest);
+ }
+
+ Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
+ {
+ neg64(srcDest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ void abortWithReason(AbortReason reason)
+ {
+ move(TrustedImm32(reason), X86Registers::r11);
+ breakpoint();
+ }
+
+ void abortWithReason(AbortReason reason, intptr_t misc)
+ {
+ move(TrustedImm64(misc), X86Registers::r10);
+ abortWithReason(reason);
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
+ m_assembler.movq_mr(address.offset, address.base, dest);
+ return result;
+ }
+
+ DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movq_i64r(initialValue.asIntptr(), dest);
+ return DataLabelPtr(this);
+ }
+
+ DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movq_i64r(initialValue.m_value, dest);
+ return DataLabelPtr(this);
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, scratchRegister);
+ return branch64(cond, left, scratchRegister);
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, scratchRegister);
+ return branch64(cond, left, scratchRegister);
+ }
+
+ Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ padBeforePatch();
+ m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister);
+ dataLabel = DataLabel32(this);
+ return branch32(cond, left, scratchRegister);
+ }
+
+ DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
+ store64(scratchRegister, address);
+ return label;
+ }
+
+ PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
+ {
+ return PatchableJump(branch64(cond, reg, imm));
+ }
+
+ PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ return PatchableJump(branch64(cond, left, right));
+ }
+
+ using MacroAssemblerX86Common::branch8;
+ Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister);
+ return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister), right);
+ }
+
+ using MacroAssemblerX86Common::branchTest8;
+ Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
+ MacroAssemblerX86Common::move(addr, scratchRegister);
+ return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister), mask);
+ }
+
+ void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.cvtsi2sdq_rr(src, dest);
+ }
+
+ static bool supportsFloatingPoint() { return true; }
+ static bool supportsFloatingPointTruncate() { return true; }
+ static bool supportsFloatingPointSqrt() { return true; }
+ static bool supportsFloatingPointAbs() { return true; }
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+        return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation()));
+ }
+
+ static bool haveScratchRegisterForBlinding() { return true; }
+ static RegisterID scratchRegisterForBlinding() { return scratchRegister; }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
+ static bool canJumpReplacePatchableBranch32WithPatch() { return true; }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
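+        // moveWithPatch emits "movq $imm64, %r11": one REX prefix byte, one
+        // 0xb8+reg opcode byte, and an 8-byte immediate, 10 bytes in total,
+        // matching the constants below.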
+ const int rexBytes = 1;
+ const int opcodeBytes = 1;
+ const int immediateBytes = 8;
+ const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
+ ASSERT(totalBytes >= maxJumpReplacementSize());
+ return label.labelAtOffset(-totalBytes);
+ }
+
+ static CodeLocationLabel startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32 label)
+ {
+ const int rexBytes = 1;
+ const int opcodeBytes = 1;
+ const int immediateBytes = 4;
+ const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
+ ASSERT(totalBytes >= maxJumpReplacementSize());
+ return label.labelAtOffset(-totalBytes);
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
+ {
+ return startOfBranchPtrWithPatchOnRegister(label);
+ }
+
+ static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
+ {
+ return startOfBranch32WithPatchOnRegister(label);
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
+ {
+ X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
+ }
+
+ static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address, int32_t initialValue)
+ {
+ X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, scratchRegister);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+ {
+ X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ if (!call.isFlagSet(Call::Near))
+            X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPATCH_OFFSET_CALL_R11), function.value());
+ else
+ X86Assembler::linkCall(code, call.m_label, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86_64_h
diff --git a/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h b/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h
new file mode 100644
index 000000000..39ed6fac5
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MaxFrameExtentForSlowPathCall_h
+#define MaxFrameExtentForSlowPathCall_h
+
+#include "JSStack.h"
+#include "Register.h"
+#include "StackAlignment.h"
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+// The maxFrameExtentForSlowPathCall is the max amount of stack space (in bytes)
+// that can be used for outgoing args when calling a slow path C function
+// from JS code.
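+//
+// An illustrative use when generating a slow-path call from JIT code (a sketch
+// only, using MacroAssembler names as they appear elsewhere in JSC): drop the
+// stack by the full extent up front so any supported convention fits.
+//
+//     if (maxFrameExtentForSlowPathCall)
+//         jit.addPtr(MacroAssembler::TrustedImm32(-maxFrameExtentForSlowPathCall),
+//             MacroAssembler::stackPointerRegister);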
+
+#if !ENABLE(JIT)
+static const size_t maxFrameExtentForSlowPathCall = 0;
+
+#elif CPU(X86_64) && OS(WINDOWS)
+// 4 args in registers, but stack space needs to be allocated for all args.
+static const size_t maxFrameExtentForSlowPathCall = 64;
+
+#elif CPU(X86_64)
+// All args in registers.
+static const size_t maxFrameExtentForSlowPathCall = 0;
+
+#elif CPU(X86)
+// 7 args on stack (28 bytes).
+static const size_t maxFrameExtentForSlowPathCall = 40;
+
+#elif CPU(ARM64)
+// All args in registers.
+static const size_t maxFrameExtentForSlowPathCall = 0;
+
+#elif CPU(ARM)
+// First four args in registers, remaining 4 args on stack.
+static const size_t maxFrameExtentForSlowPathCall = 24;
+
+#elif CPU(SH4)
+// First four args in registers, remaining 4 args on stack.
+static const size_t maxFrameExtentForSlowPathCall = 24;
+
+#elif CPU(MIPS)
+// Though args are passed in registers, stack space needs to be allocated for all args.
+static const size_t maxFrameExtentForSlowPathCall = 40;
+
+#else
+#error "Unsupported CPU: need value for maxFrameExtentForSlowPathCall"
+
+#endif
+
+COMPILE_ASSERT(!(maxFrameExtentForSlowPathCall % sizeof(Register)), extent_must_be_in_multiples_of_registers);
+
+#if ENABLE(JIT)
+// Make sure that cfr - maxFrameExtentForSlowPathCall bytes will leave the stack pointer properly aligned.
+COMPILE_ASSERT((maxFrameExtentForSlowPathCall % 16) == 16 - sizeof(CallerFrameAndPC), extent_must_align_stack_from_callframe_pointer);
+#endif
+
+static const size_t maxFrameExtentForSlowPathCallInRegisters = maxFrameExtentForSlowPathCall / sizeof(Register);
+
+} // namespace JSC
+
+#endif // MaxFrameExtentForSlowPathCall_h
+
diff --git a/Source/JavaScriptCore/assembler/RepatchBuffer.h b/Source/JavaScriptCore/assembler/RepatchBuffer.h
new file mode 100644
index 000000000..241ce14c7
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/RepatchBuffer.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RepatchBuffer_h
+#define RepatchBuffer_h
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "MacroAssembler.h"
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+// RepatchBuffer:
+//
+// This class is used to modify code after code generation has been completed,
+// and after the code has potentially already been executed. This mechanism is
+// used to apply optimizations to the code.
+//
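+// A typical (illustrative) use, with placeholder location variables:
+//
+//     RepatchBuffer repatchBuffer(codeBlock);
+//     repatchBuffer.relink(callLocation, newTargetLabel); // redirect a call
+//     repatchBuffer.repatch(dataLabel, newPointer); // rewrite an inline pointer
+//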
+class RepatchBuffer {
+ typedef MacroAssemblerCodePtr CodePtr;
+
+public:
+ RepatchBuffer(CodeBlock* codeBlock)
+ : m_codeBlock(codeBlock)
+ {
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ RefPtr<JITCode> code = codeBlock->jitCode();
+ m_start = code->start();
+ m_size = code->size();
+
+ ExecutableAllocator::makeWritable(m_start, m_size);
+#endif
+ }
+
+ ~RepatchBuffer()
+ {
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ ExecutableAllocator::makeExecutable(m_start, m_size);
+#endif
+ }
+
+ CodeBlock* codeBlock() const { return m_codeBlock; }
+
+ void relink(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchJump(jump, destination);
+ }
+
+ void relink(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchCall(call, destination);
+ }
+
+ void relink(CodeLocationCall call, FunctionPtr destination)
+ {
+ MacroAssembler::repatchCall(call, destination);
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodePtr destination)
+ {
+ MacroAssembler::repatchNearCall(nearCall, CodeLocationLabel(destination));
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchNearCall(nearCall, destination);
+ }
+
+ void repatch(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ MacroAssembler::repatchInt32(dataLabel32, value);
+ }
+
+ void repatch(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+ {
+ MacroAssembler::repatchCompact(dataLabelCompact, value);
+ }
+
+ void repatch(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ MacroAssembler::repatchPointer(dataLabelPtr, value);
+ }
+
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+ void relinkCallerToFunction(ReturnAddressPtr returnAddress, FunctionPtr function)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), function);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationNearCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkNearCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+ void replaceWithLoad(CodeLocationConvertibleLoad label)
+ {
+ MacroAssembler::replaceWithLoad(label);
+ }
+
+ void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
+ {
+ MacroAssembler::replaceWithAddressComputation(label);
+ }
+
+ void setLoadInstructionIsActive(CodeLocationConvertibleLoad label, bool isActive)
+ {
+ if (isActive)
+ replaceWithLoad(label);
+ else
+ replaceWithAddressComputation(label);
+ }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ return MacroAssembler::startOfBranchPtrWithPatchOnRegister(label);
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
+ {
+ return MacroAssembler::startOfPatchableBranchPtrWithPatchOnAddress(label);
+ }
+
+ static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
+ {
+ return MacroAssembler::startOfPatchableBranch32WithPatchOnAddress(label);
+ }
+
+ void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ MacroAssembler::replaceWithJump(instructionStart, destination);
+ }
+
+ // This is a *bit* of a silly API, since we currently always also repatch the
+ // immediate after calling this. But I'm fine with that, since this just feels
+ // less yucky.
+ void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, MacroAssembler::RegisterID reg, void* value)
+ {
+ MacroAssembler::revertJumpReplacementToBranchPtrWithPatch(instructionStart, reg, value);
+ }
+
+ void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, MacroAssembler::Address address, void* value)
+ {
+ MacroAssembler::revertJumpReplacementToPatchableBranchPtrWithPatch(instructionStart, address, value);
+ }
+
+ void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, MacroAssembler::Address address, int32_t value)
+ {
+ MacroAssembler::revertJumpReplacementToPatchableBranch32WithPatch(instructionStart, address, value);
+ }
+
+private:
+ CodeBlock* m_codeBlock;
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ void* m_start;
+ size_t m_size;
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // RepatchBuffer_h
diff --git a/Source/JavaScriptCore/assembler/SH4Assembler.h b/Source/JavaScriptCore/assembler/SH4Assembler.h
new file mode 100644
index 000000000..d326279c5
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/SH4Assembler.h
@@ -0,0 +1,2225 @@
+/*
+ * Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
+ * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SH4Assembler_h
+#define SH4Assembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(SH4)
+
+#include "AssemblerBuffer.h"
+#include "AssemblerBufferWithConstantPool.h"
+#include "JITCompilationEffort.h"
+#include <limits.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <wtf/Assertions.h>
+#include <wtf/DataLog.h>
+#include <wtf/Vector.h>
+
+#ifndef NDEBUG
+#define SH4_ASSEMBLER_TRACING
+#endif
+
+namespace JSC {
+typedef uint16_t SH4Word;
+
+enum {
+ INVALID_OPCODE = 0xffff,
+ ADD_OPCODE = 0x300c,
+ ADDIMM_OPCODE = 0x7000,
+ ADDC_OPCODE = 0x300e,
+ ADDV_OPCODE = 0x300f,
+ AND_OPCODE = 0x2009,
+ ANDIMM_OPCODE = 0xc900,
+ DIV0_OPCODE = 0x2007,
+ DIV1_OPCODE = 0x3004,
+ BF_OPCODE = 0x8b00,
+ BFS_OPCODE = 0x8f00,
+ BRA_OPCODE = 0xa000,
+ BRAF_OPCODE = 0x0023,
+ NOP_OPCODE = 0x0009,
+ BSR_OPCODE = 0xb000,
+ RTS_OPCODE = 0x000b,
+ BT_OPCODE = 0x8900,
+ BTS_OPCODE = 0x8d00,
+ BSRF_OPCODE = 0x0003,
+ BRK_OPCODE = 0x003b,
+ FTRC_OPCODE = 0xf03d,
+ CMPEQ_OPCODE = 0x3000,
+ CMPEQIMM_OPCODE = 0x8800,
+ CMPGE_OPCODE = 0x3003,
+ CMPGT_OPCODE = 0x3007,
+ CMPHI_OPCODE = 0x3006,
+ CMPHS_OPCODE = 0x3002,
+ CMPPL_OPCODE = 0x4015,
+ CMPPZ_OPCODE = 0x4011,
+ CMPSTR_OPCODE = 0x200c,
+ DT_OPCODE = 0x4010,
+ FCMPEQ_OPCODE = 0xf004,
+ FCMPGT_OPCODE = 0xf005,
+ FMOV_OPCODE = 0xf00c,
+ FADD_OPCODE = 0xf000,
+ FMUL_OPCODE = 0xf002,
+ FSUB_OPCODE = 0xf001,
+ FDIV_OPCODE = 0xf003,
+ FNEG_OPCODE = 0xf04d,
+ JMP_OPCODE = 0x402b,
+ JSR_OPCODE = 0x400b,
+ LDSPR_OPCODE = 0x402a,
+ LDSLPR_OPCODE = 0x4026,
+ MOV_OPCODE = 0x6003,
+ MOVIMM_OPCODE = 0xe000,
+ MOVB_WRITE_RN_OPCODE = 0x2000,
+ MOVB_WRITE_RNDEC_OPCODE = 0x2004,
+ MOVB_WRITE_R0RN_OPCODE = 0x0004,
+ MOVB_WRITE_OFFGBR_OPCODE = 0xc000,
+ MOVB_WRITE_OFFRN_OPCODE = 0x8000,
+ MOVB_READ_RM_OPCODE = 0x6000,
+ MOVB_READ_RMINC_OPCODE = 0x6004,
+ MOVB_READ_R0RM_OPCODE = 0x000c,
+ MOVB_READ_OFFGBR_OPCODE = 0xc400,
+ MOVB_READ_OFFRM_OPCODE = 0x8400,
+ MOVL_WRITE_RN_OPCODE = 0x2002,
+ MOVL_WRITE_RNDEC_OPCODE = 0x2006,
+ MOVL_WRITE_R0RN_OPCODE = 0x0006,
+ MOVL_WRITE_OFFGBR_OPCODE = 0xc200,
+ MOVL_WRITE_OFFRN_OPCODE = 0x1000,
+ MOVL_READ_RM_OPCODE = 0x6002,
+ MOVL_READ_RMINC_OPCODE = 0x6006,
+ MOVL_READ_R0RM_OPCODE = 0x000e,
+ MOVL_READ_OFFGBR_OPCODE = 0xc600,
+ MOVL_READ_OFFPC_OPCODE = 0xd000,
+ MOVL_READ_OFFRM_OPCODE = 0x5000,
+ MOVW_WRITE_RN_OPCODE = 0x2001,
+ MOVW_WRITE_R0RN_OPCODE = 0x0005,
+ MOVW_READ_RM_OPCODE = 0x6001,
+ MOVW_READ_RMINC_OPCODE = 0x6005,
+ MOVW_READ_R0RM_OPCODE = 0x000d,
+ MOVW_READ_OFFRM_OPCODE = 0x8500,
+ MOVW_READ_OFFPC_OPCODE = 0x9000,
+ MOVA_READ_OFFPC_OPCODE = 0xc700,
+ MOVT_OPCODE = 0x0029,
+ MULL_OPCODE = 0x0007,
+ DMULL_L_OPCODE = 0x3005,
+ STSMACL_OPCODE = 0x001a,
+ STSMACH_OPCODE = 0x000a,
+ DMULSL_OPCODE = 0x300d,
+ NEG_OPCODE = 0x600b,
+ NEGC_OPCODE = 0x600a,
+ NOT_OPCODE = 0x6007,
+ OR_OPCODE = 0x200b,
+ ORIMM_OPCODE = 0xcb00,
+ ORBIMM_OPCODE = 0xcf00,
+ SETS_OPCODE = 0x0058,
+ SETT_OPCODE = 0x0018,
+ SHAD_OPCODE = 0x400c,
+ SHAL_OPCODE = 0x4020,
+ SHAR_OPCODE = 0x4021,
+ SHLD_OPCODE = 0x400d,
+ SHLL_OPCODE = 0x4000,
+ SHLL2_OPCODE = 0x4008,
+ SHLL8_OPCODE = 0x4018,
+ SHLL16_OPCODE = 0x4028,
+ SHLR_OPCODE = 0x4001,
+ SHLR2_OPCODE = 0x4009,
+ SHLR8_OPCODE = 0x4019,
+ SHLR16_OPCODE = 0x4029,
+ STSPR_OPCODE = 0x002a,
+ STSLPR_OPCODE = 0x4022,
+ FLOAT_OPCODE = 0xf02d,
+ SUB_OPCODE = 0x3008,
+ SUBC_OPCODE = 0x300a,
+ SUBV_OPCODE = 0x300b,
+ TST_OPCODE = 0x2008,
+ TSTIMM_OPCODE = 0xc800,
+ TSTB_OPCODE = 0xcc00,
+ EXTUB_OPCODE = 0x600c,
+ EXTUW_OPCODE = 0x600d,
+ XOR_OPCODE = 0x200a,
+ XORIMM_OPCODE = 0xca00,
+ XORB_OPCODE = 0xce00,
+ FMOVS_READ_RM_INC_OPCODE = 0xf009,
+ FMOVS_READ_RM_OPCODE = 0xf008,
+ FMOVS_READ_R0RM_OPCODE = 0xf006,
+ FMOVS_WRITE_RN_OPCODE = 0xf00a,
+ FMOVS_WRITE_RN_DEC_OPCODE = 0xf00b,
+ FMOVS_WRITE_R0RN_OPCODE = 0xf007,
+ FCNVDS_DRM_FPUL_OPCODE = 0xf0bd,
+ FCNVSD_FPUL_DRN_OPCODE = 0xf0ad,
+ LDS_RM_FPUL_OPCODE = 0x405a,
+ FLDS_FRM_FPUL_OPCODE = 0xf01d,
+ STS_FPUL_RN_OPCODE = 0x005a,
+    FSTS_FPUL_FRN_OPCODE = 0xf00d,
+ LDSFPSCR_OPCODE = 0x406a,
+ STSFPSCR_OPCODE = 0x006a,
+ LDSRMFPUL_OPCODE = 0x405a,
+ FSTSFPULFRN_OPCODE = 0xf00d,
+ FABS_OPCODE = 0xf05d,
+ FSQRT_OPCODE = 0xf06d,
+ FSCHG_OPCODE = 0xf3fd,
+    CLRT_OPCODE = 0x0008,
+ SYNCO_OPCODE = 0x00ab,
+};
+
+namespace SH4Registers {
+typedef enum {
+ r0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14, fp = r14,
+ r15, sp = r15,
+ pc,
+ pr,
+} RegisterID;
+
+typedef enum {
+ fr0, dr0 = fr0,
+ fr1,
+ fr2, dr2 = fr2,
+ fr3,
+ fr4, dr4 = fr4,
+ fr5,
+ fr6, dr6 = fr6,
+ fr7,
+ fr8, dr8 = fr8,
+ fr9,
+ fr10, dr10 = fr10,
+ fr11,
+ fr12, dr12 = fr12,
+ fr13,
+ fr14, dr14 = fr14,
+ fr15,
+} FPRegisterID;
+}
+
+inline uint16_t getOpcodeGroup1(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0xf) << 8) | ((rn & 0xf) << 4));
+}
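+
+// For example, getOpcodeGroup1(ADD_OPCODE, 2, 1) packs the register indices
+// into the middle nibbles of the 16-bit instruction word:
+// 0x300c | (2 << 8) | (1 << 4) == 0x321c.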
+
+inline uint16_t getOpcodeGroup2(uint16_t opc, int rm)
+{
+ return (opc | ((rm & 0xf) << 8));
+}
+
+inline uint16_t getOpcodeGroup3(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0xf) << 8) | (rn & 0xff));
+}
+
+inline uint16_t getOpcodeGroup4(uint16_t opc, int rm, int rn, int offset)
+{
+ return (opc | ((rm & 0xf) << 8) | ((rn & 0xf) << 4) | (offset & 0xf));
+}
+
+inline uint16_t getOpcodeGroup5(uint16_t opc, int rm)
+{
+ return (opc | (rm & 0xff));
+}
+
+inline uint16_t getOpcodeGroup6(uint16_t opc, int rm)
+{
+ return (opc | (rm & 0xfff));
+}
+
+inline uint16_t getOpcodeGroup7(uint16_t opc, int rm)
+{
+ return (opc | ((rm & 0x7) << 9));
+}
+
+inline uint16_t getOpcodeGroup8(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0x7) << 9) | ((rn & 0x7) << 5));
+}
+
+inline uint16_t getOpcodeGroup9(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0xf) << 8) | ((rn & 0x7) << 5));
+}
+
+inline uint16_t getOpcodeGroup10(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0x7) << 9) | ((rn & 0xf) << 4));
+}
+
+inline uint16_t getOpcodeGroup11(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0xf) << 4) | (rn & 0xf));
+}
+
+inline uint16_t getRn(uint16_t x)
+{
+ return ((x & 0xf00) >> 8);
+}
+
+inline uint16_t getRm(uint16_t x)
+{
+ return ((x & 0xf0) >> 4);
+}
+
+inline uint16_t getDisp(uint16_t x)
+{
+ return (x & 0xf);
+}
+
+inline uint16_t getImm8(uint16_t x)
+{
+ return (x & 0xff);
+}
+
+inline uint16_t getImm12(uint16_t x)
+{
+ return (x & 0xfff);
+}
+
+inline uint16_t getDRn(uint16_t x)
+{
+ return ((x & 0xe00) >> 9);
+}
+
+inline uint16_t getDRm(uint16_t x)
+{
+ return ((x & 0xe0) >> 5);
+}
+
+class SH4Assembler {
+public:
+ typedef SH4Registers::RegisterID RegisterID;
+ typedef SH4Registers::FPRegisterID FPRegisterID;
+ typedef AssemblerBufferWithConstantPool<512, 4, 2, SH4Assembler> SH4Buffer;
+ static const RegisterID scratchReg1 = SH4Registers::r3;
+ static const RegisterID scratchReg2 = SH4Registers::r11;
+ static const uint32_t maxInstructionSize = 16;
+
+ static RegisterID firstRegister() { return SH4Registers::r0; }
+ static RegisterID lastRegister() { return SH4Registers::r15; }
+
+ static FPRegisterID firstFPRegister() { return SH4Registers::dr0; }
+ static FPRegisterID lastFPRegister() { return SH4Registers::dr14; }
+
+ enum {
+ padForAlign8 = 0x00,
+ padForAlign16 = 0x0009,
+ padForAlign32 = 0x00090009,
+ };
+
+ enum JumpType {
+ JumpFar,
+ JumpNear
+ };
+
+ SH4Assembler()
+ : m_claimscratchReg(0x0)
+ , m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
+ {
+ }
+
+ SH4Buffer& buffer() { return m_buffer; }
+
+ // SH4 condition codes
+ typedef enum {
+        EQ = 0x0, // Equal
+        NE = 0x1, // Not Equal
+        HS = 0x2, // Unsigned Greater Than or Equal
+        HI = 0x3, // Unsigned Greater Than
+        LS = 0x4, // Unsigned Lower or Same
+        LI = 0x5, // Unsigned Lower
+        GE = 0x6, // Greater or Equal
+        LT = 0x7, // Less Than
+        GT = 0x8, // Greater Than
+        LE = 0x9, // Less or Equal
+        OF = 0xa, // OverFlow
+        SI = 0xb, // Signed
+        NS = 0xc, // Not Signed
+        EQU = 0xd, // Equal or unordered (NaN)
+        NEU = 0xe, // Not Equal or unordered (NaN)
+        GTU = 0xf, // Greater Than or unordered (NaN)
+        GEU = 0x10, // Greater Than or Equal or unordered (NaN)
+        LTU = 0x11, // Less Than or unordered (NaN)
+        LEU = 0x12, // Less Than or Equal or unordered (NaN)
+ } Condition;
+
+ // Opaque label types
+public:
+ bool isImmediate(int constant)
+ {
+ return ((constant <= 127) && (constant >= -128));
+ }
+
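+    // m_claimscratchReg is a two-bit mask: bit 0 marks scratchReg1 (r3) as
+    // claimed and bit 1 marks scratchReg2 (r11); claiming when both are
+    // already taken asserts.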
+ RegisterID claimScratch()
+ {
+ ASSERT((m_claimscratchReg != 0x3));
+
+ if (!(m_claimscratchReg & 0x1)) {
+ m_claimscratchReg = (m_claimscratchReg | 0x1);
+ return scratchReg1;
+ }
+
+ m_claimscratchReg = (m_claimscratchReg | 0x2);
+ return scratchReg2;
+ }
+
+ void releaseScratch(RegisterID scratchR)
+ {
+ if (scratchR == scratchReg1)
+ m_claimscratchReg = (m_claimscratchReg & 0x2);
+ else
+ m_claimscratchReg = (m_claimscratchReg & 0x1);
+ }
+
+ // Stack operations
+
+ void pushReg(RegisterID reg)
+ {
+ if (reg == SH4Registers::pr) {
+ oneShortOp(getOpcodeGroup2(STSLPR_OPCODE, SH4Registers::sp));
+ return;
+ }
+
+ oneShortOp(getOpcodeGroup1(MOVL_WRITE_RNDEC_OPCODE, SH4Registers::sp, reg));
+ }
+
+ void popReg(RegisterID reg)
+ {
+ if (reg == SH4Registers::pr) {
+ oneShortOp(getOpcodeGroup2(LDSLPR_OPCODE, SH4Registers::sp));
+ return;
+ }
+
+ oneShortOp(getOpcodeGroup1(MOVL_READ_RMINC_OPCODE, reg, SH4Registers::sp));
+ }
+
+ void movt(RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup2(MOVT_OPCODE, dst);
+ oneShortOp(opc);
+ }
+
+ // Arithmetic operations
+
+ void addlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(ADD_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void addclRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(ADDC_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void addvlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(ADDV_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void addlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 127) && (imm8 >= -128));
+
+ uint16_t opc = getOpcodeGroup3(ADDIMM_OPCODE, dst, imm8);
+ oneShortOp(opc);
+ }
+
+ void andlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(AND_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void andlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 255) && (imm8 >= 0));
+ ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup5(ANDIMM_OPCODE, imm8);
+ oneShortOp(opc);
+ }
+
+ void div1lRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(DIV1_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void div0lRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(DIV0_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void notlReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(NOT_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void orlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(OR_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void orlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 255) && (imm8 >= 0));
+ ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup5(ORIMM_OPCODE, imm8);
+ oneShortOp(opc);
+ }
+
+ void sublRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(SUB_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void subvlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(SUBV_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void xorlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(XOR_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void xorlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 255) && (imm8 >= 0));
+ ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup5(XORIMM_OPCODE, imm8);
+ oneShortOp(opc);
+ }
+
+ void shllImm8r(int imm, RegisterID dst)
+ {
+ switch (imm) {
+ case 1:
+ oneShortOp(getOpcodeGroup2(SHLL_OPCODE, dst));
+ break;
+ case 2:
+ oneShortOp(getOpcodeGroup2(SHLL2_OPCODE, dst));
+ break;
+ case 8:
+ oneShortOp(getOpcodeGroup2(SHLL8_OPCODE, dst));
+ break;
+ case 16:
+ oneShortOp(getOpcodeGroup2(SHLL16_OPCODE, dst));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void neg(RegisterID dst, RegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup1(NEG_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void shldRegReg(RegisterID dst, RegisterID rShift)
+ {
+ oneShortOp(getOpcodeGroup1(SHLD_OPCODE, dst, rShift));
+ }
+
+ void shadRegReg(RegisterID dst, RegisterID rShift)
+ {
+ oneShortOp(getOpcodeGroup1(SHAD_OPCODE, dst, rShift));
+ }
+
+ void shlrImm8r(int imm, RegisterID dst)
+ {
+ switch (imm) {
+ case 1:
+ oneShortOp(getOpcodeGroup2(SHLR_OPCODE, dst));
+ break;
+ case 2:
+ oneShortOp(getOpcodeGroup2(SHLR2_OPCODE, dst));
+ break;
+ case 8:
+ oneShortOp(getOpcodeGroup2(SHLR8_OPCODE, dst));
+ break;
+ case 16:
+ oneShortOp(getOpcodeGroup2(SHLR16_OPCODE, dst));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void shalImm8r(int imm, RegisterID dst)
+ {
+ switch (imm) {
+ case 1:
+ oneShortOp(getOpcodeGroup2(SHAL_OPCODE, dst));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void sharImm8r(int imm, RegisterID dst)
+ {
+ switch (imm) {
+ case 1:
+ oneShortOp(getOpcodeGroup2(SHAR_OPCODE, dst));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void imullRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MULL_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void dmullRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(DMULL_L_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void dmulslRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(DMULSL_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void stsmacl(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(STSMACL_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void stsmach(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(STSMACH_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ // Comparisons
+
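+    // Only EQ, GE, GT, HI and HS have direct SH4 compare encodings. NE emits
+    // the same CMP/EQ as EQ (the following branch tests the T bit inverted),
+    // and LI, LS, LE and LT swap the operands of the opposite comparison.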
+ void cmplRegReg(RegisterID left, RegisterID right, Condition cond)
+ {
+ switch (cond) {
+ case NE:
+ oneShortOp(getOpcodeGroup1(CMPEQ_OPCODE, right, left));
+ break;
+ case GT:
+ oneShortOp(getOpcodeGroup1(CMPGT_OPCODE, right, left));
+ break;
+ case EQ:
+ oneShortOp(getOpcodeGroup1(CMPEQ_OPCODE, right, left));
+ break;
+ case GE:
+ oneShortOp(getOpcodeGroup1(CMPGE_OPCODE, right, left));
+ break;
+ case HS:
+ oneShortOp(getOpcodeGroup1(CMPHS_OPCODE, right, left));
+ break;
+ case HI:
+ oneShortOp(getOpcodeGroup1(CMPHI_OPCODE, right, left));
+ break;
+ case LI:
+ oneShortOp(getOpcodeGroup1(CMPHI_OPCODE, left, right));
+ break;
+ case LS:
+ oneShortOp(getOpcodeGroup1(CMPHS_OPCODE, left, right));
+ break;
+ case LE:
+ oneShortOp(getOpcodeGroup1(CMPGE_OPCODE, left, right));
+ break;
+ case LT:
+ oneShortOp(getOpcodeGroup1(CMPGT_OPCODE, left, right));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void cmppl(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(CMPPL_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void cmppz(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(CMPPZ_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void cmpEqImmR0(int imm, RegisterID dst)
+ {
+ ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+ uint16_t opc = getOpcodeGroup5(CMPEQIMM_OPCODE, imm);
+ oneShortOp(opc);
+ }
+
+ void testlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(TST_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void testlImm8r(int imm, RegisterID dst)
+ {
+ ASSERT((imm <= 255) && (imm >= 0));
+ ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup5(TSTIMM_OPCODE, imm);
+ oneShortOp(opc);
+ }
+
+ void nop()
+ {
+ oneShortOp(NOP_OPCODE, false);
+ }
+
+ void synco()
+ {
+ oneShortOp(SYNCO_OPCODE);
+ }
+
+ void sett()
+ {
+ oneShortOp(SETT_OPCODE);
+ }
+
+ void clrt()
+ {
+ oneShortOp(CLRT_OPCODE);
+ }
+
+ void fschg()
+ {
+ oneShortOp(FSCHG_OPCODE);
+ }
+
+ void bkpt()
+ {
+ oneShortOp(BRK_OPCODE, false);
+ }
+
+ void branch(uint16_t opc, int label)
+ {
+ switch (opc) {
+ case BT_OPCODE:
+ ASSERT((label <= 127) && (label >= -128));
+ oneShortOp(getOpcodeGroup5(BT_OPCODE, label));
+ break;
+ case BRA_OPCODE:
+ ASSERT((label <= 2047) && (label >= -2048));
+ oneShortOp(getOpcodeGroup6(BRA_OPCODE, label));
+ break;
+ case BF_OPCODE:
+ ASSERT((label <= 127) && (label >= -128));
+ oneShortOp(getOpcodeGroup5(BF_OPCODE, label));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void branch(uint16_t opc, RegisterID reg)
+ {
+ switch (opc) {
+ case BRAF_OPCODE:
+ oneShortOp(getOpcodeGroup2(BRAF_OPCODE, reg));
+ break;
+ case JMP_OPCODE:
+ oneShortOp(getOpcodeGroup2(JMP_OPCODE, reg));
+ break;
+ case JSR_OPCODE:
+ oneShortOp(getOpcodeGroup2(JSR_OPCODE, reg));
+ break;
+ case BSRF_OPCODE:
+ oneShortOp(getOpcodeGroup2(BSRF_OPCODE, reg));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void ldspr(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(LDSPR_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void stspr(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(STSPR_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void extub(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(EXTUB_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void extuw(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(EXTUW_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ // float operations
+
+ void ldsrmfpul(RegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(LDS_RM_FPUL_OPCODE, src);
+ oneShortOp(opc);
+ }
+
+ void fneg(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup2(FNEG_OPCODE, dst);
+ oneShortOp(opc, true, false);
+ }
+
+ void fsqrt(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup2(FSQRT_OPCODE, dst);
+ oneShortOp(opc, true, false);
+ }
+
+ void stsfpulReg(RegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(STS_FPUL_RN_OPCODE, src);
+ oneShortOp(opc);
+ }
+
+ void floatfpulfrn(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(FLOAT_OPCODE, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmull(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMUL_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOV_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsReadrm(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_READ_RM_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsWriterm(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_RN_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsWriter0r(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_R0RN_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsReadr0r(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_READ_R0RM_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsReadrminc(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_READ_RM_INC_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsWriterndec(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_RN_DEC_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void ftrcRegfpul(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(FTRC_OPCODE, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fldsfpul(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(FLDS_FRM_FPUL_OPCODE, src);
+ oneShortOp(opc);
+ }
+
+ void fstsfpul(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(FSTS_FPUL_FRN_OPCODE, src);
+ oneShortOp(opc);
+ }
+
+ void ldsfpscr(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(LDSFPSCR_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void stsfpscr(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(STSFPSCR_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ // double operations
+
+ void dcnvds(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup7(FCNVDS_DRM_FPUL_OPCODE, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dcnvsd(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup7(FCNVSD_FPUL_DRN_OPCODE, dst >> 1);
+ oneShortOp(opc);
+ }
+
+ void dcmppeq(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FCMPEQ_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dcmppgt(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FCMPGT_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dmulRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FMUL_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dsubRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FSUB_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void daddRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FADD_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dmovRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FMOV_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void ddivRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FDIV_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dabs(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup7(FABS_OPCODE, dst >> 1);
+ oneShortOp(opc);
+ }
+
+ void dsqrt(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup7(FSQRT_OPCODE, dst >> 1);
+ oneShortOp(opc);
+ }
+
+ void dneg(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup7(FNEG_OPCODE, dst >> 1);
+ oneShortOp(opc);
+ }
+
+ void fmovReadrm(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup10(FMOVS_READ_RM_OPCODE, dst >> 1, src);
+ oneShortOp(opc);
+ }
+
+ void fmovWriterm(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_RN_OPCODE, dst, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void fmovWriter0r(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_R0RN_OPCODE, dst, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void fmovReadr0r(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup10(FMOVS_READ_R0RM_OPCODE, dst >> 1, src);
+ oneShortOp(opc);
+ }
+
+ void fmovReadrminc(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup10(FMOVS_READ_RM_INC_OPCODE, dst >> 1, src);
+ oneShortOp(opc);
+ }
+
+ void fmovWriterndec(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_RN_DEC_OPCODE, dst, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void floatfpulDreg(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup7(FLOAT_OPCODE, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void ftrcdrmfpul(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup7(FTRC_OPCODE, src >> 1);
+ oneShortOp(opc);
+ }
+
+ // Various move ops
+
+ void movImm8(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 127) && (imm8 >= -128));
+
+ uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, imm8);
+ oneShortOp(opc);
+ }
+
+ void movlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOV_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movwRegMem(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVW_WRITE_RN_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movwMemReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVW_READ_RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movwMemRegIn(RegisterID base, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVW_READ_RMINC_OPCODE, dst, base);
+ oneShortOp(opc);
+ }
+
+ void movwPCReg(int offset, RegisterID base, RegisterID dst)
+ {
+ ASSERT_UNUSED(base, base == SH4Registers::pc);
+ ASSERT((offset <= 255) && (offset >= 0));
+
+ uint16_t opc = getOpcodeGroup3(MOVW_READ_OFFPC_OPCODE, dst, offset);
+ oneShortOp(opc);
+ }
+
+ void movwMemReg(int offset, RegisterID base, RegisterID dst)
+ {
+ ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup11(MOVW_READ_OFFRM_OPCODE, base, offset);
+ oneShortOp(opc);
+ }
+
+ void movwR0mr(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVW_READ_R0RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movwRegMemr0(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVW_WRITE_R0RN_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movlRegMem(RegisterID src, int offset, RegisterID base)
+ {
+ ASSERT((offset <= 15) && (offset >= 0));
+
+ if (!offset) {
+ oneShortOp(getOpcodeGroup1(MOVL_WRITE_RN_OPCODE, base, src));
+ return;
+ }
+
+ oneShortOp(getOpcodeGroup4(MOVL_WRITE_OFFRN_OPCODE, base, src, offset));
+ }
+
+ void movlRegMem(RegisterID src, RegisterID base)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_WRITE_RN_OPCODE, base, src);
+ oneShortOp(opc);
+ }
+
+ void movlMemReg(int offset, RegisterID base, RegisterID dst)
+ {
+ if (base == SH4Registers::pc) {
+ ASSERT((offset <= 255) && (offset >= 0));
+ oneShortOp(getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, dst, offset));
+ return;
+ }
+
+ ASSERT((offset <= 15) && (offset >= 0));
+ if (!offset) {
+ oneShortOp(getOpcodeGroup1(MOVL_READ_RM_OPCODE, dst, base));
+ return;
+ }
+
+ oneShortOp(getOpcodeGroup4(MOVL_READ_OFFRM_OPCODE, dst, base, offset));
+ }
+
+ void movlMemRegCompact(int offset, RegisterID base, RegisterID dst)
+ {
+ oneShortOp(getOpcodeGroup4(MOVL_READ_OFFRM_OPCODE, dst, base, offset));
+ }
+
+ void movbRegMem(RegisterID src, RegisterID base)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVB_WRITE_RN_OPCODE, base, src);
+ oneShortOp(opc);
+ }
+
+ void movbMemReg(int offset, RegisterID base, RegisterID dst)
+ {
+ ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup11(MOVB_READ_OFFRM_OPCODE, base, offset);
+ oneShortOp(opc);
+ }
+
+ void movbR0mr(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVB_READ_R0RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movbMemReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVB_READ_RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movbMemRegIn(RegisterID base, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVB_READ_RMINC_OPCODE, dst, base);
+ oneShortOp(opc);
+ }
+
+ void movbRegMemr0(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVB_WRITE_R0RN_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movlMemReg(RegisterID base, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_READ_RM_OPCODE, dst, base);
+ oneShortOp(opc);
+ }
+
+ void movlMemRegIn(RegisterID base, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_READ_RMINC_OPCODE, dst, base);
+ oneShortOp(opc);
+ }
+
+ void movlR0mr(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_READ_R0RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movlRegMemr0(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_WRITE_R0RN_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
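+ // Load a 32-bit constant into dst. A value that fits in a signed 8-bit
+ // immediate becomes a single MOV #imm; anything larger is emitted as a
+ // PC-relative MOV.L whose slot is allocated in the constant pool.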
+ void loadConstant(uint32_t constant, RegisterID dst)
+ {
+ if (((int)constant <= 0x7f) && ((int)constant >= -0x80)) {
+ movImm8(constant, dst);
+ return;
+ }
+
+ uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, 0);
+
+ m_buffer.ensureSpace(maxInstructionSize, sizeof(uint32_t));
+ printInstr(getOpcodeGroup3(MOVIMM_OPCODE, dst, constant), m_buffer.codeSize());
+ m_buffer.putShortWithConstantInt(opc, constant, true);
+ }
+
+ void loadConstantUnReusable(uint32_t constant, RegisterID dst, bool ensureSpace = false)
+ {
+ uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, 0);
+
+ if (ensureSpace)
+ m_buffer.ensureSpace(maxInstructionSize, sizeof(uint32_t));
+
+ printInstr(getOpcodeGroup3(MOVIMM_OPCODE, dst, constant), m_buffer.codeSize());
+ m_buffer.putShortWithConstantInt(opc, constant);
+ }
+
+ // Flow control
+
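+ // Note: SH4 delayed branches (JSR, JMP, BRAF, RTS) execute the instruction
+ // in their delay slot, so every branch emitted below is followed by a nop().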
+ AssemblerLabel call()
+ {
+ RegisterID scr = claimScratch();
+ m_buffer.ensureSpace(maxInstructionSize + 4, sizeof(uint32_t));
+ loadConstantUnReusable(0x0, scr);
+ branch(JSR_OPCODE, scr);
+ nop();
+ releaseScratch(scr);
+ return m_buffer.label();
+ }
+
+ AssemblerLabel call(RegisterID dst)
+ {
+ m_buffer.ensureSpace(maxInstructionSize + 2);
+ branch(JSR_OPCODE, dst);
+ nop();
+ return m_buffer.label();
+ }
+
+ AssemblerLabel jmp()
+ {
+ RegisterID scr = claimScratch();
+ m_buffer.ensureSpace(maxInstructionSize + 4, sizeof(uint32_t));
+ loadConstantUnReusable(0x0, scr);
+ branch(BRAF_OPCODE, scr);
+ nop();
+ releaseScratch(scr);
+ return m_buffer.label();
+ }
+
+ AssemblerLabel extraInstrForBranch(RegisterID dst)
+ {
+ loadConstantUnReusable(0x0, dst);
+ branch(BRAF_OPCODE, dst);
+ nop();
+ return m_buffer.label();
+ }
+
+ AssemblerLabel jmp(RegisterID dst)
+ {
+ jmpReg(dst);
+ return m_buffer.label();
+ }
+
+ void jmpReg(RegisterID dst)
+ {
+ m_buffer.ensureSpace(maxInstructionSize + 2);
+ branch(JMP_OPCODE, dst);
+ nop();
+ }
+
+ AssemblerLabel jne()
+ {
+ branch(BF_OPCODE, 0);
+ return m_buffer.label();
+ }
+
+ AssemblerLabel je()
+ {
+ branch(BT_OPCODE, 0);
+ return m_buffer.label();
+ }
+
+ AssemblerLabel bra()
+ {
+ branch(BRA_OPCODE, 0);
+ return m_buffer.label();
+ }
+
+ void ret()
+ {
+ m_buffer.ensureSpace(maxInstructionSize + 2);
+ oneShortOp(RTS_OPCODE, false);
+ }
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ m_buffer.ensureSpaceForAnyInstruction();
+ return m_buffer.label();
+ }
+
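+ // A watchpoint label must be followed by at least maxJumpReplacementSize()
+ // bytes of replaceable code; label() pads with nops until it is past the
+ // tail of the last watchpoint so later labels never overlap that window.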
+ AssemblerLabel labelForWatchpoint()
+ {
+ m_buffer.ensureSpaceForAnyInstruction();
+ AssemblerLabel result = m_buffer.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = labelIgnoringWatchpoints();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ nop();
+ result = labelIgnoringWatchpoints();
+ }
+ return result;
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_buffer.sizeOfConstantPool();
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ m_buffer.ensureSpace(maxInstructionSize + 2);
+ while (!m_buffer.isAligned(alignment)) {
+ nop();
+ m_buffer.ensureSpace(maxInstructionSize + 2);
+ }
+ return label();
+ }
+
+ static void changePCrelativeAddress(int offset, uint16_t* instructionPtr, uint32_t newAddress)
+ {
+ ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+ uint32_t address = (offset << 2) + ((reinterpret_cast<uint32_t>(instructionPtr) + 4) & ~0x3);
+ *reinterpret_cast<uint32_t*>(address) = newAddress;
+ }
+
+ static uint32_t readPCrelativeAddress(int offset, uint16_t* instructionPtr)
+ {
+ ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+ uint32_t address = (offset << 2) + ((reinterpret_cast<uint32_t>(instructionPtr) + 4) & ~0x3);
+ return *reinterpret_cast<uint32_t*>(address);
+ }
+
+ static uint16_t* getInstructionPtr(void* code, int offset)
+ {
+ return reinterpret_cast<uint16_t*>(reinterpret_cast<uint32_t>(code) + offset);
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+
+ uint16_t* instructionPtr = getInstructionPtr(code, from.m_offset) - 3;
+ int offsetBits = (reinterpret_cast<uint32_t>(to) - reinterpret_cast<uint32_t>(code)) - from.m_offset;
+
+ /* MOV #imm, reg => LDR reg
+ braf @reg braf @reg
+ nop nop
+ */
+ ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+ ASSERT((instructionPtr[1] & 0xf0ff) == BRAF_OPCODE);
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits);
+ printInstr(*instructionPtr, from.m_offset + 2);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ uint16_t* instructionPtr = getInstructionPtr(code, from.m_offset);
+ instructionPtr -= 3;
+ ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to));
+ }
+
+ static void linkPointer(void* code, AssemblerLabel where, void* value)
+ {
+ uint16_t* instructionPtr = getInstructionPtr(code, where.m_offset);
+ ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(value));
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ ASSERT(call.isSet());
+ return call.m_offset;
+ }
+
+ static uint32_t* getLdrImmAddressOnPool(SH4Word* insn, uint32_t* constPool)
+ {
+ return (constPool + (*insn & 0xff));
+ }
+
+ static SH4Word patchConstantPoolLoad(SH4Word load, int value)
+ {
+ return ((load & ~0xff) | value);
+ }
+
+ static SH4Buffer::TwoShorts placeConstantPoolBarrier(int offset)
+ {
+ ASSERT(((offset >> 1) <= 2047) && ((offset >> 1) >= -2048));
+
+ SH4Buffer::TwoShorts barrier;
+ barrier.high = (BRA_OPCODE | (offset >> 1));
+ barrier.low = NOP_OPCODE;
+ printInstr((BRA_OPCODE | (offset >> 1)), 0);
+ printInstr(NOP_OPCODE, 0);
+ return barrier;
+ }
+
+ static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+ {
+ SH4Word* instructionPtr = reinterpret_cast<SH4Word*>(loadAddr);
+ SH4Word instruction = *instructionPtr;
+ SH4Word index = instruction & 0xff;
+
+ if ((instruction & 0xf000) != MOVIMM_OPCODE)
+ return;
+
+ ASSERT((((reinterpret_cast<uint32_t>(constPoolAddr) - reinterpret_cast<uint32_t>(loadAddr)) + index * 4)) < 1024);
+
+ int offset = reinterpret_cast<uint32_t>(constPoolAddr) + (index * 4) - ((reinterpret_cast<uint32_t>(instructionPtr) & ~0x03) + 4);
+ instruction &= 0x0f00;
+ instruction |= 0xd000;
+ offset &= 0x03ff;
+ instruction |= (offset >> 2);
+ *instructionPtr = instruction;
+ printInstr(instruction, reinterpret_cast<uint32_t>(loadAddr));
+ }
+
+ static void repatchPointer(void* where, void* value)
+ {
+ patchPointer(where, value);
+ }
+
+ static void* readPointer(void* code)
+ {
+ return reinterpret_cast<void*>(readInt32(code));
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(where);
+ ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, value);
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(where);
+ ASSERT(value >= 0);
+ ASSERT(value <= 60);
+
+ // Handle the uncommon case where a flushConstantPool occurred in movlMemRegCompact.
+ if ((instructionPtr[0] & 0xf000) == BRA_OPCODE)
+ instructionPtr += (instructionPtr[0] & 0x0fff) + 2;
+
+ ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFRM_OPCODE);
+ instructionPtr[0] = (instructionPtr[0] & 0xfff0) | (value >> 2);
+ cacheFlush(instructionPtr, sizeof(uint16_t));
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(from);
+ instructionPtr -= 3;
+ ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to));
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(from);
+ instructionPtr -= 3;
+ ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+ ASSERT((instructionPtr[1] & 0xf0ff) == BRAF_OPCODE);
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to) - reinterpret_cast<uint32_t>(from));
+ }
+
+ // Linking & patching
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return sizeof(SH4Word) * 6;
+ }
+
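+ // Patch the code at instructionStart to jump to 'to'. When the first word is
+ // already a PC-relative load (a branchPtrWithPatch site), the original
+ // compare/branch words are saved in slots 4 and 5 so that
+ // revertJumpReplacementToBranchPtrWithPatch() can restore them later.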
+ static void replaceWithJump(void *instructionStart, void *to)
+ {
+ SH4Word* instruction = reinterpret_cast<SH4Word*>(instructionStart);
+ intptr_t difference = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(instruction) + 3 * sizeof(SH4Word));
+
+ if ((instruction[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE) {
+ // We have an entry in the constant pool and we potentially replace a branchPtrWithPatch, so back up what would be the
+ // condition (CMP/xx and Bx opcodes) for later use in revertJumpReplacementToBranchPtrWithPatch before putting the jump.
+ instruction[4] = instruction[1];
+ instruction[5] = instruction[2];
+ instruction[1] = (BRAF_OPCODE | (instruction[0] & 0x0f00));
+ instruction[2] = NOP_OPCODE;
+ cacheFlush(&instruction[1], 2 * sizeof(SH4Word));
+ } else {
+ instruction[0] = getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, SH4Registers::r13, 1);
+ instruction[1] = getOpcodeGroup2(BRAF_OPCODE, SH4Registers::r13);
+ instruction[2] = NOP_OPCODE;
+ cacheFlush(instruction, 3 * sizeof(SH4Word));
+ }
+
+ changePCrelativeAddress(instruction[0] & 0x00ff, instruction, difference);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(void* instructionStart, RegisterID rd, int imm)
+ {
+ SH4Word *insn = reinterpret_cast<SH4Word*>(instructionStart);
+ ASSERT((insn[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+ ASSERT((insn[0] & 0x00ff) != 1);
+
+ insn[0] = getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, SH4Registers::r13, insn[0] & 0x00ff);
+ if ((insn[1] & 0xf0ff) == BRAF_OPCODE) {
+ insn[1] = (insn[4] & 0xf00f) | (rd << 8) | (SH4Registers::r13 << 4); // Restore CMP/xx opcode.
+ insn[2] = insn[5];
+ ASSERT(((insn[2] & 0xff00) == BT_OPCODE) || ((insn[2] & 0xff00) == BF_OPCODE));
+ ASSERT((insn[3] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+ insn[4] = (BRAF_OPCODE | (insn[3] & 0x0f00));
+ insn[5] = NOP_OPCODE;
+ cacheFlush(insn, 6 * sizeof(SH4Word));
+ } else {
+ // The branchPtrWithPatch has already been restored, so we just patch the immediate value and ASSERT all is as expected.
+ ASSERT((insn[1] & 0xf000) == 0x3000);
+ insn[1] = (insn[1] & 0xf00f) | (rd << 8) | (SH4Registers::r13 << 4);
+ cacheFlush(insn, 2 * sizeof(SH4Word));
+ ASSERT(((insn[2] & 0xff00) == BT_OPCODE) || ((insn[2] & 0xff00) == BF_OPCODE));
+ ASSERT((insn[3] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+ ASSERT(insn[5] == NOP_OPCODE);
+ }
+
+ changePCrelativeAddress(insn[0] & 0x00ff, insn, imm);
+ }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type = JumpFar)
+ {
+ ASSERT(to.isSet());
+ ASSERT(from.isSet());
+
+ uint16_t* instructionPtr = getInstructionPtr(data(), from.m_offset) - 1;
+ int offsetBits = (to.m_offset - from.m_offset);
+
+ if (type == JumpNear) {
+ uint16_t instruction = instructionPtr[0];
+ int offset = (offsetBits - 2);
+ ASSERT((((instruction == BT_OPCODE) || (instruction == BF_OPCODE)) && (offset >= -256) && (offset <= 254))
+ || ((instruction == BRA_OPCODE) && (offset >= -4096) && (offset <= 4094)));
+ *instructionPtr = instruction | (offset >> 1);
+ printInstr(*instructionPtr, from.m_offset + 2);
+ return;
+ }
+
+ /* MOV #imm, reg => LDR reg
+ braf @reg braf @reg
+ nop nop
+ */
+ instructionPtr -= 2;
+ ASSERT((instructionPtr[1] & 0xf0ff) == BRAF_OPCODE);
+
+ if ((instructionPtr[0] & 0xf000) == MOVIMM_OPCODE) {
+ uint32_t* addr = getLdrImmAddressOnPool(instructionPtr, m_buffer.poolAddress());
+ *addr = offsetBits;
+ printInstr(*instructionPtr, from.m_offset + 2);
+ return;
+ }
+
+ ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits);
+ printInstr(*instructionPtr, from.m_offset + 2);
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ static void patchPointer(void* code, AssemblerLabel where, void* value)
+ {
+ patchPointer(reinterpret_cast<uint32_t*>(code) + where.m_offset, value);
+ }
+
+ static void patchPointer(void* code, void* value)
+ {
+ patchInt32(code, reinterpret_cast<uint32_t>(value));
+ }
+
+ static void patchInt32(void* code, uint32_t value)
+ {
+ changePCrelativeAddress((*(reinterpret_cast<uint16_t*>(code)) & 0xff), reinterpret_cast<uint16_t*>(code), value);
+ }
+
+ static uint32_t readInt32(void* code)
+ {
+ return readPCrelativeAddress((*(reinterpret_cast<uint16_t*>(code)) & 0xff), reinterpret_cast<uint16_t*>(code));
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ uint16_t* instructionPtr = static_cast<uint16_t*>(from);
+ instructionPtr -= 3;
+ return reinterpret_cast<void*>(readPCrelativeAddress((*instructionPtr & 0xff), instructionPtr));
+ }
+
+ static void cacheFlush(void* code, size_t size)
+ {
+#if OS(LINUX)
+ // Flush each page separately, otherwise the whole flush will fail if an uncommitted page is in the area.
+ unsigned currentPage = reinterpret_cast<unsigned>(code) & ~(pageSize() - 1);
+ unsigned lastPage = (reinterpret_cast<unsigned>(code) + size - 1) & ~(pageSize() - 1);
+ do {
+#if defined CACHEFLUSH_D_L2
+ syscall(__NR_cacheflush, currentPage, pageSize(), CACHEFLUSH_D_WB | CACHEFLUSH_I | CACHEFLUSH_D_L2);
+#else
+ syscall(__NR_cacheflush, currentPage, pageSize(), CACHEFLUSH_D_WB | CACHEFLUSH_I);
+#endif
+ currentPage += pageSize();
+ } while (lastPage >= currentPage);
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+ }
+
+ void prefix(uint16_t pre)
+ {
+ m_buffer.putByte(pre);
+ }
+
+ void oneShortOp(uint16_t opcode, bool checksize = true, bool isDouble = true)
+ {
+ printInstr(opcode, m_buffer.codeSize(), isDouble);
+ if (checksize)
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putShortUnchecked(opcode);
+ }
+
+ void ensureSpace(int space)
+ {
+ m_buffer.ensureSpace(space);
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ m_buffer.ensureSpace(insnSpace, constSpace);
+ }
+
+ // Administrative methods
+
+ void* data() const { return m_buffer.data(); }
+ size_t codeSize() const { return m_buffer.codeSize(); }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+#ifdef SH4_ASSEMBLER_TRACING
+ static void printInstr(uint16_t opc, unsigned size, bool isdoubleInst = true)
+ {
+ if (!getenv("JavaScriptCoreDumpJIT"))
+ return;
+
+ const char* format = 0;
+ printfStdoutInstr("offset: 0x%8.8x\t", size);
+ switch (opc) {
+ case BRK_OPCODE:
+ format = " BRK\n";
+ break;
+ case NOP_OPCODE:
+ format = " NOP\n";
+ break;
+ case RTS_OPCODE:
+ format =" *RTS\n";
+ break;
+ case SETS_OPCODE:
+ format = " SETS\n";
+ break;
+ case SETT_OPCODE:
+ format = " SETT\n";
+ break;
+ case CLRT_OPCODE:
+ format = " CLRT\n";
+ break;
+ case FSCHG_OPCODE:
+ format = " FSCHG\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format);
+ return;
+ }
+ switch (opc & 0xf0ff) {
+ case BRAF_OPCODE:
+ format = " *BRAF R%d\n";
+ break;
+ case DT_OPCODE:
+ format = " DT R%d\n";
+ break;
+ case CMPPL_OPCODE:
+ format = " CMP/PL R%d\n";
+ break;
+ case CMPPZ_OPCODE:
+ format = " CMP/PZ R%d\n";
+ break;
+ case JMP_OPCODE:
+ format = " *JMP @R%d\n";
+ break;
+ case JSR_OPCODE:
+ format = " *JSR @R%d\n";
+ break;
+ case LDSPR_OPCODE:
+ format = " LDS R%d, PR\n";
+ break;
+ case LDSLPR_OPCODE:
+ format = " LDS.L @R%d+, PR\n";
+ break;
+ case MOVT_OPCODE:
+ format = " MOVT R%d\n";
+ break;
+ case SHAL_OPCODE:
+ format = " SHAL R%d\n";
+ break;
+ case SHAR_OPCODE:
+ format = " SHAR R%d\n";
+ break;
+ case SHLL_OPCODE:
+ format = " SHLL R%d\n";
+ break;
+ case SHLL2_OPCODE:
+ format = " SHLL2 R%d\n";
+ break;
+ case SHLL8_OPCODE:
+ format = " SHLL8 R%d\n";
+ break;
+ case SHLL16_OPCODE:
+ format = " SHLL16 R%d\n";
+ break;
+ case SHLR_OPCODE:
+ format = " SHLR R%d\n";
+ break;
+ case SHLR2_OPCODE:
+ format = " SHLR2 R%d\n";
+ break;
+ case SHLR8_OPCODE:
+ format = " SHLR8 R%d\n";
+ break;
+ case SHLR16_OPCODE:
+ format = " SHLR16 R%d\n";
+ break;
+ case STSPR_OPCODE:
+ format = " STS PR, R%d\n";
+ break;
+ case STSLPR_OPCODE:
+ format = " STS.L PR, @-R%d\n";
+ break;
+ case LDS_RM_FPUL_OPCODE:
+ format = " LDS R%d, FPUL\n";
+ break;
+ case STS_FPUL_RN_OPCODE:
+ format = " STS FPUL, R%d\n";
+ break;
+ case FLDS_FRM_FPUL_OPCODE:
+ format = " FLDS FR%d, FPUL\n";
+ break;
+ case FSTS_FPUL_FRN_OPCODE:
+ format = " FSTS FPUL, FR%d\n";
+ break;
+ case LDSFPSCR_OPCODE:
+ format = " LDS R%d, FPSCR\n";
+ break;
+ case STSFPSCR_OPCODE:
+ format = " STS FPSCR, R%d\n";
+ break;
+ case STSMACL_OPCODE:
+ format = " STS MACL, R%d\n";
+ break;
+ case STSMACH_OPCODE:
+ format = " STS MACH, R%d\n";
+ break;
+ case BSRF_OPCODE:
+ format = " *BSRF R%d";
+ break;
+ case FTRC_OPCODE:
+ format = " FTRC FR%d, FPUL\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getRn(opc));
+ return;
+ }
+ switch (opc & 0xf0ff) {
+ case FNEG_OPCODE:
+ format = " FNEG DR%d\n";
+ break;
+ case FLOAT_OPCODE:
+ format = " FLOAT DR%d\n";
+ break;
+ case FTRC_OPCODE:
+ format = " FTRC FR%d, FPUL\n";
+ break;
+ case FABS_OPCODE:
+ format = " FABS FR%d\n";
+ break;
+ case FSQRT_OPCODE:
+ format = " FSQRT FR%d\n";
+ break;
+ case FCNVDS_DRM_FPUL_OPCODE:
+ format = " FCNVDS FR%d, FPUL\n";
+ break;
+ case FCNVSD_FPUL_DRN_OPCODE:
+ format = " FCNVSD FPUL, FR%d\n";
+ break;
+ }
+ if (format) {
+ if (isdoubleInst)
+ printfStdoutInstr(format, getDRn(opc) << 1);
+ else
+ printfStdoutInstr(format, getRn(opc));
+ return;
+ }
+ switch (opc & 0xf00f) {
+ case ADD_OPCODE:
+ format = " ADD R%d, R%d\n";
+ break;
+ case ADDC_OPCODE:
+ format = " ADDC R%d, R%d\n";
+ break;
+ case ADDV_OPCODE:
+ format = " ADDV R%d, R%d\n";
+ break;
+ case AND_OPCODE:
+ format = " AND R%d, R%d\n";
+ break;
+ case DIV1_OPCODE:
+ format = " DIV1 R%d, R%d\n";
+ break;
+ case CMPEQ_OPCODE:
+ format = " CMP/EQ R%d, R%d\n";
+ break;
+ case CMPGE_OPCODE:
+ format = " CMP/GE R%d, R%d\n";
+ break;
+ case CMPGT_OPCODE:
+ format = " CMP/GT R%d, R%d\n";
+ break;
+ case CMPHI_OPCODE:
+ format = " CMP/HI R%d, R%d\n";
+ break;
+ case CMPHS_OPCODE:
+ format = " CMP/HS R%d, R%d\n";
+ break;
+ case MOV_OPCODE:
+ format = " MOV R%d, R%d\n";
+ break;
+ case MOVB_WRITE_RN_OPCODE:
+ format = " MOV.B R%d, @R%d\n";
+ break;
+ case MOVB_WRITE_RNDEC_OPCODE:
+ format = " MOV.B R%d, @-R%d\n";
+ break;
+ case MOVB_WRITE_R0RN_OPCODE:
+ format = " MOV.B R%d, @(R0, R%d)\n";
+ break;
+ case MOVB_READ_RM_OPCODE:
+ format = " MOV.B @R%d, R%d\n";
+ break;
+ case MOVB_READ_RMINC_OPCODE:
+ format = " MOV.B @R%d+, R%d\n";
+ break;
+ case MOVB_READ_R0RM_OPCODE:
+ format = " MOV.B @(R0, R%d), R%d\n";
+ break;
+ case MOVL_WRITE_RN_OPCODE:
+ format = " MOV.L R%d, @R%d\n";
+ break;
+ case MOVL_WRITE_RNDEC_OPCODE:
+ format = " MOV.L R%d, @-R%d\n";
+ break;
+ case MOVL_WRITE_R0RN_OPCODE:
+ format = " MOV.L R%d, @(R0, R%d)\n";
+ break;
+ case MOVL_READ_RM_OPCODE:
+ format = " MOV.L @R%d, R%d\n";
+ break;
+ case MOVL_READ_RMINC_OPCODE:
+ format = " MOV.L @R%d+, R%d\n";
+ break;
+ case MOVL_READ_R0RM_OPCODE:
+ format = " MOV.L @(R0, R%d), R%d\n";
+ break;
+ case MULL_OPCODE:
+ format = " MUL.L R%d, R%d\n";
+ break;
+ case DMULL_L_OPCODE:
+ format = " DMULU.L R%d, R%d\n";
+ break;
+ case DMULSL_OPCODE:
+ format = " DMULS.L R%d, R%d\n";
+ break;
+ case NEG_OPCODE:
+ format = " NEG R%d, R%d\n";
+ break;
+ case NEGC_OPCODE:
+ format = " NEGC R%d, R%d\n";
+ break;
+ case NOT_OPCODE:
+ format = " NOT R%d, R%d\n";
+ break;
+ case OR_OPCODE:
+ format = " OR R%d, R%d\n";
+ break;
+ case SHAD_OPCODE:
+ format = " SHAD R%d, R%d\n";
+ break;
+ case SHLD_OPCODE:
+ format = " SHLD R%d, R%d\n";
+ break;
+ case SUB_OPCODE:
+ format = " SUB R%d, R%d\n";
+ break;
+ case SUBC_OPCODE:
+ format = " SUBC R%d, R%d\n";
+ break;
+ case SUBV_OPCODE:
+ format = " SUBV R%d, R%d\n";
+ break;
+ case TST_OPCODE:
+ format = " TST R%d, R%d\n";
+ break;
+ case XOR_OPCODE:
+ format = " XOR R%d, R%d\n";break;
+ case MOVW_WRITE_RN_OPCODE:
+ format = " MOV.W R%d, @R%d\n";
+ break;
+ case MOVW_READ_RM_OPCODE:
+ format = " MOV.W @R%d, R%d\n";
+ break;
+ case MOVW_READ_RMINC_OPCODE:
+ format = " MOV.W @R%d+, R%d\n";
+ break;
+ case MOVW_READ_R0RM_OPCODE:
+ format = " MOV.W @(R0, R%d), R%d\n";
+ break;
+ case MOVW_WRITE_R0RN_OPCODE:
+ format = " MOV.W R%d, @(R0, R%d)\n";
+ break;
+ case EXTUB_OPCODE:
+ format = " EXTU.B R%d, R%d\n";
+ break;
+ case EXTUW_OPCODE:
+ format = " EXTU.W R%d, R%d\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getRm(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xf00f) {
+ case FSUB_OPCODE:
+ format = " FSUB FR%d, FR%d\n";
+ break;
+ case FADD_OPCODE:
+ format = " FADD FR%d, FR%d\n";
+ break;
+ case FDIV_OPCODE:
+ format = " FDIV FR%d, FR%d\n";
+ break;
+ case FMUL_OPCODE:
+ format = " DMULL FR%d, FR%d\n";
+ break;
+ case FMOV_OPCODE:
+ format = " FMOV FR%d, FR%d\n";
+ break;
+ case FCMPEQ_OPCODE:
+ format = " FCMP/EQ FR%d, FR%d\n";
+ break;
+ case FCMPGT_OPCODE:
+ format = " FCMP/GT FR%d, FR%d\n";
+ break;
+ }
+ if (format) {
+ if (isdoubleInst)
+ printfStdoutInstr(format, getDRm(opc) << 1, getDRn(opc) << 1);
+ else
+ printfStdoutInstr(format, getRm(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xf00f) {
+ case FMOVS_WRITE_RN_DEC_OPCODE:
+ format = " %s FR%d, @-R%d\n";
+ break;
+ case FMOVS_WRITE_RN_OPCODE:
+ format = " %s FR%d, @R%d\n";
+ break;
+ case FMOVS_WRITE_R0RN_OPCODE:
+ format = " %s FR%d, @(R0, R%d)\n";
+ break;
+ }
+ if (format) {
+ if (isdoubleInst)
+ printfStdoutInstr(format, "FMOV", getDRm(opc) << 1, getDRn(opc));
+ else
+ printfStdoutInstr(format, "FMOV.S", getRm(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xf00f) {
+ case FMOVS_READ_RM_OPCODE:
+ format = " %s @R%d, FR%d\n";
+ break;
+ case FMOVS_READ_RM_INC_OPCODE:
+ format = " %s @R%d+, FR%d\n";
+ break;
+ case FMOVS_READ_R0RM_OPCODE:
+ format = " %s @(R0, R%d), FR%d\n";
+ break;
+ }
+ if (format) {
+ if (isdoubleInst)
+ printfStdoutInstr(format, "FMOV", getDRm(opc), getDRn(opc) << 1);
+ else
+ printfStdoutInstr(format, "FMOV.S", getRm(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xff00) {
+ case BF_OPCODE:
+ format = " BF %d\n";
+ break;
+ case BFS_OPCODE:
+ format = " *BF/S %d\n";
+ break;
+ case ANDIMM_OPCODE:
+ format = " AND #%d, R0\n";
+ break;
+ case BT_OPCODE:
+ format = " BT %d\n";
+ break;
+ case BTS_OPCODE:
+ format = " *BT/S %d\n";
+ break;
+ case CMPEQIMM_OPCODE:
+ format = " CMP/EQ #%d, R0\n";
+ break;
+ case MOVB_WRITE_OFFGBR_OPCODE:
+ format = " MOV.B R0, @(%d, GBR)\n";
+ break;
+ case MOVB_READ_OFFGBR_OPCODE:
+ format = " MOV.B @(%d, GBR), R0\n";
+ break;
+ case MOVL_WRITE_OFFGBR_OPCODE:
+ format = " MOV.L R0, @(%d, GBR)\n";
+ break;
+ case MOVL_READ_OFFGBR_OPCODE:
+ format = " MOV.L @(%d, GBR), R0\n";
+ break;
+ case MOVA_READ_OFFPC_OPCODE:
+ format = " MOVA @(%d, PC), R0\n";
+ break;
+ case ORIMM_OPCODE:
+ format = " OR #%d, R0\n";
+ break;
+ case ORBIMM_OPCODE:
+ format = " OR.B #%d, @(R0, GBR)\n";
+ break;
+ case TSTIMM_OPCODE:
+ format = " TST #%d, R0\n";
+ break;
+ case TSTB_OPCODE:
+ format = " TST.B %d, @(R0, GBR)\n";
+ break;
+ case XORIMM_OPCODE:
+ format = " XOR #%d, R0\n";
+ break;
+ case XORB_OPCODE:
+ format = " XOR.B %d, @(R0, GBR)\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getImm8(opc));
+ return;
+ }
+ switch (opc & 0xff00) {
+ case MOVB_WRITE_OFFRN_OPCODE:
+ format = " MOV.B R0, @(%d, R%d)\n";
+ break;
+ case MOVB_READ_OFFRM_OPCODE:
+ format = " MOV.B @(%d, R%d), R0\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getDisp(opc), getRm(opc));
+ return;
+ }
+ switch (opc & 0xf000) {
+ case BRA_OPCODE:
+ format = " *BRA %d\n";
+ break;
+ case BSR_OPCODE:
+ format = " *BSR %d\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getImm12(opc));
+ return;
+ }
+ switch (opc & 0xf000) {
+ case MOVL_READ_OFFPC_OPCODE:
+ format = " MOV.L @(%d, PC), R%d\n";
+ break;
+ case ADDIMM_OPCODE:
+ format = " ADD #%d, R%d\n";
+ break;
+ case MOVIMM_OPCODE:
+ format = " MOV #%d, R%d\n";
+ break;
+ case MOVW_READ_OFFPC_OPCODE:
+ format = " MOV.W @(%d, PC), R%d\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getImm8(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xf000) {
+ case MOVL_WRITE_OFFRN_OPCODE:
+ format = " MOV.L R%d, @(%d, R%d)\n";
+ printfStdoutInstr(format, getRm(opc), getDisp(opc), getRn(opc));
+ break;
+ case MOVL_READ_OFFRM_OPCODE:
+ format = " MOV.L @(%d, R%d), R%d\n";
+ printfStdoutInstr(format, getDisp(opc), getRm(opc), getRn(opc));
+ break;
+ }
+ }
+
+ static void printfStdoutInstr(const char* format, ...)
+ {
+ if (getenv("JavaScriptCoreDumpJIT")) {
+ va_list args;
+ va_start(args, format);
+ vprintfStdoutInstr(format, args);
+ va_end(args);
+ }
+ }
+
+ static void vprintfStdoutInstr(const char* format, va_list args)
+ {
+ if (getenv("JavaScriptCoreDumpJIT"))
+ WTF::dataLogFV(format, args);
+ }
+
+ static void printBlockInstr(uint16_t* first, unsigned offset, int nbInstr)
+ {
+ printfStdoutInstr(">> repatch instructions after link\n");
+ for (int i = 0; i <= nbInstr; i++)
+ printInstr(*(first + i), offset + i);
+ printfStdoutInstr(">> end repatch\n");
+ }
+#else
+ static void printInstr(uint16_t, unsigned, bool = true) { }
+ static void printBlockInstr(uint16_t*, unsigned, int) { }
+#endif
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ SH4Word* insPtr = reinterpret_cast<SH4Word*>(instructionStart);
+
+ insPtr += 2; // skip MOV and ADD opcodes
+
+ if (((*insPtr) & 0xf00f) != MOVL_READ_RM_OPCODE) {
+ *insPtr = MOVL_READ_RM_OPCODE | (*insPtr & 0x0ff0);
+ cacheFlush(insPtr, sizeof(SH4Word));
+ }
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ SH4Word* insPtr = reinterpret_cast<SH4Word*>(instructionStart);
+
+ insPtr += 2; // skip MOV and ADD opcodes
+
+ if (((*insPtr) & 0xf00f) != MOV_OPCODE) {
+ *insPtr = MOV_OPCODE | (*insPtr & 0x0ff0);
+ cacheFlush(insPtr, sizeof(SH4Word));
+ }
+ }
+
+private:
+ SH4Buffer m_buffer;
+ int m_claimscratchReg;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(SH4)
+
+#endif // SH4Assembler_h
diff --git a/Source/JavaScriptCore/assembler/X86Assembler.h b/Source/JavaScriptCore/assembler/X86Assembler.h
new file mode 100644
index 000000000..da3181e58
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/X86Assembler.h
@@ -0,0 +1,2850 @@
+/*
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef X86Assembler_h
+#define X86Assembler_h
+
+#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
+
+#include "AssemblerBuffer.h"
+#include "JITCompilationEffort.h"
+#include <limits.h>
+#include <stdint.h>
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
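+// Returns true if value fits in a sign-extended 8-bit immediate; used
+// throughout to choose the shorter imm8 instruction forms over imm32.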
+inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
+
+namespace X86Registers {
+
+#define FOR_EACH_CPU_REGISTER(V) \
+ FOR_EACH_CPU_GPREGISTER(V) \
+ FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ FOR_EACH_CPU_FPREGISTER(V)
+
+// The following are defined as pairs of the following values:
+// 1. the type of the storage needed to save the register value by the JIT probe.
+// 2. the name of the register.
+#define FOR_EACH_CPU_GPREGISTER(V) \
+ V(void*, eax) \
+ V(void*, ecx) \
+ V(void*, edx) \
+ V(void*, ebx) \
+ V(void*, esp) \
+ V(void*, ebp) \
+ V(void*, esi) \
+ V(void*, edi) \
+ FOR_EACH_X86_64_CPU_GPREGISTER(V)
+
+#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ V(void*, eip) \
+ V(void*, eflags) \
+
+// Note: the JITs only store double values in the FP registers.
+#define FOR_EACH_CPU_FPREGISTER(V) \
+ V(double, xmm0) \
+ V(double, xmm1) \
+ V(double, xmm2) \
+ V(double, xmm3) \
+ V(double, xmm4) \
+ V(double, xmm5) \
+ V(double, xmm6) \
+ V(double, xmm7) \
+ FOR_EACH_X86_64_CPU_FPREGISTER(V)
+
+#if CPU(X86)
+
+#define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add.
+#define FOR_EACH_X86_64_CPU_FPREGISTER(V) // Nothing to add.
+
+#elif CPU(X86_64)
+
+#define FOR_EACH_X86_64_CPU_GPREGISTER(V) \
+ V(void*, r8) \
+ V(void*, r9) \
+ V(void*, r10) \
+ V(void*, r11) \
+ V(void*, r12) \
+ V(void*, r13) \
+ V(void*, r14) \
+ V(void*, r15)
+
+#define FOR_EACH_X86_64_CPU_FPREGISTER(V) \
+ V(double, xmm8) \
+ V(double, xmm9) \
+ V(double, xmm10) \
+ V(double, xmm11) \
+ V(double, xmm12) \
+ V(double, xmm13) \
+ V(double, xmm14) \
+ V(double, xmm15)
+
+#endif // CPU(X86_64)
+
+typedef enum {
+ #define DECLARE_REGISTER(_type, _regName) _regName,
+ FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+} RegisterID;
+
+typedef enum {
+ #define DECLARE_REGISTER(_type, _regName) _regName,
+ FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+} XMMRegisterID;
+
+} // namespace X86Registers
+
+class X86Assembler {
+public:
+ typedef X86Registers::RegisterID RegisterID;
+
+ static RegisterID firstRegister() { return X86Registers::eax; }
+ static RegisterID lastRegister()
+ {
+#if CPU(X86_64)
+ return X86Registers::r15;
+#else
+ return X86Registers::edi;
+#endif
+ }
+
+ typedef X86Registers::XMMRegisterID XMMRegisterID;
+ typedef XMMRegisterID FPRegisterID;
+
+ static FPRegisterID firstFPRegister() { return X86Registers::xmm0; }
+ static FPRegisterID lastFPRegister()
+ {
+#if CPU(X86_64)
+ return X86Registers::xmm15;
+#else
+ return X86Registers::xmm7;
+#endif
+ }
+
+ typedef enum {
+ ConditionO,
+ ConditionNO,
+ ConditionB,
+ ConditionAE,
+ ConditionE,
+ ConditionNE,
+ ConditionBE,
+ ConditionA,
+ ConditionS,
+ ConditionNS,
+ ConditionP,
+ ConditionNP,
+ ConditionL,
+ ConditionGE,
+ ConditionLE,
+ ConditionG,
+
+ ConditionC = ConditionB,
+ ConditionNC = ConditionAE,
+ } Condition;
+
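+ // Condition values match the x86 condition-code nibble (0x0-0xF), so a
+ // Condition can be added directly to the Jcc (0F 80+cc) and SETcc (0F 90+cc)
+ // base opcodes; see jccRel32() and setccOpcode() below.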
+private:
+ typedef enum {
+ OP_ADD_EvGv = 0x01,
+ OP_ADD_GvEv = 0x03,
+ OP_ADD_EAXIv = 0x05,
+ OP_OR_EvGv = 0x09,
+ OP_OR_GvEv = 0x0B,
+ OP_OR_EAXIv = 0x0D,
+ OP_2BYTE_ESCAPE = 0x0F,
+ OP_AND_EvGv = 0x21,
+ OP_AND_GvEv = 0x23,
+ OP_SUB_EvGv = 0x29,
+ OP_SUB_GvEv = 0x2B,
+ OP_SUB_EAXIv = 0x2D,
+ PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
+ OP_XOR_EvGv = 0x31,
+ OP_XOR_GvEv = 0x33,
+ OP_XOR_EAXIv = 0x35,
+ OP_CMP_EvGv = 0x39,
+ OP_CMP_GvEv = 0x3B,
+ OP_CMP_EAXIv = 0x3D,
+#if CPU(X86_64)
+ PRE_REX = 0x40,
+#endif
+ OP_PUSH_EAX = 0x50,
+ OP_POP_EAX = 0x58,
+#if CPU(X86_64)
+ OP_MOVSXD_GvEv = 0x63,
+#endif
+ PRE_OPERAND_SIZE = 0x66,
+ PRE_SSE_66 = 0x66,
+ OP_PUSH_Iz = 0x68,
+ OP_IMUL_GvEvIz = 0x69,
+ OP_GROUP1_EbIb = 0x80,
+ OP_GROUP1_EvIz = 0x81,
+ OP_GROUP1_EvIb = 0x83,
+ OP_TEST_EbGb = 0x84,
+ OP_TEST_EvGv = 0x85,
+ OP_XCHG_EvGv = 0x87,
+ OP_MOV_EbGb = 0x88,
+ OP_MOV_EvGv = 0x89,
+ OP_MOV_GvEv = 0x8B,
+ OP_LEA = 0x8D,
+ OP_GROUP1A_Ev = 0x8F,
+ OP_NOP = 0x90,
+ OP_XCHG_EAX = 0x90,
+ OP_CDQ = 0x99,
+ OP_MOV_EAXOv = 0xA1,
+ OP_MOV_OvEAX = 0xA3,
+ OP_TEST_ALIb = 0xA8,
+ OP_TEST_EAXIv = 0xA9,
+ OP_MOV_EAXIv = 0xB8,
+ OP_GROUP2_EvIb = 0xC1,
+ OP_RET = 0xC3,
+ OP_GROUP11_EvIb = 0xC6,
+ OP_GROUP11_EvIz = 0xC7,
+ OP_INT3 = 0xCC,
+ OP_GROUP2_Ev1 = 0xD1,
+ OP_GROUP2_EvCL = 0xD3,
+ OP_ESCAPE_DD = 0xDD,
+ OP_CALL_rel32 = 0xE8,
+ OP_JMP_rel32 = 0xE9,
+ PRE_SSE_F2 = 0xF2,
+ PRE_SSE_F3 = 0xF3,
+ OP_HLT = 0xF4,
+ OP_GROUP3_EbIb = 0xF6,
+ OP_GROUP3_Ev = 0xF7,
+ OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate when the instruction is a test.
+ OP_GROUP5_Ev = 0xFF,
+ } OneByteOpcodeID;
+
+ typedef enum {
+ OP2_MOVSD_VsdWsd = 0x10,
+ OP2_MOVSD_WsdVsd = 0x11,
+ OP2_MOVSS_VsdWsd = 0x10,
+ OP2_MOVSS_WsdVsd = 0x11,
+ OP2_CVTSI2SD_VsdEd = 0x2A,
+ OP2_CVTTSD2SI_GdWsd = 0x2C,
+ OP2_UCOMISD_VsdWsd = 0x2E,
+ OP2_ADDSD_VsdWsd = 0x58,
+ OP2_MULSD_VsdWsd = 0x59,
+ OP2_CVTSD2SS_VsdWsd = 0x5A,
+ OP2_CVTSS2SD_VsdWsd = 0x5A,
+ OP2_SUBSD_VsdWsd = 0x5C,
+ OP2_DIVSD_VsdWsd = 0x5E,
+ OP2_MOVMSKPD_VdEd = 0x50,
+ OP2_SQRTSD_VsdWsd = 0x51,
+ OP2_ANDNPD_VpdWpd = 0x55,
+ OP2_XORPD_VpdWpd = 0x57,
+ OP2_MOVD_VdEd = 0x6E,
+ OP2_MOVD_EdVd = 0x7E,
+ OP2_JCC_rel32 = 0x80,
+ OP_SETCC = 0x90,
+ OP2_3BYTE_ESCAPE = 0xAE,
+ OP2_IMUL_GvEv = 0xAF,
+ OP2_MOVZX_GvEb = 0xB6,
+ OP2_BSR = 0xBD,
+ OP2_MOVSX_GvEb = 0xBE,
+ OP2_MOVZX_GvEw = 0xB7,
+ OP2_MOVSX_GvEw = 0xBF,
+ OP2_PEXTRW_GdUdIb = 0xC5,
+ OP2_PSLLQ_UdqIb = 0x73,
+ OP2_PSRLQ_UdqIb = 0x73,
+ OP2_POR_VdqWdq = 0xEB,
+ } TwoByteOpcodeID;
+
+ typedef enum {
+ OP3_MFENCE = 0xF0,
+ } ThreeByteOpcodeID;
+
+ TwoByteOpcodeID jccRel32(Condition cond)
+ {
+ return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
+ }
+
+ TwoByteOpcodeID setccOpcode(Condition cond)
+ {
+ return (TwoByteOpcodeID)(OP_SETCC + cond);
+ }
+
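+ // These IDs select an operation within a group opcode: they are placed in
+ // the reg field of the ModRM byte (the "/digit" in Intel notation).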
+ typedef enum {
+ GROUP1_OP_ADD = 0,
+ GROUP1_OP_OR = 1,
+ GROUP1_OP_ADC = 2,
+ GROUP1_OP_AND = 4,
+ GROUP1_OP_SUB = 5,
+ GROUP1_OP_XOR = 6,
+ GROUP1_OP_CMP = 7,
+
+ GROUP1A_OP_POP = 0,
+
+ GROUP2_OP_ROL = 0,
+ GROUP2_OP_ROR = 1,
+ GROUP2_OP_RCL = 2,
+ GROUP2_OP_RCR = 3,
+
+ GROUP2_OP_SHL = 4,
+ GROUP2_OP_SHR = 5,
+ GROUP2_OP_SAR = 7,
+
+ GROUP3_OP_TEST = 0,
+ GROUP3_OP_NOT = 2,
+ GROUP3_OP_NEG = 3,
+ GROUP3_OP_IDIV = 7,
+
+ GROUP5_OP_CALLN = 2,
+ GROUP5_OP_JMPN = 4,
+ GROUP5_OP_PUSH = 6,
+
+ GROUP11_MOV = 0,
+
+ GROUP14_OP_PSLLQ = 6,
+ GROUP14_OP_PSRLQ = 2,
+
+ ESCAPE_DD_FSTP_doubleReal = 3,
+ } GroupOpcodeID;
+
+ class X86InstructionFormatter;
+public:
+
+ X86Assembler()
+ : m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
+ {
+ }
+
+ AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
+
+ // Stack operations:
+
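+ // push_r/pop_r use the one-byte 50+rd / 58+rd encodings, folding the
+ // register number into the low bits of the base opcode.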
+ void push_r(RegisterID reg)
+ {
+ m_formatter.oneByteOp(OP_PUSH_EAX, reg);
+ }
+
+ void pop_r(RegisterID reg)
+ {
+ m_formatter.oneByteOp(OP_POP_EAX, reg);
+ }
+
+ void push_i32(int imm)
+ {
+ m_formatter.oneByteOp(OP_PUSH_Iz);
+ m_formatter.immediate32(imm);
+ }
+
+ void push_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
+ }
+
+ void pop_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
+ }
+
+ // Arithmetic operations:
+
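+ // Immediate forms share a pattern: an imm that passes CAN_SIGN_EXTEND_8_32
+ // uses the sign-extended imm8 group-1 opcode (0x83), larger values the imm32
+ // form (0x81), with a shorter accumulator-special form when the destination
+ // is eax. For example, addl_ir(1, ecx) emits 83 C1 01, while
+ // addl_ir(0x1000, eax) emits 05 00 10 00 00.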
+#if !CPU(X86_64)
+ void adcl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void addl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
+ }
+
+ void addl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
+ }
+
+#if !CPU(X86_64)
+ void addl_mr(const void* addr, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_GvEv, dst, addr);
+ }
+#endif
+
+ void addl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
+ }
+
+ void addl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_ADD_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void addq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
+ }
+
+ void addq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset);
+ }
+
+ void addq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_ADD_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addq_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void addl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void andl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
+ }
+
+ void andl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
+ }
+
+ void andl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
+ }
+
+ void andl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void andl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void andq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
+ }
+
+ void andq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void andl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
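+ // INC and DEC are group-5 opcodes /0 and /1; GROUP1_OP_ADD and GROUP1_OP_OR
+ // are reused below purely for their numeric values 0 and 1.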
+ void dec_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_OR, dst);
+ }
+
+#if CPU(X86_64)
+ void decq_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_OR, dst);
+ }
+#endif // CPU(X86_64)
+
+ void inc_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_ADD, dst);
+ }
+
+#if CPU(X86_64)
+ void incq_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, dst);
+ }
+
+ void incq_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, base, offset);
+ }
+#endif // CPU(X86_64)
+
+ void negl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
+ }
+
+#if CPU(X86_64)
+ void negq_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
+ }
+#endif
+
+ void negl_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
+ }
+
+ void notl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
+ }
+
+ void notl_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
+ }
+
+ void orl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
+ }
+
+ void orl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
+ }
+
+ void orl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
+ }
+
+ void orl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_OR_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void orl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void orq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
+ }
+
+ void orq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_OR_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void orl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void orl_rm(RegisterID src, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, addr);
+ }
+#endif
+
+ void subl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
+ }
+
+ void subl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
+ }
+
+ void subl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
+ }
+
+ void subl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_SUB_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void subl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void subq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
+ }
+
+ void subq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_SUB_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void subl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void xorl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
+ }
+
+ void xorl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
+ }
+
+ void xorl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
+ }
+
+ void xorl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_XOR_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void xorq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
+ }
+
+ void xorq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_XOR_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset);
+ }
+
+ void rorq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_ROR, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_ROR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+#endif
+
+ void bsr_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_BSR, dst, src);
+ }
+
+ void sarl_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void sarl_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+ }
+
+ void shrl_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void shrl_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
+ }
+
+ void shll_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void shll_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
+ }
+
+#if CPU(X86_64)
+ void sarq_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+ }
+
+ void sarq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void shrq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void shlq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+#endif // CPU(X86_64)
+
+ void imull_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
+ }
+
+#if CPU(X86_64)
+ void imulq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.twoByteOp64(OP2_IMUL_GvEv, dst, src);
+ }
+#endif // CPU(X86_64)
+
+ void imull_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
+ }
+
+ void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
+ m_formatter.immediate32(value);
+ }
+
+ void idivl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
+ }
+
+ // Comparisons:
+
+ void cmpl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
+ }
+
+ void cmpl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
+ }
+
+ void cmpl_mr(int offset, RegisterID base, RegisterID src)
+ {
+ m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
+ }
+
+ void cmpl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_CMP_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpl_ir_force32(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void cmpl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpb_im(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
+
+#if CPU(X86)
+ void cmpb_im(int imm, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, addr);
+ m_formatter.immediate8(imm);
+ }
+#endif
+
+ void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpl_im_force32(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if CPU(X86_64)
+ void cmpq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
+ }
+
+ void cmpq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
+ }
+
+ void cmpq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, index, scale, offset);
+ }
+
+ void cmpq_mr(int offset, RegisterID base, RegisterID src)
+ {
+ m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
+ }
+
+ void cmpq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_CMP_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpq_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void cmpl_rm(RegisterID reg, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
+ }
+
+ void cmpl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void cmpw_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
+ }
+
+ void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void testl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
+ }
+
+ void testl_i32r(int imm, RegisterID dst)
+ {
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_TEST_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void testl_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void testb_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp8(OP_TEST_EbGb, src, dst);
+ }
+
+ void testb_im(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
+
+#if CPU(X86)
+ void testb_im(int imm, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, addr);
+ m_formatter.immediate8(imm);
+ }
+#endif
+
+ void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if CPU(X86_64)
+ void testq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
+ }
+
+ void testq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_TEST_EvGv, src, base, offset);
+ }
+
+ void testq_i32r(int imm, RegisterID dst)
+ {
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_TEST_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void testq_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+#endif
+
+ void testw_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
+ }
+
+ void testb_i8r(int imm, RegisterID dst)
+ {
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_TEST_ALIb);
+ else
+ m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
+ m_formatter.immediate8(imm);
+ }
+
+ void setCC_r(Condition cond, RegisterID dst)
+ {
+ m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
+ }
+
+ void sete_r(RegisterID dst)
+ {
+ m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
+ }
+
+ void setz_r(RegisterID dst)
+ {
+ sete_r(dst);
+ }
+
+ void setne_r(RegisterID dst)
+ {
+ m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
+ }
+
+ void setnz_r(RegisterID dst)
+ {
+ setne_r(dst);
+ }
+
+ // Various move ops:
+
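+    // For illustration (AT&T syntax): movl_rm(X86Registers::eax, 8, X86Registers::ebx)
+    // below emits 89 43 08, i.e. movl %eax, 8(%ebx) with a one-byte displacement.
+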
+ void cdq()
+ {
+ m_formatter.oneByteOp(OP_CDQ);
+ }
+
+ void fstpl(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset);
+ }
+
+ void xchgl_rr(RegisterID src, RegisterID dst)
+ {
+ if (src == X86Registers::eax)
+ m_formatter.oneByteOp(OP_XCHG_EAX, dst);
+ else if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_XCHG_EAX, src);
+ else
+ m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
+ }
+
+#if CPU(X86_64)
+ void xchgq_rr(RegisterID src, RegisterID dst)
+ {
+ if (src == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_XCHG_EAX, dst);
+ else if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_XCHG_EAX, src);
+ else
+ m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
+ }
+#endif
+
+ void movl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
+ }
+
+ void movl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movl_mEAX(const void* addr)
+ {
+ m_formatter.oneByteOp(OP_MOV_EAXOv);
+#if CPU(X86_64)
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int>(addr));
+#endif
+ }
+
+ void movl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movl_mr_disp8(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
+ }
+
+ void movl_i32r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void movl_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void movl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if !CPU(X86_64)
+ void movb_i8m(int imm, const void* addr)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, addr);
+ m_formatter.immediate8(imm);
+ }
+#endif
+
+ void movb_i8m(int imm, int offset, RegisterID base)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void movb_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
+
+#if !CPU(X86_64)
+ void movb_rm(RegisterID src, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_MOV_EbGb, src, addr);
+ }
+#endif
+
+ void movb_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, offset);
+ }
+
+ void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
+ }
+
+ void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movl_EAXm(const void* addr)
+ {
+ m_formatter.oneByteOp(OP_MOV_OvEAX);
+#if CPU(X86_64)
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int>(addr));
+#endif
+ }
+
+#if CPU(X86_64)
+ void movq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
+ }
+
+ void movq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movq_mEAX(const void* addr)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EAXOv);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_EAXm(const void* addr)
+ {
+ m_formatter.oneByteOp64(OP_MOV_OvEAX);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movq_mr_disp8(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
+ }
+
+ void movq_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void movq_i64r(int64_t imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
+ m_formatter.immediate64(imm);
+ }
+
+ void movsxd_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
+ }
+
+#else
+ void movl_rm(RegisterID src, const void* addr)
+ {
+ if (src == X86Registers::eax)
+ movl_EAXm(addr);
+ else
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
+ }
+
+ void movl_mr(const void* addr, RegisterID dst)
+ {
+ if (dst == X86Registers::eax)
+ movl_mEAX(addr);
+ else
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
+ }
+
+ void movl_i32m(int imm, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
+ m_formatter.immediate32(imm);
+ }
+#endif
+
+ void movzwl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
+ }
+
+ void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
+ }
+
+ void movswl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset);
+ }
+
+ void movswl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, index, scale, offset);
+ }
+
+ void movzbl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset);
+ }
+
+ void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset);
+ }
+
+#if !CPU(X86_64)
+ void movzbl_mr(const void* address, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, address);
+ }
+#endif
+
+ void movsbl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset);
+ }
+
+ void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, index, scale, offset);
+ }
+
+ void movzbl_rr(RegisterID src, RegisterID dst)
+ {
+ // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
+ // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
+ // REX prefixes are defined to be silently ignored by the processor.
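+        // For example, movzbl_rr(X86Registers::eax, X86Registers::edi) emits
+        // 40 0F B6 F8 - the leading 0x40 REX is redundant but harmless.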
+ m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
+ }
+
+ void leal_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_LEA, dst, base, offset);
+ }
+#if CPU(X86_64)
+ void leaq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
+ }
+#endif
+
+ // Flow control:
+
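+    // Note: call() and jmp() plant the opcode followed by a zeroed rel32 placeholder;
+    // the AssemblerLabel they return marks the end of that immediate, so setRel32()
+    // can later store the target minus that position - the displacement is taken
+    // relative to the next instruction.
+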
+ AssemblerLabel call()
+ {
+ m_formatter.oneByteOp(OP_CALL_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel call(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
+ return m_formatter.label();
+ }
+
+ void call_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
+ }
+
+ AssemblerLabel jmp()
+ {
+ m_formatter.oneByteOp(OP_JMP_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+    // Return an AssemblerLabel referencing the jump, so we can use this
+    // to make a tail recursive call on x86-64. The MacroAssembler
+    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
+ AssemblerLabel jmp_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
+ return m_formatter.label();
+ }
+
+ void jmp_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
+ }
+
+#if !CPU(X86_64)
+ void jmp_m(const void* address)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, address);
+ }
+#endif
+
+ AssemblerLabel jne()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionNE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jnz()
+ {
+ return jne();
+ }
+
+ AssemblerLabel je()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jz()
+ {
+ return je();
+ }
+
+ AssemblerLabel jl()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionL));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jb()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionB));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jle()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionLE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jbe()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionBE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jge()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionGE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jg()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionG));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel ja()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionA));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jae()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionAE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jo()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionO));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jnp()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionNP));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jp()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionP));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel js()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionS));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jCC(Condition cond)
+ {
+ m_formatter.twoByteOp(jccRel32(cond));
+ return m_formatter.immediateRel32();
+ }
+
+ // SSE operations:
+
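+    // These plant a mandatory prefix (F2/F3/66) ahead of a 0F-escaped opcode; for
+    // example, addsd_rr(X86Registers::xmm1, X86Registers::xmm0) emits F2 0F 58 C1,
+    // i.e. addsd %xmm1, %xmm0.
+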
+ void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+#if !CPU(X86_64)
+ void addsd_mr(const void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, address);
+ }
+#endif
+
+ void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
+ }
+
+#if CPU(X86_64)
+ void cvtsi2sdq_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
+ }
+#endif
+
+ void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
+ }
+
+#if !CPU(X86_64)
+ void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
+ }
+#endif
+
+ void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
+ }
+
+ void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src);
+ }
+
+ void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F3);
+ m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src);
+ }
+
+#if CPU(X86_64)
+ void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp64(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
+ }
+#endif
+
+ void movd_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
+ }
+
+ void movd_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src);
+ }
+
+#if CPU(X86_64)
+ void movmskpd_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp64(OP2_MOVMSKPD_VdEd, dst, (RegisterID)src);
+ }
+
+ void movq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
+ }
+
+ void movq_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
+ }
+#endif
+
+ void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
+ }
+
+ void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
+ }
+
+ void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_SSE_F3);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
+ }
+
+ void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
+ }
+
+ void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F3);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
+ }
+
+#if !CPU(X86_64)
+ void movsd_mr(const void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
+ }
+ void movsd_rm(XMMRegisterID src, const void* address)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
+ }
+#endif
+
+ void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
+ m_formatter.immediate8(whichWord);
+ }
+
+ void psllq_i8r(int imm, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp8(OP2_PSLLQ_UdqIb, GROUP14_OP_PSLLQ, (RegisterID)dst);
+ m_formatter.immediate8(imm);
+ }
+
+ void psrlq_i8r(int imm, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp8(OP2_PSRLQ_UdqIb, GROUP14_OP_PSRLQ, (RegisterID)dst);
+ m_formatter.immediate8(imm);
+ }
+
+ void por_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_POR_VdqWdq, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void andnpd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_ANDNPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ // Misc instructions:
+
+ void int3()
+ {
+ m_formatter.oneByteOp(OP_INT3);
+ }
+
+ void ret()
+ {
+ m_formatter.oneByteOp(OP_RET);
+ }
+
+ void predictNotTaken()
+ {
+ m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
+ }
+
+ void mfence()
+ {
+ m_formatter.threeByteOp(OP3_MFENCE);
+ }
+
+ // Assembler admin methods:
+
+ size_t codeSize() const
+ {
+ return m_formatter.codeSize();
+ }
+
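+    // A watchpoint reserves maxJumpReplacementSize() bytes so it can later be
+    // overwritten with a jump; label() below nop-pads past the tail of the last
+    // watchpoint, so no other label can land inside that patchable region.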
+ AssemblerLabel labelForWatchpoint()
+ {
+ AssemblerLabel result = m_formatter.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
+ }
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ return m_formatter.label();
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = m_formatter.label();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ nop();
+ result = m_formatter.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_formatter.isAligned(alignment))
+ m_formatter.oneByteOp(OP_HLT);
+
+ return label();
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
+
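+    // A minimal sketch of the intended flow (illustrative only):
+    //     AssemblerLabel j = jmp();            // E9 followed by a zeroed rel32
+    //     linkJump(j, label());                // patch while the buffer is writable
+    //     X86Assembler::relinkJump(from, to);  // patch already-finalized code
+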
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
+ {
+ ASSERT(from.isSet());
+ ASSERT(to.isSet());
+
+ char* code = reinterpret_cast<char*>(m_formatter.data());
+ ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
+ setRel32(code + from.m_offset, code + to.m_offset);
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+ }
+
+ static void linkPointer(void* code, AssemblerLabel where, void* value)
+ {
+ ASSERT(where.isSet());
+
+ setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ setRel32(from, to);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ setRel32(from, to);
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ ASSERT(value >= std::numeric_limits<int8_t>::min());
+ ASSERT(value <= std::numeric_limits<int8_t>::max());
+ setInt8(where, value);
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ setInt32(where, value);
+ }
+
+ static void repatchPointer(void* where, void* value)
+ {
+ setPointer(where, value);
+ }
+
+ static void* readPointer(void* where)
+ {
+ return reinterpret_cast<void**>(where)[-1];
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ uint8_t* dstPtr = reinterpret_cast<uint8_t*>(to);
+ intptr_t distance = (intptr_t)(dstPtr - (ptr + 5));
+ ptr[0] = static_cast<uint8_t>(OP_JMP_rel32);
+ *reinterpret_cast<int32_t*>(ptr + 1) = static_cast<int32_t>(distance);
+ }
+
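+    // The replacement planted above is OP_JMP_rel32 plus a 4-byte displacement,
+    // which is where the 5 below comes from.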
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return 5;
+ }
+
+#if CPU(X86_64)
+ static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst)
+ {
+ const unsigned instructionSize = 10; // REX.W MOV IMM64
+ const int rexBytes = 1;
+ const int opcodeBytes = 1;
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = PRE_REX | (1 << 3) | (dst >> 3);
+ ptr[1] = OP_MOV_EAXIv | (dst & 7);
+
+ union {
+ uint64_t asWord;
+ uint8_t asBytes[8];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
+ ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
+ }
+
+ static void revertJumpTo_movl_i32r(void* instructionStart, int32_t imm, RegisterID dst)
+ {
+ // We only revert jumps on inline caches, and inline caches always use the scratch register (r11).
+ // FIXME: If the above is ever false then we need to make this smarter with respect to emitting
+ // the REX byte.
+ ASSERT(dst == X86Registers::r11);
+ const unsigned instructionSize = 6; // REX MOV IMM32
+ const int rexBytes = 1;
+ const int opcodeBytes = 1;
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = PRE_REX | (dst >> 3);
+ ptr[1] = OP_MOV_EAXIv | (dst & 7);
+
+ union {
+ uint32_t asWord;
+ uint8_t asBytes[4];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
+ ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
+ }
+#endif
+
+ static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst)
+ {
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = OP_GROUP1_EvIz;
+ ptr[1] = (X86InstructionFormatter::ModRmRegister << 6) | (GROUP1_OP_CMP << 3) | dst;
+ union {
+ uint32_t asWord;
+ uint8_t asBytes[4];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
+ ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
+ }
+
+ static void revertJumpTo_cmpl_im_force32(void* instructionStart, int32_t imm, int offset, RegisterID dst)
+ {
+ ASSERT_UNUSED(offset, !offset);
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = OP_GROUP1_EvIz;
+ ptr[1] = (X86InstructionFormatter::ModRmMemoryNoDisp << 6) | (GROUP1_OP_CMP << 3) | dst;
+ union {
+ uint32_t asWord;
+ uint8_t asBytes[4];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
+ ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
+ }
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+#if CPU(X86_64)
+ if ((*ptr & ~15) == PRE_REX)
+ ptr++;
+#endif
+ switch (*ptr) {
+ case OP_MOV_GvEv:
+ break;
+ case OP_LEA:
+ *ptr = OP_MOV_GvEv;
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+#if CPU(X86_64)
+ if ((*ptr & ~15) == PRE_REX)
+ ptr++;
+#endif
+ switch (*ptr) {
+ case OP_MOV_GvEv:
+ *ptr = OP_LEA;
+ break;
+ case OP_LEA:
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ ASSERT(call.isSet());
+ return call.m_offset;
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ ASSERT(label.isSet());
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ unsigned debugOffset() { return m_formatter.debugOffset(); }
+
+ void nop()
+ {
+ m_formatter.oneByteOp(OP_NOP);
+ }
+
+ static void fillNops(void* base, size_t size)
+ {
+#if CPU(X86_64)
+ static const uint8_t nops[10][10] = {
+ // nop
+ {0x90},
+ // xchg %ax,%ax
+ {0x66, 0x90},
+ // nopl (%[re]ax)
+ {0x0f, 0x1f, 0x00},
+ // nopl 8(%[re]ax)
+ {0x0f, 0x1f, 0x40, 0x08},
+ // nopl 8(%[re]ax,%[re]ax,1)
+ {0x0f, 0x1f, 0x44, 0x00, 0x08},
+ // nopw 8(%[re]ax,%[re]ax,1)
+ {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x08},
+ // nopl 512(%[re]ax)
+ {0x0f, 0x1f, 0x80, 0x00, 0x02, 0x00, 0x00},
+ // nopl 512(%[re]ax,%[re]ax,1)
+ {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
+ // nopw 512(%[re]ax,%[re]ax,1)
+ {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
+ // nopw %cs:512(%[re]ax,%[re]ax,1)
+ {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}
+ };
+
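+        // For example, filling 12 bytes plants two extra 0x66 prefixes ahead of the
+        // 10-byte form: 66 66 66 2E 0F 1F 84 00 00 02 00 00.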
+ uint8_t* where = reinterpret_cast<uint8_t*>(base);
+ while (size) {
+ unsigned nopSize = static_cast<unsigned>(std::min<size_t>(size, 15));
+ unsigned numPrefixes = nopSize <= 10 ? 0 : nopSize - 10;
+ for (unsigned i = 0; i != numPrefixes; ++i)
+ *where++ = 0x66;
+
+ unsigned nopRest = nopSize - numPrefixes;
+ for (unsigned i = 0; i != nopRest; ++i)
+ *where++ = nops[nopRest-1][i];
+
+ size -= nopSize;
+ }
+#else
+ memset(base, OP_NOP, size);
+#endif
+ }
+
+ // This is a no-op on x86
+ ALWAYS_INLINE static void cacheFlush(void*, size_t) { }
+
+private:
+
+ static void setPointer(void* where, void* value)
+ {
+ reinterpret_cast<void**>(where)[-1] = value;
+ }
+
+ static void setInt32(void* where, int32_t value)
+ {
+ reinterpret_cast<int32_t*>(where)[-1] = value;
+ }
+
+ static void setInt8(void* where, int8_t value)
+ {
+ reinterpret_cast<int8_t*>(where)[-1] = value;
+ }
+
+ static void setRel32(void* from, void* to)
+ {
+ intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
+ ASSERT(offset == static_cast<int32_t>(offset));
+
+ setInt32(from, offset);
+ }
+
+ class X86InstructionFormatter {
+
+ static const int maxInstructionSize = 16;
+
+ public:
+
+ enum ModRmMode {
+ ModRmMemoryNoDisp,
+ ModRmMemoryDisp8,
+ ModRmMemoryDisp32,
+ ModRmRegister,
+ };
+
+ // Legacy prefix bytes:
+ //
+        // These are emitted prior to the instruction.
+
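+        // For example, PRE_OPERAND_SIZE (0x66) ahead of an Ev-form opcode narrows a
+        // 32-bit operation to 16 bits, as cmpw_ir() and friends do above.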
+ void prefix(OneByteOpcodeID pre)
+ {
+ m_buffer.putByte(pre);
+ }
+
+ // Word-sized operands / no operand instruction formatters.
+ //
+ // In addition to the opcode, the following operand permutations are supported:
+ // * None - instruction takes no operands.
+ // * One register - the low three bits of the RegisterID are added into the opcode.
+        // * Two registers - encode a register-form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
+ // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
+ // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
+ //
+ // For 32-bit x86 targets, the address operand may also be provided as a void*.
+ // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
+ //
+ // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).
+
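+        // For example, movl_rr() above reaches oneByteOp(OP_MOV_EvGv, src, dst), so
+        // movl %eax, %ecx becomes 89 C1 (opcode, then a register-form ModRm byte).
+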
+ void oneByteOp(OneByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(reg, base, offset);
+ }
+
+ void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp8(reg, base, offset);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+#if !CPU(X86_64)
+ void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, address);
+ }
+#endif
+
+ void twoByteOp(TwoByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+#if !CPU(X86_64)
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, address);
+ }
+#endif
+
+ void threeByteOp(ThreeByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(OP2_3BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+#if CPU(X86_64)
+ // Quad-word-sized operands:
+ //
+        // Used to format 64-bit operations, planting a REX.w prefix.
+ // When planting d64 or f64 instructions, not requiring a REX.w prefix,
+ // the normal (non-'64'-postfixed) formatters should be used.
+
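+        // For example, movq_rr(X86Registers::r8, X86Registers::eax) emits 4C 89 C0
+        // (REX.W|REX.R, opcode, ModRm), i.e. movq %r8, %rax.
+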
+ void oneByteOp64(OneByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(0, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(reg, base, offset);
+ }
+
+ void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp8(reg, base, offset);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+ void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+#endif
+
+ // Byte-operands:
+ //
+ // These methods format byte operations. Byte operations differ from the normal
+ // formatters in the circumstances under which they will decide to emit REX prefixes.
+ // These should be used where any register operand signifies a byte register.
+ //
+        // The distinction is due to the handling of register numbers in the range 4..7 on
+ // x86-64. These register numbers may either represent the second byte of the first
+ // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
+ //
+ // Since ah..bh cannot be used in all permutations of operands (specifically cannot
+ // be accessed where a REX prefix is present), these are likely best treated as
+ // deprecated. In order to ensure the correct registers spl..dil are selected a
+ // REX prefix will be emitted for any byte register operand in the range 4..15.
+ //
+        // These formatters may be used in instructions that mix operand sizes, in which
+        // case an unnecessary REX will be emitted, for example:
+ // movzbl %al, %edi
+ // In this case a REX will be planted since edi is 7 (and were this a byte operand
+ // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
+ // be silently ignored by the processor.
+ //
+ // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
+ // is provided to check byte register operands.
+
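+        // For example, movb_rm(X86Registers::edi, 0, X86Registers::eax) emits
+        // 40 88 38, i.e. movb %dil, (%rax); without the REX the same ModRm byte
+        // would name %bh instead.
+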
+ void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(groupOp, rm);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(base), reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index) || regRequiresRex(base), reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+            emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(groupOp, rm);
+ }
+
+ // Immediates:
+ //
+        // An immediate should be appended where appropriate after an op has been emitted.
+ // The writes are unchecked since the opcode formatters above will have ensured space.
+
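+        // Immediates are stored little-endian; movl_i32r(0x12345678, X86Registers::ecx),
+        // for example, emits B9 78 56 34 12.
+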
+ void immediate8(int imm)
+ {
+ m_buffer.putByteUnchecked(imm);
+ }
+
+ void immediate16(int imm)
+ {
+ m_buffer.putShortUnchecked(imm);
+ }
+
+ void immediate32(int imm)
+ {
+ m_buffer.putIntUnchecked(imm);
+ }
+
+ void immediate64(int64_t imm)
+ {
+ m_buffer.putInt64Unchecked(imm);
+ }
+
+ AssemblerLabel immediateRel32()
+ {
+ m_buffer.putIntUnchecked(0);
+ return label();
+ }
+
+ // Administrative methods:
+
+ size_t codeSize() const { return m_buffer.codeSize(); }
+ AssemblerLabel label() const { return m_buffer.label(); }
+ bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+ void* data() const { return m_buffer.data(); }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ private:
+
+ // Internals; ModRm and REX formatters.
+
+ static const RegisterID noBase = X86Registers::ebp;
+ static const RegisterID hasSib = X86Registers::esp;
+ static const RegisterID noIndex = X86Registers::esp;
+#if CPU(X86_64)
+ static const RegisterID noBase2 = X86Registers::r13;
+ static const RegisterID hasSib2 = X86Registers::r12;
+
+        // Registers r8 & above require a REX prefix.
+ inline bool regRequiresRex(int reg)
+ {
+ return (reg >= X86Registers::r8);
+ }
+
+        // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers being accessed).
+ inline bool byteRegRequiresRex(int reg)
+ {
+ return (reg >= X86Registers::esp);
+ }
+
+ // Format a REX prefix byte.
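+        // The layout is 0100WRXB: e.g. emitRex(true, 0, 0, 8) yields 0x49 (REX.W|REX.B).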
+ inline void emitRex(bool w, int r, int x, int b)
+ {
+ ASSERT(r >= 0);
+ ASSERT(x >= 0);
+ ASSERT(b >= 0);
+ m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
+ }
+
+ // Used to plant a REX byte with REX.w set (for 64-bit operations).
+ inline void emitRexW(int r, int x, int b)
+ {
+ emitRex(true, r, x, b);
+ }
+
+ // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
+ // regRequiresRex() to check other registers (i.e. address base & index).
+ inline void emitRexIf(bool condition, int r, int x, int b)
+ {
+ if (condition) emitRex(false, r, x, b);
+ }
+
+        // Used for word-sized operations; will plant a REX prefix if necessary (if any register is r8 or above).
+ inline void emitRexIfNeeded(int r, int x, int b)
+ {
+ emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
+ }
+#else
+ // No REX prefix bytes on 32-bit x86.
+ inline bool regRequiresRex(int) { return false; }
+ inline bool byteRegRequiresRex(int) { return false; }
+ inline void emitRexIf(bool, int, int, int) {}
+ inline void emitRexIfNeeded(int, int, int) {}
+#endif
+
+ void putModRm(ModRmMode mode, int reg, RegisterID rm)
+ {
+ m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
+ }
+
+ void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
+ {
+ ASSERT(mode != ModRmRegister);
+
+ putModRm(mode, reg, hasSib);
+ m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
+ }
+
+ void registerModRM(int reg, RegisterID rm)
+ {
+ putModRm(ModRmRegister, reg, rm);
+ }
+
+ void memoryModRM(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
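+            // For example, movl_mr(0, X86Registers::esp, X86Registers::eax) emits
+            // 8B 04 24: ModRm 0x04 selects a SIB, and SIB 0x24 encodes no-index + esp.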
+#if CPU(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
+ putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+ m_buffer.putIntUnchecked(offset);
+ }
+ } else {
+#if CPU(X86_64)
+ if (!offset && (base != noBase) && (base != noBase2))
+#else
+ if (!offset && (base != noBase))
+#endif
+ putModRm(ModRmMemoryNoDisp, reg, base);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRm(ModRmMemoryDisp8, reg, base);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, reg, base);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+ }
+
+ void memoryModRM_disp8(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+ ASSERT(CAN_SIGN_EXTEND_8_32(offset));
+#if CPU(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp8, reg, base);
+ m_buffer.putByteUnchecked(offset);
+ }
+ }
+
+ void memoryModRM_disp32(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+#if CPU(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+ m_buffer.putIntUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, reg, base);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+ void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ ASSERT(index != noIndex);
+
+#if CPU(X86_64)
+ if (!offset && (base != noBase) && (base != noBase2))
+#else
+ if (!offset && (base != noBase))
+#endif
+ putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+#if !CPU(X86_64)
+ void memoryModRM(int reg, const void* address)
+ {
+ // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
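+            // For example, a reg field of ecx with address 0x12345678 emits
+            // 0D 78 56 34 12 after the opcode - mod 00 / rm 101 is disp32 on 32-bit x86.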
+ putModRm(ModRmMemoryNoDisp, reg, noBase);
+ m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
+ }
+#endif
+
+ public:
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(X86)
+
+#endif // X86Assembler_h