Diffstat (limited to 'Source/JavaScriptCore/assembler')
37 files changed, 15637 insertions, 1347 deletions
diff --git a/Source/JavaScriptCore/assembler/ARM64Assembler.h b/Source/JavaScriptCore/assembler/ARM64Assembler.h new file mode 100644 index 000000000..e573620c4 --- /dev/null +++ b/Source/JavaScriptCore/assembler/ARM64Assembler.h @@ -0,0 +1,3514 @@ +/* + * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ARM64Assembler_h +#define ARM64Assembler_h + +#if ENABLE(ASSEMBLER) && CPU(ARM64) + +#include "AssemblerBuffer.h" +#include "AssemblerCommon.h" +#include <limits.h> +#include <wtf/Assertions.h> +#include <wtf/Vector.h> +#include <stdint.h> + +#define CHECK_DATASIZE_OF(datasize) ASSERT(datasize == 32 || datasize == 64) +#define DATASIZE_OF(datasize) ((datasize == 64) ? Datasize_64 : Datasize_32) +#define MEMOPSIZE_OF(datasize) ((datasize == 8 || datasize == 128) ? MemOpSize_8_or_128 : (datasize == 16) ? MemOpSize_16 : (datasize == 32) ? MemOpSize_32 : MemOpSize_64) +#define CHECK_DATASIZE() CHECK_DATASIZE_OF(datasize) +#define CHECK_VECTOR_DATASIZE() ASSERT(datasize == 64 || datasize == 128) +#define DATASIZE DATASIZE_OF(datasize) +#define MEMOPSIZE MEMOPSIZE_OF(datasize) +#define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128) +#define MEMPAIROPSIZE_INT(datasize) ((datasize == 64) ? MemPairOp_64 : MemPairOp_32) +#define MEMPAIROPSIZE_FP(datasize) ((datasize == 128) ? MemPairOp_V128 : (datasize == 64) ? 
MemPairOp_V64 : MemPairOp_32) + +namespace JSC { + +ALWAYS_INLINE bool isInt7(int32_t value) +{ + return value == ((value << 25) >> 25); +} + +ALWAYS_INLINE bool isInt11(int32_t value) +{ + return value == ((value << 21) >> 21); +} + +ALWAYS_INLINE bool isUInt5(int32_t value) +{ + return !(value & ~0x1f); +} + +class UInt5 { +public: + explicit UInt5(int value) + : m_value(value) + { + ASSERT(isUInt5(value)); + } + + operator int() { return m_value; } + +private: + int m_value; +}; + +class UInt12 { +public: + explicit UInt12(int value) + : m_value(value) + { + ASSERT(isUInt12(value)); + } + + operator int() { return m_value; } + +private: + int m_value; +}; + +class PostIndex { +public: + explicit PostIndex(int value) + : m_value(value) + { + ASSERT(isInt9(value)); + } + + operator int() { return m_value; } + +private: + int m_value; +}; + +class PreIndex { +public: + explicit PreIndex(int value) + : m_value(value) + { + ASSERT(isInt9(value)); + } + + operator int() { return m_value; } + +private: + int m_value; +}; + +class PairPostIndex { +public: + explicit PairPostIndex(int value) + : m_value(value) + { + ASSERT(isInt11(value)); + } + + operator int() { return m_value; } + +private: + int m_value; +}; + +class PairPreIndex { +public: + explicit PairPreIndex(int value) + : m_value(value) + { + ASSERT(isInt11(value)); + } + + operator int() { return m_value; } + +private: + int m_value; +}; + +typedef ARM64LogicalImmediate LogicalImmediate; + +inline uint16_t getHalfword(uint64_t value, int which) +{ + return value >> (which << 4); +} + +namespace ARM64Registers { + +#define FOR_EACH_CPU_REGISTER(V) \ + FOR_EACH_CPU_GPREGISTER(V) \ + FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + FOR_EACH_CPU_FPREGISTER(V) + +// The following are defined as pairs of the following value: +// 1. type of the storage needed to save the register value by the JIT probe. +// 2. name of the register. +#define FOR_EACH_CPU_GPREGISTER(V) \ + /* Parameter/result registers */ \ + V(void*, x0) \ + V(void*, x1) \ + V(void*, x2) \ + V(void*, x3) \ + V(void*, x4) \ + V(void*, x5) \ + V(void*, x6) \ + V(void*, x7) \ + /* Indirect result location register */ \ + V(void*, x8) \ + /* Temporary registers */ \ + V(void*, x9) \ + V(void*, x10) \ + V(void*, x11) \ + V(void*, x12) \ + V(void*, x13) \ + V(void*, x14) \ + V(void*, x15) \ + /* Intra-procedure-call scratch registers (temporary) */ \ + V(void*, x16) \ + V(void*, x17) \ + /* Platform Register (temporary) */ \ + V(void*, x18) \ + /* Callee-saved */ \ + V(void*, x19) \ + V(void*, x20) \ + V(void*, x21) \ + V(void*, x22) \ + V(void*, x23) \ + V(void*, x24) \ + V(void*, x25) \ + V(void*, x26) \ + V(void*, x27) \ + V(void*, x28) \ + /* Special */ \ + V(void*, fp) \ + V(void*, lr) \ + V(void*, sp) + +#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + V(void*, pc) \ + V(void*, nzcv) \ + V(void*, fpsr) \ + +// ARM64 always has 32 FPU registers 128-bits each. See http://llvm.org/devmtg/2012-11/Northover-AArch64.pdf +// and Section 5.1.2 in http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf. +// However, we only use them for 64-bit doubles. +#define FOR_EACH_CPU_FPREGISTER(V) \ + /* Parameter/result registers */ \ + V(double, q0) \ + V(double, q1) \ + V(double, q2) \ + V(double, q3) \ + V(double, q4) \ + V(double, q5) \ + V(double, q6) \ + V(double, q7) \ + /* Callee-saved (up to 64-bits only!) 
*/ \ + V(double, q8) \ + V(double, q9) \ + V(double, q10) \ + V(double, q11) \ + V(double, q12) \ + V(double, q13) \ + V(double, q14) \ + V(double, q15) \ + /* Temporary registers */ \ + V(double, q16) \ + V(double, q17) \ + V(double, q18) \ + V(double, q19) \ + V(double, q20) \ + V(double, q21) \ + V(double, q22) \ + V(double, q23) \ + V(double, q24) \ + V(double, q25) \ + V(double, q26) \ + V(double, q27) \ + V(double, q28) \ + V(double, q29) \ + V(double, q30) \ + V(double, q31) + +typedef enum { + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + + ip0 = x16, + ip1 = x17, + x29 = fp, + x30 = lr, + zr = 0x3f, +} RegisterID; + +typedef enum { + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER +} FPRegisterID; + +static constexpr bool isSp(RegisterID reg) { return reg == sp; } +static constexpr bool isZr(RegisterID reg) { return reg == zr; } + +} // namespace ARM64Registers + +class ARM64Assembler { +public: + typedef ARM64Registers::RegisterID RegisterID; + typedef ARM64Registers::FPRegisterID FPRegisterID; + + static constexpr RegisterID firstRegister() { return ARM64Registers::x0; } + static constexpr RegisterID lastRegister() { return ARM64Registers::sp; } + + static constexpr FPRegisterID firstFPRegister() { return ARM64Registers::q0; } + static constexpr FPRegisterID lastFPRegister() { return ARM64Registers::q31; } + +private: + static constexpr bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); } + static constexpr bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); } + +public: + ARM64Assembler() + : m_indexOfLastWatchpoint(INT_MIN) + , m_indexOfTailOfLastWatchpoint(INT_MIN) + { + } + + AssemblerBuffer& buffer() { return m_buffer; } + + // (HS, LO, HI, LS) -> (AE, B, A, BE) + // (VS, VC) -> (O, NO) + typedef enum { + ConditionEQ, + ConditionNE, + ConditionHS, ConditionCS = ConditionHS, + ConditionLO, ConditionCC = ConditionLO, + ConditionMI, + ConditionPL, + ConditionVS, + ConditionVC, + ConditionHI, + ConditionLS, + ConditionGE, + ConditionLT, + ConditionGT, + ConditionLE, + ConditionAL, + ConditionInvalid + } Condition; + + static Condition invert(Condition cond) + { + return static_cast<Condition>(cond ^ 1); + } + + typedef enum { + LSL, + LSR, + ASR, + ROR + } ShiftType; + + typedef enum { + UXTB, + UXTH, + UXTW, + UXTX, + SXTB, + SXTH, + SXTW, + SXTX + } ExtendType; + + enum SetFlags { + DontSetFlags, + S + }; + +#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 4) | (index)) +#define JUMP_ENUM_SIZE(jump) ((jump) >> 4) + enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0), + JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)), + JumpCondition = JUMP_ENUM_WITH_SIZE(2, 2 * sizeof(uint32_t)), + JumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)), + JumpTestBit = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)), + JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)), + JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)), + JumpCompareAndBranchFixedSize = JUMP_ENUM_WITH_SIZE(7, 2 * sizeof(uint32_t)), + JumpTestBitFixedSize = JUMP_ENUM_WITH_SIZE(8, 2 * sizeof(uint32_t)), + }; + enum JumpLinkType { + LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0), + LinkJumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)), + LinkJumpConditionDirect = JUMP_ENUM_WITH_SIZE(2, 1 * sizeof(uint32_t)), + LinkJumpCondition = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)), + 
LinkJumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)), + LinkJumpCompareAndBranchDirect = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)), + LinkJumpTestBit = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)), + LinkJumpTestBitDirect = JUMP_ENUM_WITH_SIZE(7, 1 * sizeof(uint32_t)), + }; + + class LinkRecord { + public: + LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition) + { + data.realTypes.m_from = from; + data.realTypes.m_to = to; + data.realTypes.m_type = type; + data.realTypes.m_linkType = LinkInvalid; + data.realTypes.m_condition = condition; + } + LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister) + { + data.realTypes.m_from = from; + data.realTypes.m_to = to; + data.realTypes.m_type = type; + data.realTypes.m_linkType = LinkInvalid; + data.realTypes.m_condition = condition; + data.realTypes.m_is64Bit = is64Bit; + data.realTypes.m_compareRegister = compareRegister; + } + LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister) + { + data.realTypes.m_from = from; + data.realTypes.m_to = to; + data.realTypes.m_type = type; + data.realTypes.m_linkType = LinkInvalid; + data.realTypes.m_condition = condition; + data.realTypes.m_bitNumber = bitNumber; + data.realTypes.m_compareRegister = compareRegister; + } + void operator=(const LinkRecord& other) + { + data.copyTypes.content[0] = other.data.copyTypes.content[0]; + data.copyTypes.content[1] = other.data.copyTypes.content[1]; + data.copyTypes.content[2] = other.data.copyTypes.content[2]; + } + intptr_t from() const { return data.realTypes.m_from; } + void setFrom(intptr_t from) { data.realTypes.m_from = from; } + intptr_t to() const { return data.realTypes.m_to; } + JumpType type() const { return data.realTypes.m_type; } + JumpLinkType linkType() const { return data.realTypes.m_linkType; } + void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; } + Condition condition() const { return data.realTypes.m_condition; } + bool is64Bit() const { return data.realTypes.m_is64Bit; } + unsigned bitNumber() const { return data.realTypes.m_bitNumber; } + RegisterID compareRegister() const { return data.realTypes.m_compareRegister; } + + private: + union { + struct RealTypes { + intptr_t m_from : 48; + intptr_t m_to : 48; + JumpType m_type : 8; + JumpLinkType m_linkType : 8; + Condition m_condition : 4; + unsigned m_bitNumber : 6; + RegisterID m_compareRegister : 6; + bool m_is64Bit : 1; + } realTypes; + struct CopyTypes { + uint64_t content[3]; + } copyTypes; + COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct); + } data; + }; + + // bits(N) VFPExpandImm(bits(8) imm8); + // + // Encoding of floating point immediates is a little complicated. Here's a + // high level description: + // +/-m*2^-n where m and n are integers, 16 <= m <= 31, 0 <= n <= 7 + // and the algorithm for expanding to a single precision float: + // return imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19); + // + // The trickiest bit is how the exponent is handled.
The following table + // may help clarify things a little: + // 654 + // 100 01111100 124 -3 1020 01111111100 + // 101 01111101 125 -2 1021 01111111101 + // 110 01111110 126 -1 1022 01111111110 + // 111 01111111 127 0 1023 01111111111 + // 000 10000000 128 1 1024 10000000000 + // 001 10000001 129 2 1025 10000000001 + // 010 10000010 130 3 1026 10000000010 + // 011 10000011 131 4 1027 10000000011 + // The first column shows the bit pattern stored in bits 6-4 of the arm + // encoded immediate. The second column shows the 8-bit IEEE 754 single + // -precision exponent in binary, the third column shows the raw decimal + // value. IEEE 754 single-precision numbers are stored with a bias of 127 + // to the exponent, so the fourth column shows the resulting exponent. + // From this we can see that the exponent can be in the range -3..4, + // which agrees with the high level description given above. The fifth + // and sixth columns show the value stored in an IEEE 754 double-precision + // number to represent these exponents in decimal and binary, given the + // bias of 1023. + // + // Ultimately, detecting doubles that can be encoded as immediates on arm + // and encoding doubles is actually not too bad. A floating point value can + // be encoded by retaining the sign bit, the low three bits of the exponent + // and the high 4 bits of the mantissa. To validly be able to encode an + // immediate the remainder of the mantissa must be zero, and the high part + // of the exponent must match the top bit retained, bar the highest bit + // which must be its inverse. + static bool canEncodeFPImm(double d) + { + // Discard the sign bit, the low two bits of the exponent & the highest + // four bits of the mantissa. + uint64_t masked = bitwise_cast<uint64_t>(d) & 0x7fc0ffffffffffffull; + return (masked == 0x3fc0000000000000ull) || (masked == 0x4000000000000000ull); + } + + template<int datasize> + static bool canEncodePImmOffset(int32_t offset) + { + return isValidScaledUImm12<datasize>(offset); + } + + static bool canEncodeSImmOffset(int32_t offset) + { + return isValidSignedImm9(offset); + } + +private: + int encodeFPImm(double d) + { + ASSERT(canEncodeFPImm(d)); + uint64_t u64 = bitwise_cast<uint64_t>(d); + return (static_cast<int>(u64 >> 56) & 0x80) | (static_cast<int>(u64 >> 48) & 0x7f); + } + + template<int datasize> + int encodeShiftAmount(int amount) + { + ASSERT(!amount || datasize == (8 << amount)); + return amount; + } + + template<int datasize> + static int encodePositiveImmediate(unsigned pimm) + { + ASSERT(!(pimm & ((datasize / 8) - 1))); + return pimm / (datasize / 8); + } + + enum Datasize { + Datasize_32, + Datasize_64, + Datasize_64_top, + Datasize_16 + }; + + enum MemOpSize { + MemOpSize_8_or_128, + MemOpSize_16, + MemOpSize_32, + MemOpSize_64, + }; + + enum BranchType { + BranchType_JMP, + BranchType_CALL, + BranchType_RET + }; + + enum AddOp { + AddOp_ADD, + AddOp_SUB + }; + + enum BitfieldOp { + BitfieldOp_SBFM, + BitfieldOp_BFM, + BitfieldOp_UBFM + }; + + enum DataOp1Source { + DataOp_RBIT, + DataOp_REV16, + DataOp_REV32, + DataOp_REV64, + DataOp_CLZ, + DataOp_CLS + }; + + enum DataOp2Source { + DataOp_UDIV = 2, + DataOp_SDIV = 3, + DataOp_LSLV = 8, + DataOp_LSRV = 9, + DataOp_ASRV = 10, + DataOp_RORV = 11 + }; + + enum DataOp3Source { + DataOp_MADD = 0, + DataOp_MSUB = 1, + DataOp_SMADDL = 2, + DataOp_SMSUBL = 3, + DataOp_SMULH = 4, + DataOp_UMADDL = 10, + DataOp_UMSUBL = 11, + DataOp_UMULH = 12 + }; + + enum ExcepnOp { + ExcepnOp_EXCEPTION = 0, + ExcepnOp_BREAKPOINT = 1, +
ExcepnOp_HALT = 2, + ExcepnOp_DCPS = 5 + }; + + enum FPCmpOp { + FPCmpOp_FCMP = 0x00, + FPCmpOp_FCMP0 = 0x08, + FPCmpOp_FCMPE = 0x10, + FPCmpOp_FCMPE0 = 0x18 + }; + + enum FPCondCmpOp { + FPCondCmpOp_FCMP, + FPCondCmpOp_FCMPE + }; + + enum FPDataOp1Source { + FPDataOp_FMOV = 0, + FPDataOp_FABS = 1, + FPDataOp_FNEG = 2, + FPDataOp_FSQRT = 3, + FPDataOp_FCVT_toSingle = 4, + FPDataOp_FCVT_toDouble = 5, + FPDataOp_FCVT_toHalf = 7, + FPDataOp_FRINTN = 8, + FPDataOp_FRINTP = 9, + FPDataOp_FRINTM = 10, + FPDataOp_FRINTZ = 11, + FPDataOp_FRINTA = 12, + FPDataOp_FRINTX = 14, + FPDataOp_FRINTI = 15 + }; + + enum FPDataOp2Source { + FPDataOp_FMUL, + FPDataOp_FDIV, + FPDataOp_FADD, + FPDataOp_FSUB, + FPDataOp_FMAX, + FPDataOp_FMIN, + FPDataOp_FMAXNM, + FPDataOp_FMINNM, + FPDataOp_FNMUL + }; + + enum SIMD3Same { + SIMD_LogicalOp_AND = 0x03 + }; + + enum FPIntConvOp { + FPIntConvOp_FCVTNS = 0x00, + FPIntConvOp_FCVTNU = 0x01, + FPIntConvOp_SCVTF = 0x02, + FPIntConvOp_UCVTF = 0x03, + FPIntConvOp_FCVTAS = 0x04, + FPIntConvOp_FCVTAU = 0x05, + FPIntConvOp_FMOV_QtoX = 0x06, + FPIntConvOp_FMOV_XtoQ = 0x07, + FPIntConvOp_FCVTPS = 0x08, + FPIntConvOp_FCVTPU = 0x09, + FPIntConvOp_FMOV_QtoX_top = 0x0e, + FPIntConvOp_FMOV_XtoQ_top = 0x0f, + FPIntConvOp_FCVTMS = 0x10, + FPIntConvOp_FCVTMU = 0x11, + FPIntConvOp_FCVTZS = 0x18, + FPIntConvOp_FCVTZU = 0x19, + }; + + enum LogicalOp { + LogicalOp_AND, + LogicalOp_ORR, + LogicalOp_EOR, + LogicalOp_ANDS + }; + + enum MemOp { + MemOp_STORE, + MemOp_LOAD, + MemOp_STORE_V128, + MemOp_LOAD_V128, + MemOp_PREFETCH = 2, // size must be 3 + MemOp_LOAD_signed64 = 2, // size may be 0, 1 or 2 + MemOp_LOAD_signed32 = 3 // size may be 0 or 1 + }; + + enum MemPairOpSize { + MemPairOp_32 = 0, + MemPairOp_LoadSigned_32 = 1, + MemPairOp_64 = 2, + + MemPairOp_V32 = MemPairOp_32, + MemPairOp_V64 = 1, + MemPairOp_V128 = 2 + }; + + enum MoveWideOp { + MoveWideOp_N = 0, + MoveWideOp_Z = 2, + MoveWideOp_K = 3 + }; + + enum LdrLiteralOp { + LdrLiteralOp_32BIT = 0, + LdrLiteralOp_64BIT = 1, + LdrLiteralOp_LDRSW = 2, + LdrLiteralOp_128BIT = 2 + }; + + static unsigned memPairOffsetShift(bool V, MemPairOpSize size) + { + // return the log2 of the size in bytes, e.g. 
64 bit size returns 3 + if (V) + return size + 2; + return (size >> 1) + 2; + } + +public: + // Integer Instructions: + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void adc(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(addSubtractWithCarry(DATASIZE, AddOp_ADD, setFlags, rm, rn, rd)); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0) + { + CHECK_DATASIZE(); + ASSERT(!shift || shift == 12); + insn(addSubtractImmediate(DATASIZE, AddOp_ADD, setFlags, shift == 12, imm12, rn, rd)); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm) + { + add<datasize, setFlags>(rd, rn, rm, LSL, 0); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_DATASIZE(); + insn(addSubtractExtendedRegister(DATASIZE, AddOp_ADD, setFlags, rm, extend, amount, rn, rd)); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + if (isSp(rd) || isSp(rn)) { + ASSERT(shift == LSL); + ASSERT(!isSp(rm)); + add<datasize, setFlags>(rd, rn, rm, UXTX, amount); + } else + insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd)); + } + + ALWAYS_INLINE void adr(RegisterID rd, int offset) + { + insn(pcRelative(false, offset, rd)); + } + + ALWAYS_INLINE void adrp(RegisterID rd, int offset) + { + ASSERT(!(offset & 0xfff)); + insn(pcRelative(true, offset >> 12, rd)); + nopCortexA53Fix843419(); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm) + { + and_<datasize, setFlags>(rd, rn, rm, LSL, 0); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, false, rm, amount, rn, rd)); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, LogicalImmediate imm) + { + CHECK_DATASIZE(); + insn(logicalImmediate(DATASIZE, setFlags ? 
LogicalOp_ANDS : LogicalOp_AND, imm.value(), rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, int shift) + { + ASSERT(shift < datasize); + sbfm<datasize>(rd, rn, shift, datasize - 1); + } + + template<int datasize> + ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm) + { + asrv<datasize>(rd, rn, rm); + } + + template<int datasize> + ALWAYS_INLINE void asrv(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(dataProcessing2Source(DATASIZE, rm, DataOp_ASRV, rn, rd)); + } + + ALWAYS_INLINE void b(int32_t offset = 0) + { + ASSERT(!(offset & 3)); + offset >>= 2; + ASSERT(offset == (offset << 6) >> 6); + insn(unconditionalBranchImmediate(false, offset)); + } + + ALWAYS_INLINE void b_cond(Condition cond, int32_t offset = 0) + { + ASSERT(!(offset & 3)); + offset >>= 2; + ASSERT(offset == (offset << 13) >> 13); + insn(conditionalBranchImmediate(offset, cond)); + } + + template<int datasize> + ALWAYS_INLINE void bfi(RegisterID rd, RegisterID rn, int lsb, int width) + { + bfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1); + } + + template<int datasize> + ALWAYS_INLINE void bfm(RegisterID rd, RegisterID rn, int immr, int imms) + { + CHECK_DATASIZE(); + insn(bitfield(DATASIZE, BitfieldOp_BFM, immr, imms, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void bfxil(RegisterID rd, RegisterID rn, int lsb, int width) + { + bfm<datasize>(rd, rn, lsb, lsb + width - 1); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm) + { + bic<datasize, setFlags>(rd, rn, rm, LSL, 0); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + insn(logicalShiftedRegister(DATASIZE, setFlags ? 
LogicalOp_ANDS : LogicalOp_AND, shift, true, rm, amount, rn, rd)); + } + + ALWAYS_INLINE void bl(int32_t offset = 0) + { + ASSERT(!(offset & 3)); + offset >>= 2; + insn(unconditionalBranchImmediate(true, offset)); + } + + ALWAYS_INLINE void blr(RegisterID rn) + { + insn(unconditionalBranchRegister(BranchType_CALL, rn)); + } + + ALWAYS_INLINE void br(RegisterID rn) + { + insn(unconditionalBranchRegister(BranchType_JMP, rn)); + } + + ALWAYS_INLINE void brk(uint16_t imm) + { + insn(excepnGeneration(ExcepnOp_BREAKPOINT, imm, 0)); + } + + template<int datasize> + ALWAYS_INLINE void cbnz(RegisterID rt, int32_t offset = 0) + { + CHECK_DATASIZE(); + ASSERT(!(offset & 3)); + offset >>= 2; + insn(compareAndBranchImmediate(DATASIZE, true, offset, rt)); + } + + template<int datasize> + ALWAYS_INLINE void cbz(RegisterID rt, int32_t offset = 0) + { + CHECK_DATASIZE(); + ASSERT(!(offset & 3)); + offset >>= 2; + insn(compareAndBranchImmediate(DATASIZE, false, offset, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ccmn(RegisterID rn, RegisterID rm, int nzcv, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalCompareRegister(DATASIZE, AddOp_ADD, rm, cond, rn, nzcv)); + } + + template<int datasize> + ALWAYS_INLINE void ccmn(RegisterID rn, UInt5 imm, int nzcv, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalCompareImmediate(DATASIZE, AddOp_ADD, imm, cond, rn, nzcv)); + } + + template<int datasize> + ALWAYS_INLINE void ccmp(RegisterID rn, RegisterID rm, int nzcv, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalCompareRegister(DATASIZE, AddOp_SUB, rm, cond, rn, nzcv)); + } + + template<int datasize> + ALWAYS_INLINE void ccmp(RegisterID rn, UInt5 imm, int nzcv, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalCompareImmediate(DATASIZE, AddOp_SUB, imm, cond, rn, nzcv)); + } + + template<int datasize> + ALWAYS_INLINE void cinc(RegisterID rd, RegisterID rn, Condition cond) + { + csinc<datasize>(rd, rn, rn, invert(cond)); + } + + template<int datasize> + ALWAYS_INLINE void cinv(RegisterID rd, RegisterID rn, Condition cond) + { + csinv<datasize>(rd, rn, rn, invert(cond)); + } + + template<int datasize> + ALWAYS_INLINE void cls(RegisterID rd, RegisterID rn) + { + CHECK_DATASIZE(); + insn(dataProcessing1Source(DATASIZE, DataOp_CLS, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void clz(RegisterID rd, RegisterID rn) + { + CHECK_DATASIZE(); + insn(dataProcessing1Source(DATASIZE, DataOp_CLZ, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void cmn(RegisterID rn, UInt12 imm12, int shift = 0) + { + add<datasize, S>(ARM64Registers::zr, rn, imm12, shift); + } + + template<int datasize> + ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm) + { + add<datasize, S>(ARM64Registers::zr, rn, rm); + } + + template<int datasize> + ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + add<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount); + } + + template<int datasize> + ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + add<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount); + } + + template<int datasize> + ALWAYS_INLINE void cmp(RegisterID rn, UInt12 imm12, int shift = 0) + { + sub<datasize, S>(ARM64Registers::zr, rn, imm12, shift); + } + + template<int datasize> + ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm) + { + sub<datasize, S>(ARM64Registers::zr, rn, rm); + } + + template<int datasize> + ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ExtendType extend, int amount) 
+ { + sub<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount); + } + + template<int datasize> + ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + sub<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount); + } + + template<int datasize> + ALWAYS_INLINE void cneg(RegisterID rd, RegisterID rn, Condition cond) + { + csneg<datasize>(rd, rn, rn, invert(cond)); + } + + template<int datasize> + ALWAYS_INLINE void csel(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalSelect(DATASIZE, false, rm, cond, false, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void cset(RegisterID rd, Condition cond) + { + csinc<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond)); + } + + template<int datasize> + ALWAYS_INLINE void csetm(RegisterID rd, Condition cond) + { + csinv<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond)); + } + + template<int datasize> + ALWAYS_INLINE void csinc(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalSelect(DATASIZE, false, rm, cond, true, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void csinv(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalSelect(DATASIZE, true, rm, cond, false, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void csneg(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalSelect(DATASIZE, true, rm, cond, true, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm) + { + eon<datasize>(rd, rn, rm, LSL, 0); + } + + template<int datasize> + ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, true, rm, amount, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm) + { + eor<datasize>(rd, rn, rm, LSL, 0); + } + + template<int datasize> + ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, false, rm, amount, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, LogicalImmediate imm) + { + CHECK_DATASIZE(); + insn(logicalImmediate(DATASIZE, LogicalOp_EOR, imm.value(), rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void extr(RegisterID rd, RegisterID rn, RegisterID rm, int lsb) + { + CHECK_DATASIZE(); + insn(extract(DATASIZE, rm, lsb, rn, rd)); + } + + ALWAYS_INLINE void hint(int imm) + { + insn(hintPseudo(imm)); + } + + ALWAYS_INLINE void hlt(uint16_t imm) + { + insn(excepnGeneration(ExcepnOp_HALT, imm, 0)); + } + + template<int datasize> + ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2)); + } + + template<int datasize> + ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2)); + } + + template<int datasize> + ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm) + { + ldr<datasize>(rt, rn, rm, UXTX, 0); 
+ } + + template<int datasize> + ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, unsigned pimm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PreIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldr_literal(RegisterID rt, int offset = 0) + { + CHECK_DATASIZE(); + ASSERT(!(offset & 3)); + insn(loadRegisterLiteral(datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, false, offset >> 2, rt)); + } + + ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm) + { + // Not calling the 5 argument form of ldrb, since is amount is ommitted S is false. + insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, UXTX, false, rn, rt)); + } + + ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + ASSERT_UNUSED(amount, !amount); + insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, extend, true, rn, rt)); + } + + ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, unsigned pimm) + { + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, encodePositiveImmediate<8>(pimm), rn, rt)); + } + + ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PostIndex simm) + { + insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt)); + } + + ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PreIndex simm) + { + insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt)); + } + + ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm) + { + ldrh(rt, rn, rm, UXTX, 0); + } + + ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + ASSERT(!amount || amount == 1); + insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_LOAD, rm, extend, amount == 1, rn, rt)); + } + + ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, unsigned pimm) + { + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_LOAD, encodePositiveImmediate<16>(pimm), rn, rt)); + } + + ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PostIndex simm) + { + insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt)); + } + + ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PreIndex simm) + { + insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + // Not calling the 5 argument form of ldrsb, since is amount is ommitted S is false. + insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? 
MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, UXTX, false, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_DATASIZE(); + ASSERT_UNUSED(amount, !amount); + insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, true, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, unsigned pimm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<8>(pimm), rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PreIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm) + { + ldrsh<datasize>(rt, rn, rm, UXTX, 0); + } + + template<int datasize> + ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_DATASIZE(); + ASSERT(!amount || amount == 1); + insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, amount == 1, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, unsigned pimm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<16>(pimm), rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPostIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PreIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPreIndex(MemOpSize_16, false, (datasize == 64) ? 
MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); + } + + ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm) + { + ldrsw(rt, rn, rm, UXTX, 0); + } + + ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + ASSERT(!amount || amount == 2); + insn(loadStoreRegisterRegisterOffset(MemOpSize_32, false, MemOp_LOAD_signed64, rm, extend, amount == 2, rn, rt)); + } + + ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, unsigned pimm) + { + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, encodePositiveImmediate<32>(pimm), rn, rt)); + } + + ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PostIndex simm) + { + insn(loadStoreRegisterPostIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt)); + } + + ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PreIndex simm) + { + insn(loadStoreRegisterPreIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt)); + } + + ALWAYS_INLINE void ldrsw_literal(RegisterID rt, int offset = 0) + { + ASSERT(!(offset & 3)); + insn(loadRegisterLiteral(LdrLiteralOp_LDRSW, false, offset >> 2, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldur(RegisterID rt, RegisterID rn, int simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt)); + } + + ALWAYS_INLINE void ldurb(RegisterID rt, RegisterID rn, int simm) + { + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt)); + } + + ALWAYS_INLINE void ldurh(RegisterID rt, RegisterID rn, int simm) + { + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldursb(RegisterID rt, RegisterID rn, int simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldursh(RegisterID rt, RegisterID rn, int simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, (datasize == 64) ? 
MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); + } + + ALWAYS_INLINE void ldursw(RegisterID rt, RegisterID rn, int simm) + { + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, int shift) + { + ASSERT(shift < datasize); + ubfm<datasize>(rd, rn, (datasize - shift) & (datasize - 1), datasize - 1 - shift); + } + + template<int datasize> + ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm) + { + lslv<datasize>(rd, rn, rm); + } + + template<int datasize> + ALWAYS_INLINE void lslv(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSLV, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, int shift) + { + ASSERT(shift < datasize); + ubfm<datasize>(rd, rn, shift, datasize - 1); + } + + template<int datasize> + ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm) + { + lsrv<datasize>(rd, rn, rm); + } + + template<int datasize> + ALWAYS_INLINE void lsrv(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSRV, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) + { + CHECK_DATASIZE(); + nopCortexA53Fix835769<datasize>(); + insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void mneg(RegisterID rd, RegisterID rn, RegisterID rm) + { + msub<datasize>(rd, rn, rm, ARM64Registers::zr); + } + + template<int datasize> + ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm) + { + if (isSp(rd) || isSp(rm)) + add<datasize>(rd, rm, UInt12(0)); + else + orr<datasize>(rd, ARM64Registers::zr, rm); + } + + template<int datasize> + ALWAYS_INLINE void movi(RegisterID rd, LogicalImmediate imm) + { + orr<datasize>(rd, ARM64Registers::zr, imm); + } + + template<int datasize> + ALWAYS_INLINE void movk(RegisterID rd, uint16_t value, int shift = 0) + { + CHECK_DATASIZE(); + ASSERT(!(shift & 0xf)); + insn(moveWideImediate(DATASIZE, MoveWideOp_K, shift >> 4, value, rd)); + } + + template<int datasize> + ALWAYS_INLINE void movn(RegisterID rd, uint16_t value, int shift = 0) + { + CHECK_DATASIZE(); + ASSERT(!(shift & 0xf)); + insn(moveWideImediate(DATASIZE, MoveWideOp_N, shift >> 4, value, rd)); + } + + template<int datasize> + ALWAYS_INLINE void movz(RegisterID rd, uint16_t value, int shift = 0) + { + CHECK_DATASIZE(); + ASSERT(!(shift & 0xf)); + insn(moveWideImediate(DATASIZE, MoveWideOp_Z, shift >> 4, value, rd)); + } + + template<int datasize> + ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) + { + CHECK_DATASIZE(); + nopCortexA53Fix835769<datasize>(); + insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void mul(RegisterID rd, RegisterID rn, RegisterID rm) + { + madd<datasize>(rd, rn, rm, ARM64Registers::zr); + } + + template<int datasize> + ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm) + { + orn<datasize>(rd, ARM64Registers::zr, rm); + } + + template<int datasize> + ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftType shift, int amount) + { + orn<datasize>(rd, ARM64Registers::zr, rm, shift, amount); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm) + { 
+ sub<datasize, setFlags>(rd, ARM64Registers::zr, rm); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm, ShiftType shift, int amount) + { + sub<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm) + { + sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm, ShiftType shift, int amount) + { + sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount); + } + + ALWAYS_INLINE void nop() + { + insn(nopPseudo()); + } + + static void fillNops(void* base, size_t size) + { + RELEASE_ASSERT(!(size % sizeof(int32_t))); + size_t n = size / sizeof(int32_t); + for (int32_t* ptr = static_cast<int32_t*>(base); n--;) + *ptr++ = nopPseudo(); + } + + ALWAYS_INLINE void dmbSY() + { + insn(0xd5033fbf); + } + + template<int datasize> + ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm) + { + orn<datasize>(rd, rn, rm, LSL, 0); + } + + template<int datasize> + ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, true, rm, amount, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm) + { + orr<datasize>(rd, rn, rm, LSL, 0); + } + + template<int datasize> + ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, false, rm, amount, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, LogicalImmediate imm) + { + CHECK_DATASIZE(); + insn(logicalImmediate(DATASIZE, LogicalOp_ORR, imm.value(), rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void rbit(RegisterID rd, RegisterID rn) + { + CHECK_DATASIZE(); + insn(dataProcessing1Source(DATASIZE, DataOp_RBIT, rn, rd)); + } + + ALWAYS_INLINE void ret(RegisterID rn = ARM64Registers::lr) + { + insn(unconditionalBranchRegister(BranchType_RET, rn)); + } + + template<int datasize> + ALWAYS_INLINE void rev(RegisterID rd, RegisterID rn) + { + CHECK_DATASIZE(); + if (datasize == 32) // 'rev' mnemonic means REV32 or REV64 depending on the operand width. + insn(dataProcessing1Source(Datasize_32, DataOp_REV32, rn, rd)); + else + insn(dataProcessing1Source(Datasize_64, DataOp_REV64, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void rev16(RegisterID rd, RegisterID rn) + { + CHECK_DATASIZE(); + insn(dataProcessing1Source(DATASIZE, DataOp_REV16, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void rev32(RegisterID rd, RegisterID rn) + { + ASSERT(datasize == 64); // 'rev32' only valid with 64-bit operands. 
+ insn(dataProcessing1Source(Datasize_64, DataOp_REV32, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm) + { + rorv<datasize>(rd, rn, rm); + } + + template<int datasize> + ALWAYS_INLINE void ror(RegisterID rd, RegisterID rs, int shift) + { + extr<datasize>(rd, rs, rs, shift); + } + + template<int datasize> + ALWAYS_INLINE void rorv(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(dataProcessing2Source(DATASIZE, rm, DataOp_RORV, rn, rd)); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void sbc(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(addSubtractWithCarry(DATASIZE, AddOp_SUB, setFlags, rm, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void sbfiz(RegisterID rd, RegisterID rn, int lsb, int width) + { + sbfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1); + } + + template<int datasize> + ALWAYS_INLINE void sbfm(RegisterID rd, RegisterID rn, int immr, int imms) + { + CHECK_DATASIZE(); + insn(bitfield(DATASIZE, BitfieldOp_SBFM, immr, imms, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void sbfx(RegisterID rd, RegisterID rn, int lsb, int width) + { + sbfm<datasize>(rd, rn, lsb, lsb + width - 1); + } + + template<int datasize> + ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(dataProcessing2Source(DATASIZE, rm, DataOp_SDIV, rn, rd)); + } + + ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) + { + nopCortexA53Fix835769<64>(); + insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd)); + } + + ALWAYS_INLINE void smnegl(RegisterID rd, RegisterID rn, RegisterID rm) + { + smsubl(rd, rn, rm, ARM64Registers::zr); + } + + ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) + { + nopCortexA53Fix835769<64>(); + insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd)); + } + + ALWAYS_INLINE void smulh(RegisterID rd, RegisterID rn, RegisterID rm) + { + insn(dataProcessing3Source(Datasize_64, DataOp_SMULH, rm, ARM64Registers::zr, rn, rd)); + } + + ALWAYS_INLINE void smull(RegisterID rd, RegisterID rn, RegisterID rm) + { + smaddl(rd, rn, rm, ARM64Registers::zr); + } + + template<int datasize> + ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2)); + } + + template<int datasize> + ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2)); + } + + template<int datasize> + ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm) + { + str<datasize>(rt, rn, rm, UXTX, 0); + } + + template<int datasize> + ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, unsigned pimm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE 
void str(RegisterID rt, RegisterID rn, PostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PreIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt)); + } + + ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm) + { + // Not calling the 5 argument form of strb, since is amount is ommitted S is false. + insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, UXTX, false, rn, rt)); + } + + ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + ASSERT_UNUSED(amount, !amount); + insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, extend, true, rn, rt)); + } + + ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, unsigned pimm) + { + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_STORE, encodePositiveImmediate<8>(pimm), rn, rt)); + } + + ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PostIndex simm) + { + insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt)); + } + + ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PreIndex simm) + { + insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt)); + } + + ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm) + { + strh(rt, rn, rm, UXTX, 0); + } + + ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + ASSERT(!amount || amount == 1); + insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_STORE, rm, extend, amount == 1, rn, rt)); + } + + ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, unsigned pimm) + { + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_STORE, encodePositiveImmediate<16>(pimm), rn, rt)); + } + + ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PostIndex simm) + { + insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt)); + } + + ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PreIndex simm) + { + insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void stur(RegisterID rt, RegisterID rn, int simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt)); + } + + ALWAYS_INLINE void sturb(RegisterID rt, RegisterID rn, int simm) + { + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt)); + } + + ALWAYS_INLINE void sturh(RegisterID rt, RegisterID rn, int simm) + { + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_STORE, simm, rn, rt)); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0) + { + CHECK_DATASIZE(); + ASSERT(!shift || shift == 12); + insn(addSubtractImmediate(DATASIZE, AddOp_SUB, setFlags, shift == 12, imm12, rn, rd)); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm) + { + ASSERT_WITH_MESSAGE(!isSp(rd) || setFlags == DontSetFlags, "SUBS with shifted register does not support SP for Xd, it uses XZR for the register 31. 
SUBS with extended register support SP for Xd, but only if SetFlag is not used, otherwise register 31 is Xd."); + ASSERT_WITH_MESSAGE(!isSp(rm), "No encoding of SUBS supports SP for the third operand."); + + if (isSp(rd) || isSp(rn)) + sub<datasize, setFlags>(rd, rn, rm, UXTX, 0); + else + sub<datasize, setFlags>(rd, rn, rm, LSL, 0); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_DATASIZE(); + insn(addSubtractExtendedRegister(DATASIZE, AddOp_SUB, setFlags, rm, extend, amount, rn, rd)); + } + + template<int datasize, SetFlags setFlags = DontSetFlags> + ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + ASSERT(!isSp(rd) && !isSp(rn) && !isSp(rm)); + insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void sxtb(RegisterID rd, RegisterID rn) + { + sbfm<datasize>(rd, rn, 0, 7); + } + + template<int datasize> + ALWAYS_INLINE void sxth(RegisterID rd, RegisterID rn) + { + sbfm<datasize>(rd, rn, 0, 15); + } + + ALWAYS_INLINE void sxtw(RegisterID rd, RegisterID rn) + { + sbfm<64>(rd, rn, 0, 31); + } + + ALWAYS_INLINE void tbz(RegisterID rt, int imm, int offset = 0) + { + ASSERT(!(offset & 3)); + offset >>= 2; + insn(testAndBranchImmediate(false, imm, offset, rt)); + } + + ALWAYS_INLINE void tbnz(RegisterID rt, int imm, int offset = 0) + { + ASSERT(!(offset & 3)); + offset >>= 2; + insn(testAndBranchImmediate(true, imm, offset, rt)); + } + + template<int datasize> + ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm) + { + and_<datasize, S>(ARM64Registers::zr, rn, rm); + } + + template<int datasize> + ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + and_<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount); + } + + template<int datasize> + ALWAYS_INLINE void tst(RegisterID rn, LogicalImmediate imm) + { + and_<datasize, S>(ARM64Registers::zr, rn, imm); + } + + template<int datasize> + ALWAYS_INLINE void ubfiz(RegisterID rd, RegisterID rn, int lsb, int width) + { + ubfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1); + } + + template<int datasize> + ALWAYS_INLINE void ubfm(RegisterID rd, RegisterID rn, int immr, int imms) + { + CHECK_DATASIZE(); + insn(bitfield(DATASIZE, BitfieldOp_UBFM, immr, imms, rn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, int lsb, int width) + { + ubfm<datasize>(rd, rn, lsb, lsb + width - 1); + } + + template<int datasize> + ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(dataProcessing2Source(DATASIZE, rm, DataOp_UDIV, rn, rd)); + } + + ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) + { + nopCortexA53Fix835769<64>(); + insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd)); + } + + ALWAYS_INLINE void umnegl(RegisterID rd, RegisterID rn, RegisterID rm) + { + umsubl(rd, rn, rm, ARM64Registers::zr); + } + + ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) + { + nopCortexA53Fix835769<64>(); + insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd)); + } + + ALWAYS_INLINE void umulh(RegisterID rd, RegisterID rn, RegisterID rm) + { + insn(dataProcessing3Source(Datasize_64, DataOp_UMULH, rm, ARM64Registers::zr, rn, rd)); + } + + 
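The multiply helpers above illustrate the aliasing pattern this header uses throughout: mul, mneg, smull, smnegl and umnegl simply forward to madd, msub, smaddl, smsubl or umsubl with ARM64Registers::zr as the accumulator, and the multiply-accumulate emitters prepend the Cortex-A53 erratum 835769 workaround nop when required. A minimal sketch of how these emitters compose, for illustration only (not part of the patch; it assumes ARM64Assembler is driven directly rather than through JSC's MacroAssemblerARM64 layer, and it ignores linking and executable-memory handling):

    ARM64Assembler a;
    // Full 64 x 64 -> 128-bit unsigned multiply of x0 and x1.
    // mul<64> forwards to madd<64> with the zero register as the accumulator;
    // umulh supplies the high 64 bits of the product.
    a.mul<64>(ARM64Registers::x2, ARM64Registers::x0, ARM64Registers::x1); // x2 = low 64 bits
    a.umulh(ARM64Registers::x3, ARM64Registers::x0, ARM64Registers::x1);   // x3 = high 64 bits
    a.ret();                                                               // return via lr
    // The encoded words accumulate in a.buffer(); copying them into executable
    // memory and resolving branches is handled by the layers above this class.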
ALWAYS_INLINE void umull(RegisterID rd, RegisterID rn, RegisterID rm) + { + umaddl(rd, rn, rm, ARM64Registers::zr); + } + + template<int datasize> + ALWAYS_INLINE void uxtb(RegisterID rd, RegisterID rn) + { + ubfm<datasize>(rd, rn, 0, 7); + } + + template<int datasize> + ALWAYS_INLINE void uxth(RegisterID rd, RegisterID rn) + { + ubfm<datasize>(rd, rn, 0, 15); + } + + ALWAYS_INLINE void uxtw(RegisterID rd, RegisterID rn) + { + ubfm<64>(rd, rn, 0, 31); + } + + // Floating Point Instructions: + + template<int datasize> + ALWAYS_INLINE void fabs(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FABS, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FADD, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fccmp(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond) + { + CHECK_DATASIZE(); + insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMP, nzcv)); + } + + template<int datasize> + ALWAYS_INLINE void fccmpe(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond) + { + CHECK_DATASIZE(); + insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMPE, nzcv)); + } + + template<int datasize> + ALWAYS_INLINE void fcmp(FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMP)); + } + + template<int datasize> + ALWAYS_INLINE void fcmp_0(FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMP0)); + } + + template<int datasize> + ALWAYS_INLINE void fcmpe(FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMPE)); + } + + template<int datasize> + ALWAYS_INLINE void fcmpe_0(FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMPE0)); + } + + template<int datasize> + ALWAYS_INLINE void fcsel(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, Condition cond) + { + CHECK_DATASIZE(); + insn(floatingPointConditionalSelect(DATASIZE, vm, cond, vn, vd)); + } + + template<int dstsize, int srcsize> + ALWAYS_INLINE void fcvt(FPRegisterID vd, FPRegisterID vn) + { + ASSERT(dstsize == 16 || dstsize == 32 || dstsize == 64); + ASSERT(srcsize == 16 || srcsize == 32 || srcsize == 64); + ASSERT(dstsize != srcsize); + Datasize type = (srcsize == 64) ? Datasize_64 : (srcsize == 32) ? Datasize_32 : Datasize_16; + FPDataOp1Source opcode = (dstsize == 64) ? FPDataOp_FCVT_toDouble : (dstsize == 32) ? 
FPDataOp_FCVT_toSingle : FPDataOp_FCVT_toHalf; + insn(floatingPointDataProcessing1Source(type, opcode, vn, vd)); + } + + template<int dstsize, int srcsize> + ALWAYS_INLINE void fcvtas(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAS, vn, rd)); + } + + template<int dstsize, int srcsize> + ALWAYS_INLINE void fcvtau(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAU, vn, rd)); + } + + template<int dstsize, int srcsize> + ALWAYS_INLINE void fcvtms(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMS, vn, rd)); + } + + template<int dstsize, int srcsize> + ALWAYS_INLINE void fcvtmu(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMU, vn, rd)); + } + + template<int dstsize, int srcsize> + ALWAYS_INLINE void fcvtns(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNS, vn, rd)); + } + + template<int dstsize, int srcsize> + ALWAYS_INLINE void fcvtnu(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNU, vn, rd)); + } + + template<int dstsize, int srcsize> + ALWAYS_INLINE void fcvtps(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPS, vn, rd)); + } + + template<int dstsize, int srcsize> + ALWAYS_INLINE void fcvtpu(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPU, vn, rd)); + } + + template<int dstsize, int srcsize> + ALWAYS_INLINE void fcvtzs(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZS, vn, rd)); + } + + template<int dstsize, int srcsize> + ALWAYS_INLINE void fcvtzu(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZU, vn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void fdiv(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FDIV, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_ADD, va, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fmax(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, 
FPDataOp_FMAX, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fmaxnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAXNM, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fmin(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMIN, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fminnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMINNM, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fmov(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FMOV, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fmov(FPRegisterID vd, RegisterID rn) + { + CHECK_DATASIZE(); + insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_XtoQ, rn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fmov(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_QtoX, vn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void fmov(FPRegisterID vd, double imm) + { + CHECK_DATASIZE(); + insn(floatingPointImmediate(DATASIZE, encodeFPImm(imm), vd)); + } + + ALWAYS_INLINE void fmov_top(FPRegisterID vd, RegisterID rn) + { + insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_XtoQ_top, rn, vd)); + } + + ALWAYS_INLINE void fmov_top(RegisterID rd, FPRegisterID vn) + { + insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_QtoX_top, vn, rd)); + } + + template<int datasize> + ALWAYS_INLINE void fmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_SUB, va, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMUL, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fneg(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FNEG, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fnmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_ADD, va, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fnmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_SUB, va, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fnmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FNMUL, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void vand(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_VECTOR_DATASIZE(); + insn(vectorDataProcessing2Source(SIMD_LogicalOp_AND, vm, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void frinta(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTA, vn, vd)); + } + + template<int 
datasize> + ALWAYS_INLINE void frinti(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTI, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void frintm(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTM, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void frintn(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTN, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void frintp(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTP, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void frintx(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTX, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void frintz(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTZ, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fsqrt(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FSQRT, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void fsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FSUB, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm) + { + ldr<datasize>(rt, rn, rm, UXTX, 0); + } + + template<int datasize> + ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, unsigned pimm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PostIndex simm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PreIndex simm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldr_literal(FPRegisterID rt, int offset = 0) + { + CHECK_FP_MEMOP_DATASIZE(); + ASSERT(datasize >= 32); + ASSERT(!(offset & 3)); + insn(loadRegisterLiteral(datasize == 128 ? LdrLiteralOp_128BIT : datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, true, offset >> 2, rt)); + } + + template<int datasize> + ALWAYS_INLINE void ldur(FPRegisterID rt, RegisterID rn, int simm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? 
MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt)); + } + + template<int dstsize, int srcsize> + ALWAYS_INLINE void scvtf(FPRegisterID vd, RegisterID rn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_SCVTF, rn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm) + { + str<datasize>(rt, rn, rm, UXTX, 0); + } + + template<int datasize> + ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, unsigned pimm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PostIndex simm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PreIndex simm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt)); + } + + template<int datasize> + ALWAYS_INLINE void stur(FPRegisterID rt, RegisterID rn, int simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? 
MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt)); + } + + template<int dstsize, int srcsize> + ALWAYS_INLINE void ucvtf(FPRegisterID vd, RegisterID rn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_UCVTF, rn, vd)); + } + + // Admin methods: + + AssemblerLabel labelIgnoringWatchpoints() + { + return m_buffer.label(); + } + + AssemblerLabel labelForWatchpoint() + { + AssemblerLabel result = m_buffer.label(); + if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint) + result = label(); + m_indexOfLastWatchpoint = result.m_offset; + m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize(); + return result; + } + + AssemblerLabel label() + { + AssemblerLabel result = m_buffer.label(); + while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) { + nop(); + result = m_buffer.label(); + } + return result; + } + + AssemblerLabel align(int alignment) + { + ASSERT(!(alignment & 3)); + while (!m_buffer.isAligned(alignment)) + brk(0); + return label(); + } + + static void* getRelocatedAddress(void* code, AssemblerLabel label) + { + ASSERT(label.isSet()); + return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset); + } + + static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b) + { + return b.m_offset - a.m_offset; + } + + void* unlinkedCode() { return m_buffer.data(); } + size_t codeSize() const { return m_buffer.codeSize(); } + + static unsigned getCallReturnOffset(AssemblerLabel call) + { + ASSERT(call.isSet()); + return call.m_offset; + } + + // Linking & patching: + // + // 'link' and 'patch' methods are for use on unprotected code - such as the code + // within the AssemblerBuffer, and code being patched by the patch buffer. Once + // code has been finalized it is (platform support permitting) within a non- + // writable region of memory; to modify the code in an execute-only executable + // pool the 'repatch' and 'relink' methods should be used. 
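The comment above draws the key distinction for the methods that follow: 'link' edits instructions while they still sit in the writable AssemblerBuffer, whereas 'relink'/'repatch' rewrite a single 32-bit instruction word in already-finalized code and then flush the instruction cache for it (as replaceWithJump does below). A minimal sketch of that finalized-code patching pattern, assuming the word is writable at patch time and using a hypothetical callback in place of the real cacheFlush():

    #include <cstdint>
    #include <cstring>

    // Sketch only: overwrite one fixed-size 4-byte A64 instruction in place, then
    // flush the instruction cache for that word. 'flushICache' is a stand-in name.
    static void patchOneInstruction(void* where, uint32_t newInsn, void (*flushICache)(void*, size_t))
    {
        std::memcpy(where, &newInsn, sizeof(newInsn)); // A64 instructions are fixed 32-bit words
        flushICache(where, sizeof(newInsn));
    }
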
+ + void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition) + { + ASSERT(to.isSet()); + ASSERT(from.isSet()); + m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition)); + } + + void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister) + { + ASSERT(to.isSet()); + ASSERT(from.isSet()); + m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, is64Bit, compareRegister)); + } + + void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister) + { + ASSERT(to.isSet()); + ASSERT(from.isSet()); + m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, bitNumber, compareRegister)); + } + + void linkJump(AssemblerLabel from, AssemblerLabel to) + { + ASSERT(from.isSet()); + ASSERT(to.isSet()); + relinkJumpOrCall<false>(addressOf(from), addressOf(to)); + } + + static void linkJump(void* code, AssemblerLabel from, void* to) + { + ASSERT(from.isSet()); + relinkJumpOrCall<false>(addressOf(code, from), to); + } + + static void linkCall(void* code, AssemblerLabel from, void* to) + { + ASSERT(from.isSet()); + linkJumpOrCall<true>(addressOf(code, from) - 1, to); + } + + static void linkPointer(void* code, AssemblerLabel where, void* valuePtr) + { + linkPointer(addressOf(code, where), valuePtr); + } + + static void replaceWithJump(void* where, void* to) + { + intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(where)) >> 2; + ASSERT(static_cast<int>(offset) == offset); + *static_cast<int*>(where) = unconditionalBranchImmediate(false, static_cast<int>(offset)); + cacheFlush(where, sizeof(int)); + } + + static ptrdiff_t maxJumpReplacementSize() + { + return 4; + } + + static void replaceWithLoad(void* where) + { + Datasize sf; + AddOp op; + SetFlags S; + int shift; + int imm12; + RegisterID rn; + RegisterID rd; + if (disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd)) { + ASSERT(sf == Datasize_64); + ASSERT(op == AddOp_ADD); + ASSERT(!S); + ASSERT(!shift); + ASSERT(!(imm12 & ~0xff8)); + *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd); + cacheFlush(where, sizeof(int)); + } +#if !ASSERT_DISABLED + else { + MemOpSize size; + bool V; + MemOp opc; + int imm12; + RegisterID rn; + RegisterID rt; + ASSERT(disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt)); + ASSERT(size == MemOpSize_64); + ASSERT(!V); + ASSERT(opc == MemOp_LOAD); + ASSERT(!(imm12 & ~0x1ff)); + } +#endif + } + + static void replaceWithAddressComputation(void* where) + { + MemOpSize size; + bool V; + MemOp opc; + int imm12; + RegisterID rn; + RegisterID rt; + if (disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt)) { + ASSERT(size == MemOpSize_64); + ASSERT(!V); + ASSERT(opc == MemOp_LOAD); + ASSERT(!(imm12 & ~0x1ff)); + *static_cast<int*>(where) = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt); + cacheFlush(where, sizeof(int)); + } +#if !ASSERT_DISABLED + else { + Datasize sf; + AddOp op; + SetFlags S; + int shift; + int imm12; + RegisterID rn; + RegisterID rd; + ASSERT(disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd)); + ASSERT(sf == Datasize_64); + ASSERT(op == AddOp_ADD); + ASSERT(!S); + ASSERT(!shift); + ASSERT(!(imm12 & ~0xff8)); + } 
+#endif + } + + static void repatchPointer(void* where, void* valuePtr) + { + linkPointer(static_cast<int*>(where), valuePtr, true); + } + + static void setPointer(int* address, void* valuePtr, RegisterID rd, bool flush) + { + uintptr_t value = reinterpret_cast<uintptr_t>(valuePtr); + address[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd); + address[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd); + address[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd); + + if (flush) + cacheFlush(address, sizeof(int) * 3); + } + + static void repatchInt32(void* where, int32_t value) + { + int* address = static_cast<int*>(where); + + Datasize sf; + MoveWideOp opc; + int hw; + uint16_t imm16; + RegisterID rd; + bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd); + ASSERT_UNUSED(expected, expected && !sf && (opc == MoveWideOp_Z || opc == MoveWideOp_N) && !hw); + ASSERT(checkMovk<Datasize_32>(address[1], 1, rd)); + + if (value >= 0) { + address[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd); + address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd); + } else { + address[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd); + address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd); + } + + cacheFlush(where, sizeof(int) * 2); + } + + static void* readPointer(void* where) + { + int* address = static_cast<int*>(where); + + Datasize sf; + MoveWideOp opc; + int hw; + uint16_t imm16; + RegisterID rdFirst, rd; + + bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rdFirst); + ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw); + uintptr_t result = imm16; + + expected = disassembleMoveWideImediate(address + 1, sf, opc, hw, imm16, rd); + ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 1 && rd == rdFirst); + result |= static_cast<uintptr_t>(imm16) << 16; + + expected = disassembleMoveWideImediate(address + 2, sf, opc, hw, imm16, rd); + ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 2 && rd == rdFirst); + result |= static_cast<uintptr_t>(imm16) << 32; + + return reinterpret_cast<void*>(result); + } + + static void* readCallTarget(void* from) + { + return readPointer(reinterpret_cast<int*>(from) - 4); + } + + static void relinkJump(void* from, void* to) + { + relinkJumpOrCall<false>(reinterpret_cast<int*>(from), to); + cacheFlush(from, sizeof(int)); + } + + static void relinkCall(void* from, void* to) + { + relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, to); + cacheFlush(reinterpret_cast<int*>(from) - 1, sizeof(int)); + } + + static void repatchCompact(void* where, int32_t value) + { + ASSERT(!(value & ~0x3ff8)); + + MemOpSize size; + bool V; + MemOp opc; + int imm12; + RegisterID rn; + RegisterID rt; + bool expected = disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt); + ASSERT_UNUSED(expected, expected && size >= MemOpSize_32 && !V && opc == MemOp_LOAD); // expect 32/64 bit load to GPR. 
+ + if (size == MemOpSize_32) + imm12 = encodePositiveImmediate<32>(value); + else + imm12 = encodePositiveImmediate<64>(value); + *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt); + + cacheFlush(where, sizeof(int)); + } + + unsigned debugOffset() { return m_buffer.debugOffset(); } + +#if OS(LINUX) && COMPILER(GCC_OR_CLANG) + static inline void linuxPageFlush(uintptr_t begin, uintptr_t end) + { + __builtin___clear_cache(reinterpret_cast<char*>(begin), reinterpret_cast<char*>(end)); + } +#endif + + static void cacheFlush(void* code, size_t size) + { +#if OS(IOS) + sys_cache_control(kCacheFunctionPrepareForExecution, code, size); +#elif OS(LINUX) + size_t page = pageSize(); + uintptr_t current = reinterpret_cast<uintptr_t>(code); + uintptr_t end = current + size; + uintptr_t firstPageEnd = (current & ~(page - 1)) + page; + + if (end <= firstPageEnd) { + linuxPageFlush(current, end); + return; + } + + linuxPageFlush(current, firstPageEnd); + + for (current = firstPageEnd; current + page < end; current += page) + linuxPageFlush(current, current + page); + + linuxPageFlush(current, end); +#else +#error "The cacheFlush support is missing on this platform." +#endif + } + + // Assembler admin methods: + + static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); } + + static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b) + { + return a.from() < b.from(); + } + + static bool canCompact(JumpType jumpType) + { + // Fixed jumps cannot be compacted + return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit); + } + + static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) + { + switch (jumpType) { + case JumpFixed: + return LinkInvalid; + case JumpNoConditionFixedSize: + return LinkJumpNoCondition; + case JumpConditionFixedSize: + return LinkJumpCondition; + case JumpCompareAndBranchFixedSize: + return LinkJumpCompareAndBranch; + case JumpTestBitFixedSize: + return LinkJumpTestBit; + case JumpNoCondition: + return LinkJumpNoCondition; + case JumpCondition: { + ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3)); + ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3)); + intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from)); + + if (((relative << 43) >> 43) == relative) + return LinkJumpConditionDirect; + + return LinkJumpCondition; + } + case JumpCompareAndBranch: { + ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3)); + ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3)); + intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from)); + + if (((relative << 43) >> 43) == relative) + return LinkJumpCompareAndBranchDirect; + + return LinkJumpCompareAndBranch; + } + case JumpTestBit: { + ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3)); + ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3)); + intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from)); + + if (((relative << 50) >> 50) == relative) + return LinkJumpTestBitDirect; + + return LinkJumpTestBit; + } + default: + ASSERT_NOT_REACHED(); + } + + return LinkJumpNoCondition; + } + + static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) + { + JumpLinkType linkType = computeJumpType(record.type(), from, to); + record.setLinkType(linkType); + return linkType; + } + + 
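The `((relative << 43) >> 43) == relative` style tests in computeJumpType above are the usual shift-pair idiom for "this signed value fits in 64 - k bits": the left shift discards the high bits and the arithmetic right shift sign-extends what remains, so equality holds only when no information was lost. Here 64 - 43 = 21 bits of byte offset corresponds to the ±1 MiB reach of a 19-bit word-scaled branch immediate. A small standalone version of the check, for illustration only (it mirrors the source's idiom and, like it, assumes the usual two's-complement shift behaviour):

    #include <cassert>
    #include <cstdint>

    // Does 'value' fit in 'bits' signed bits? Same shift-pair test as the jump-range checks above.
    static bool fitsInSignedBits(intptr_t value, int bits)
    {
        const int shift = static_cast<int>(sizeof(intptr_t) * 8) - bits;
        return ((value << shift) >> shift) == value;
    }

    int main()
    {
        assert(fitsInSignedBits(0xfffff, 21));   // 1 MiB - 1: reachable by a 19-bit word-offset branch
        assert(!fitsInSignedBits(0x100000, 21)); // 1 MiB: just outside the signed 21-bit byte range
        return 0;
    }
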
Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() + { + std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator); + return m_jumpsToLink; + } + + static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to) + { + switch (record.linkType()) { + case LinkJumpNoCondition: + linkJumpOrCall<false>(reinterpret_cast<int*>(from), to); + break; + case LinkJumpConditionDirect: + linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), to); + break; + case LinkJumpCondition: + linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, to); + break; + case LinkJumpCompareAndBranchDirect: + linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), to); + break; + case LinkJumpCompareAndBranch: + linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to); + break; + case LinkJumpTestBitDirect: + linkTestAndBranch<true>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), to); + break; + case LinkJumpTestBit: + linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to); + break; + default: + ASSERT_NOT_REACHED(); + break; + } + } + +private: + template<Datasize size> + static bool checkMovk(int insn, int _hw, RegisterID _rd) + { + Datasize sf; + MoveWideOp opc; + int hw; + uint16_t imm16; + RegisterID rd; + bool expected = disassembleMoveWideImediate(&insn, sf, opc, hw, imm16, rd); + + return expected + && sf == size + && opc == MoveWideOp_K + && hw == _hw + && rd == _rd; + } + + static void linkPointer(int* address, void* valuePtr, bool flush = false) + { + Datasize sf; + MoveWideOp opc; + int hw; + uint16_t imm16; + RegisterID rd; + bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd); + ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw); + ASSERT(checkMovk<Datasize_64>(address[1], 1, rd)); + ASSERT(checkMovk<Datasize_64>(address[2], 2, rd)); + + setPointer(address, valuePtr, rd, flush); + } + + template<bool isCall> + static void linkJumpOrCall(int* from, void* to) + { + bool link; + int imm26; + bool isUnconditionalBranchImmediateOrNop = disassembleUnconditionalBranchImmediate(from, link, imm26) || disassembleNop(from); + + ASSERT_UNUSED(isUnconditionalBranchImmediateOrNop, isUnconditionalBranchImmediateOrNop); + ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from)); + ASSERT(!(reinterpret_cast<intptr_t>(from) & 3)); + ASSERT(!(reinterpret_cast<intptr_t>(to) & 3)); + intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; + ASSERT(static_cast<int>(offset) == offset); + + *from = unconditionalBranchImmediate(isCall, static_cast<int>(offset)); + } + + template<bool isDirect> + static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, void* to) + { + ASSERT(!(reinterpret_cast<intptr_t>(from) & 3)); + ASSERT(!(reinterpret_cast<intptr_t>(to) & 3)); + intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; + ASSERT(((offset << 38) >> 38) == offset); + + bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits + ASSERT(!isDirect || useDirect); + + if (useDirect || isDirect) { + *from = compareAndBranchImmediate(is64Bit ? 
Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt); + if (!isDirect) + *(from + 1) = nopPseudo(); + } else { + *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt); + linkJumpOrCall<false>(from + 1, to); + } + } + + template<bool isDirect> + static void linkConditionalBranch(Condition condition, int* from, void* to) + { + ASSERT(!(reinterpret_cast<intptr_t>(from) & 3)); + ASSERT(!(reinterpret_cast<intptr_t>(to) & 3)); + intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; + ASSERT(((offset << 38) >> 38) == offset); + + bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits + ASSERT(!isDirect || useDirect); + + if (useDirect || isDirect) { + *from = conditionalBranchImmediate(static_cast<int>(offset), condition); + if (!isDirect) + *(from + 1) = nopPseudo(); + } else { + *from = conditionalBranchImmediate(2, invert(condition)); + linkJumpOrCall<false>(from + 1, to); + } + } + + template<bool isDirect> + static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, void* to) + { + ASSERT(!(reinterpret_cast<intptr_t>(from) & 3)); + ASSERT(!(reinterpret_cast<intptr_t>(to) & 3)); + intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; + ASSERT(static_cast<int>(offset) == offset); + ASSERT(((offset << 38) >> 38) == offset); + + bool useDirect = ((offset << 50) >> 50) == offset; // Fits in 14 bits + ASSERT(!isDirect || useDirect); + + if (useDirect || isDirect) { + *from = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt); + if (!isDirect) + *(from + 1) = nopPseudo(); + } else { + *from = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt); + linkJumpOrCall<false>(from + 1, to); + } + } + + template<bool isCall> + static void relinkJumpOrCall(int* from, void* to) + { + if (!isCall && disassembleNop(from)) { + unsigned op01; + int imm19; + Condition condition; + bool isConditionalBranchImmediate = disassembleConditionalBranchImmediate(from - 1, op01, imm19, condition); + + if (isConditionalBranchImmediate) { + ASSERT_UNUSED(op01, !op01); + ASSERT_UNUSED(isCall, !isCall); + + if (imm19 == 8) + condition = invert(condition); + + linkConditionalBranch<false>(condition, from - 1, to); + return; + } + + Datasize opSize; + bool op; + RegisterID rt; + bool isCompareAndBranchImmediate = disassembleCompareAndBranchImmediate(from - 1, opSize, op, imm19, rt); + + if (isCompareAndBranchImmediate) { + if (imm19 == 8) + op = !op; + + linkCompareAndBranch<false>(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, to); + return; + } + + int imm14; + unsigned bitNumber; + bool isTestAndBranchImmediate = disassembleTestAndBranchImmediate(from - 1, op, bitNumber, imm14, rt); + + if (isTestAndBranchImmediate) { + if (imm14 == 8) + op = !op; + + linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, to); + return; + } + } + + linkJumpOrCall<isCall>(from, to); + } + + static int* addressOf(void* code, AssemblerLabel label) + { + return reinterpret_cast<int*>(static_cast<char*>(code) + label.m_offset); + } + + int* addressOf(AssemblerLabel label) + { + return addressOf(m_buffer.data(), label); + } + + static RegisterID disassembleXOrSp(int reg) { return reg == 31 ? 
ARM64Registers::sp : static_cast<RegisterID>(reg); } + static RegisterID disassembleXOrZr(int reg) { return reg == 31 ? ARM64Registers::zr : static_cast<RegisterID>(reg); } + static RegisterID disassembleXOrZrOrSp(bool useZr, int reg) { return reg == 31 ? (useZr ? ARM64Registers::zr : ARM64Registers::sp) : static_cast<RegisterID>(reg); } + + static bool disassembleAddSubtractImmediate(void* address, Datasize& sf, AddOp& op, SetFlags& S, int& shift, int& imm12, RegisterID& rn, RegisterID& rd) + { + int insn = *static_cast<int*>(address); + sf = static_cast<Datasize>((insn >> 31) & 1); + op = static_cast<AddOp>((insn >> 30) & 1); + S = static_cast<SetFlags>((insn >> 29) & 1); + shift = (insn >> 22) & 3; + imm12 = (insn >> 10) & 0x3ff; + rn = disassembleXOrSp((insn >> 5) & 0x1f); + rd = disassembleXOrZrOrSp(S, insn & 0x1f); + return (insn & 0x1f000000) == 0x11000000; + } + + static bool disassembleLoadStoreRegisterUnsignedImmediate(void* address, MemOpSize& size, bool& V, MemOp& opc, int& imm12, RegisterID& rn, RegisterID& rt) + { + int insn = *static_cast<int*>(address); + size = static_cast<MemOpSize>((insn >> 30) & 3); + V = (insn >> 26) & 1; + opc = static_cast<MemOp>((insn >> 22) & 3); + imm12 = (insn >> 10) & 0xfff; + rn = disassembleXOrSp((insn >> 5) & 0x1f); + rt = disassembleXOrZr(insn & 0x1f); + return (insn & 0x3b000000) == 0x39000000; + } + + static bool disassembleMoveWideImediate(void* address, Datasize& sf, MoveWideOp& opc, int& hw, uint16_t& imm16, RegisterID& rd) + { + int insn = *static_cast<int*>(address); + sf = static_cast<Datasize>((insn >> 31) & 1); + opc = static_cast<MoveWideOp>((insn >> 29) & 3); + hw = (insn >> 21) & 3; + imm16 = insn >> 5; + rd = disassembleXOrZr(insn & 0x1f); + return (insn & 0x1f800000) == 0x12800000; + } + + static bool disassembleNop(void* address) + { + unsigned insn = *static_cast<unsigned*>(address); + return insn == 0xd503201f; + } + + static bool disassembleCompareAndBranchImmediate(void* address, Datasize& sf, bool& op, int& imm19, RegisterID& rt) + { + int insn = *static_cast<int*>(address); + sf = static_cast<Datasize>((insn >> 31) & 1); + op = (insn >> 24) & 0x1; + imm19 = (insn << 8) >> 13; + rt = static_cast<RegisterID>(insn & 0x1f); + return (insn & 0x7e000000) == 0x34000000; + + } + + static bool disassembleConditionalBranchImmediate(void* address, unsigned& op01, int& imm19, Condition &condition) + { + int insn = *static_cast<int*>(address); + op01 = ((insn >> 23) & 0x2) | ((insn >> 4) & 0x1); + imm19 = (insn << 8) >> 13; + condition = static_cast<Condition>(insn & 0xf); + return (insn & 0xfe000000) == 0x54000000; + } + + static bool disassembleTestAndBranchImmediate(void* address, bool& op, unsigned& bitNumber, int& imm14, RegisterID& rt) + { + int insn = *static_cast<int*>(address); + op = (insn >> 24) & 0x1; + imm14 = (insn << 13) >> 18; + bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f)); + rt = static_cast<RegisterID>(insn & 0x1f); + return (insn & 0x7e000000) == 0x36000000; + + } + + static bool disassembleUnconditionalBranchImmediate(void* address, bool& op, int& imm26) + { + int insn = *static_cast<int*>(address); + op = (insn >> 31) & 1; + imm26 = (insn << 6) >> 6; + return (insn & 0x7c000000) == 0x14000000; + } + + static int xOrSp(RegisterID reg) + { + ASSERT(!isZr(reg)); + ASSERT(!isIOS() || reg != ARM64Registers::x18); + return reg; + } + static int xOrZr(RegisterID reg) + { + ASSERT(!isSp(reg)); + ASSERT(!isIOS() || reg != ARM64Registers::x18); + return reg & 31; + } + static 
FPRegisterID xOrZrAsFPR(RegisterID reg) { return static_cast<FPRegisterID>(xOrZr(reg)); } + static int xOrZrOrSp(bool useZr, RegisterID reg) { return useZr ? xOrZr(reg) : xOrSp(reg); } + + ALWAYS_INLINE void insn(int instruction) + { + m_buffer.putInt(instruction); + } + + ALWAYS_INLINE static int addSubtractExtendedRegister(Datasize sf, AddOp op, SetFlags S, RegisterID rm, ExtendType option, int imm3, RegisterID rn, RegisterID rd) + { + ASSERT(imm3 < 5); + // The only allocated values for opt is 0. + const int opt = 0; + return (0x0b200000 | sf << 31 | op << 30 | S << 29 | opt << 22 | xOrZr(rm) << 16 | option << 13 | (imm3 & 0x7) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd)); + } + + ALWAYS_INLINE static int addSubtractImmediate(Datasize sf, AddOp op, SetFlags S, int shift, int imm12, RegisterID rn, RegisterID rd) + { + ASSERT(shift < 2); + ASSERT(isUInt12(imm12)); + return (0x11000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd)); + } + + ALWAYS_INLINE static int addSubtractShiftedRegister(Datasize sf, AddOp op, SetFlags S, ShiftType shift, RegisterID rm, int imm6, RegisterID rn, RegisterID rd) + { + ASSERT(shift < 3); + ASSERT(!(imm6 & (sf ? ~63 : ~31))); + return (0x0b000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int addSubtractWithCarry(Datasize sf, AddOp op, SetFlags S, RegisterID rm, RegisterID rn, RegisterID rd) + { + const int opcode2 = 0; + return (0x1a000000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | opcode2 << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int bitfield(Datasize sf, BitfieldOp opc, int immr, int imms, RegisterID rn, RegisterID rd) + { + ASSERT(immr < (sf ? 64 : 32)); + ASSERT(imms < (sf ? 64 : 32)); + const int N = sf; + return (0x13000000 | sf << 31 | opc << 29 | N << 22 | immr << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + // 'op' means negate + ALWAYS_INLINE static int compareAndBranchImmediate(Datasize sf, bool op, int32_t imm19, RegisterID rt) + { + ASSERT(imm19 == (imm19 << 13) >> 13); + return (0x34000000 | sf << 31 | op << 24 | (imm19 & 0x7ffff) << 5 | xOrZr(rt)); + } + + ALWAYS_INLINE static int conditionalBranchImmediate(int32_t imm19, Condition cond) + { + ASSERT(imm19 == (imm19 << 13) >> 13); + ASSERT(!(cond & ~15)); + // The only allocated values for o1 & o0 are 0. 
+ const int o1 = 0; + const int o0 = 0; + return (0x54000000 | o1 << 24 | (imm19 & 0x7ffff) << 5 | o0 << 4 | cond); + } + + ALWAYS_INLINE static int conditionalCompareImmediate(Datasize sf, AddOp op, int imm5, Condition cond, RegisterID rn, int nzcv) + { + ASSERT(!(imm5 & ~0x1f)); + ASSERT(nzcv < 16); + const int S = 1; + const int o2 = 0; + const int o3 = 0; + return (0x1a400800 | sf << 31 | op << 30 | S << 29 | (imm5 & 0x1f) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv); + } + + ALWAYS_INLINE static int conditionalCompareRegister(Datasize sf, AddOp op, RegisterID rm, Condition cond, RegisterID rn, int nzcv) + { + ASSERT(nzcv < 16); + const int S = 1; + const int o2 = 0; + const int o3 = 0; + return (0x1a400000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv); + } + + // 'op' means negate + // 'op2' means increment + ALWAYS_INLINE static int conditionalSelect(Datasize sf, bool op, RegisterID rm, Condition cond, bool op2, RegisterID rn, RegisterID rd) + { + const int S = 0; + return (0x1a800000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | op2 << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int dataProcessing1Source(Datasize sf, DataOp1Source opcode, RegisterID rn, RegisterID rd) + { + const int S = 0; + const int opcode2 = 0; + return (0x5ac00000 | sf << 31 | S << 29 | opcode2 << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int dataProcessing2Source(Datasize sf, RegisterID rm, DataOp2Source opcode, RegisterID rn, RegisterID rd) + { + const int S = 0; + return (0x1ac00000 | sf << 31 | S << 29 | xOrZr(rm) << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int dataProcessing3Source(Datasize sf, DataOp3Source opcode, RegisterID rm, RegisterID ra, RegisterID rn, RegisterID rd) + { + int op54 = opcode >> 4; + int op31 = (opcode >> 1) & 7; + int op0 = opcode & 1; + return (0x1b000000 | sf << 31 | op54 << 29 | op31 << 21 | xOrZr(rm) << 16 | op0 << 15 | xOrZr(ra) << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int excepnGeneration(ExcepnOp opc, uint16_t imm16, int LL) + { + ASSERT((opc == ExcepnOp_BREAKPOINT || opc == ExcepnOp_HALT) ? !LL : (LL && (LL < 4))); + const int op2 = 0; + return (0xd4000000 | opc << 21 | imm16 << 5 | op2 << 2 | LL); + } + + ALWAYS_INLINE static int extract(Datasize sf, RegisterID rm, int imms, RegisterID rn, RegisterID rd) + { + ASSERT(imms < (sf ? 
64 : 32)); + const int op21 = 0; + const int N = sf; + const int o0 = 0; + return (0x13800000 | sf << 31 | op21 << 29 | N << 22 | o0 << 21 | xOrZr(rm) << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int floatingPointCompare(Datasize type, FPRegisterID rm, FPRegisterID rn, FPCmpOp opcode2) + { + const int M = 0; + const int S = 0; + const int op = 0; + return (0x1e202000 | M << 31 | S << 29 | type << 22 | rm << 16 | op << 14 | rn << 5 | opcode2); + } + + ALWAYS_INLINE static int floatingPointConditionalCompare(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPCondCmpOp op, int nzcv) + { + ASSERT(nzcv < 16); + const int M = 0; + const int S = 0; + return (0x1e200400 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | op << 4 | nzcv); + } + + ALWAYS_INLINE static int floatingPointConditionalSelect(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPRegisterID rd) + { + const int M = 0; + const int S = 0; + return (0x1e200c00 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | rd); + } + + ALWAYS_INLINE static int floatingPointImmediate(Datasize type, int imm8, FPRegisterID rd) + { + const int M = 0; + const int S = 0; + const int imm5 = 0; + return (0x1e201000 | M << 31 | S << 29 | type << 22 | (imm8 & 0xff) << 13 | imm5 << 5 | rd); + } + + ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, FPRegisterID rd) + { + const int S = 0; + return (0x1e200000 | sf << 31 | S << 29 | type << 22 | rmodeOpcode << 16 | rn << 5 | rd); + } + + ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, RegisterID rd) + { + return floatingPointIntegerConversions(sf, type, rmodeOpcode, rn, xOrZrAsFPR(rd)); + } + + ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, RegisterID rn, FPRegisterID rd) + { + return floatingPointIntegerConversions(sf, type, rmodeOpcode, xOrZrAsFPR(rn), rd); + } + + ALWAYS_INLINE static int floatingPointDataProcessing1Source(Datasize type, FPDataOp1Source opcode, FPRegisterID rn, FPRegisterID rd) + { + const int M = 0; + const int S = 0; + return (0x1e204000 | M << 31 | S << 29 | type << 22 | opcode << 15 | rn << 5 | rd); + } + + ALWAYS_INLINE static int floatingPointDataProcessing2Source(Datasize type, FPRegisterID rm, FPDataOp2Source opcode, FPRegisterID rn, FPRegisterID rd) + { + const int M = 0; + const int S = 0; + return (0x1e200800 | M << 31 | S << 29 | type << 22 | rm << 16 | opcode << 12 | rn << 5 | rd); + } + + ALWAYS_INLINE static int vectorDataProcessing2Source(SIMD3Same opcode, unsigned size, FPRegisterID vm, FPRegisterID vn, FPRegisterID vd) + { + const int Q = 0; + return (0xe201c00 | Q << 30 | size << 22 | vm << 16 | opcode << 11 | vn << 5 | vd); + } + + ALWAYS_INLINE static int vectorDataProcessing2Source(SIMD3Same opcode, FPRegisterID vm, FPRegisterID vn, FPRegisterID vd) + { + return vectorDataProcessing2Source(opcode, 0, vm, vn, vd); + } + + + // 'o1' means negate + ALWAYS_INLINE static int floatingPointDataProcessing3Source(Datasize type, bool o1, FPRegisterID rm, AddOp o2, FPRegisterID ra, FPRegisterID rn, FPRegisterID rd) + { + const int M = 0; + const int S = 0; + return (0x1f000000 | M << 31 | S << 29 | type << 22 | o1 << 21 | rm << 16 | o2 << 15 | ra << 10 | rn << 5 | rd); + } + + // 'V' means vector + ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, 
bool V, int imm19, FPRegisterID rt) + { + ASSERT(((imm19 << 13) >> 13) == imm19); + return (0x18000000 | opc << 30 | V << 26 | (imm19 & 0x7ffff) << 5 | rt); + } + + ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, RegisterID rt) + { + return loadRegisterLiteral(opc, V, imm19, xOrZrAsFPR(rt)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt) + { + ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits. + ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits. + ASSERT(isInt9(imm9)); + return (0x38000400 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt) + { + return loadStoreRegisterPostIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2) + { + ASSERT(size < 3); + ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size. + ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed. + unsigned immedShiftAmount = memPairOffsetShift(V, size); + int imm7 = immediate >> immedShiftAmount; + ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7)); + return (0x28800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2) + { + return loadStoreRegisterPairPostIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt) + { + ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits. + ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits. + ASSERT(isInt9(imm9)); + return (0x38000c00 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt) + { + return loadStoreRegisterPreIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2) + { + ASSERT(size < 3); + ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size. + ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed. 
+ unsigned immedShiftAmount = memPairOffsetShift(V, size); + int imm7 = immediate >> immedShiftAmount; + ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7)); + return (0x29800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2) + { + return loadStoreRegisterPairPreIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2)); + } + + // 'V' means vector + // 'S' means shift rm + ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt) + { + ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits. + ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits. + ASSERT(option & 2); // The ExtendType for the address must be 32/64 bit, signed or unsigned - not 8/16bit. + return (0x38200800 | size << 30 | V << 26 | opc << 22 | xOrZr(rm) << 16 | option << 13 | S << 12 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, RegisterID rt) + { + return loadStoreRegisterRegisterOffset(size, V, opc, rm, option, S, rn, xOrZrAsFPR(rt)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt) + { + ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits. + ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits. + ASSERT(isInt9(imm9)); + return (0x38000000 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt) + { + ASSERT(isInt9(imm9)); + return loadStoreRegisterUnscaledImmediate(size, V, opc, imm9, rn, xOrZrAsFPR(rt)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, FPRegisterID rt) + { + ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits. + ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits. + ASSERT(isUInt12(imm12)); + return (0x39000000 | size << 30 | V << 26 | opc << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, RegisterID rt) + { + return loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, xOrZrAsFPR(rt)); + } + + ALWAYS_INLINE static int logicalImmediate(Datasize sf, LogicalOp opc, int N_immr_imms, RegisterID rn, RegisterID rd) + { + ASSERT(!(N_immr_imms & (sf ? ~0x1fff : ~0xfff))); + return (0x12000000 | sf << 31 | opc << 29 | N_immr_imms << 10 | xOrZr(rn) << 5 | xOrZrOrSp(opc == LogicalOp_ANDS, rd)); + } + + // 'N' means negate rm + ALWAYS_INLINE static int logicalShiftedRegister(Datasize sf, LogicalOp opc, ShiftType shift, bool N, RegisterID rm, int imm6, RegisterID rn, RegisterID rd) + { + ASSERT(!(imm6 & (sf ? 
~63 : ~31))); + return (0x0a000000 | sf << 31 | opc << 29 | shift << 22 | N << 21 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int moveWideImediate(Datasize sf, MoveWideOp opc, int hw, uint16_t imm16, RegisterID rd) + { + ASSERT(hw < (sf ? 4 : 2)); + return (0x12800000 | sf << 31 | opc << 29 | hw << 21 | (int)imm16 << 5 | xOrZr(rd)); + } + + // 'op' means link + ALWAYS_INLINE static int unconditionalBranchImmediate(bool op, int32_t imm26) + { + ASSERT(imm26 == (imm26 << 6) >> 6); + return (0x14000000 | op << 31 | (imm26 & 0x3ffffff)); + } + + // 'op' means page + ALWAYS_INLINE static int pcRelative(bool op, int32_t imm21, RegisterID rd) + { + ASSERT(imm21 == (imm21 << 11) >> 11); + int32_t immlo = imm21 & 3; + int32_t immhi = (imm21 >> 2) & 0x7ffff; + return (0x10000000 | op << 31 | immlo << 29 | immhi << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int system(bool L, int op0, int op1, int crn, int crm, int op2, RegisterID rt) + { + return (0xd5000000 | L << 21 | op0 << 19 | op1 << 16 | crn << 12 | crm << 8 | op2 << 5 | xOrZr(rt)); + } + + ALWAYS_INLINE static int hintPseudo(int imm) + { + ASSERT(!(imm & ~0x7f)); + return system(0, 0, 3, 2, (imm >> 3) & 0xf, imm & 0x7, ARM64Registers::zr); + } + + ALWAYS_INLINE static int nopPseudo() + { + return hintPseudo(0); + } + + // 'op' means negate + ALWAYS_INLINE static int testAndBranchImmediate(bool op, int b50, int imm14, RegisterID rt) + { + ASSERT(!(b50 & ~0x3f)); + ASSERT(imm14 == (imm14 << 18) >> 18); + int b5 = b50 >> 5; + int b40 = b50 & 0x1f; + return (0x36000000 | b5 << 31 | op << 24 | b40 << 19 | (imm14 & 0x3fff) << 5 | xOrZr(rt)); + } + + ALWAYS_INLINE static int unconditionalBranchRegister(BranchType opc, RegisterID rn) + { + // The only allocated values for op2 is 0x1f, for op3 & op4 are 0. + const int op2 = 0x1f; + const int op3 = 0; + const int op4 = 0; + return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4); + } + + // Workaround for Cortex-A53 erratum (835769). Emit an extra nop if the + // last instruction in the buffer is a load, store or prefetch. Needed + // before 64-bit multiply-accumulate instructions. + template<int datasize> + ALWAYS_INLINE void nopCortexA53Fix835769() + { +#if CPU(ARM64_CORTEXA53) + CHECK_DATASIZE(); + if (datasize == 64) { + if (LIKELY(m_buffer.codeSize() >= sizeof(int32_t))) { + // From ARMv8 Reference Manual, Section C4.1: the encoding of the + // instructions in the Loads and stores instruction group is: + // ---- 1-0- ---- ---- ---- ---- ---- ---- + if (UNLIKELY((*reinterpret_cast_ptr<int32_t*>(reinterpret_cast_ptr<char*>(m_buffer.data()) + m_buffer.codeSize() - sizeof(int32_t)) & 0x0a000000) == 0x08000000)) + nop(); + } + } +#endif + } + + // Workaround for Cortex-A53 erratum (843419). Emit extra nops to avoid + // wrong address access after ADRP instruction. 
+ ALWAYS_INLINE void nopCortexA53Fix843419() + { +#if CPU(ARM64_CORTEXA53) + nop(); + nop(); + nop(); +#endif + } + + AssemblerBuffer m_buffer; + Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink; + int m_indexOfLastWatchpoint; + int m_indexOfTailOfLastWatchpoint; +}; + +} // namespace JSC + +#undef CHECK_DATASIZE_OF +#undef DATASIZE_OF +#undef MEMOPSIZE_OF +#undef CHECK_DATASIZE +#undef DATASIZE +#undef MEMOPSIZE +#undef CHECK_FP_MEMOP_DATASIZE + +#endif // ENABLE(ASSEMBLER) && CPU(ARM64) + +#endif // ARM64Assembler_h diff --git a/Source/JavaScriptCore/assembler/ARMAssembler.cpp b/Source/JavaScriptCore/assembler/ARMAssembler.cpp index 4f4199bf2..f9100d4c9 100644 --- a/Source/JavaScriptCore/assembler/ARMAssembler.cpp +++ b/Source/JavaScriptCore/assembler/ARMAssembler.cpp @@ -391,15 +391,15 @@ void ARMAssembler::baseIndexTransferFloat(DataTransferTypeFloat transferType, FP dataTransferFloat(transferType, srcDst, ARMRegisters::S1, offset); } -PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort) +void ARMAssembler::prepareExecutableCopy(void* to) { // 64-bit alignment is required for next constant pool and JIT code as well m_buffer.flushWithoutBarrier(true); if (!m_buffer.isAligned(8)) bkpt(0); - RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(vm, ownerUID, effort); - char* data = reinterpret_cast<char*>(result->start()); + char* data = reinterpret_cast<char*>(m_buffer.data()); + ptrdiff_t delta = reinterpret_cast<char*>(to) - data; for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) { // The last bit is set if the constant must be placed on constant pool. @@ -415,30 +415,11 @@ PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(VM& vm, void* ow continue; } } - *addr = reinterpret_cast<ARMWord>(data + *addr); + *addr = reinterpret_cast<ARMWord>(data + delta + *addr); } } - - return result; } -#if OS(LINUX) && COMPILER(RVCT) - -__asm void ARMAssembler::cacheFlush(void* code, size_t size) -{ - ARM - push {r7} - add r1, r1, r0 - mov r7, #0xf0000 - add r7, r7, #0x2 - mov r2, #0x0 - svc #0x0 - pop {r7} - bx lr -} - -#endif - } // namespace JSC #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) diff --git a/Source/JavaScriptCore/assembler/ARMAssembler.h b/Source/JavaScriptCore/assembler/ARMAssembler.h index 19db71dc6..fa2f3340a 100644 --- a/Source/JavaScriptCore/assembler/ARMAssembler.h +++ b/Source/JavaScriptCore/assembler/ARMAssembler.h @@ -36,59 +36,90 @@ namespace JSC { typedef uint32_t ARMWord; + #define FOR_EACH_CPU_REGISTER(V) \ + FOR_EACH_CPU_GPREGISTER(V) \ + FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + FOR_EACH_CPU_FPREGISTER(V) + + #define FOR_EACH_CPU_GPREGISTER(V) \ + V(void*, r0) \ + V(void*, r1) \ + V(void*, r2) \ + V(void*, r3) \ + V(void*, r4) \ + V(void*, r5) \ + V(void*, r6) \ + V(void*, r7) \ + V(void*, r8) \ + V(void*, r9) \ + V(void*, r10) \ + V(void*, fp) \ + V(void*, ip) \ + V(void*, sp) \ + V(void*, lr) \ + V(void*, pc) \ + + #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + V(void*, apsr) \ + V(void*, fpscr) \ + + #define FOR_EACH_CPU_FPREGISTER(V) \ + V(double, d0) \ + V(double, d1) \ + V(double, d2) \ + V(double, d3) \ + V(double, d4) \ + V(double, d5) \ + V(double, d6) \ + V(double, d7) \ + V(double, d8) \ + V(double, d9) \ + V(double, d10) \ + V(double, d11) \ + V(double, d12) \ + V(double, d13) \ + V(double, d14) \ + V(double, d15) \ + V(double, d16) \ + V(double, d17) \ + V(double, d18) \ + V(double, d19) \ + V(double, d20) \ + V(double, d21) \ + V(double, 
d22) \ + V(double, d23) \ + V(double, d24) \ + V(double, d25) \ + V(double, d26) \ + V(double, d27) \ + V(double, d28) \ + V(double, d29) \ + V(double, d30) \ + V(double, d31) \ + namespace ARMRegisters { + typedef enum { - r0 = 0, - r1, - r2, - r3, - r4, - r5, - r6, S0 = r6, - r7, - r8, - r9, - r10, - r11, - r12, S1 = r12, - r13, sp = r13, - r14, lr = r14, - r15, pc = r15 + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + + // Pseudonyms for some of the registers. + S0 = r6, + r11 = fp, // frame pointer + r12 = ip, S1 = ip, + r13 = sp, + r14 = lr, + r15 = pc } RegisterID; typedef enum { - d0, - d1, - d2, - d3, - d4, - d5, - d6, - d7, SD0 = d7, /* Same as thumb assembler. */ - d8, - d9, - d10, - d11, - d12, - d13, - d14, - d15, - d16, - d17, - d18, - d19, - d20, - d21, - d22, - d23, - d24, - d25, - d26, - d27, - d28, - d29, - d30, - d31 + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + + // Pseudonyms for some of the registers. + SD0 = d7, /* Same as thumb assembler. */ } FPRegisterID; } // namespace ARMRegisters @@ -105,6 +136,14 @@ namespace JSC { { } + ARMBuffer& buffer() { return m_buffer; } + + static constexpr RegisterID firstRegister() { return ARMRegisters::r0; } + static constexpr RegisterID lastRegister() { return ARMRegisters::r15; } + + static constexpr FPRegisterID firstFPRegister() { return ARMRegisters::d0; } + static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d31; } + // ARM conditional constants typedef enum { EQ = 0x00000000, // Zero / Equal. @@ -176,6 +215,11 @@ namespace JSC { MOVT = 0x03400000, #endif NOP = 0xe1a00000, + DMB_SY = 0xf57ff05f, +#if HAVE(ARM_IDIV_INSTRUCTIONS) + SDIV = 0x0710f010, + UDIV = 0x0730f010, +#endif }; enum { @@ -437,6 +481,26 @@ namespace JSC { m_buffer.putInt(toARMWord(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm)); } +#if HAVE(ARM_IDIV_INSTRUCTIONS) + template<int datasize> + void sdiv(int rd, int rn, int rm, Condition cc = AL) + { + static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s"); + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); + ASSERT(rm != ARMRegisters::pc); + m_buffer.putInt(toARMWord(cc) | SDIV | RN(rd) | RM(rn) | RS(rm)); + } + + void udiv(int rd, int rn, int rm, Condition cc = AL) + { + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); + ASSERT(rm != ARMRegisters::pc); + m_buffer.putInt(toARMWord(cc) | UDIV | RN(rd) | RM(rn) | RS(rm)); + } +#endif + void vmov_f64(int dd, int dm, Condition cc = AL) { emitDoublePrecisionInstruction(toARMWord(cc) | VMOV_F64, dd, 0, dm); @@ -642,6 +706,11 @@ namespace JSC { m_buffer.putInt(NOP); } + void dmbSY() + { + m_buffer.putInt(DMB_SY); + } + void bx(int rm, Condition cc = AL) { emitInstruction(toARMWord(cc) | BX, 0, 0, RM(rm)); @@ -760,7 +829,7 @@ namespace JSC { return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool); } - PassRefPtr<ExecutableMemoryHandle> executableCopy(VM&, void* ownerUID, JITCompilationEffort); + void prepareExecutableCopy(void* to); unsigned debugOffset() { return m_buffer.debugOffset(); } @@ -1022,7 +1091,7 @@ namespace JSC { return AL | B | (offset & BranchOffsetMask); } -#if OS(LINUX) && COMPILER(GCC) +#if OS(LINUX) && COMPILER(GCC_OR_CLANG) static inline void linuxPageFlush(uintptr_t begin, uintptr_t end) { asm volatile( @@ -1040,12 +1109,9 @@ namespace JSC { } #endif -#if OS(LINUX) && COMPILER(RVCT) - static __asm void cacheFlush(void* 
code, size_t); -#else static void cacheFlush(void* code, size_t size) { -#if OS(LINUX) && COMPILER(GCC) +#if OS(LINUX) && COMPILER(GCC_OR_CLANG) size_t page = pageSize(); uintptr_t current = reinterpret_cast<uintptr_t>(code); uintptr_t end = current + size; @@ -1062,18 +1128,10 @@ namespace JSC { linuxPageFlush(current, current + page); linuxPageFlush(current, end); -#elif OS(WINCE) - CacheRangeFlush(code, size, CACHE_SYNC_ALL); -#elif OS(QNX) && ENABLE(ASSEMBLER_WX_EXCLUSIVE) - UNUSED_PARAM(code); - UNUSED_PARAM(size); -#elif OS(QNX) - msync(code, size, MS_INVALIDATE_ICACHE); #else #error "The cacheFlush support is missing on this platform." #endif } -#endif private: static ARMWord RM(int reg) diff --git a/Source/JavaScriptCore/assembler/ARMv7Assembler.cpp b/Source/JavaScriptCore/assembler/ARMv7Assembler.cpp deleted file mode 100644 index faca66421..000000000 --- a/Source/JavaScriptCore/assembler/ARMv7Assembler.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (C) 2010 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" - -#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) - -#include "ARMv7Assembler.h" - -namespace JSC { - -} - -#endif diff --git a/Source/JavaScriptCore/assembler/ARMv7Assembler.h b/Source/JavaScriptCore/assembler/ARMv7Assembler.h index ddb57b19d..6b9f305e9 100644 --- a/Source/JavaScriptCore/assembler/ARMv7Assembler.h +++ b/Source/JavaScriptCore/assembler/ARMv7Assembler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved. * Copyright (C) 2010 University of Szeged * * Redistribution and use in source and binary forms, with or without @@ -30,6 +30,8 @@ #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) #include "AssemblerBuffer.h" +#include "AssemblerCommon.h" +#include <limits.h> #include <wtf/Assertions.h> #include <wtf/Vector.h> #include <stdint.h> @@ -37,23 +39,83 @@ namespace JSC { namespace ARMRegisters { + + #define FOR_EACH_CPU_REGISTER(V) \ + FOR_EACH_CPU_GPREGISTER(V) \ + FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + FOR_EACH_CPU_FPREGISTER(V) + + // The following are defined as pairs of the following value: + // 1. 
type of the storage needed to save the register value by the JIT probe. + // 2. name of the register. + #define FOR_EACH_CPU_GPREGISTER(V) \ + V(void*, r0) \ + V(void*, r1) \ + V(void*, r2) \ + V(void*, r3) \ + V(void*, r4) \ + V(void*, r5) \ + V(void*, r6) \ + V(void*, r7) \ + V(void*, r8) \ + V(void*, r9) \ + V(void*, r10) \ + V(void*, r11) \ + V(void*, ip) \ + V(void*, sp) \ + V(void*, lr) \ + V(void*, pc) + + #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + V(void*, apsr) \ + V(void*, fpscr) \ + + #define FOR_EACH_CPU_FPREGISTER(V) \ + V(double, d0) \ + V(double, d1) \ + V(double, d2) \ + V(double, d3) \ + V(double, d4) \ + V(double, d5) \ + V(double, d6) \ + V(double, d7) \ + V(double, d8) \ + V(double, d9) \ + V(double, d10) \ + V(double, d11) \ + V(double, d12) \ + V(double, d13) \ + V(double, d14) \ + V(double, d15) \ + V(double, d16) \ + V(double, d17) \ + V(double, d18) \ + V(double, d19) \ + V(double, d20) \ + V(double, d21) \ + V(double, d22) \ + V(double, d23) \ + V(double, d24) \ + V(double, d25) \ + V(double, d26) \ + V(double, d27) \ + V(double, d28) \ + V(double, d29) \ + V(double, d30) \ + V(double, d31) + typedef enum { - r0, - r1, - r2, - r3, - r4, - r5, - r6, - r7, wr = r7, // thumb work register - r8, - r9, sb = r9, // static base - r10, sl = r10, // stack limit - r11, fp = r11, // frame pointer - r12, ip = r12, - r13, sp = r13, - r14, lr = r14, - r15, pc = r15, + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + + fp = r7, // frame pointer + sb = r9, // static base + sl = r10, // stack limit + r12 = ip, + r13 = sp, + r14 = lr, + r15 = pc } RegisterID; typedef enum { @@ -92,38 +154,9 @@ namespace ARMRegisters { } FPSingleRegisterID; typedef enum { - d0, - d1, - d2, - d3, - d4, - d5, - d6, - d7, - d8, - d9, - d10, - d11, - d12, - d13, - d14, - d15, - d16, - d17, - d18, - d19, - d20, - d21, - d22, - d23, - d24, - d25, - d26, - d27, - d28, - d29, - d30, - d31, + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER } FPDoubleRegisterID; typedef enum { @@ -172,7 +205,8 @@ namespace ARMRegisters { ASSERT(!(reg & 1)); return (FPDoubleRegisterID)(reg >> 1); } -} + +} // namespace ARMRegisters class ARMv7Assembler; class ARMThumbImmediate { @@ -418,6 +452,13 @@ public: typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID; typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID; typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID; + typedef FPDoubleRegisterID FPRegisterID; + + static constexpr RegisterID firstRegister() { return ARMRegisters::r0; } + static constexpr RegisterID lastRegister() { return ARMRegisters::r13; } + + static constexpr FPRegisterID firstFPRegister() { return ARMRegisters::d0; } + static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d31; } // (HS, LO, HI, LS) -> (AE, B, A, BE) // (VS, VC) -> (O, NO) @@ -504,6 +545,8 @@ public: { } + AssemblerBuffer& buffer() { return m_formatter.m_buffer; } + private: // ARMv7, Appx-A.6.3 @@ -567,6 +610,8 @@ private: OP_ADD_SP_imm_T1 = 0xA800, OP_ADD_SP_imm_T2 = 0xB000, OP_SUB_SP_imm_T1 = 0xB080, + OP_PUSH_T1 = 0xB400, + OP_POP_T1 = 0xBC00, OP_BKPT = 0xBE00, OP_IT = 0xBF00, OP_NOP_T1 = 0xBF00, @@ -575,6 +620,8 @@ private: typedef enum { OP_B_T1 = 0xD000, OP_B_T2 = 0xE000, + OP_POP_T2 = 0xE8BD, + OP_PUSH_T2 = 0xE92D, OP_AND_reg_T2 = 0xEA00, OP_TST_reg_T2 = 0xEA10, OP_ORR_reg_T2 = 0xEA40, @@ -635,6 +682,7 @@ private: OP_MOVT = 0xF2C0, OP_UBFX_T1 = 0xF3C0, 
OP_NOP_T2a = 0xF3AF, + OP_DMB_SY_T2a = 0xF3BF, OP_STRB_imm_T3 = 0xF800, OP_STRB_reg_T2 = 0xF800, OP_LDRB_imm_T3 = 0xF810, @@ -661,7 +709,7 @@ private: OP_ROR_reg_T2 = 0xFA60, OP_CLZ = 0xFAB0, OP_SMULL_T1 = 0xFB80, -#if CPU(APPLE_ARMV7S) +#if HAVE(ARM_IDIV_INSTRUCTIONS) OP_SDIV_T1 = 0xFB90, OP_UDIV_T1 = 0xFBB0, #endif @@ -691,6 +739,7 @@ private: OP_VCVTSD_T1b = 0x0A40, OP_VCVTDS_T1b = 0x0A40, OP_NOP_T2b = 0x8000, + OP_DMB_SY_T2b = 0x8F5F, OP_B_T3b = 0x8000, OP_B_T4b = 0x9000, } OpcodeID2; @@ -718,11 +767,11 @@ private: class ARMInstructionFormatter; // false means else! - bool ifThenElseConditionBit(Condition condition, bool isIf) + static bool ifThenElseConditionBit(Condition condition, bool isIf) { return isIf ? (condition & 1) : !(condition & 1); } - uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if) + static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if) { int mask = (ifThenElseConditionBit(condition, inst2if) << 3) | (ifThenElseConditionBit(condition, inst3if) << 2) @@ -731,7 +780,7 @@ private: ASSERT((condition != ConditionAL) || !(mask & (mask - 1))); return (condition << 4) | mask; } - uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if) + static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if) { int mask = (ifThenElseConditionBit(condition, inst2if) << 3) | (ifThenElseConditionBit(condition, inst3if) << 2) @@ -739,7 +788,7 @@ private: ASSERT((condition != ConditionAL) || !(mask & (mask - 1))); return (condition << 4) | mask; } - uint8_t ifThenElse(Condition condition, bool inst2if) + static uint8_t ifThenElse(Condition condition, bool inst2if) { int mask = (ifThenElseConditionBit(condition, inst2if) << 3) | 4; @@ -747,7 +796,7 @@ private: return (condition << 4) | mask; } - uint8_t ifThenElse(Condition condition) + static uint8_t ifThenElse(Condition condition) { int mask = 8; return (condition << 4) | mask; @@ -774,7 +823,7 @@ public: ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isValid()); - if (rn == ARMRegisters::sp) { + if (rn == ARMRegisters::sp && imm.isUInt16()) { ASSERT(!(imm.getUInt16() & 3)); if (!(rd & 8) && imm.isUInt10()) { m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2)); @@ -813,6 +862,11 @@ public: // NOTE: In an IT block, add doesn't modify the flags register. 
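// Editorial sketch (not part of the patch): how the ifThenElse() helpers above pack the
// operand byte of a Thumb-2 IT instruction. Bits 7..4 carry the 4-bit condition; each
// following slot contributes one mask bit that equals cond<0> for a "then" slot and its
// complement for an "else" slot; the lowest set bit terminates the block. For a
// two-instruction "ITT <cond>" sequence this reduces to:
//     uint8_t mask = ((cond & 1) << 3) | 4;     // same as ifThenElse(cond, true)
//     uint8_t itOperand = (cond << 4) | mask;   // operand byte paired with OP_IT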
ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm) { + if (rd == ARMRegisters::sp) { + mov(rd, rn); + rn = rd; + } + if (rd == rn) m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd); else if (rd == rm) @@ -1102,9 +1156,10 @@ public: { ASSERT(rn != ARMRegisters::pc); // LDR (literal) ASSERT(imm.isUInt12()); + ASSERT(!(imm.getUInt12() & 1)); if (!((rt | rn) & 8) && imm.isUInt6()) - m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt); + m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 1, rn, rt); else m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12()); } @@ -1266,7 +1321,7 @@ public: m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm); } -#if OS(LINUX) || OS(QNX) +#if OS(LINUX) static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm) { uint16_t* address = static_cast<uint16_t*>(instructionStart); @@ -1407,9 +1462,49 @@ public: m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm)); } -#if CPU(APPLE_ARMV7S) + ALWAYS_INLINE void pop(RegisterID dest) + { + if (dest < ARMRegisters::r8) + m_formatter.oneWordOp7Imm9(OP_POP_T1, 1 << dest); + else { + // Load postindexed with writeback. + ldr(dest, ARMRegisters::sp, sizeof(void*), false, true); + } + } + + ALWAYS_INLINE void pop(uint32_t registerList) + { + ASSERT(WTF::bitCount(registerList) > 1); + ASSERT(!((1 << ARMRegisters::pc) & registerList) || !((1 << ARMRegisters::lr) & registerList)); + ASSERT(!((1 << ARMRegisters::sp) & registerList)); + m_formatter.twoWordOp16Imm16(OP_POP_T2, registerList); + } + + ALWAYS_INLINE void push(RegisterID src) + { + if (src < ARMRegisters::r8) + m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 1 << src); + else if (src == ARMRegisters::lr) + m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 0x100); + else { + // Store preindexed with writeback. 
+ str(src, ARMRegisters::sp, -sizeof(void*), true, true); + } + } + + ALWAYS_INLINE void push(uint32_t registerList) + { + ASSERT(WTF::bitCount(registerList) > 1); + ASSERT(!((1 << ARMRegisters::pc) & registerList)); + ASSERT(!((1 << ARMRegisters::sp) & registerList)); + m_formatter.twoWordOp16Imm16(OP_PUSH_T2, registerList); + } + +#if HAVE(ARM_IDIV_INSTRUCTIONS) + template<int datasize> ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm) { + static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s"); ASSERT(!BadReg(rd)); ASSERT(!BadReg(rn)); ASSERT(!BadReg(rm)); @@ -1554,8 +1649,8 @@ public: ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isUInt12()); - if (!((rt | rn) & 8) && imm.isUInt7()) - m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt); + if (!((rt | rn) & 8) && imm.isUInt6()) + m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt6() >> 1, rn, rt); else m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12()); } @@ -1753,7 +1848,7 @@ public: m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f); } -#if CPU(APPLE_ARMV7S) +#if HAVE(ARM_IDIV_INSTRUCTIONS) ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm) { ASSERT(!BadReg(rd)); @@ -1902,6 +1997,11 @@ public: { m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b); } + + void dmbSY() + { + m_formatter.twoWordOp16Op16(OP_DMB_SY_T2a, OP_DMB_SY_T2b); + } AssemblerLabel labelIgnoringWatchpoints() { @@ -1950,25 +2050,16 @@ public: return b.m_offset - a.m_offset; } - int executableOffsetFor(int location) - { - if (!location) - return 0; - return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1]; - } - - int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); } + static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); } // Assembler admin methods: -#if !OS(QNX) - ALWAYS_INLINE -#endif - static bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b) + + static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b) { return a.from() < b.from(); } - bool canCompact(JumpType jumpType) + static bool canCompact(JumpType jumpType) { // The following cannot be compacted: // JumpFixed: represents custom jump sequence @@ -1977,7 +2068,7 @@ public: return (jumpType == JumpNoCondition) || (jumpType == JumpCondition); } - JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) + static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { if (jumpType == JumpFixed) return LinkInvalid; @@ -2021,29 +2112,20 @@ public: return LinkConditionalBX; } - JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) + static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { JumpLinkType linkType = computeJumpType(record.type(), from, to); record.setLinkType(linkType); return linkType; } - void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) - { - int32_t ptr = regionStart / sizeof(int32_t); - const int32_t end = regionEnd / sizeof(int32_t); - int32_t* offsets = static_cast<int32_t*>(m_formatter.data()); - while (ptr < end) - offsets[ptr++] = offset; - } - Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() 
{ std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator); return m_jumpsToLink; } - void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to) + static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to) { switch (record.linkType()) { case LinkJumpT1: @@ -2109,7 +2191,6 @@ public: { ASSERT(!(reinterpret_cast<intptr_t>(code) & 1)); ASSERT(from.isSet()); - ASSERT(reinterpret_cast<intptr_t>(to) & 1); setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to, false); } @@ -2132,7 +2213,6 @@ public: static void relinkCall(void* from, void* to) { ASSERT(!(reinterpret_cast<intptr_t>(from) & 1)); - ASSERT(reinterpret_cast<intptr_t>(to) & 1); setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true); } @@ -2186,7 +2266,7 @@ public: ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1)); ASSERT(!(bitwise_cast<uintptr_t>(to) & 1)); -#if OS(LINUX) || OS(QNX) +#if OS(LINUX) if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) { uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2; linkJumpT4(ptr, to); @@ -2205,7 +2285,7 @@ public: static ptrdiff_t maxJumpReplacementSize() { -#if OS(LINUX) || OS(QNX) +#if OS(LINUX) return 10; #else return 4; @@ -2293,15 +2373,6 @@ public: linuxPageFlush(current, current + page); linuxPageFlush(current, end); -#elif OS(WINCE) - CacheRangeFlush(code, size, CACHE_SYNC_ALL); -#elif OS(QNX) -#if !ENABLE(ASSEMBLER_WX_EXCLUSIVE) - msync(code, size, MS_INVALIDATE_ICACHE); -#else - UNUSED_PARAM(code); - UNUSED_PARAM(size); -#endif #else #error "The cacheFlush support is missing on this platform." #endif @@ -2503,7 +2574,7 @@ private: return ((relative << 7) >> 7) == relative; } - void linkJumpT1(Condition cond, uint16_t* instruction, void* target) + static void linkJumpT1(Condition cond, uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); @@ -2539,7 +2610,7 @@ private: instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1); } - void linkJumpT3(Condition cond, uint16_t* instruction, void* target) + static void linkJumpT3(Condition cond, uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); @@ -2572,7 +2643,7 @@ private: instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1); } - void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target) + static void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); @@ -2598,7 +2669,7 @@ private: instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3); } - void linkConditionalBX(Condition cond, uint16_t* instruction, void* target) + static void linkConditionalBX(Condition cond, uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. 
:-( ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); @@ -2678,6 +2749,11 @@ private: m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3); } + ALWAYS_INLINE void oneWordOp7Imm9(OpcodeID op, uint16_t imm) + { + m_buffer.putShort(op | imm); + } + ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm) { m_buffer.putShort(op | imm); @@ -2716,6 +2792,12 @@ private: m_buffer.putShort(op2); } + ALWAYS_INLINE void twoWordOp16Imm16(OpcodeID1 op1, uint16_t imm) + { + m_buffer.putShort(op1); + m_buffer.putShort(imm); + } + ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm) { ARMThumbImmediate newImm = imm; @@ -2776,7 +2858,6 @@ private: unsigned debugOffset() { return m_buffer.debugOffset(); } - private: AssemblerBuffer m_buffer; } m_formatter; diff --git a/Source/JavaScriptCore/assembler/AbortReason.h b/Source/JavaScriptCore/assembler/AbortReason.h new file mode 100644 index 000000000..0e82dad57 --- /dev/null +++ b/Source/JavaScriptCore/assembler/AbortReason.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef AbortReason_h +#define AbortReason_h + +namespace JSC { + +// It's important to not change the values of existing abort reasons unless we really +// have to. For this reason there is a BASIC-style numbering that should allow us to +// sneak new reasons in without changing the numbering of existing reasons - at least +// for a while. 
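// Editorial illustration of the numbering scheme described above (the middle entry is
// hypothetical and not part of this patch): a new reason can take an unused value between
// its neighbours without renumbering anything else, e.g.
//     DFGIsNotCell             = 180,
//     DFGIsNotInt32            = 185,   // hypothetical new entry slotted in later
//     DFGIneffectiveWatchpoint = 190,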
+enum AbortReason { + AHCallFrameMisaligned = 10, + AHIndexingTypeIsValid = 20, + AHInsaneArgumentCount = 30, + AHIsNotCell = 40, + AHIsNotInt32 = 50, + AHIsNotJSDouble = 60, + AHIsNotJSInt32 = 70, + AHIsNotJSNumber = 80, + AHIsNotNull = 90, + AHStackPointerMisaligned = 100, + AHStructureIDIsValid = 110, + AHTagMaskNotInPlace = 120, + AHTagTypeNumberNotInPlace = 130, + AHTypeInfoInlineTypeFlagsAreValid = 140, + AHTypeInfoIsValid = 150, + B3Oops = 155, + DFGBailedAtTopOfBlock = 161, + DFGBailedAtEndOfNode = 162, + DFGBasicStorageAllocatorZeroSize = 170, + DFGIsNotCell = 180, + DFGIneffectiveWatchpoint = 190, + DFGNegativeStringLength = 200, + DFGSlowPathGeneratorFellThrough = 210, + DFGUnreachableBasicBlock = 220, + DFGUnreasonableOSREntryJumpDestination = 230, + DFGVarargsThrowingPathDidNotThrow = 235, + JITDidReturnFromTailCall = 237, + JITDivOperandsAreNotNumbers = 240, + JITGetByValResultIsNotEmpty = 250, + JITNotSupported = 260, + JITOffsetIsNotOutOfLine = 270, + JITUncoughtExceptionAfterCall = 275, + JITUnexpectedCallFrameSize = 277, + JITUnreasonableLoopHintJumpTarget = 280, + RPWUnreasonableJumpTarget = 290, + RepatchIneffectiveWatchpoint = 300, + RepatchInsaneArgumentCount = 310, + TGInvalidPointer = 320, + TGNotSupported = 330, + YARRNoInputConsumed = 340, +}; + +} // namespace JSC + +#endif // AbortReason_h + diff --git a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h index 09a688804..1e2d295c9 100644 --- a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h +++ b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012, 2014-2016 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,37 +26,32 @@ #ifndef AbstractMacroAssembler_h #define AbstractMacroAssembler_h +#include "AbortReason.h" #include "AssemblerBuffer.h" #include "CodeLocation.h" #include "MacroAssemblerCodeRef.h" +#include "Options.h" #include <wtf/CryptographicallyRandomNumber.h> #include <wtf/Noncopyable.h> +#include <wtf/SharedTask.h> +#include <wtf/WeakRandom.h> #if ENABLE(ASSEMBLER) - -#if PLATFORM(QT) -#define ENABLE_JIT_CONSTANT_BLINDING 0 -#endif - -#ifndef ENABLE_JIT_CONSTANT_BLINDING -#define ENABLE_JIT_CONSTANT_BLINDING 1 -#endif - namespace JSC { -inline bool isARMv7s() +inline bool isARMv7IDIVSupported() { -#if CPU(APPLE_ARMV7S) +#if HAVE(ARM_IDIV_INSTRUCTIONS) return true; #else return false; #endif } -inline bool isMIPS() +inline bool isARM64() { -#if CPU(MIPS) +#if CPU(ARM64) return true; #else return false; @@ -72,18 +67,48 @@ inline bool isX86() #endif } -class JumpReplacementWatchpoint; +inline bool isX86_64() +{ +#if CPU(X86_64) + return true; +#else + return false; +#endif +} + +inline bool optimizeForARMv7IDIVSupported() +{ + return isARMv7IDIVSupported() && Options::useArchitectureSpecificOptimizations(); +} + +inline bool optimizeForARM64() +{ + return isARM64() && Options::useArchitectureSpecificOptimizations(); +} + +inline bool optimizeForX86() +{ + return isX86() && Options::useArchitectureSpecificOptimizations(); +} + +inline bool optimizeForX86_64() +{ + return isX86_64() && Options::useArchitectureSpecificOptimizations(); +} + +class AllowMacroScratchRegisterUsage; +class DisallowMacroScratchRegisterUsage; class LinkBuffer; -class RepatchBuffer; class Watchpoint; namespace DFG { struct OSRExit; } -template <class AssemblerType> +template <class AssemblerType, class MacroAssemblerType> class AbstractMacroAssembler { public: friend class JITWriteBarrierBase; + typedef AbstractMacroAssembler<AssemblerType, MacroAssemblerType> AbstractMacroAssemblerType; typedef AssemblerType AssemblerType_T; typedef MacroAssemblerCodePtr CodePtr; @@ -92,6 +117,13 @@ public: class Jump; typedef typename AssemblerType::RegisterID RegisterID; + typedef typename AssemblerType::FPRegisterID FPRegisterID; + + static constexpr RegisterID firstRegister() { return AssemblerType::firstRegister(); } + static constexpr RegisterID lastRegister() { return AssemblerType::lastRegister(); } + + static constexpr FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); } + static constexpr FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); } // Section 1: MacroAssembler operand types // @@ -104,6 +136,13 @@ public: TimesFour, TimesEight, }; + + static Scale timesPtr() + { + if (sizeof(void*) == 4) + return TimesFour; + return TimesEight; + } // Address: // @@ -114,7 +153,12 @@ public: , offset(offset) { } - + + Address withOffset(int32_t additionalOffset) + { + return Address(base, offset + additionalOffset); + } + RegisterID base; int32_t offset; }; @@ -177,6 +221,11 @@ public: RegisterID index; Scale scale; int32_t offset; + + BaseIndex withOffset(int32_t additionalOffset) + { + return BaseIndex(base, index, scale, offset + additionalOffset); + } }; // AbsoluteAddress: @@ -226,12 +275,7 @@ public: const void* m_value; }; - struct ImmPtr : -#if ENABLE(JIT_CONSTANT_BLINDING) - private TrustedImmPtr -#else - public TrustedImmPtr -#endif + struct ImmPtr : private TrustedImmPtr { explicit ImmPtr(const void* value) : TrustedImmPtr(value) 
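// Editorial note on the ImmPtr change in this hunk (and the matching Imm32/Imm64 changes
// below): with JIT_CONSTANT_BLINDING no longer optional, the untrusted Imm* types always
// inherit privately from their Trusted* counterparts, so an untrusted value cannot be
// passed where a trusted immediate is expected. Assuming the usual blinding-aware
// overloads in MacroAssembler.h (outside this diff), a call site looks like:
//     jit.add32(MacroAssembler::TrustedImm32(8), resultGPR);        // compiler-chosen constant
//     jit.add32(MacroAssembler::Imm32(untrustedValue), resultGPR);  // may be blinded at emit time
// (resultGPR and untrustedValue are illustrative names.)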
@@ -266,13 +310,7 @@ public: }; - struct Imm32 : -#if ENABLE(JIT_CONSTANT_BLINDING) - private TrustedImm32 -#else - public TrustedImm32 -#endif - { + struct Imm32 : private TrustedImm32 { explicit Imm32(int32_t value) : TrustedImm32(value) { @@ -301,7 +339,7 @@ public: { } -#if CPU(X86_64) +#if CPU(X86_64) || CPU(ARM64) explicit TrustedImm64(TrustedImmPtr ptr) : m_value(ptr.asIntptr()) { @@ -311,18 +349,13 @@ public: int64_t m_value; }; - struct Imm64 : -#if ENABLE(JIT_CONSTANT_BLINDING) - private TrustedImm64 -#else - public TrustedImm64 -#endif + struct Imm64 : private TrustedImm64 { explicit Imm64(int64_t value) : TrustedImm64(value) { } -#if CPU(X86_64) +#if CPU(X86_64) || CPU(ARM64) explicit Imm64(TrustedImmPtr ptr) : TrustedImm64(ptr) { @@ -344,11 +377,10 @@ public: // A Label records a point in the generated instruction stream, typically such that // it may be used as a destination for a jump. class Label { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; friend struct DFG::OSRExit; friend class Jump; - friend class JumpReplacementWatchpoint; friend class MacroAssemblerCodeRef; friend class LinkBuffer; friend class Watchpoint; @@ -358,11 +390,14 @@ public: { } - Label(AbstractMacroAssembler<AssemblerType>* masm) + Label(AbstractMacroAssemblerType* masm) : m_label(masm->m_assembler.label()) { + masm->invalidateAllTempRegisters(); } - + + bool operator==(const Label& other) const { return m_label == other.m_label; } + bool isSet() const { return m_label.isSet(); } private: AssemblerLabel m_label; @@ -379,7 +414,7 @@ public: // // addPtr(TrustedImmPtr(i), a, b) class ConvertibleLoadLabel { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; friend class LinkBuffer; @@ -388,7 +423,7 @@ public: { } - ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm) + ConvertibleLoadLabel(AbstractMacroAssemblerType* masm) : m_label(masm->m_assembler.labelIgnoringWatchpoints()) { } @@ -403,7 +438,7 @@ public: // A DataLabelPtr is used to refer to a location in the code containing a pointer to be // patched after the code has been generated. class DataLabelPtr { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; friend class LinkBuffer; public: @@ -411,11 +446,11 @@ public: { } - DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm) + DataLabelPtr(AbstractMacroAssemblerType* masm) : m_label(masm->m_assembler.label()) { } - + bool isSet() const { return m_label.isSet(); } private: @@ -424,10 +459,10 @@ public: // DataLabel32: // - // A DataLabelPtr is used to refer to a location in the code containing a pointer to be + // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be // patched after the code has been generated. 
class DataLabel32 { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; friend class LinkBuffer; public: @@ -435,7 +470,7 @@ public: { } - DataLabel32(AbstractMacroAssembler<AssemblerType>* masm) + DataLabel32(AbstractMacroAssemblerType* masm) : m_label(masm->m_assembler.label()) { } @@ -451,7 +486,7 @@ public: // A DataLabelCompact is used to refer to a location in the code containing a // compact immediate to be patched after the code has been generated. class DataLabelCompact { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; friend class LinkBuffer; public: @@ -459,16 +494,18 @@ public: { } - DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm) + DataLabelCompact(AbstractMacroAssemblerType* masm) : m_label(masm->m_assembler.label()) { } - + DataLabelCompact(AssemblerLabel label) : m_label(label) { } + AssemblerLabel label() const { return m_label; } + private: AssemblerLabel m_label; }; @@ -480,7 +517,7 @@ public: // relative offset such that when executed it will call to the desired // destination. class Call { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; public: @@ -488,7 +525,9 @@ public: None = 0x0, Linkable = 0x1, Near = 0x2, + Tail = 0x4, LinkableNear = 0x3, + LinkableNearTail = 0x7, }; Call() @@ -524,7 +563,7 @@ public: // relative offset such that when executed it will jump to the desired // destination. class Jump { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; friend class Call; friend struct DFG::OSRExit; @@ -542,6 +581,33 @@ public: , m_condition(condition) { } +#elif CPU(ARM64) + Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid) + : m_label(jmp) + , m_type(type) + , m_condition(condition) + { + } + + Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister) + : m_label(jmp) + , m_type(type) + , m_condition(condition) + , m_is64Bit(is64Bit) + , m_compareRegister(compareRegister) + { + ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize)); + } + + Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister) + : m_label(jmp) + , m_type(type) + , m_condition(condition) + , m_bitNumber(bitNumber) + , m_compareRegister(compareRegister) + { + ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize)); + } #elif CPU(SH4) Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar) : m_label(jmp) @@ -562,14 +628,23 @@ public: return result; } - void link(AbstractMacroAssembler<AssemblerType>* masm) const + void link(AbstractMacroAssemblerType* masm) const { + masm->invalidateAllTempRegisters(); + #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION) masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset()); #endif #if CPU(ARM_THUMB2) masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition); +#elif CPU(ARM64) + if 
((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize)) + masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister); + else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize)) + masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister); + else + masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition); #elif CPU(SH4) masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type); #else @@ -577,7 +652,7 @@ public: #endif } - void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const + void linkTo(Label label, AbstractMacroAssemblerType* masm) const { #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION) masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset); @@ -585,6 +660,13 @@ public: #if CPU(ARM_THUMB2) masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition); +#elif CPU(ARM64) + if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize)) + masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister); + else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize)) + masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister); + else + masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition); #else masm->m_assembler.linkJump(m_label, label.m_label); #endif @@ -597,6 +679,12 @@ public: #if CPU(ARM_THUMB2) ARMv7Assembler::JumpType m_type; ARMv7Assembler::Condition m_condition; +#elif CPU(ARM64) + ARM64Assembler::JumpType m_type; + ARM64Assembler::Condition m_condition; + bool m_is64Bit; + unsigned m_bitNumber; + ARM64Assembler::RegisterID m_compareRegister; #endif #if CPU(SH4) SH4Assembler::JumpType m_type; @@ -632,10 +720,11 @@ public: JumpList(Jump jump) { - append(jump); + if (jump.isSet()) + append(jump); } - void link(AbstractMacroAssembler<AssemblerType>* masm) + void link(AbstractMacroAssemblerType* masm) { size_t size = m_jumps.size(); for (size_t i = 0; i < size; ++i) @@ -643,7 +732,7 @@ public: m_jumps.clear(); } - void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) + void linkTo(Label label, AbstractMacroAssemblerType* masm) { size_t size = m_jumps.size(); for (size_t i = 0; i < size; ++i) @@ -725,7 +814,7 @@ public: { } - void check(unsigned low, unsigned high) + void checkOffsets(unsigned low, unsigned high) { RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high); } @@ -751,7 +840,7 @@ public: size_t size = m_registerAllocationForOffsets.size(); for (size_t i = 0; i < size; ++i) - m_registerAllocationForOffsets[i].check(offset1, offset2); + m_registerAllocationForOffsets[i].checkOffsets(offset1, offset2); } #endif @@ -772,34 +861,109 @@ public: { AssemblerType::cacheFlush(code, size); } -protected: - AbstractMacroAssembler() - : m_randomSource(cryptographicallyRandomNumber()) - { - } - AssemblerType m_assembler; - - uint32_t random() - { - return m_randomSource.getUint32(); - } +#if ENABLE(MASM_PROBE) - WeakRandom m_randomSource; + struct CPUState { + #define DECLARE_REGISTER(_type, _regName) \ + _type _regName; + 
FOR_EACH_CPU_REGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER -#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION) - Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets; -#endif + static const char* gprName(RegisterID regID) + { + switch (regID) { + #define DECLARE_REGISTER(_type, _regName) \ + case RegisterID::_regName: \ + return #_regName; + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } -#if ENABLE(JIT_CONSTANT_BLINDING) - static bool scratchRegisterForBlinding() { return false; } - static bool shouldBlindForSpecificArch(uint32_t) { return true; } - static bool shouldBlindForSpecificArch(uint64_t) { return true; } -#endif + static const char* fprName(FPRegisterID regID) + { + switch (regID) { + #define DECLARE_REGISTER(_type, _regName) \ + case FPRegisterID::_regName: \ + return #_regName; + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } - friend class LinkBuffer; - friend class RepatchBuffer; + void*& gpr(RegisterID regID) + { + switch (regID) { + #define DECLARE_REGISTER(_type, _regName) \ + case RegisterID::_regName: \ + return _regName; + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } + + double& fpr(FPRegisterID regID) + { + switch (regID) { + #define DECLARE_REGISTER(_type, _regName) \ + case FPRegisterID::_regName: \ + return _regName; + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } + }; + + struct ProbeContext; + typedef void (*ProbeFunction)(struct ProbeContext*); + + struct ProbeContext { + ProbeFunction probeFunction; + void* arg1; + void* arg2; + CPUState cpu; + + // Convenience methods: + void*& gpr(RegisterID regID) { return cpu.gpr(regID); } + double& fpr(FPRegisterID regID) { return cpu.fpr(regID); } + const char* gprName(RegisterID regID) { return cpu.gprName(regID); } + const char* fprName(FPRegisterID regID) { return cpu.fprName(regID); } + }; + + // This function emits code to preserve the CPUState (e.g. registers), + // call a user supplied probe function, and restore the CPUState before + // continuing with other JIT generated code. + // + // The user supplied probe function will be called with a single pointer to + // a ProbeContext struct (defined above) which contains, among other things, + // the preserved CPUState. This allows the user probe function to inspect + // the CPUState at that point in the JIT generated code. + // + // If the user probe function alters the register values in the ProbeContext, + // the altered values will be loaded into the CPU registers when the probe + // returns. + // + // The ProbeContext is stack allocated and is only valid for the duration + // of the call to the user probe function. + // + // Note: probe() should be implemented by the target specific MacroAssembler. + // This prototype is only provided here to document the interface. 
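// Editorial sketch of a probe callback written against the interface documented above
// (the choice of register, the use of WTF's dataLogF, and the someTag argument are
// illustrative):
//     static void dumpProbe(MacroAssembler::ProbeContext* context)
//     {
//         dataLogF("probe hit, arg1 = %p\n", context->arg1);
//         dataLogF("%s = %p\n",
//             context->gprName(MacroAssembler::framePointerRegister),
//             context->gpr(MacroAssembler::framePointerRegister));
//     }
//     ...
//     jit.probe(dumpProbe, someTag, nullptr); // registers are saved before and restored after the call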
+ + void probe(ProbeFunction, void* arg1, void* arg2); +#endif // ENABLE(MASM_PROBE) + + AssemblerType m_assembler; + static void linkJump(void* code, Jump jump, CodeLocationLabel target) { AssemblerType::linkJump(code, jump.m_label, target.dataLocation()); @@ -827,7 +991,15 @@ protected: static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination) { - AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress()); + switch (nearCall.callMode()) { + case NearCallMode::Tail: + AssemblerType::relinkJump(nearCall.dataLocation(), destination.dataLocation()); + return; + case NearCallMode::Regular: + AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress()); + return; + } + RELEASE_ASSERT_NOT_REACHED(); } static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value) @@ -859,7 +1031,115 @@ protected: { AssemblerType::replaceWithAddressComputation(label.dataLocation()); } -}; + + template<typename Functor> + void addLinkTask(const Functor& functor) + { + m_linkTasks.append(createSharedTask<void(LinkBuffer&)>(functor)); + } + +protected: + AbstractMacroAssembler() + : m_randomSource(cryptographicallyRandomNumber()) + { + invalidateAllTempRegisters(); + } + + uint32_t random() + { + return m_randomSource.getUint32(); + } + + WeakRandom m_randomSource; + +#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION) + Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets; +#endif + + static bool haveScratchRegisterForBlinding() + { + return false; + } + static RegisterID scratchRegisterForBlinding() + { + UNREACHABLE_FOR_PLATFORM(); + return firstRegister(); + } + static bool canBlind() { return false; } + static bool shouldBlindForSpecificArch(uint32_t) { return false; } + static bool shouldBlindForSpecificArch(uint64_t) { return false; } + + class CachedTempRegister { + friend class DataLabelPtr; + friend class DataLabel32; + friend class DataLabelCompact; + friend class Jump; + friend class Label; + + public: + CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID) + : m_masm(masm) + , m_registerID(registerID) + , m_value(0) + , m_validBit(1 << static_cast<unsigned>(registerID)) + { + ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8)); + } + + ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; } + + ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; } + + bool value(intptr_t& value) + { + value = m_value; + return m_masm->isTempRegisterValid(m_validBit); + } + + void setValue(intptr_t value) + { + m_value = value; + m_masm->setTempRegisterValid(m_validBit); + } + + ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); } + + private: + AbstractMacroAssemblerType* m_masm; + RegisterID m_registerID; + intptr_t m_value; + unsigned m_validBit; + }; + + ALWAYS_INLINE void invalidateAllTempRegisters() + { + m_tempRegistersValidBits = 0; + } + + ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask) + { + return (m_tempRegistersValidBits & registerMask); + } + + ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask) + { + m_tempRegistersValidBits &= ~registerMask; + } + + ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask) + { + m_tempRegistersValidBits |= registerMask; + } + + friend class AllowMacroScratchRegisterUsage; + friend class DisallowMacroScratchRegisterUsage; + unsigned m_tempRegistersValidBits; + bool m_allowScratchRegister { true }; + + 
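// Editorial sketch of the deferred-link pattern that addLinkTask() above enables: callers
// queue work that needs the finished LinkBuffer, which runs the queued tasks when the code
// is finalized. The call site and target names here are illustrative:
//     jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
//         linkBuffer.link(slowPathCall, FunctionPtr(operationSlowPath));
//     });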
Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks; + + friend class LinkBuffer; +}; // class AbstractMacroAssembler } // namespace JSC diff --git a/Source/JavaScriptCore/assembler/AllowMacroScratchRegisterUsage.h b/Source/JavaScriptCore/assembler/AllowMacroScratchRegisterUsage.h new file mode 100644 index 000000000..733d05746 --- /dev/null +++ b/Source/JavaScriptCore/assembler/AllowMacroScratchRegisterUsage.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef AllowMacroScratchRegisterUsage_h +#define AllowMacroScratchRegisterUsage_h + +#if ENABLE(ASSEMBLER) + +#include "MacroAssembler.h" + +namespace JSC { + +class AllowMacroScratchRegisterUsage { +public: + AllowMacroScratchRegisterUsage(MacroAssembler& masm) + : m_masm(masm) + , m_oldValueOfAllowScratchRegister(masm.m_allowScratchRegister) + { + masm.m_allowScratchRegister = true; + } + + ~AllowMacroScratchRegisterUsage() + { + m_masm.m_allowScratchRegister = m_oldValueOfAllowScratchRegister; + } + +private: + MacroAssembler& m_masm; + bool m_oldValueOfAllowScratchRegister; +}; + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) + +#endif // AllowMacroScratchRegisterUsage_h + diff --git a/Source/JavaScriptCore/assembler/AssemblerBuffer.h b/Source/JavaScriptCore/assembler/AssemblerBuffer.h index d82c0b946..d9546931d 100644 --- a/Source/JavaScriptCore/assembler/AssemblerBuffer.h +++ b/Source/JavaScriptCore/assembler/AssemblerBuffer.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,7 +30,6 @@ #include "ExecutableAllocator.h" #include "JITCompilationEffort.h" -#include "VM.h" #include "stdint.h" #include <string.h> #include <wtf/Assertions.h> @@ -57,53 +56,85 @@ namespace JSC { return AssemblerLabel(m_offset + offset); } + bool operator==(const AssemblerLabel& other) const { return m_offset == other.m_offset; } + uint32_t m_offset; }; - class AssemblerBuffer { - static const int inlineCapacity = 128; + class AssemblerData { public: - AssemblerBuffer() - : m_storage(inlineCapacity) - , m_buffer(m_storage.begin()) - , m_capacity(inlineCapacity) - , m_index(0) + AssemblerData() + : m_buffer(nullptr) + , m_capacity(0) { } - ~AssemblerBuffer() + AssemblerData(unsigned initialCapacity) { + m_capacity = initialCapacity; + m_buffer = static_cast<char*>(fastMalloc(m_capacity)); } - bool isAvailable(int space) + AssemblerData(AssemblerData&& other) { - return m_index + space <= m_capacity; + m_buffer = other.m_buffer; + other.m_buffer = nullptr; + m_capacity = other.m_capacity; + other.m_capacity = 0; } - void ensureSpace(int space) + AssemblerData& operator=(AssemblerData&& other) { - if (!isAvailable(space)) - grow(); + m_buffer = other.m_buffer; + other.m_buffer = nullptr; + m_capacity = other.m_capacity; + other.m_capacity = 0; + return *this; } - bool isAligned(int alignment) const + ~AssemblerData() { - return !(m_index & (alignment - 1)); + fastFree(m_buffer); } - template<typename IntegralType> - void putIntegral(IntegralType value) + char* buffer() const { return m_buffer; } + + unsigned capacity() const { return m_capacity; } + + void grow(unsigned extraCapacity = 0) { - ensureSpace(sizeof(IntegralType)); - putIntegralUnchecked(value); + m_capacity = m_capacity + m_capacity / 2 + extraCapacity; + m_buffer = static_cast<char*>(fastRealloc(m_buffer, m_capacity)); } - template<typename IntegralType> - void putIntegralUnchecked(IntegralType value) + private: + char* m_buffer; + unsigned m_capacity; + }; + + class AssemblerBuffer { + static const int initialCapacity = 128; + public: + AssemblerBuffer() + : m_storage(initialCapacity) + , m_index(0) { - ASSERT(isAvailable(sizeof(IntegralType))); - *reinterpret_cast_ptr<IntegralType*>(m_buffer + m_index) = value; - m_index += sizeof(IntegralType); + } + + bool isAvailable(int space) + { + return m_index + space <= m_storage.capacity(); + } + + void ensureSpace(int space) + { + if (!isAvailable(space)) + grow(); + } + + bool isAligned(int alignment) const + { + return !(m_index & (alignment - 1)); } void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); } @@ -117,7 +148,7 @@ namespace JSC { void* data() const { - return m_buffer; + return m_storage.buffer(); } size_t codeSize() const @@ -130,48 +161,47 @@ namespace JSC { return AssemblerLabel(m_index); } - PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort) - { - if (!m_index) - return 0; - - RefPtr<ExecutableMemoryHandle> result = vm.executableAllocator.allocate(vm, m_index, ownerUID, effort); - - if (!result) - return 0; + unsigned debugOffset() { return m_index; } - ExecutableAllocator::makeWritable(result->start(), result->sizeInBytes()); + AssemblerData releaseAssemblerData() { return WTFMove(m_storage); } - memcpy(result->start(), m_buffer, m_index); - - return result.release(); + protected: + template<typename IntegralType> + void putIntegral(IntegralType value) + 
{ + unsigned nextIndex = m_index + sizeof(IntegralType); + if (UNLIKELY(nextIndex > m_storage.capacity())) + grow(); + ASSERT(isAvailable(sizeof(IntegralType))); + *reinterpret_cast_ptr<IntegralType*>(m_storage.buffer() + m_index) = value; + m_index = nextIndex; } - unsigned debugOffset() { return m_index; } + template<typename IntegralType> + void putIntegralUnchecked(IntegralType value) + { + ASSERT(isAvailable(sizeof(IntegralType))); + *reinterpret_cast_ptr<IntegralType*>(m_storage.buffer() + m_index) = value; + m_index += sizeof(IntegralType); + } - protected: void append(const char* data, int size) { if (!isAvailable(size)) grow(size); - memcpy(m_buffer + m_index, data, size); + memcpy(m_storage.buffer() + m_index, data, size); m_index += size; } void grow(int extraCapacity = 0) { - m_capacity += m_capacity / 2 + extraCapacity; - - m_storage.grow(m_capacity); - m_buffer = m_storage.begin(); + m_storage.grow(extraCapacity); } private: - Vector<char, inlineCapacity, UnsafeVectorOverflow> m_storage; - char* m_buffer; - int m_capacity; - int m_index; + AssemblerData m_storage; + unsigned m_index; }; } // namespace JSC diff --git a/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h b/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h index 852f86df7..053884b01 100644 --- a/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h +++ b/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h @@ -195,12 +195,6 @@ public: putIntegralUnchecked(value.low); } - PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort) - { - flushConstantPool(false); - return AssemblerBuffer::executableCopy(vm, ownerUID, effort); - } - void putShortWithConstantInt(uint16_t insn, uint32_t constant, bool isReusable = false) { putIntegralWithConstantInt(insn, constant, isReusable); @@ -229,6 +223,41 @@ public: return m_numConsts; } + void flushConstantPool(bool useBarrier = true) + { + if (!m_numConsts) + return; + int alignPool = (codeSize() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1); + + if (alignPool) + alignPool = sizeof(uint64_t) - alignPool; + + // Callback to protect the constant pool from execution + if (useBarrier) + putIntegral(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool)); + + if (alignPool) { + if (alignPool & 1) + AssemblerBuffer::putByte(AssemblerType::padForAlign8); + if (alignPool & 2) + AssemblerBuffer::putShort(AssemblerType::padForAlign16); + if (alignPool & 4) + AssemblerBuffer::putInt(AssemblerType::padForAlign32); + } + + int constPoolOffset = codeSize(); + append(reinterpret_cast<char*>(m_pool), m_numConsts * sizeof(uint32_t)); + + // Patch each PC relative load + for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) { + void* loadAddr = reinterpret_cast<char*>(data()) + *iter; + AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<char*>(data()) + constPoolOffset); + } + + m_loadOffsets.clear(); + m_numConsts = 0; + } + private: void correctDeltas(int insnSize) { @@ -273,41 +302,6 @@ private: correctDeltas(sizeof(IntegralType), 4); } - void flushConstantPool(bool useBarrier = true) - { - if (m_numConsts == 0) - return; - int alignPool = (codeSize() + (useBarrier ? 
barrierSize : 0)) & (sizeof(uint64_t) - 1); - - if (alignPool) - alignPool = sizeof(uint64_t) - alignPool; - - // Callback to protect the constant pool from execution - if (useBarrier) - putIntegral(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool)); - - if (alignPool) { - if (alignPool & 1) - AssemblerBuffer::putByte(AssemblerType::padForAlign8); - if (alignPool & 2) - AssemblerBuffer::putShort(AssemblerType::padForAlign16); - if (alignPool & 4) - AssemblerBuffer::putInt(AssemblerType::padForAlign32); - } - - int constPoolOffset = codeSize(); - append(reinterpret_cast<char*>(m_pool), m_numConsts * sizeof(uint32_t)); - - // Patch each PC relative load - for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) { - void* loadAddr = reinterpret_cast<char*>(data()) + *iter; - AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<char*>(data()) + constPoolOffset); - } - - m_loadOffsets.clear(); - m_numConsts = 0; - } - void flushIfNoSpaceFor(int nextInsnSize) { if (m_numConsts == 0) diff --git a/Source/JavaScriptCore/assembler/AssemblerCommon.h b/Source/JavaScriptCore/assembler/AssemblerCommon.h new file mode 100644 index 000000000..21ca7a20d --- /dev/null +++ b/Source/JavaScriptCore/assembler/AssemblerCommon.h @@ -0,0 +1,294 @@ +/* + * Copyright (C) 2012, 2014, 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef AssemblerCommon_h +#define AssemblerCommon_h + +namespace JSC { + +ALWAYS_INLINE bool isIOS() +{ +#if PLATFORM(IOS) + return true; +#else + return false; +#endif +} + +ALWAYS_INLINE bool isInt9(int32_t value) +{ + return value == ((value << 23) >> 23); +} + +template<typename Type> +ALWAYS_INLINE bool isUInt12(Type value) +{ + return !(value & ~static_cast<Type>(0xfff)); +} + +template<int datasize> +ALWAYS_INLINE bool isValidScaledUImm12(int32_t offset) +{ + int32_t maxPImm = 4095 * (datasize / 8); + if (offset < 0) + return false; + if (offset > maxPImm) + return false; + if (offset & ((datasize / 8) - 1)) + return false; + return true; +} + +ALWAYS_INLINE bool isValidSignedImm9(int32_t value) +{ + return isInt9(value); +} + +class ARM64LogicalImmediate { +public: + static ARM64LogicalImmediate create32(uint32_t value) + { + // Check for 0, -1 - these cannot be encoded. + if (!value || !~value) + return InvalidLogicalImmediate; + + // First look for a 32-bit pattern, then for repeating 16-bit + // patterns, 8-bit, 4-bit, and finally 2-bit. + + unsigned hsb, lsb; + bool inverted; + if (findBitRange<32>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<32>(hsb, lsb, inverted); + + if ((value & 0xffff) != (value >> 16)) + return InvalidLogicalImmediate; + value &= 0xffff; + + if (findBitRange<16>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<16>(hsb, lsb, inverted); + + if ((value & 0xff) != (value >> 8)) + return InvalidLogicalImmediate; + value &= 0xff; + + if (findBitRange<8>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<8>(hsb, lsb, inverted); + + if ((value & 0xf) != (value >> 4)) + return InvalidLogicalImmediate; + value &= 0xf; + + if (findBitRange<4>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<4>(hsb, lsb, inverted); + + if ((value & 0x3) != (value >> 2)) + return InvalidLogicalImmediate; + value &= 0x3; + + if (findBitRange<2>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<2>(hsb, lsb, inverted); + + return InvalidLogicalImmediate; + } + + static ARM64LogicalImmediate create64(uint64_t value) + { + // Check for 0, -1 - these cannot be encoded. + if (!value || !~value) + return InvalidLogicalImmediate; + + // Look for a contiguous bit range. + unsigned hsb, lsb; + bool inverted; + if (findBitRange<64>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<64>(hsb, lsb, inverted); + + // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern. 
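// For example (editorial note, not part of the patch): 0x0000ffff0000ffff has no single
// contiguous 64-bit range, but its two halves match, so create32(0x0000ffff) takes over,
// finds hsb = 15, lsb = 0 (not inverted), and the value encodes as immN = 0, immr = 0,
// imms = 15.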
+ if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32)) + return create32(static_cast<uint32_t>(value)); + return InvalidLogicalImmediate; + } + + int value() const + { + ASSERT(isValid()); + return m_value; + } + + bool isValid() const + { + return m_value != InvalidLogicalImmediate; + } + + bool is64bit() const + { + return m_value & (1 << 12); + } + +private: + ARM64LogicalImmediate(int value) + : m_value(value) + { + } + + // Generate a mask with bits in the range hsb..0 set, for example: + // hsb:63 = 0xffffffffffffffff + // hsb:42 = 0x000007ffffffffff + // hsb: 0 = 0x0000000000000001 + static uint64_t mask(unsigned hsb) + { + ASSERT(hsb < 64); + return 0xffffffffffffffffull >> (63 - hsb); + } + + template<unsigned N> + static void partialHSB(uint64_t& value, unsigned&result) + { + if (value & (0xffffffffffffffffull << N)) { + result += N; + value >>= N; + } + } + + // Find the bit number of the highest bit set in a non-zero value, for example: + // 0x8080808080808080 = hsb:63 + // 0x0000000000000001 = hsb: 0 + // 0x000007ffffe00000 = hsb:42 + static unsigned highestSetBit(uint64_t value) + { + ASSERT(value); + unsigned hsb = 0; + partialHSB<32>(value, hsb); + partialHSB<16>(value, hsb); + partialHSB<8>(value, hsb); + partialHSB<4>(value, hsb); + partialHSB<2>(value, hsb); + partialHSB<1>(value, hsb); + return hsb; + } + + // This function takes a value and a bit width, where value obeys the following constraints: + // * bits outside of the width of the value must be zero. + // * bits within the width of value must neither be all clear or all set. + // The input is inspected to detect values that consist of either two or three contiguous + // ranges of bits. The output range hsb..lsb will describe the second range of the value. + // if the range is set, inverted will be false, and if the range is clear, inverted will + // be true. For example (with width 8): + // 00001111 = hsb:3, lsb:0, inverted:false + // 11110000 = hsb:3, lsb:0, inverted:true + // 00111100 = hsb:5, lsb:2, inverted:false + // 11000011 = hsb:5, lsb:2, inverted:true + template<unsigned width> + static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted) + { + ASSERT(value & mask(width - 1)); + ASSERT(value != mask(width - 1)); + ASSERT(!(value & ~mask(width - 1))); + + // Detect cases where the top bit is set; if so, flip all the bits & set invert. + // This halves the number of patterns we need to look for. + const uint64_t msb = 1ull << (width - 1); + if ((inverted = (value & msb))) + value ^= mask(width - 1); + + // Find the highest set bit in value, generate a corresponding mask & flip all + // bits under it. + hsb = highestSetBit(value); + value ^= mask(hsb); + if (!value) { + // If this cleared the value, then the range hsb..0 was all set. + lsb = 0; + return true; + } + + // Try making one more mask, and flipping the bits! + lsb = highestSetBit(value); + value ^= mask(lsb); + if (!value) { + // Success - but lsb actually points to the hsb of a third range - add one + // to get to the lsb of the mid range. + ++lsb; + return true; + } + + return false; + } + + // Encodes the set of immN:immr:imms fields found in a logical immediate. + template<unsigned width> + static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted) + { + // Check width is a power of 2! 
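highestSetBit() above locates the top set bit with a cascade of halving steps through partialHSB(). The same idea in a standalone sketch, checked against the example values given in its comment (illustrative only, not the patch's code):

    #include <cassert>
    #include <cstdint>

    // Binary-search for the index of the highest set bit: test the top half of
    // the remaining width; if anything is set there, shift it down and add the
    // half-width to the running bit index.
    static unsigned highestSetBit(uint64_t value)
    {
        assert(value);
        unsigned hsb = 0;
        for (unsigned shift = 32; shift; shift >>= 1) {
            if (value >> shift) {
                value >>= shift;
                hsb += shift;
            }
        }
        return hsb;
    }

    int main()
    {
        assert(highestSetBit(0x8080808080808080ull) == 63);
        assert(highestSetBit(0x0000000000000001ull) == 0);
        assert(highestSetBit(0x000007ffffe00000ull) == 42);
        return 0;
    }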
+ ASSERT(!(width & (width -1))); + ASSERT(width <= 64 && width >= 2); + ASSERT(hsb >= lsb); + ASSERT(hsb < width); + + int immN = 0; + int imms = 0; + int immr = 0; + + // For 64-bit values this is easy - just set immN to true, and imms just + // contains the bit number of the highest set bit of the set range. For + // values with narrower widths, these are encoded by a leading set of + // one bits, followed by a zero bit, followed by the remaining set of bits + // being the high bit of the range. For a 32-bit immediate there are no + // leading one bits, just a zero followed by a five bit number. For a + // 16-bit immediate there is one one bit, a zero bit, and then a four bit + // bit-position, etc. + if (width == 64) + immN = 1; + else + imms = 63 & ~(width + width - 1); + + if (inverted) { + // if width is 64 & hsb is 62, then we have a value something like: + // 0x80000000ffffffff (in this case with lsb 32). + // The ror should be by 1, imms (effectively set width minus 1) is + // 32. Set width is full width minus cleared width. + immr = (width - 1) - hsb; + imms |= (width - ((hsb - lsb) + 1)) - 1; + } else { + // if width is 64 & hsb is 62, then we have a value something like: + // 0x7fffffff00000000 (in this case with lsb 32). + // The value is effectively rol'ed by lsb, which is equivalent to + // a ror by width - lsb (or 0, in the case where lsb is 0). imms + // is hsb - lsb. + immr = (width - lsb) & (width - 1); + imms |= hsb - lsb; + } + + return immN << 12 | immr << 6 | imms; + } + + static const int InvalidLogicalImmediate = -1; + + int m_value; +}; + + +} // namespace JSC. + +#endif // AssemblerCommon_h diff --git a/Source/JavaScriptCore/assembler/CodeLocation.h b/Source/JavaScriptCore/assembler/CodeLocation.h index 86d1f2b75..3116e0602 100644 --- a/Source/JavaScriptCore/assembler/CodeLocation.h +++ b/Source/JavaScriptCore/assembler/CodeLocation.h @@ -32,6 +32,8 @@ namespace JSC { +enum NearCallMode { Regular, Tail }; + class CodeLocationInstruction; class CodeLocationLabel; class CodeLocationJump; @@ -59,7 +61,7 @@ public: CodeLocationLabel labelAtOffset(int offset); CodeLocationJump jumpAtOffset(int offset); CodeLocationCall callAtOffset(int offset); - CodeLocationNearCall nearCallAtOffset(int offset); + CodeLocationNearCall nearCallAtOffset(int offset, NearCallMode); CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset); CodeLocationDataLabel32 dataLabel32AtOffset(int offset); CodeLocationDataLabelCompact dataLabelCompactAtOffset(int offset); @@ -115,10 +117,13 @@ public: class CodeLocationNearCall : public CodeLocationCommon { public: CodeLocationNearCall() {} - explicit CodeLocationNearCall(MacroAssemblerCodePtr location) - : CodeLocationCommon(location) {} - explicit CodeLocationNearCall(void* location) - : CodeLocationCommon(MacroAssemblerCodePtr(location)) {} + explicit CodeLocationNearCall(MacroAssemblerCodePtr location, NearCallMode callMode) + : CodeLocationCommon(location), m_callMode(callMode) { } + explicit CodeLocationNearCall(void* location, NearCallMode callMode) + : CodeLocationCommon(MacroAssemblerCodePtr(location)), m_callMode(callMode) { } + NearCallMode callMode() { return m_callMode; } +private: + NearCallMode m_callMode = NearCallMode::Regular; }; class CodeLocationDataLabel32 : public CodeLocationCommon { @@ -181,10 +186,10 @@ inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset) return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset); } -inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset) 
+inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset, NearCallMode callMode) { ASSERT_VALID_CODE_OFFSET(offset); - return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset); + return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset, callMode); } inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset) diff --git a/Source/JavaScriptCore/assembler/DisallowMacroScratchRegisterUsage.h b/Source/JavaScriptCore/assembler/DisallowMacroScratchRegisterUsage.h new file mode 100644 index 000000000..e94ad0830 --- /dev/null +++ b/Source/JavaScriptCore/assembler/DisallowMacroScratchRegisterUsage.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DisallowMacroScratchRegisterUsage_h +#define DisallowMacroScratchRegisterUsage_h + +#if ENABLE(ASSEMBLER) + +#include "MacroAssembler.h" + +namespace JSC { + +class DisallowMacroScratchRegisterUsage { +public: + DisallowMacroScratchRegisterUsage(MacroAssembler& masm) + : m_masm(masm) + , m_oldValueOfAllowScratchRegister(masm.m_allowScratchRegister) + { + masm.m_allowScratchRegister = false; + } + + ~DisallowMacroScratchRegisterUsage() + { + m_masm.m_allowScratchRegister = m_oldValueOfAllowScratchRegister; + } + +private: + MacroAssembler& m_masm; + bool m_oldValueOfAllowScratchRegister; +}; + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) + +#endif // DisallowMacroScratchRegisterUsage_h + diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.cpp b/Source/JavaScriptCore/assembler/LinkBuffer.cpp index cd393be65..82fb2ed39 100644 --- a/Source/JavaScriptCore/assembler/LinkBuffer.cpp +++ b/Source/JavaScriptCore/assembler/LinkBuffer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,59 +28,86 @@ #if ENABLE(ASSEMBLER) +#include "CodeBlock.h" +#include "JITCode.h" +#include "JSCInlines.h" #include "Options.h" +#include "VM.h" +#include <wtf/CompilationThread.h> namespace JSC { +bool shouldDumpDisassemblyFor(CodeBlock* codeBlock) +{ + if (JITCode::isOptimizingJIT(codeBlock->jitType()) && Options::dumpDFGDisassembly()) + return true; + return Options::dumpDisassembly(); +} + LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly() { performFinalization(); - return CodeRef(m_executableMemory); + ASSERT(m_didAllocate); + if (m_executableMemory) + return CodeRef(m_executableMemory); + + return CodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(m_code)); } LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...) { - ASSERT(Options::showDisassembly() || Options::showDFGDisassembly()); - CodeRef result = finalizeCodeWithoutDisassembly(); + + if (m_alreadyDisassembled) + return result; - dataLogF("Generated JIT code for "); + StringPrintStream out; + out.printf("Generated JIT code for "); va_list argList; va_start(argList, format); - WTF::dataLogFV(format, argList); + out.vprintf(format, argList); va_end(argList); - dataLogF(":\n"); + out.printf(":\n"); + + out.printf(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size()); - dataLogF(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size()); + CString header = out.toCString(); + + if (Options::asyncDisassembly()) { + disassembleAsynchronously(header, result, m_size, " "); + return result; + } + + dataLog(header); disassemble(result.code(), m_size, " ", WTF::dataFile()); return result; } -void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort) +#if ENABLE(BRANCH_COMPACTION) +static ALWAYS_INLINE void recordLinkOffsets(AssemblerData& assemblerData, int32_t regionStart, int32_t regionEnd, int32_t offset) { - ASSERT(!m_code); -#if !ENABLE(BRANCH_COMPACTION) - m_executableMemory = m_assembler->m_assembler.executableCopy(*m_vm, ownerUID, effort); - if (!m_executableMemory) - return; - m_code = m_executableMemory->start(); - m_size = m_assembler->m_assembler.codeSize(); - ASSERT(m_code); -#else - m_initialSize = m_assembler->m_assembler.codeSize(); - m_executableMemory = m_vm->executableAllocator.allocate(*m_vm, m_initialSize, ownerUID, effort); - if (!m_executableMemory) + int32_t ptr = regionStart / sizeof(int32_t); + const int32_t end = regionEnd / sizeof(int32_t); + int32_t* offsets = reinterpret_cast<int32_t*>(assemblerData.buffer()); + while (ptr < end) + offsets[ptr++] = offset; +} + +template <typename InstructionType> +void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort) +{ + m_initialSize = macroAssembler.m_assembler.codeSize(); + allocate(m_initialSize, ownerUID, effort); + if (didFailToAllocate()) return; - m_code = (uint8_t*)m_executableMemory->start(); - ASSERT(m_code); - ExecutableAllocator::makeWritable(m_code, m_initialSize); - uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode(); + Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = macroAssembler.jumpsToLink(); + m_assemblerStorage = macroAssembler.m_assembler.buffer().releaseAssemblerData(); + uint8_t* inData = 
reinterpret_cast<uint8_t*>(m_assemblerStorage.buffer()); uint8_t* outData = reinterpret_cast<uint8_t*>(m_code); int readPtr = 0; int writePtr = 0; - Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = m_assembler->jumpsToLink(); unsigned jumpCount = jumpsToLink.size(); for (unsigned i = 0; i < jumpCount; ++i) { int offset = readPtr - writePtr; @@ -88,15 +115,15 @@ void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort) // Copy the instructions from the last jump to the current one. size_t regionSize = jumpsToLink[i].from() - readPtr; - uint16_t* copySource = reinterpret_cast_ptr<uint16_t*>(inData + readPtr); - uint16_t* copyEnd = reinterpret_cast_ptr<uint16_t*>(inData + readPtr + regionSize); - uint16_t* copyDst = reinterpret_cast_ptr<uint16_t*>(outData + writePtr); + InstructionType* copySource = reinterpret_cast_ptr<InstructionType*>(inData + readPtr); + InstructionType* copyEnd = reinterpret_cast_ptr<InstructionType*>(inData + readPtr + regionSize); + InstructionType* copyDst = reinterpret_cast_ptr<InstructionType*>(outData + writePtr); ASSERT(!(regionSize % 2)); ASSERT(!(readPtr % 2)); ASSERT(!(writePtr % 2)); while (copySource != copyEnd) *copyDst++ = *copySource++; - m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset); + recordLinkOffsets(m_assemblerStorage, readPtr, jumpsToLink[i].from(), offset); readPtr += regionSize; writePtr += regionSize; @@ -106,33 +133,32 @@ void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort) if (jumpsToLink[i].to() >= jumpsToLink[i].from()) target = outData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far else - target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to()); + target = outData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to()); - JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], outData + writePtr, target); + JumpLinkType jumpLinkType = MacroAssembler::computeJumpType(jumpsToLink[i], outData + writePtr, target); // Compact branch if we can... 
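An aside on the bookkeeping in this loop: recordLinkOffsets() stores, for every 32-bit slot of the original (uncompacted) code, how many bytes have been saved before that point, and executableOffsetFor()/applyOffset() later subtract that delta to translate an unlinked offset into an offset in the compacted copy. A toy sketch of that table, using a plain vector in place of AssemblerData (names and numbers are illustrative):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // One int32_t of bookkeeping per 4 bytes of original code.
    static void recordLinkOffsets(std::vector<int32_t>& table, int32_t regionStart, int32_t regionEnd, int32_t offset)
    {
        for (int32_t ptr = regionStart / 4; ptr < regionEnd / 4; ++ptr)
            table[ptr] = offset;
    }

    static int32_t executableOffsetFor(const std::vector<int32_t>& table, int32_t location)
    {
        if (!location)
            return 0;
        return table[location / 4 - 1];
    }

    int main()
    {
        std::vector<int32_t> table(8, 0); // covers 32 bytes of original code
        // Suppose the first 16 bytes were copied verbatim (no savings), then a
        // 4-byte branch shrank by 2 bytes, so later code moved up by 2 bytes.
        recordLinkOffsets(table, 0, 16, 0);
        recordLinkOffsets(table, 16, 32, 2);
        printf("%d\n", 12 - executableOffsetFor(table, 12)); // 12: before the shrink
        printf("%d\n", 24 - executableOffsetFor(table, 24)); // 22: moved up by 2
        return 0;
    }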
- if (m_assembler->canCompact(jumpsToLink[i].type())) { + if (MacroAssembler::canCompact(jumpsToLink[i].type())) { // Step back in the write stream - int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType); + int32_t delta = MacroAssembler::jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType); if (delta) { writePtr -= delta; - m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr); + recordLinkOffsets(m_assemblerStorage, jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr); } } jumpsToLink[i].setFrom(writePtr); } // Copy everything after the last jump memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr); - m_assembler->recordLinkOffsets(readPtr, m_initialSize, readPtr - writePtr); + recordLinkOffsets(m_assemblerStorage, readPtr, m_initialSize, readPtr - writePtr); for (unsigned i = 0; i < jumpCount; ++i) { uint8_t* location = outData + jumpsToLink[i].from(); - uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to()); - m_assembler->link(jumpsToLink[i], location, target); + uint8_t* target = outData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to()); + MacroAssembler::link(jumpsToLink[i], location, target); } jumpsToLink.clear(); - m_size = writePtr + m_initialSize - readPtr; - m_executableMemory->shrink(m_size); + shrink(writePtr + m_initialSize - readPtr); #if DUMP_LINK_STATISTICS dumpLinkStatistics(m_code, m_initialSize, m_size); @@ -140,22 +166,76 @@ void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort) #if DUMP_CODE dumpCode(m_code, m_size); #endif +} #endif + + +void LinkBuffer::linkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort) +{ +#if !ENABLE(BRANCH_COMPACTION) +#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL + macroAssembler.m_assembler.buffer().flushConstantPool(false); +#endif + AssemblerBuffer& buffer = macroAssembler.m_assembler.buffer(); + allocate(buffer.codeSize(), ownerUID, effort); + if (!m_didAllocate) + return; + ASSERT(m_code); +#if CPU(ARM_TRADITIONAL) + macroAssembler.m_assembler.prepareExecutableCopy(m_code); +#endif + memcpy(m_code, buffer.data(), buffer.codeSize()); +#if CPU(MIPS) + macroAssembler.m_assembler.relocateJumps(buffer.data(), m_code); +#endif +#elif CPU(ARM_THUMB2) + copyCompactAndLinkCode<uint16_t>(macroAssembler, ownerUID, effort); +#elif CPU(ARM64) + copyCompactAndLinkCode<uint32_t>(macroAssembler, ownerUID, effort); +#endif + + m_linkTasks = WTFMove(macroAssembler.m_linkTasks); +} + +void LinkBuffer::allocate(size_t initialSize, void* ownerUID, JITCompilationEffort effort) +{ + if (m_code) { + if (initialSize > m_size) + return; + + m_didAllocate = true; + m_size = initialSize; + return; + } + + m_executableMemory = m_vm->executableAllocator.allocate(*m_vm, initialSize, ownerUID, effort); + if (!m_executableMemory) + return; + m_code = m_executableMemory->start(); + m_size = initialSize; + m_didAllocate = true; +} + +void LinkBuffer::shrink(size_t newSize) +{ + if (!m_executableMemory) + return; + m_size = newSize; + m_executableMemory->shrink(m_size); } void LinkBuffer::performFinalization() { + for (auto& task : m_linkTasks) + task->run(*this); + #ifndef NDEBUG + ASSERT(!isCompilationThread()); ASSERT(!m_completed); ASSERT(isValid()); m_completed = true; #endif -#if ENABLE(BRANCH_COMPACTION) - ExecutableAllocator::makeExecutable(code(), m_initialSize); -#else - ExecutableAllocator::makeExecutable(code(), m_size); -#endif 
MacroAssembler::cacheFlush(code(), m_size); } diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.h b/Source/JavaScriptCore/assembler/LinkBuffer.h index 52630fe50..ce893bf09 100644 --- a/Source/JavaScriptCore/assembler/LinkBuffer.h +++ b/Source/JavaScriptCore/assembler/LinkBuffer.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009, 2010, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2010, 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -33,14 +33,17 @@ #define GLOBAL_THUNK_ID reinterpret_cast<void*>(static_cast<intptr_t>(-1)) #define REGEXP_CODE_ID reinterpret_cast<void*>(static_cast<intptr_t>(-2)) +#define CSS_CODE_ID reinterpret_cast<void*>(static_cast<intptr_t>(-3)) #include "JITCompilationEffort.h" #include "MacroAssembler.h" #include <wtf/DataLog.h> +#include <wtf/FastMalloc.h> #include <wtf/Noncopyable.h> namespace JSC { +class CodeBlock; class VM; // LinkBuffer: @@ -58,7 +61,8 @@ class VM; // * The value referenced by a DataLabel may be set. // class LinkBuffer { - WTF_MAKE_NONCOPYABLE(LinkBuffer); + WTF_MAKE_NONCOPYABLE(LinkBuffer); WTF_MAKE_FAST_ALLOCATED; + typedef MacroAssemblerCodeRef CodeRef; typedef MacroAssemblerCodePtr CodePtr; typedef MacroAssembler::Label Label; @@ -76,30 +80,43 @@ class LinkBuffer { #endif public: - LinkBuffer(VM& vm, MacroAssembler* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed) + LinkBuffer(VM& vm, MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed) : m_size(0) #if ENABLE(BRANCH_COMPACTION) , m_initialSize(0) #endif + , m_didAllocate(false) , m_code(0) - , m_assembler(masm) , m_vm(&vm) #ifndef NDEBUG , m_completed(false) - , m_effort(effort) #endif { - linkCode(ownerUID, effort); + linkCode(macroAssembler, ownerUID, effort); + } + + LinkBuffer(VM& vm, MacroAssembler& macroAssembler, void* code, size_t size) + : m_size(size) +#if ENABLE(BRANCH_COMPACTION) + , m_initialSize(0) +#endif + , m_didAllocate(false) + , m_code(code) + , m_vm(&vm) +#ifndef NDEBUG + , m_completed(false) +#endif + { + linkCode(macroAssembler, 0, JITCompilationCanFail); } ~LinkBuffer() { - ASSERT(m_completed || (!m_executableMemory && m_effort == JITCompilationCanFail)); } bool didFailToAllocate() const { - return !m_executableMemory; + return !m_didAllocate; } bool isValid() const @@ -116,6 +133,11 @@ public: MacroAssembler::linkCall(code(), call, function); } + void link(Call call, CodeLocationLabel label) + { + link(call, FunctionPtr(label.executableAddress())); + } + void link(Jump jump, CodeLocationLabel label) { jump.m_label = applyOffset(jump.m_label); @@ -141,6 +163,11 @@ public: } // These methods are used to obtain handles to allow the code to be relinked / repatched later. + + CodeLocationLabel entrypoint() + { + return CodeLocationLabel(code()); + } CodeLocationCall locationOf(Call call) { @@ -153,7 +180,8 @@ public: { ASSERT(call.isFlagSet(Call::Linkable)); ASSERT(call.isFlagSet(Call::Near)); - return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label))); + return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)), + call.isFlagSet(Call::Tail) ? 
NearCallMode::Tail : NearCallMode::Regular); } CodeLocationLabel locationOf(PatchableJump jump) @@ -199,13 +227,18 @@ public: return applyOffset(label.m_label).m_offset; } + unsigned offsetOf(PatchableJump jump) + { + return applyOffset(jump.m_jump.m_label).m_offset; + } + // Upon completion of all patching 'FINALIZE_CODE()' should be called once to // complete generation of the code. Alternatively, call // finalizeCodeWithoutDisassembly() directly if you have your own way of // displaying disassembly. - CodeRef finalizeCodeWithoutDisassembly(); - CodeRef finalizeCodeWithDisassembly(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3); + JS_EXPORT_PRIVATE CodeRef finalizeCodeWithoutDisassembly(); + JS_EXPORT_PRIVATE CodeRef finalizeCodeWithDisassembly(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3); CodePtr trampolineAt(Label label) { @@ -216,17 +249,32 @@ public: { return m_code; } - - size_t debugSize() + + // FIXME: this does not account for the AssemblerData size! + size_t size() { return m_size; } + + bool wasAlreadyDisassembled() const { return m_alreadyDisassembled; } + void didAlreadyDisassemble() { m_alreadyDisassembled = true; } + + VM& vm() { return *m_vm; } private: +#if ENABLE(BRANCH_COMPACTION) + int executableOffsetFor(int location) + { + if (!location) + return 0; + return bitwise_cast<int32_t*>(m_assemblerStorage.buffer())[location / sizeof(int32_t) - 1]; + } +#endif + template <typename T> T applyOffset(T src) { #if ENABLE(BRANCH_COMPACTION) - src.m_offset -= m_assembler->executableOffsetFor(src.m_offset); + src.m_offset -= executableOffsetFor(src.m_offset); #endif return src; } @@ -236,8 +284,15 @@ private: { return m_code; } + + void allocate(size_t initialSize, void* ownerUID, JITCompilationEffort); + void shrink(size_t newSize); - void linkCode(void* ownerUID, JITCompilationEffort); + JS_EXPORT_PRIVATE void linkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort); +#if ENABLE(BRANCH_COMPACTION) + template <typename InstructionType> + void copyCompactAndLinkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort); +#endif void performFinalization(); @@ -253,14 +308,16 @@ private: size_t m_size; #if ENABLE(BRANCH_COMPACTION) size_t m_initialSize; + AssemblerData m_assemblerStorage; #endif + bool m_didAllocate; void* m_code; - MacroAssembler* m_assembler; VM* m_vm; #ifndef NDEBUG bool m_completed; - JITCompilationEffort m_effort; #endif + bool m_alreadyDisassembled { false }; + Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks; }; #define FINALIZE_CODE_IF(condition, linkBufferReference, dataLogFArgumentsForHeading) \ @@ -268,6 +325,11 @@ private: ? ((linkBufferReference).finalizeCodeWithDisassembly dataLogFArgumentsForHeading) \ : (linkBufferReference).finalizeCodeWithoutDisassembly()) +bool shouldDumpDisassemblyFor(CodeBlock*); + +#define FINALIZE_CODE_FOR(codeBlock, linkBufferReference, dataLogFArgumentsForHeading) \ + FINALIZE_CODE_IF(shouldDumpDisassemblyFor(codeBlock) || Options::asyncDisassembly(), linkBufferReference, dataLogFArgumentsForHeading) + // Use this to finalize code, like so: // // CodeRef code = FINALIZE_CODE(linkBuffer, ("my super thingy number %d", number)); @@ -281,14 +343,14 @@ private: // // ... and so on. // -// Note that the dataLogFArgumentsForHeading are only evaluated when showDisassembly +// Note that the dataLogFArgumentsForHeading are only evaluated when dumpDisassembly // is true, so you can hide expensive disassembly-only computations inside there. 
#define FINALIZE_CODE(linkBufferReference, dataLogFArgumentsForHeading) \ - FINALIZE_CODE_IF(Options::showDisassembly(), linkBufferReference, dataLogFArgumentsForHeading) + FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly(), linkBufferReference, dataLogFArgumentsForHeading) #define FINALIZE_DFG_CODE(linkBufferReference, dataLogFArgumentsForHeading) \ - FINALIZE_CODE_IF((Options::showDisassembly() || Options::showDFGDisassembly()), linkBufferReference, dataLogFArgumentsForHeading) + FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly() || Options::dumpDFGDisassembly(), linkBufferReference, dataLogFArgumentsForHeading) } // namespace JSC diff --git a/Source/JavaScriptCore/assembler/MIPSAssembler.h b/Source/JavaScriptCore/assembler/MIPSAssembler.h index 7e49e9fd6..dc518433e 100644 --- a/Source/JavaScriptCore/assembler/MIPSAssembler.h +++ b/Source/JavaScriptCore/assembler/MIPSAssembler.h @@ -151,12 +151,20 @@ public: typedef MIPSRegisters::FPRegisterID FPRegisterID; typedef SegmentedVector<AssemblerLabel, 64> Jumps; + static constexpr RegisterID firstRegister() { return MIPSRegisters::r0; } + static constexpr RegisterID lastRegister() { return MIPSRegisters::r31; } + + static constexpr FPRegisterID firstFPRegister() { return MIPSRegisters::f0; } + static constexpr FPRegisterID lastFPRegister() { return MIPSRegisters::f31; } + MIPSAssembler() : m_indexOfLastWatchpoint(INT_MIN) , m_indexOfTailOfLastWatchpoint(INT_MIN) { } + AssemblerBuffer& buffer() { return m_buffer; } + // MIPS instruction opcode field position enum { OP_SH_RD = 11, @@ -185,6 +193,11 @@ public: emitInst(0x00000000); } + void sync() + { + emitInst(0x0000000f); + } + /* Need to insert one load data delay nop for mips1. */ void loadDelayNop() { @@ -227,6 +240,11 @@ public: emitInst(0x3c000000 | (rt << OP_SH_RT) | (imm & 0xffff)); } + void clz(RegisterID rd, RegisterID rs) + { + emitInst(0x70000020 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rd << OP_SH_RT)); + } + void addiu(RegisterID rt, RegisterID rs, int imm) { emitInst(0x24000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff)); @@ -529,11 +547,6 @@ public: emitInst(0x46200004 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); } - void absd(FPRegisterID fd, FPRegisterID fs) - { - emitInst(0x46200005 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); - } - void movd(FPRegisterID fd, FPRegisterID fs) { emitInst(0x46200006 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); @@ -681,16 +694,6 @@ public: return m_buffer.codeSize(); } - PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort) - { - RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(vm, ownerUID, effort); - if (!result) - return 0; - - relocateJumps(m_buffer.data(), result->start()); - return result.release(); - } - unsigned debugOffset() { return m_buffer.debugOffset(); } // Assembly helpers for moving data between fp and registers. @@ -730,6 +733,35 @@ public: // writable region of memory; to modify the code in an execute-only execuable // pool the 'repatch' and 'relink' methods should be used. 
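A note ahead of linkDirectJump() below: when the target lies outside the current 256MB segment, the full 32-bit address is rebuilt in $t9 from a lui/ori pair before the jr; otherwise a single short j suffices. A small standalone sketch of how an address splits across those two 16-bit immediates (the helpers are illustrative, not the assembler's code):

    #include <cassert>
    #include <cstdint>

    // A 32-bit constant is rebuilt in a register as:
    //   lui rt, upper16      ; rt = upper16 << 16
    //   ori rt, rt, lower16  ; rt |= lower16
    struct LuiOriPair {
        uint16_t upper;
        uint16_t lower;
    };

    static LuiOriPair split(uint32_t address)
    {
        return { static_cast<uint16_t>(address >> 16), static_cast<uint16_t>(address & 0xffff) };
    }

    static uint32_t materialize(LuiOriPair pair)
    {
        uint32_t reg = static_cast<uint32_t>(pair.upper) << 16; // lui
        reg |= pair.lower;                                      // ori
        return reg;
    }

    int main()
    {
        uint32_t target = 0x2abcd123; // hypothetical jump target
        assert(materialize(split(target)) == target);
        // The short 'j' form only reaches targets in the same 256MB segment
        // (top four address bits equal), which is the 0xf0000000 check below.
        uint32_t pc = 0x2a000000;
        assert((pc & 0xf0000000) == (target & 0xf0000000));
        return 0;
    }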
+ static size_t linkDirectJump(void* code, void* to) + { + MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code)); + size_t ops = 0; + int32_t slotAddr = reinterpret_cast<int>(insn) + 4; + int32_t toAddr = reinterpret_cast<int>(to); + + if ((slotAddr & 0xf0000000) != (toAddr & 0xf0000000)) { + // lui + *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((toAddr >> 16) & 0xffff); + ++insn; + // ori + *insn = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (toAddr & 0xffff); + ++insn; + // jr + *insn = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS); + ++insn; + ops = 4 * sizeof(MIPSWord); + } else { + // j + *insn = 0x08000000 | ((toAddr & 0x0fffffff) >> 2); + ++insn; + ops = 2 * sizeof(MIPSWord); + } + // nop + *insn = 0x00000000; + return ops; + } + void linkJump(AssemblerLabel from, AssemblerLabel to) { ASSERT(to.isSet()); @@ -843,68 +875,40 @@ public: static void cacheFlush(void* code, size_t size) { -#if GCC_VERSION_AT_LEAST(4, 3, 0) -#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4, 4, 3) - int lineSize; - asm("rdhwr %0, $1" : "=r" (lineSize)); - // - // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in - // mips_expand_synci_loop that may execute synci one more time. - // "start" points to the fisrt byte of the cache line. - // "end" points to the last byte of the line before the last cache line. - // Because size is always a multiple of 4, this is safe to set - // "end" to the last byte. - // - intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize); - intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1; - __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end)); -#else intptr_t end = reinterpret_cast<intptr_t>(code) + size; __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end)); -#endif -#else - _flush_cache(reinterpret_cast<char*>(code), size, BCACHE); -#endif } static ptrdiff_t maxJumpReplacementSize() { - return sizeof(MIPSWord) * 2; + return sizeof(MIPSWord) * 4; } static void revertJumpToMove(void* instructionStart, RegisterID rt, int imm) { MIPSWord* insn = static_cast<MIPSWord*>(instructionStart); + size_t codeSize = 2 * sizeof(MIPSWord); // lui *insn = 0x3c000000 | (rt << OP_SH_RT) | ((imm >> 16) & 0xffff); ++insn; // ori *insn = 0x34000000 | (rt << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff); - cacheFlush(insn, 2 * sizeof(MIPSWord)); - } - - static bool canJumpWithJ(void* instructionStart, void* to) - { - intptr_t slotAddr = reinterpret_cast<intptr_t>(instructionStart) + 4; - intptr_t toAddr = reinterpret_cast<intptr_t>(to); - return (slotAddr & 0xf0000000) == (toAddr & 0xf0000000); + ++insn; + // if jr $t9 + if (*insn == 0x03200008) { + *insn = 0x00000000; + codeSize += sizeof(MIPSWord); + } + cacheFlush(insn, codeSize); } static void replaceWithJump(void* instructionStart, void* to) { ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 3)); ASSERT(!(bitwise_cast<uintptr_t>(to) & 3)); - ASSERT(canJumpWithJ(instructionStart, to)); - MIPSWord* insn = reinterpret_cast<MIPSWord*>(instructionStart); - int32_t toAddr = reinterpret_cast<int32_t>(to); - - // j <to> - *insn = 0x08000000 | ((toAddr & 0x0fffffff) >> 2); - ++insn; - // nop - *insn = 0x00000000; - cacheFlush(instructionStart, 2 * sizeof(MIPSWord)); + size_t ops = linkDirectJump(instructionStart, to); + cacheFlush(instructionStart, ops); } static void replaceWithLoad(void* instructionStart) @@ -929,7 +933,6 @@ public: cacheFlush(insn, 4); } -private: /* 
Update each jump in the buffer of newBase. */ void relocateJumps(void* oldBase, void* newBase) { @@ -972,6 +975,7 @@ private: } } +private: static int linkWithOffset(MIPSWord* insn, void* to) { ASSERT((*insn & 0xfc000000) == 0x10000000 // beq diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.cpp b/Source/JavaScriptCore/assembler/MacroAssembler.cpp index 2cff056d2..0cd5bcfb0 100644 --- a/Source/JavaScriptCore/assembler/MacroAssembler.cpp +++ b/Source/JavaScriptCore/assembler/MacroAssembler.cpp @@ -28,11 +28,135 @@ #if ENABLE(ASSEMBLER) +#include <wtf/PrintStream.h> + namespace JSC { const double MacroAssembler::twoToThe32 = (double)0x100000000ull; +#if ENABLE(MASM_PROBE) +static void stdFunctionCallback(MacroAssembler::ProbeContext* context) +{ + auto func = static_cast<const std::function<void (MacroAssembler::ProbeContext*)>*>(context->arg1); + (*func)(context); +} + +void MacroAssembler::probe(std::function<void (MacroAssembler::ProbeContext*)> func) +{ + probe(stdFunctionCallback, new std::function<void (MacroAssembler::ProbeContext*)>(func), 0); +} +#endif // ENABLE(MASM_PROBE) + } // namespace JSC +namespace WTF { + +using namespace JSC; + +void printInternal(PrintStream& out, MacroAssembler::RelationalCondition cond) +{ + switch (cond) { + case MacroAssembler::Equal: + out.print("Equal"); + return; + case MacroAssembler::NotEqual: + out.print("NotEqual"); + return; + case MacroAssembler::Above: + out.print("Above"); + return; + case MacroAssembler::AboveOrEqual: + out.print("AboveOrEqual"); + return; + case MacroAssembler::Below: + out.print("Below"); + return; + case MacroAssembler::BelowOrEqual: + out.print("BelowOrEqual"); + return; + case MacroAssembler::GreaterThan: + out.print("GreaterThan"); + return; + case MacroAssembler::GreaterThanOrEqual: + out.print("GreaterThanOrEqual"); + return; + case MacroAssembler::LessThan: + out.print("LessThan"); + return; + case MacroAssembler::LessThanOrEqual: + out.print("LessThanOrEqual"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +void printInternal(PrintStream& out, MacroAssembler::ResultCondition cond) +{ + switch (cond) { + case MacroAssembler::Overflow: + out.print("Overflow"); + return; + case MacroAssembler::Signed: + out.print("Signed"); + return; + case MacroAssembler::PositiveOrZero: + out.print("PositiveOrZero"); + return; + case MacroAssembler::Zero: + out.print("Zero"); + return; + case MacroAssembler::NonZero: + out.print("NonZero"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +void printInternal(PrintStream& out, MacroAssembler::DoubleCondition cond) +{ + switch (cond) { + case MacroAssembler::DoubleEqual: + out.print("DoubleEqual"); + return; + case MacroAssembler::DoubleNotEqual: + out.print("DoubleNotEqual"); + return; + case MacroAssembler::DoubleGreaterThan: + out.print("DoubleGreaterThan"); + return; + case MacroAssembler::DoubleGreaterThanOrEqual: + out.print("DoubleGreaterThanOrEqual"); + return; + case MacroAssembler::DoubleLessThan: + out.print("DoubleLessThan"); + return; + case MacroAssembler::DoubleLessThanOrEqual: + out.print("DoubleLessThanOrEqual"); + return; + case MacroAssembler::DoubleEqualOrUnordered: + out.print("DoubleEqualOrUnordered"); + return; + case MacroAssembler::DoubleNotEqualOrUnordered: + out.print("DoubleNotEqualOrUnordered"); + return; + case MacroAssembler::DoubleGreaterThanOrUnordered: + out.print("DoubleGreaterThanOrUnordered"); + return; + case MacroAssembler::DoubleGreaterThanOrEqualOrUnordered: + out.print("DoubleGreaterThanOrEqualOrUnordered"); + return; + case 
MacroAssembler::DoubleLessThanOrUnordered: + out.print("DoubleLessThanOrUnordered"); + return; + case MacroAssembler::DoubleLessThanOrEqualOrUnordered: + out.print("DoubleLessThanOrEqualOrUnordered"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + #endif // ENABLE(ASSEMBLER) diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.h b/Source/JavaScriptCore/assembler/MacroAssembler.h index f74680d7f..35a3647dc 100644 --- a/Source/JavaScriptCore/assembler/MacroAssembler.h +++ b/Source/JavaScriptCore/assembler/MacroAssembler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,14 +26,16 @@ #ifndef MacroAssembler_h #define MacroAssembler_h -#include <wtf/Platform.h> - #if ENABLE(ASSEMBLER) #if CPU(ARM_THUMB2) #include "MacroAssemblerARMv7.h" namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; }; +#elif CPU(ARM64) +#include "MacroAssemblerARM64.h" +namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; }; + #elif CPU(ARM_TRADITIONAL) #include "MacroAssemblerARM.h" namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; }; @@ -67,16 +69,59 @@ namespace JSC { class MacroAssembler : public MacroAssemblerBase { public: + static constexpr RegisterID nextRegister(RegisterID reg) + { + return static_cast<RegisterID>(reg + 1); + } + + static constexpr FPRegisterID nextFPRegister(FPRegisterID reg) + { + return static_cast<FPRegisterID>(reg + 1); + } + + static constexpr unsigned numberOfRegisters() + { + return lastRegister() - firstRegister() + 1; + } + + static constexpr unsigned registerIndex(RegisterID reg) + { + return reg - firstRegister(); + } + + static constexpr unsigned numberOfFPRegisters() + { + return lastFPRegister() - firstFPRegister() + 1; + } + + static constexpr unsigned fpRegisterIndex(FPRegisterID reg) + { + return reg - firstFPRegister(); + } + + static constexpr unsigned registerIndex(FPRegisterID reg) + { + return fpRegisterIndex(reg) + numberOfRegisters(); + } + + static constexpr unsigned totalNumberOfRegisters() + { + return numberOfRegisters() + numberOfFPRegisters(); + } + using MacroAssemblerBase::pop; using MacroAssemblerBase::jump; using MacroAssemblerBase::branch32; + using MacroAssemblerBase::compare32; using MacroAssemblerBase::move; - -#if ENABLE(JIT_CONSTANT_BLINDING) using MacroAssemblerBase::add32; + using MacroAssemblerBase::mul32; using MacroAssemblerBase::and32; using MacroAssemblerBase::branchAdd32; using MacroAssemblerBase::branchMul32; +#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64) + using MacroAssemblerBase::branchPtr; +#endif using MacroAssemblerBase::branchSub32; using MacroAssemblerBase::lshift32; using MacroAssemblerBase::or32; @@ -85,7 +130,11 @@ public: using MacroAssemblerBase::sub32; using MacroAssemblerBase::urshift32; using MacroAssemblerBase::xor32; -#endif + + static bool isPtrAlignedAddressOffset(ptrdiff_t value) + { + return value == static_cast<int32_t>(value); + } static const double twoToThe32; // This is super useful for some double code. 
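The registerIndex()/fpRegisterIndex() helpers added above assign every register a dense index, with FPRs appended after all GPRs, which is convenient for bitsets and save/restore tables. A tiny sketch of the same layout using made-up enums (not the real RegisterID values):

    #include <cassert>

    // Hypothetical enums standing in for MacroAssembler's RegisterID/FPRegisterID.
    enum RegisterID { r0, r1, r2, r3, lastGPR = r3 };
    enum FPRegisterID { f0, f1, f2, f3, lastFPR = f3 };

    static constexpr unsigned numberOfRegisters() { return lastGPR - r0 + 1; }
    static constexpr unsigned registerIndex(RegisterID reg) { return reg - r0; }
    static constexpr unsigned fpRegisterIndex(FPRegisterID reg) { return reg - f0; }
    // FPRs are laid out after all the GPRs in the combined index space.
    static constexpr unsigned registerIndex(FPRegisterID reg) { return fpRegisterIndex(reg) + numberOfRegisters(); }
    static constexpr unsigned totalNumberOfRegisters() { return numberOfRegisters() + (lastFPR - f0 + 1); }

    int main()
    {
        static_assert(registerIndex(r2) == 2, "GPRs come first");
        static_assert(registerIndex(f1) == 5, "FPRs follow the 4 GPRs");
        static_assert(totalNumberOfRegisters() == 8, "4 GPRs + 4 FPRs");
        assert(registerIndex(f0) == numberOfRegisters());
        return 0;
    }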
@@ -120,10 +169,9 @@ public: return DoubleGreaterThanOrEqual; case DoubleLessThanOrEqualOrUnordered: return DoubleGreaterThan; - default: - RELEASE_ASSERT_NOT_REACHED(); - return DoubleEqual; // make compiler happy } + RELEASE_ASSERT_NOT_REACHED(); + return DoubleEqual; // make compiler happy } static bool isInvertible(ResultCondition cond) @@ -131,6 +179,8 @@ public: switch (cond) { case Zero: case NonZero: + case Signed: + case PositiveOrZero: return true; default: return false; @@ -144,11 +194,81 @@ public: return NonZero; case NonZero: return Zero; + case Signed: + return PositiveOrZero; + case PositiveOrZero: + return Signed; default: RELEASE_ASSERT_NOT_REACHED(); return Zero; // Make compiler happy for release builds. } } + + static RelationalCondition flip(RelationalCondition cond) + { + switch (cond) { + case Equal: + case NotEqual: + return cond; + case Above: + return Below; + case AboveOrEqual: + return BelowOrEqual; + case Below: + return Above; + case BelowOrEqual: + return AboveOrEqual; + case GreaterThan: + return LessThan; + case GreaterThanOrEqual: + return LessThanOrEqual; + case LessThan: + return GreaterThan; + case LessThanOrEqual: + return GreaterThanOrEqual; + } + + RELEASE_ASSERT_NOT_REACHED(); + return Equal; + } + + // True if this: + // branch8(cond, value, value) + // Is the same as this: + // branch32(cond, signExt8(value), signExt8(value)) + static bool isSigned(RelationalCondition cond) + { + switch (cond) { + case Equal: + case NotEqual: + case GreaterThan: + case GreaterThanOrEqual: + case LessThan: + case LessThanOrEqual: + return true; + default: + return false; + } + } + + // True if this: + // branch8(cond, value, value) + // Is the same as this: + // branch32(cond, zeroExt8(value), zeroExt8(value)) + static bool isUnsigned(RelationalCondition cond) + { + switch (cond) { + case Equal: + case NotEqual: + case Above: + case AboveOrEqual: + case Below: + case BelowOrEqual: + return true; + default: + return false; + } + } #endif // Platform agnostic onvenience functions, @@ -183,7 +303,34 @@ public: storePtr(imm, addressForPoke(index)); } -#if CPU(X86_64) +#if !CPU(ARM64) + void pushToSave(RegisterID src) + { + push(src); + } + void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm) + { + push(imm); + } + void popToRestore(RegisterID dest) + { + pop(dest); + } + void pushToSave(FPRegisterID src) + { + subPtr(TrustedImm32(sizeof(double)), stackPointerRegister); + storeDouble(src, stackPointerRegister); + } + void popToRestore(FPRegisterID dest) + { + loadDouble(stackPointerRegister, dest); + addPtr(TrustedImm32(sizeof(double)), stackPointerRegister); + } + + static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); } +#endif // !CPU(ARM64) + +#if CPU(X86_64) || CPU(ARM64) void peek64(RegisterID dest, int index = 0) { load64(Address(stackPointerRegister, (index * sizeof(void*))), dest); @@ -208,6 +355,13 @@ public: } #endif + // Immediate shifts only have 5 controllable bits + // so we'll consider them safe for now. + TrustedImm32 trustedImm32ForShift(Imm32 imm) + { + return TrustedImm32(imm.asTrustedImm32().m_value & 31); + } + // Backwards banches, these are currently all implemented using existing forwards branch mechanisms. 
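flip() above answers a different question than invert(): it yields the condition that holds when the two comparison operands are swapped, not the negated outcome. A standalone sketch checking that property on plain integers, with stand-in types (not the assembler's):

    #include <cassert>
    #include <cstdint>

    enum RelationalCondition { Equal, NotEqual, Above, AboveOrEqual, Below, BelowOrEqual,
                               GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual };

    // Mirrors the intent of MacroAssembler::flip(): the condition that holds for
    // (b, a) exactly when cond holds for (a, b).
    static RelationalCondition flip(RelationalCondition cond)
    {
        switch (cond) {
        case Above: return Below;
        case AboveOrEqual: return BelowOrEqual;
        case Below: return Above;
        case BelowOrEqual: return AboveOrEqual;
        case GreaterThan: return LessThan;
        case GreaterThanOrEqual: return LessThanOrEqual;
        case LessThan: return GreaterThan;
        case LessThanOrEqual: return GreaterThanOrEqual;
        default: return cond; // Equal/NotEqual are symmetric under operand swap.
        }
    }

    static bool evaluate(RelationalCondition cond, int32_t a, int32_t b)
    {
        switch (cond) {
        case Equal: return a == b;
        case NotEqual: return a != b;
        case Above: return static_cast<uint32_t>(a) > static_cast<uint32_t>(b);
        case AboveOrEqual: return static_cast<uint32_t>(a) >= static_cast<uint32_t>(b);
        case Below: return static_cast<uint32_t>(a) < static_cast<uint32_t>(b);
        case BelowOrEqual: return static_cast<uint32_t>(a) <= static_cast<uint32_t>(b);
        case GreaterThan: return a > b;
        case GreaterThanOrEqual: return a >= b;
        case LessThan: return a < b;
        case LessThanOrEqual: return a <= b;
        }
        return false;
    }

    int main()
    {
        const int32_t values[] = { -2, -1, 0, 1, 2 };
        for (int32_t a : values) {
            for (int32_t b : values) {
                for (int c = Equal; c <= LessThanOrEqual; ++c) {
                    RelationalCondition cond = static_cast<RelationalCondition>(c);
                    assert(evaluate(cond, a, b) == evaluate(flip(cond), b, a));
                }
            }
        }
        return 0;
    }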
void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target) { @@ -248,12 +402,17 @@ public: return branch32(commute(cond), right, left); } + void compare32(RelationalCondition cond, Imm32 left, RegisterID right, RegisterID dest) + { + compare32(commute(cond), right, left, dest); + } + void branchTestPtr(ResultCondition cond, RegisterID reg, Label target) { branchTestPtr(cond, reg).linkTo(target, this); } -#if !CPU(ARM_THUMB2) +#if !CPU(ARM_THUMB2) && !CPU(ARM64) PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0)) { return PatchableJump(branchPtr(cond, left, right)); @@ -264,6 +423,12 @@ public: return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue)); } + PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue)); + } + +#if !CPU(ARM_TRADITIONAL) PatchableJump patchableJump() { return PatchableJump(jump()); @@ -273,14 +438,18 @@ public: { return PatchableJump(branchTest32(cond, reg, mask)); } -#endif // !CPU(ARM_THUMB2) -#if !CPU(ARM) PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm) { return PatchableJump(branch32(cond, reg, imm)); } -#endif // !(CPU(ARM) + + PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm) + { + return PatchableJump(branch32(cond, address, imm)); + } +#endif +#endif void jump(Label target) { @@ -316,6 +485,18 @@ public: return condition; } + void oops() + { + abortWithReason(B3Oops); + } + + // B3 has additional pseudo-opcodes for returning, when it wants to signal that the return + // consumes some register in some way. + void ret32(RegisterID) { ret(); } + void ret64(RegisterID) { ret(); } + void retFloat(FPRegisterID) { ret(); } + void retDouble(FPRegisterID) { ret(); } + static const unsigned BlindingModulus = 64; bool shouldConsiderBlinding() { @@ -325,7 +506,7 @@ public: // Ptr methods // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents. // FIXME: should this use a test for 32-bitness instead of this specific exception? 
-#if !CPU(X86_64) +#if !CPU(X86_64) && !CPU(ARM64) void addPtr(Address src, RegisterID dest) { add32(src, dest); @@ -341,6 +522,11 @@ public: add32(src, dest); } + void addPtr(RegisterID left, RegisterID right, RegisterID dest) + { + add32(left, right, dest); + } + void addPtr(TrustedImm32 imm, RegisterID srcDest) { add32(imm, srcDest); @@ -370,7 +556,27 @@ public: { and32(imm, srcDest); } + + void andPtr(TrustedImmPtr imm, RegisterID srcDest) + { + and32(TrustedImm32(imm), srcDest); + } + + void lshiftPtr(Imm32 imm, RegisterID srcDest) + { + lshift32(trustedImm32ForShift(imm), srcDest); + } + void rshiftPtr(Imm32 imm, RegisterID srcDest) + { + rshift32(trustedImm32ForShift(imm), srcDest); + } + + void urshiftPtr(Imm32 imm, RegisterID srcDest) + { + urshift32(trustedImm32ForShift(imm), srcDest); + } + void negPtr(RegisterID dest) { neg32(dest); @@ -457,6 +663,11 @@ public: compare32(cond, left, right, dest); } + void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) + { + compare32(cond, left, right, dest); + } + void storePtr(RegisterID src, ImplicitAddress address) { store32(src, address); @@ -487,6 +698,16 @@ public: store32(TrustedImm32(imm), address); } + void storePtr(TrustedImm32 imm, ImplicitAddress address) + { + store32(imm, address); + } + + void storePtr(TrustedImmPtr imm, BaseIndex address) + { + store32(TrustedImm32(imm), address); + } + DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address) { return store32WithAddressOffsetPatch(src, address); @@ -571,11 +792,18 @@ public: { return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask); } -#else + +#else // !CPU(X86_64) + void addPtr(RegisterID src, RegisterID dest) { add64(src, dest); } + + void addPtr(RegisterID left, RegisterID right, RegisterID dest) + { + add64(left, right, dest); + } void addPtr(Address src, RegisterID dest) { @@ -622,6 +850,26 @@ public: and64(imm, srcDest); } + void andPtr(TrustedImmPtr imm, RegisterID srcDest) + { + and64(imm, srcDest); + } + + void lshiftPtr(Imm32 imm, RegisterID srcDest) + { + lshift64(trustedImm32ForShift(imm), srcDest); + } + + void rshiftPtr(Imm32 imm, RegisterID srcDest) + { + rshift64(trustedImm32ForShift(imm), srcDest); + } + + void urshiftPtr(Imm32 imm, RegisterID srcDest) + { + urshift64(trustedImm32ForShift(imm), srcDest); + } + void negPtr(RegisterID dest) { neg64(dest); @@ -732,6 +980,11 @@ public: store64(TrustedImm64(imm), address); } + void storePtr(TrustedImm32 imm, ImplicitAddress address) + { + store64(imm, address); + } + void storePtr(TrustedImmPtr imm, BaseIndex address) { store64(TrustedImm64(imm), address); @@ -847,7 +1100,6 @@ public: return branchSub64(cond, src1, src2, dest); } -#if ENABLE(JIT_CONSTANT_BLINDING) using MacroAssemblerBase::and64; using MacroAssemblerBase::convertInt32ToDouble; using MacroAssemblerBase::store64; @@ -862,7 +1114,7 @@ public: if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value)) return shouldConsiderBlinding(); - value = abs(value); + value = fabs(value); // Only allow a limited set of fractional components double scaledValue = value * 8; if (scaledValue / 8 != value) @@ -874,13 +1126,23 @@ public: return value > 0xff; } + bool shouldBlindPointerForSpecificArch(uintptr_t value) + { + if (sizeof(void*) == 4) + return shouldBlindForSpecificArch(static_cast<uint32_t>(value)); + return shouldBlindForSpecificArch(static_cast<uint64_t>(value)); + } + bool shouldBlind(ImmPtr imm) - { + { + if (!canBlind()) + return false; + #if 
ENABLE(FORCED_JIT_BLINDING) UNUSED_PARAM(imm); // Debug always blind all constants, if only so we know // if we've broken blinding during patch development. - return true; + return true; #endif // First off we'll special case common, "safe" values to avoid hurting @@ -906,7 +1168,7 @@ public: if (!shouldConsiderBlinding()) return false; - return shouldBlindForSpecificArch(value); + return shouldBlindPointerForSpecificArch(value); } struct RotatedImmPtr { @@ -1003,7 +1265,7 @@ public: void convertInt32ToDouble(Imm32 imm, FPRegisterID dest) { - if (shouldBlind(imm)) { + if (shouldBlind(imm) && haveScratchRegisterForBlinding()) { RegisterID scratchRegister = scratchRegisterForBlinding(); loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister); convertInt32ToDouble(scratchRegister, dest); @@ -1039,7 +1301,7 @@ public: Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right) { - if (shouldBlind(right)) { + if (shouldBlind(right) && haveScratchRegisterForBlinding()) { RegisterID scratchRegister = scratchRegisterForBlinding(); loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister); return branchPtr(cond, left, scratchRegister); @@ -1049,7 +1311,7 @@ public: void storePtr(ImmPtr imm, Address dest) { - if (shouldBlind(imm)) { + if (shouldBlind(imm) && haveScratchRegisterForBlinding()) { RegisterID scratchRegister = scratchRegisterForBlinding(); loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister); storePtr(scratchRegister, dest); @@ -1059,7 +1321,7 @@ public: void store64(Imm64 imm, Address dest) { - if (shouldBlind(imm)) { + if (shouldBlind(imm) && haveScratchRegisterForBlinding()) { RegisterID scratchRegister = scratchRegisterForBlinding(); loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister); store64(scratchRegister, dest); @@ -1067,11 +1329,32 @@ public: store64(imm.asTrustedImm64(), dest); } +#endif // !CPU(X86_64) + +#if ENABLE(B3_JIT) + // We should implement this the right way eventually, but for now, it's fine because it arises so + // infrequently. + void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest) + { + move(TrustedImm32(0), dest); + Jump falseCase = branchDouble(invert(cond), left, right); + move(TrustedImm32(1), dest); + falseCase.link(this); + } + void compareFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest) + { + move(TrustedImm32(0), dest); + Jump falseCase = branchFloat(invert(cond), left, right); + move(TrustedImm32(1), dest); + falseCase.link(this); + } #endif -#endif // !CPU(X86_64) + void lea(Address address, RegisterID dest) + { + addPtr(TrustedImm32(address.offset), address.base, dest); + } -#if ENABLE(JIT_CONSTANT_BLINDING) bool shouldBlind(Imm32 imm) { #if ENABLE(FORCED_JIT_BLINDING) @@ -1079,7 +1362,7 @@ public: // Debug always blind all constants, if only so we know // if we've broken blinding during patch development. 
return true; -#else +#else // ENABLE(FORCED_JIT_BLINDING) // First off we'll special case common, "safe" values to avoid hurting // performance too much @@ -1100,7 +1383,7 @@ public: return false; return shouldBlindForSpecificArch(value); -#endif +#endif // ENABLE(FORCED_JIT_BLINDING) } struct BlindedImm32 { @@ -1185,6 +1468,16 @@ public: } else add32(imm.asTrustedImm32(), dest); } + + void add32(Imm32 imm, RegisterID src, RegisterID dest) + { + if (shouldBlind(imm)) { + BlindedImm32 key = additionBlindedConstant(imm); + add32(key.value1, src, dest); + add32(key.value2, dest); + } else + add32(imm.asTrustedImm32(), src, dest); + } void addPtr(Imm32 imm, RegisterID dest) { @@ -1196,6 +1489,27 @@ public: addPtr(imm.asTrustedImm32(), dest); } + void mul32(Imm32 imm, RegisterID src, RegisterID dest) + { + if (shouldBlind(imm)) { + if (src != dest || haveScratchRegisterForBlinding()) { + if (src == dest) { + move(src, scratchRegisterForBlinding()); + src = scratchRegisterForBlinding(); + } + loadXorBlindedConstant(xorBlindConstant(imm), dest); + mul32(src, dest); + return; + } + // If we don't have a scratch register available for use, we'll just + // place a random number of nops. + uint32_t nopCount = random() & 3; + while (nopCount--) + nop(); + } + mul32(imm.asTrustedImm32(), src, dest); + } + void and32(Imm32 imm, RegisterID dest) { if (shouldBlind(imm)) { @@ -1266,12 +1580,12 @@ public: storePtr(value, addressForPoke(index)); } -#if CPU(X86_64) +#if CPU(X86_64) || CPU(ARM64) void poke(Imm64 value, int index = 0) { store64(value, addressForPoke(index)); } -#endif +#endif // CPU(X86_64) void store32(Imm32 imm, Address dest) { @@ -1280,10 +1594,10 @@ public: BlindedImm32 blind = xorBlindConstant(imm); store32(blind.value1, dest); xor32(blind.value2, dest); -#else - if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) { - loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister); - store32(scratchRegister, dest); +#else // CPU(X86) || CPU(X86_64) + if (haveScratchRegisterForBlinding()) { + loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding()); + store32(scratchRegisterForBlinding(), dest); } else { // If we don't have a scratch register available for use, we'll just // place a random number of nops. @@ -1292,7 +1606,7 @@ public: nop(); store32(imm.asTrustedImm32(), dest); } -#endif +#endif // CPU(X86) || CPU(X86_64) } else store32(imm.asTrustedImm32(), dest); } @@ -1340,9 +1654,9 @@ public: Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right) { if (shouldBlind(right)) { - if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) { - loadXorBlindedConstant(xorBlindConstant(right), scratchRegister); - return branch32(cond, left, scratchRegister); + if (haveScratchRegisterForBlinding()) { + loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding()); + return branch32(cond, left, scratchRegisterForBlinding()); } // If we don't have a scratch register available for use, we'll just // place a random number of nops. 
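The blinding helpers used above avoid placing attacker-influenced immediates in the instruction stream verbatim: xorBlindConstant() picks a key so the emitted code loads value^key and XORs the key back in at run time (the rotation and addition variants follow the same shape). A standalone sketch of the XOR form, with an illustrative key source rather than the JIT's own random():

    #include <cassert>
    #include <cstdint>
    #include <random>

    // Simplified stand-in for the patch's BlindedImm32 (which holds TrustedImm32s).
    struct ToyBlindedImm32 {
        uint32_t value1; // what actually appears in the instruction stream
        uint32_t value2; // the key XORed back in at run time
    };

    static ToyBlindedImm32 xorBlindConstant(uint32_t immediate, std::mt19937& rng)
    {
        uint32_t key = rng(); // stand-in for the JIT's key source
        return { immediate ^ key, key };
    }

    // What the emitted sequence computes: mov dest, value1; xor dest, value2.
    static uint32_t loadXorBlindedConstant(ToyBlindedImm32 blind)
    {
        uint32_t dest = blind.value1;
        dest ^= blind.value2;
        return dest;
    }

    int main()
    {
        std::mt19937 rng(0x5eed);
        uint32_t immediate = 0xdeadbeef; // an attacker-influenced constant
        ToyBlindedImm32 blind = xorBlindConstant(immediate, rng);
        // The original value is recovered at run time; value1 differs from the
        // immediate whenever the key is nonzero, so its raw bytes stay hidden.
        assert(loadXorBlindedConstant(blind) == immediate);
        return 0;
    }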
@@ -1355,17 +1669,38 @@ public: return branch32(cond, left, right.asTrustedImm32()); } + void compare32(RelationalCondition cond, RegisterID left, Imm32 right, RegisterID dest) + { + if (shouldBlind(right)) { + if (left != dest || haveScratchRegisterForBlinding()) { + RegisterID blindedConstantReg = dest; + if (left == dest) + blindedConstantReg = scratchRegisterForBlinding(); + loadXorBlindedConstant(xorBlindConstant(right), blindedConstantReg); + compare32(cond, left, blindedConstantReg, dest); + return; + } + // If we don't have a scratch register available for use, we'll just + // place a random number of nops. + uint32_t nopCount = random() & 3; + while (nopCount--) + nop(); + compare32(cond, left, right.asTrustedImm32(), dest); + return; + } + + compare32(cond, left, right.asTrustedImm32(), dest); + } + Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest) { if (src == dest) - ASSERT(scratchRegisterForBlinding()); + ASSERT(haveScratchRegisterForBlinding()); if (shouldBlind(imm)) { if (src == dest) { - if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) { - move(src, scratchRegister); - src = scratchRegister; - } + move(src, scratchRegisterForBlinding()); + src = scratchRegisterForBlinding(); } loadXorBlindedConstant(xorBlindConstant(imm), dest); return branchAdd32(cond, src, dest); @@ -1373,22 +1708,20 @@ public: return branchAdd32(cond, src, imm.asTrustedImm32(), dest); } - Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest) + Jump branchMul32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest) { if (src == dest) - ASSERT(scratchRegisterForBlinding()); + ASSERT(haveScratchRegisterForBlinding()); if (shouldBlind(imm)) { if (src == dest) { - if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) { - move(src, scratchRegister); - src = scratchRegister; - } + move(src, scratchRegisterForBlinding()); + src = scratchRegisterForBlinding(); } loadXorBlindedConstant(xorBlindConstant(imm), dest); return branchMul32(cond, src, dest); } - return branchMul32(cond, imm.asTrustedImm32(), src, dest); + return branchMul32(cond, src, imm.asTrustedImm32(), dest); } // branchSub32 takes a scratch register as 32 bit platforms make use of this, @@ -1404,13 +1737,6 @@ public: return branchSub32(cond, src, imm.asTrustedImm32(), dest); } - // Immediate shifts only have 5 controllable bits - // so we'll consider them safe for now. - TrustedImm32 trustedImm32ForShift(Imm32 imm) - { - return TrustedImm32(imm.asTrustedImm32().m_value & 31); - } - void lshift32(Imm32 imm, RegisterID dest) { lshift32(trustedImm32ForShift(imm), dest); @@ -1440,13 +1766,35 @@ public: { urshift32(src, trustedImm32ForShift(amount), dest); } + +#if ENABLE(MASM_PROBE) + using MacroAssemblerBase::probe; + + // Let's you print from your JIT generated code. + // See comments in MacroAssemblerPrinter.h for examples of how to use this. + template<typename... Arguments> + void print(Arguments... 
args); + + void probe(std::function<void (ProbeContext*)>); #endif }; } // namespace JSC +namespace WTF { + +class PrintStream; + +void printInternal(PrintStream&, JSC::MacroAssembler::RelationalCondition); +void printInternal(PrintStream&, JSC::MacroAssembler::ResultCondition); +void printInternal(PrintStream&, JSC::MacroAssembler::DoubleCondition); + +} // namespace WTF + #else // ENABLE(ASSEMBLER) +namespace JSC { + // If there is no assembler for this platform, at least allow code to make references to // some of the things it would otherwise define, albeit without giving that code any way // of doing anything useful. @@ -1460,6 +1808,8 @@ public: enum FPRegisterID { NoFPRegister }; }; +} // namespace JSC + #endif // ENABLE(ASSEMBLER) #endif // MacroAssembler_h diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp index ca0fa210a..9b1440fed 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp @@ -1,4 +1,5 @@ /* + * Copyright (C) 2013-2015 Apple Inc. * Copyright (C) 2009 University of Szeged * All rights reserved. * @@ -30,30 +31,24 @@ #include "MacroAssemblerARM.h" +#include <wtf/InlineASM.h> + #if OS(LINUX) #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include <elf.h> -# if OS(ANDROID) && PLATFORM(QT) -# include <asm/procinfo.h> -# else -# include <asm/hwcap.h> -# endif +#include <asm/hwcap.h> #endif namespace JSC { static bool isVFPPresent() { -#if defined(__SOFTFP__) - return false; -#endif - #if OS(LINUX) int fd = open("/proc/self/auxv", O_RDONLY); - if (fd > 0) { + if (fd != -1) { Elf32_auxv_t aux; while (read(fd, &aux, sizeof(Elf32_auxv_t))) { if (aux.a_type == AT_HWCAP) { @@ -63,9 +58,9 @@ static bool isVFPPresent() } close(fd); } -#endif +#endif // OS(LINUX) -#if (COMPILER(RVCT) && defined(__TARGET_FPU_VFP)) || (COMPILER(GCC) && defined(__VFP_FP__)) +#if (COMPILER(GCC_OR_CLANG) && defined(__VFP_FP__)) return true; #else return false; @@ -74,7 +69,7 @@ static bool isVFPPresent() const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent(); -#if !CPU(ARM_FEATURE_UNALIGNED) +#if CPU(ARMV5_OR_LOWER) /* On ARMv5 and below, natural alignment is required. */ void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest) { @@ -100,8 +95,276 @@ void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, Register } m_assembler.orr(dest, dest, m_assembler.lsl(ARMRegisters::S0, 16)); } -#endif +#endif // CPU(ARMV5_OR_LOWER) + +#if ENABLE(MASM_PROBE) + +extern "C" void ctiMasmProbeTrampoline(); + +#if COMPILER(GCC_OR_CLANG) + +// The following are offsets for MacroAssemblerARM::ProbeContext fields accessed +// by the ctiMasmProbeTrampoline stub. 
+ +#define PTR_SIZE 4 +#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE) +#define PROBE_ARG1_OFFSET (1 * PTR_SIZE) +#define PROBE_ARG2_OFFSET (2 * PTR_SIZE) + +#define PROBE_FIRST_GPREG_OFFSET (4 * PTR_SIZE) + +#define GPREG_SIZE 4 +#define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE)) +#define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE)) +#define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE)) +#define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE)) +#define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE)) +#define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE)) +#define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE)) +#define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE)) +#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE)) +#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE)) +#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE)) +#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE)) +#define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE)) +#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE)) +#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE)) +#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE)) + +#define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE)) +#define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE)) + +#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE)) + +#define FPREG_SIZE 8 +#define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE)) +#define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE)) +#define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE)) +#define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE)) +#define PROBE_CPU_D4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE)) +#define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE)) +#define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE)) +#define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE)) +#define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE)) +#define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE)) +#define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE)) +#define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE)) +#define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE)) +#define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE)) +#define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE)) +#define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE)) + +#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE)) + +// These ASSERTs remind you that if you change the layout of ProbeContext, +// you need to change ctiMasmProbeTrampoline offsets above to match. 
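The COMPILE_ASSERTs below are the enforcement of that reminder: each hand-maintained offset is checked against offsetof() on the real ProbeContext, so a layout change that is not mirrored in the assembly fails to compile instead of silently corrupting the saved state. The same pattern in isolation, with made-up names (the file itself uses WTF's COMPILE_ASSERT rather than static_assert):

    #include <cstddef> // offsetof

    struct ExampleContext {   // hypothetical stand-in for a ProbeContext-style record
        void* probeFunction;
        void* arg1;
        void* arg2;
    };

    #define EXAMPLE_ARG1_OFFSET (1 * sizeof(void*))
    static_assert(offsetof(ExampleContext, arg1) == EXAMPLE_ARG1_OFFSET,
        "hand-written assembly offsets must track the struct layout");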
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARM::ProbeContext, x) +COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, 
ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(sizeof(MacroAssemblerARM::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline); +#undef PROBE_OFFSETOF + +asm ( + ".text" "\n" + ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" + HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" + INLINE_ARM_FUNCTION(ctiMasmProbeTrampoline) "\n" + SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" + + // MacroAssemblerARM::probe() has already generated code to store some values. + // The top of stack now looks like this: + // esp[0 * ptrSize]: probeFunction + // esp[1 * ptrSize]: arg1 + // esp[2 * ptrSize]: arg2 + // esp[3 * ptrSize]: saved r3 / S0 + // esp[4 * ptrSize]: saved ip + // esp[5 * ptrSize]: saved lr + // esp[6 * ptrSize]: saved sp + + "mov ip, sp" "\n" + "mov r3, sp" "\n" + "sub r3, r3, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n" + + // The ARM EABI specifies that the stack needs to be 16 byte aligned. 
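The "bic r3, r3, #0xf" that follows implements that requirement by clearing the low four bits of the candidate pointer, i.e. rounding it down to the next 16-byte boundary. As a rough C equivalent (illustrative helper only; the stub does this in a single BIC instruction):

    #include <cstdint>

    inline uintptr_t alignDown16(uintptr_t p) { return p & ~uintptr_t(0xf); }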
+ "bic r3, r3, #0xf" "\n" + "mov sp, r3" "\n" + + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "add lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "\n" + "stmia lr, { r0-r11 }" "\n" + "mrs lr, APSR" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + "vmrs lr, FPSCR" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n" + + "ldr lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" + "ldr lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n" + "ldr lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n" + "ldr lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R3_OFFSET) "]" "\n" + "ldr lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "ldr lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "ldr lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n" + "vstmia.64 ip, { d0-d15 }" "\n" + + "mov fp, sp" "\n" // Save the ProbeContext*. + + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" + "mov r0, sp" "\n" // the ProbeContext* arg. + "blx ip" "\n" + + "mov sp, fp" "\n" + + // To enable probes to modify register state, we copy all registers + // out of the ProbeContext before returning. + + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D15_OFFSET + FPREG_SIZE) "\n" + "vldmdb.64 ip!, { d0-d15 }" "\n" + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n" + "ldmdb ip, { r0-r11 }" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n" + "vmsr FPSCR, ip" "\n" + + // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr. + // There are 2 issues that complicate the restoration of these last few + // registers: + // + // 1. Normal ARM calling convention relies on moving lr to pc to return to + // the caller. In our case, the address to return to is specified by + // ProbeContext.cpu.pc. And at that moment, we won't have any available + // scratch registers to hold the return address (lr needs to hold + // ProbeContext.cpu.lr, not the return address). + // + // The solution is to store the return address on the stack and load the + // pc from there. + // + // 2. Issue 1 means we will need to write to the stack location at + // ProbeContext.cpu.sp - 4. But if the user probe function had modified + // the value of ProbeContext.cpu.sp to point in the range between + // &ProbeContext.cpu.ip thru &ProbeContext.cpu.aspr, then the action for + // Issue 1 may trash the values to be restored before we can restore + // them. + // + // The solution is to check if ProbeContext.cpu.sp contains a value in + // the undesirable range. If so, we copy the remaining ProbeContext + // register data to a safe range (at memory lower than where + // ProbeContext.cpu.sp points) first, and restore the remaining register + // from this new range. 
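The comparison emitted at the start of the next block is that range check. In C terms it is roughly the following sketch, where copyRemainingSlotsBelowSP() is a hypothetical name for the ldr/str sequence under the branch that relocates the ip/sp/lr/pc/apsr slots:

    // Not self-contained: ProbeContext is the structure declared in the header,
    // and copyRemainingSlotsBelowSP() stands in for the relocation code below.
    if ((uintptr_t)context->cpu.sp <= (uintptr_t)&context->cpu.apsr)
        copyRemainingSlotsBelowSP(context); // the probe moved sp into the danger zone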
+ + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n" + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "cmp lr, ip" "\n" + "bgt " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n" + + // We get here because the new expected stack pointer location is lower + // than where it's supposed to be. This means the safe range of stack + // memory where we'll be copying the remaining register restore values to + // might be in a region of memory below the sp i.e. unallocated stack + // memory. This in turn makes it vulnerable to interrupts potentially + // trashing the copied values. To prevent that, we must first allocate the + // needed stack memory by adjusting the sp before the copying. + + "sub lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE) + " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n" + + "mov ip, sp" "\n" + "mov sp, lr" "\n" + "mov lr, ip" "\n" + + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + + SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "sub lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n" + "str ip, [lr]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + "msr APSR, ip" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "mov lr, ip" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "ldr sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + + "pop { pc }" "\n" +); +#endif // COMPILER(GCC_OR_CLANG) + +void MacroAssemblerARM::probe(MacroAssemblerARM::ProbeFunction function, void* arg1, void* arg2) +{ + push(RegisterID::sp); + push(RegisterID::lr); + push(RegisterID::ip); + push(RegisterID::S0); + // The following uses RegisterID::S0. So, they must come after we push S0 above. + push(trustedImm32FromPtr(arg2)); + push(trustedImm32FromPtr(arg1)); + push(trustedImm32FromPtr(function)); + + move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::S0); + m_assembler.blx(RegisterID::S0); } +#endif // ENABLE(MASM_PROBE) + +} // namespace JSC #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h index d9093413f..749cbab11 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. + * Copyright (C) 2008, 2013-2015 Apple Inc. * Copyright (C) 2009, 2010 University of Szeged * All rights reserved. 
* @@ -35,7 +35,7 @@ namespace JSC { -class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> { +class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler, MacroAssemblerARM> { static const int DoubleConditionMask = 0x0f; static const int DoubleConditionBitSpecial = 0x10; COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes); @@ -81,6 +81,7 @@ public: }; static const RegisterID stackPointerRegister = ARMRegisters::sp; + static const RegisterID framePointerRegister = ARMRegisters::fp; static const RegisterID linkRegister = ARMRegisters::lr; static const Scale ScalePtr = TimesFour; @@ -227,13 +228,24 @@ public: store32(ARMRegisters::S1, ARMRegisters::S0); } + void or32(TrustedImm32 imm, AbsoluteAddress dest) + { + move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0); + load32(Address(ARMRegisters::S0), ARMRegisters::S1); + or32(imm, ARMRegisters::S1); // It uses S0 as temporary register, we need to reload the address. + move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0); + store32(ARMRegisters::S1, ARMRegisters::S0); + } + void or32(TrustedImm32 imm, RegisterID dest) { + ASSERT(dest != ARMRegisters::S0); m_assembler.orrs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0)); } void or32(TrustedImm32 imm, RegisterID src, RegisterID dest) { + ASSERT(src != ARMRegisters::S0); m_assembler.orrs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0)); } @@ -262,7 +274,10 @@ public: void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f)); + if (!imm.m_value) + move(src, dest); + else + m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f)); } void urshift32(RegisterID shiftAmount, RegisterID dest) @@ -285,7 +300,10 @@ public: void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f)); + if (!imm.m_value) + move(src, dest); + else + m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f)); } void sub32(RegisterID src, RegisterID dest) @@ -363,7 +381,13 @@ public: m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } - void load8Signed(BaseIndex address, RegisterID dest) + void load8(const void* address, RegisterID dest) + { + move(TrustedImmPtr(address), ARMRegisters::S0); + m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, ARMRegisters::S0, 0); + } + + void load8SignedExtendTo32(BaseIndex address, RegisterID dest) { m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } @@ -378,7 +402,7 @@ public: m_assembler.baseIndexTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } - void load16Signed(BaseIndex address, RegisterID dest) + void load16SignedExtendTo32(BaseIndex address, RegisterID dest) { m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } @@ -393,7 +417,7 @@ public: m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } -#if !CPU(ARM_FEATURE_UNALIGNED) +#if CPU(ARMV5_OR_LOWER) void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest); #else void 
load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest) @@ -407,6 +431,18 @@ public: load16(address, dest); } + void abortWithReason(AbortReason reason) + { + move(TrustedImm32(reason), ARMRegisters::S0); + breakpoint(); + } + + void abortWithReason(AbortReason reason, intptr_t misc) + { + move(TrustedImm32(misc), ARMRegisters::S1); + abortWithReason(reason); + } + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) { ConvertibleLoadLabel result(this); @@ -452,6 +488,23 @@ public: m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, src, address.base, address.index, static_cast<int>(address.scale), address.offset); } + void store8(RegisterID src, ImplicitAddress address) + { + m_assembler.dtrUp(ARMAssembler::StoreUint8, src, address.base, address.offset); + } + + void store8(RegisterID src, const void* address) + { + move(TrustedImmPtr(address), ARMRegisters::S0); + m_assembler.dtrUp(ARMAssembler::StoreUint8, src, ARMRegisters::S0, 0); + } + + void store8(TrustedImm32 imm, ImplicitAddress address) + { + move(imm, ARMRegisters::S1); + store8(ARMRegisters::S1, address); + } + void store8(TrustedImm32 imm, const void* address) { move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0); @@ -504,6 +557,12 @@ public: m_assembler.pop(dest); } + void popPair(RegisterID dest1, RegisterID dest2) + { + m_assembler.pop(dest1); + m_assembler.pop(dest2); + } + void push(RegisterID src) { m_assembler.push(src); @@ -521,6 +580,12 @@ public: push(ARMRegisters::S0); } + void pushPair(RegisterID src1, RegisterID src2) + { + m_assembler.push(src2); + m_assembler.push(src1); + } + void move(TrustedImm32 imm, RegisterID dest) { m_assembler.moveImm(imm.m_value, dest); @@ -569,6 +634,13 @@ public: return branch32(cond, ARMRegisters::S1, right); } + Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) + { + move(TrustedImmPtr(left.m_ptr), ARMRegisters::S1); + load8(Address(ARMRegisters::S1), ARMRegisters::S1); + return branch32(cond, ARMRegisters::S1, right); + } + Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right, int useConstantPool = 0) { m_assembler.cmp(left, right); @@ -617,6 +689,12 @@ public: return branchTest32(cond, ARMRegisters::S1, mask); } + Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) + { + load8(address, ARMRegisters::S1); + return branchTest32(cond, ARMRegisters::S1, mask); + } + Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) { move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1); @@ -626,14 +704,14 @@ public: Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask) { - ASSERT((cond == Zero) || (cond == NonZero)); + ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero); m_assembler.tst(reg, mask); return Jump(m_assembler.jmp(ARMCondition(cond))); } Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) { - ASSERT((cond == Zero) || (cond == NonZero)); + ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero); ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true); if (w & ARMAssembler::Op2InvertedImmediate) m_assembler.bics(ARMRegisters::S0, reg, w & ~ARMAssembler::Op2InvertedImmediate); @@ -764,7 +842,7 @@ public: return branchMul32(cond, src, dest, dest); } - Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest) + Jump 
branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) { ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); if (cond == Overflow) { @@ -819,6 +897,11 @@ public: return Jump(m_assembler.jmp(ARMCondition(cond))); } + PatchableJump patchableJump() + { + return PatchableJump(m_assembler.jmp(ARMAssembler::AL, 1)); + } + PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm) { internalCompare32(reg, imm); @@ -838,6 +921,11 @@ public: return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear); } + Call nearTailCall() + { + return Call(m_assembler.jmp(), Call::LinkableNearTail); + } + Call call(RegisterID target) { return Call(m_assembler.blx(target), Call::None); @@ -876,7 +964,7 @@ public: void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest) { if (mask.m_value == -1) - m_assembler.cmp(0, reg); + m_assembler.tst(reg, reg); else m_assembler.tst(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0)); m_assembler.mov(dest, ARMAssembler::getOp2Byte(0)); @@ -990,6 +1078,13 @@ public: return dataLabel; } + DataLabel32 moveWithPatch(TrustedImm32 initialValue, RegisterID dest) + { + DataLabel32 dataLabel(this); + m_assembler.ldrUniqueImmediate(dest, static_cast<ARMWord>(initialValue.m_value)); + return dataLabel; + } + Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) { ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord)); @@ -1007,6 +1102,15 @@ public: return jump; } + Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + load32(left, ARMRegisters::S1); + ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord)); + dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0); + Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true); + return jump; + } + DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) { DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1); @@ -1035,6 +1139,7 @@ public: return s_isVFPPresent; } static bool supportsFloatingPointAbs() { return false; } + static bool supportsFloatingPointRounding() { return false; } void loadFloat(BaseIndex address, FPRegisterID dest) { @@ -1051,12 +1156,24 @@ public: m_assembler.baseIndexTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } - void loadDouble(const void* address, FPRegisterID dest) + void loadDouble(TrustedImmPtr address, FPRegisterID dest) { - move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0); + move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0); m_assembler.doubleDtrUp(ARMAssembler::LoadDouble, dest, ARMRegisters::S0, 0); } + NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + + NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + void storeFloat(FPRegisterID src, BaseIndex address) { m_assembler.baseIndexTransferFloat(ARMAssembler::StoreFloat, src, address.base, address.index, static_cast<int>(address.scale), address.offset); @@ -1072,9 +1189,9 @@ public: m_assembler.baseIndexTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.index, 
static_cast<int>(address.scale), address.offset); } - void storeDouble(FPRegisterID src, const void* address) + void storeDouble(FPRegisterID src, TrustedImmPtr address) { - move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0); + move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0); m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, ARMRegisters::S0, 0); } @@ -1084,6 +1201,12 @@ public: m_assembler.vmov_f64(dest, src); } + void moveZeroToDouble(FPRegisterID reg) + { + static double zeroConstant = 0.; + loadDouble(TrustedImmPtr(&zeroConstant), reg); + } + void addDouble(FPRegisterID src, FPRegisterID dest) { m_assembler.vadd_f64(dest, dest, src); @@ -1102,7 +1225,7 @@ public: void addDouble(AbsoluteAddress address, FPRegisterID dest) { - loadDouble(address.m_ptr, ARMRegisters::SD0); + loadDouble(TrustedImmPtr(address.m_ptr), ARMRegisters::SD0); addDouble(ARMRegisters::SD0, dest); } @@ -1294,6 +1417,11 @@ public: m_assembler.nop(); } + void memoryFence() + { + m_assembler.dmbSY(); + } + static FunctionPtr readCallTarget(CodeLocationCall call) { return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation()))); @@ -1311,6 +1439,13 @@ public: } static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; } + static bool canJumpReplacePatchableBranch32WithPatch() { return false; } + + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32) + { + UNREACHABLE_FOR_PLATFORM(); + return CodeLocationLabel(); + } static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr) { @@ -1328,11 +1463,30 @@ public: ARMAssembler::revertBranchPtrWithPatch(instructionStart.dataLocation(), reg, reinterpret_cast<uintptr_t>(initialValue) & 0xffff); } + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t) + { + UNREACHABLE_FOR_PLATFORM(); + } + static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*) { UNREACHABLE_FOR_PLATFORM(); } + static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) + { + ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress()); + } + + static void repatchCall(CodeLocationCall call, FunctionPtr destination) + { + ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress()); + } + +#if ENABLE(MASM_PROBE) + void probe(ProbeFunction, void* arg1, void* arg2); +#endif // ENABLE(MASM_PROBE) + protected: ARMAssembler::Condition ARMCondition(RelationalCondition cond) { @@ -1362,7 +1516,6 @@ protected: private: friend class LinkBuffer; - friend class RepatchBuffer; void internalCompare32(RegisterID left, TrustedImm32 right) { @@ -1375,18 +1528,29 @@ private: static void linkCall(void* code, Call call, FunctionPtr function) { - ARMAssembler::linkCall(code, call.m_label, function.value()); + if (call.isFlagSet(Call::Tail)) + ARMAssembler::linkJump(code, call.m_label, function.value()); + else + ARMAssembler::linkCall(code, call.m_label, function.value()); } - static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) + +#if ENABLE(MASM_PROBE) + inline TrustedImm32 trustedImm32FromPtr(void* ptr) { - ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress()); + return TrustedImm32(TrustedImmPtr(ptr)); } - static void repatchCall(CodeLocationCall call, FunctionPtr destination) + inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function) { - 
ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress()); + return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function))); + } + + inline TrustedImm32 trustedImm32FromPtr(void (*function)()) + { + return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function))); } +#endif static const bool s_isVFPPresent; }; diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp new file mode 100644 index 000000000..8e7b51b9f --- /dev/null +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp @@ -0,0 +1,507 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#if ENABLE(ASSEMBLER) && CPU(ARM64) +#include "MacroAssemblerARM64.h" + +#include <wtf/InlineASM.h> + +namespace JSC { + +#if ENABLE(MASM_PROBE) + +extern "C" void ctiMasmProbeTrampoline(); + +using namespace ARM64Registers; + +#if COMPILER(GCC_OR_CLANG) + +// The following are offsets for MacroAssemblerARM64::ProbeContext fields accessed +// by the ctiMasmProbeTrampoline stub. 
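With PTR_SIZE, GPREG_SIZE and FPREG_SIZE all 8 (only the low 64 bits of each q register are saved, via the d views), the constants defined below work out to:

    PROBE_FIRST_GPREG_OFFSET        = 3 * 8         = 24
    PROBE_FIRST_FPREG_OFFSET        = 24 + 35 * 8   = 304
    PROBE_SIZE                      = 304 + 32 * 8  = 560
    PROBE_SIZE_PLUS_SAVED_CALLER_SP = 560 + 8       = 568

The trampoline then rounds the new stack pointer down to a 16-byte boundary before using this area.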
+#define PTR_SIZE 8 +#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE) +#define PROBE_ARG1_OFFSET (1 * PTR_SIZE) +#define PROBE_ARG2_OFFSET (2 * PTR_SIZE) + +#define PROBE_FIRST_GPREG_OFFSET (3 * PTR_SIZE) + +#define GPREG_SIZE 8 +#define PROBE_CPU_X0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE)) +#define PROBE_CPU_X1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE)) +#define PROBE_CPU_X2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE)) +#define PROBE_CPU_X3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE)) +#define PROBE_CPU_X4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE)) +#define PROBE_CPU_X5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE)) +#define PROBE_CPU_X6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE)) +#define PROBE_CPU_X7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE)) +#define PROBE_CPU_X8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE)) +#define PROBE_CPU_X9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE)) +#define PROBE_CPU_X10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE)) +#define PROBE_CPU_X11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE)) +#define PROBE_CPU_X12_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE)) +#define PROBE_CPU_X13_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE)) +#define PROBE_CPU_X14_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE)) +#define PROBE_CPU_X15_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE)) +#define PROBE_CPU_X16_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE)) +#define PROBE_CPU_X17_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE)) +#define PROBE_CPU_X18_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE)) +#define PROBE_CPU_X19_OFFSET (PROBE_FIRST_GPREG_OFFSET + (19 * GPREG_SIZE)) +#define PROBE_CPU_X20_OFFSET (PROBE_FIRST_GPREG_OFFSET + (20 * GPREG_SIZE)) +#define PROBE_CPU_X21_OFFSET (PROBE_FIRST_GPREG_OFFSET + (21 * GPREG_SIZE)) +#define PROBE_CPU_X22_OFFSET (PROBE_FIRST_GPREG_OFFSET + (22 * GPREG_SIZE)) +#define PROBE_CPU_X23_OFFSET (PROBE_FIRST_GPREG_OFFSET + (23 * GPREG_SIZE)) +#define PROBE_CPU_X24_OFFSET (PROBE_FIRST_GPREG_OFFSET + (24 * GPREG_SIZE)) +#define PROBE_CPU_X25_OFFSET (PROBE_FIRST_GPREG_OFFSET + (25 * GPREG_SIZE)) +#define PROBE_CPU_X26_OFFSET (PROBE_FIRST_GPREG_OFFSET + (26 * GPREG_SIZE)) +#define PROBE_CPU_X27_OFFSET (PROBE_FIRST_GPREG_OFFSET + (27 * GPREG_SIZE)) +#define PROBE_CPU_X28_OFFSET (PROBE_FIRST_GPREG_OFFSET + (28 * GPREG_SIZE)) +#define PROBE_CPU_FP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (29 * GPREG_SIZE)) +#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (30 * GPREG_SIZE)) +#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (31 * GPREG_SIZE)) + +#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (32 * GPREG_SIZE)) +#define PROBE_CPU_NZCV_OFFSET (PROBE_FIRST_GPREG_OFFSET + (33 * GPREG_SIZE)) +#define PROBE_CPU_FPSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (34 * GPREG_SIZE)) + +#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (35 * GPREG_SIZE)) + +#define FPREG_SIZE 8 +#define PROBE_CPU_Q0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE)) +#define PROBE_CPU_Q1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE)) +#define PROBE_CPU_Q2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE)) +#define PROBE_CPU_Q3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE)) +#define PROBE_CPU_Q4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE)) +#define PROBE_CPU_Q5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE)) +#define PROBE_CPU_Q6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE)) +#define 
PROBE_CPU_Q7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE)) +#define PROBE_CPU_Q8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE)) +#define PROBE_CPU_Q9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE)) +#define PROBE_CPU_Q10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE)) +#define PROBE_CPU_Q11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE)) +#define PROBE_CPU_Q12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE)) +#define PROBE_CPU_Q13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE)) +#define PROBE_CPU_Q14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE)) +#define PROBE_CPU_Q15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE)) +#define PROBE_CPU_Q16_OFFSET (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE)) +#define PROBE_CPU_Q17_OFFSET (PROBE_FIRST_FPREG_OFFSET + (17 * FPREG_SIZE)) +#define PROBE_CPU_Q18_OFFSET (PROBE_FIRST_FPREG_OFFSET + (18 * FPREG_SIZE)) +#define PROBE_CPU_Q19_OFFSET (PROBE_FIRST_FPREG_OFFSET + (19 * FPREG_SIZE)) +#define PROBE_CPU_Q20_OFFSET (PROBE_FIRST_FPREG_OFFSET + (20 * FPREG_SIZE)) +#define PROBE_CPU_Q21_OFFSET (PROBE_FIRST_FPREG_OFFSET + (21 * FPREG_SIZE)) +#define PROBE_CPU_Q22_OFFSET (PROBE_FIRST_FPREG_OFFSET + (22 * FPREG_SIZE)) +#define PROBE_CPU_Q23_OFFSET (PROBE_FIRST_FPREG_OFFSET + (23 * FPREG_SIZE)) +#define PROBE_CPU_Q24_OFFSET (PROBE_FIRST_FPREG_OFFSET + (24 * FPREG_SIZE)) +#define PROBE_CPU_Q25_OFFSET (PROBE_FIRST_FPREG_OFFSET + (25 * FPREG_SIZE)) +#define PROBE_CPU_Q26_OFFSET (PROBE_FIRST_FPREG_OFFSET + (26 * FPREG_SIZE)) +#define PROBE_CPU_Q27_OFFSET (PROBE_FIRST_FPREG_OFFSET + (27 * FPREG_SIZE)) +#define PROBE_CPU_Q28_OFFSET (PROBE_FIRST_FPREG_OFFSET + (28 * FPREG_SIZE)) +#define PROBE_CPU_Q29_OFFSET (PROBE_FIRST_FPREG_OFFSET + (29 * FPREG_SIZE)) +#define PROBE_CPU_Q30_OFFSET (PROBE_FIRST_FPREG_OFFSET + (30 * FPREG_SIZE)) +#define PROBE_CPU_Q31_OFFSET (PROBE_FIRST_FPREG_OFFSET + (31 * FPREG_SIZE)) +#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (32 * FPREG_SIZE)) +#define SAVED_CALLER_SP PROBE_SIZE +#define PROBE_SIZE_PLUS_SAVED_CALLER_SP (SAVED_CALLER_SP + PTR_SIZE) + +// These ASSERTs remind you that if you change the layout of ProbeContext, +// you need to change ctiMasmProbeTrampoline offsets above to match. 
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARM64::ProbeContext, x) +COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x0) == PROBE_CPU_X0_OFFSET, ProbeContext_cpu_x0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x1) == PROBE_CPU_X1_OFFSET, ProbeContext_cpu_x1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x2) == PROBE_CPU_X2_OFFSET, ProbeContext_cpu_x2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x3) == PROBE_CPU_X3_OFFSET, ProbeContext_cpu_x3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x4) == PROBE_CPU_X4_OFFSET, ProbeContext_cpu_x4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x5) == PROBE_CPU_X5_OFFSET, ProbeContext_cpu_x5_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x6) == PROBE_CPU_X6_OFFSET, ProbeContext_cpu_x6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x7) == PROBE_CPU_X7_OFFSET, ProbeContext_cpu_x7_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x8) == PROBE_CPU_X8_OFFSET, ProbeContext_cpu_x8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x9) == PROBE_CPU_X9_OFFSET, ProbeContext_cpu_x9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x10) == PROBE_CPU_X10_OFFSET, ProbeContext_cpu_x10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x11) == PROBE_CPU_X11_OFFSET, ProbeContext_cpu_x11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x12) == PROBE_CPU_X12_OFFSET, ProbeContext_cpu_x12_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x13) == PROBE_CPU_X13_OFFSET, ProbeContext_cpu_x13_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x14) == PROBE_CPU_X14_OFFSET, ProbeContext_cpu_x14_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x15) == PROBE_CPU_X15_OFFSET, ProbeContext_cpu_x15_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x16) == PROBE_CPU_X16_OFFSET, ProbeContext_cpu_x16_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x17) == PROBE_CPU_X17_OFFSET, ProbeContext_cpu_x17_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x18) == PROBE_CPU_X18_OFFSET, ProbeContext_cpu_x18_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x19) == PROBE_CPU_X19_OFFSET, ProbeContext_cpu_x19_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x20) == PROBE_CPU_X20_OFFSET, ProbeContext_cpu_x20_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x21) == PROBE_CPU_X21_OFFSET, ProbeContext_cpu_x21_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x22) == PROBE_CPU_X22_OFFSET, ProbeContext_cpu_x22_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x23) == PROBE_CPU_X23_OFFSET, ProbeContext_cpu_x23_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x24) == PROBE_CPU_X24_OFFSET, 
ProbeContext_cpu_x24_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x25) == PROBE_CPU_X25_OFFSET, ProbeContext_cpu_x25_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x26) == PROBE_CPU_X26_OFFSET, ProbeContext_cpu_x26_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x27) == PROBE_CPU_X27_OFFSET, ProbeContext_cpu_x27_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x28) == PROBE_CPU_X28_OFFSET, ProbeContext_cpu_x28_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fp) == PROBE_CPU_FP_OFFSET, ProbeContext_cpu_fp_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.nzcv) == PROBE_CPU_NZCV_OFFSET, ProbeContext_cpu_nzcv_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpsr) == PROBE_CPU_FPSR_OFFSET, ProbeContext_cpu_fpsr_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q0) == PROBE_CPU_Q0_OFFSET, ProbeContext_cpu_q0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q1) == PROBE_CPU_Q1_OFFSET, ProbeContext_cpu_q1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q2) == PROBE_CPU_Q2_OFFSET, ProbeContext_cpu_q2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q3) == PROBE_CPU_Q3_OFFSET, ProbeContext_cpu_q3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q4) == PROBE_CPU_Q4_OFFSET, ProbeContext_cpu_q4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q5) == PROBE_CPU_Q5_OFFSET, ProbeContext_cpu_q5_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q6) == PROBE_CPU_Q6_OFFSET, ProbeContext_cpu_q6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q7) == PROBE_CPU_Q7_OFFSET, ProbeContext_cpu_q7_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q8) == PROBE_CPU_Q8_OFFSET, ProbeContext_cpu_q8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q9) == PROBE_CPU_Q9_OFFSET, ProbeContext_cpu_q9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q10) == PROBE_CPU_Q10_OFFSET, ProbeContext_cpu_q10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q11) == PROBE_CPU_Q11_OFFSET, ProbeContext_cpu_q11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q12) == PROBE_CPU_Q12_OFFSET, ProbeContext_cpu_q12_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q13) == PROBE_CPU_Q13_OFFSET, ProbeContext_cpu_q13_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q14) == PROBE_CPU_Q14_OFFSET, ProbeContext_cpu_q14_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q15) == PROBE_CPU_Q15_OFFSET, ProbeContext_cpu_q15_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q16) == PROBE_CPU_Q16_OFFSET, ProbeContext_cpu_q16_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q17) == PROBE_CPU_Q17_OFFSET, 
ProbeContext_cpu_q17_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q18) == PROBE_CPU_Q18_OFFSET, ProbeContext_cpu_q18_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q19) == PROBE_CPU_Q19_OFFSET, ProbeContext_cpu_q19_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q20) == PROBE_CPU_Q20_OFFSET, ProbeContext_cpu_q20_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q21) == PROBE_CPU_Q21_OFFSET, ProbeContext_cpu_q21_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q22) == PROBE_CPU_Q22_OFFSET, ProbeContext_cpu_q22_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q23) == PROBE_CPU_Q23_OFFSET, ProbeContext_cpu_q23_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q24) == PROBE_CPU_Q24_OFFSET, ProbeContext_cpu_q24_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q25) == PROBE_CPU_Q25_OFFSET, ProbeContext_cpu_q25_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q26) == PROBE_CPU_Q26_OFFSET, ProbeContext_cpu_q26_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q27) == PROBE_CPU_Q27_OFFSET, ProbeContext_cpu_q27_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q28) == PROBE_CPU_Q28_OFFSET, ProbeContext_cpu_q28_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q29) == PROBE_CPU_Q29_OFFSET, ProbeContext_cpu_q29_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q30) == PROBE_CPU_Q30_OFFSET, ProbeContext_cpu_q30_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q31) == PROBE_CPU_Q31_OFFSET, ProbeContext_cpu_q31_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(sizeof(MacroAssemblerARM64::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline); + +#undef PROBE_OFFSETOF + +asm ( + ".text" "\n" + ".align 2" "\n" + ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" + HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" + SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" + + // MacroAssemblerARM64::probe() has already generated code to store some values. + // The top of stack (the caller save buffer) now looks like this: + // sp[0 * ptrSize]: probeFunction + // sp[1 * ptrSize]: arg1 + // sp[2 * ptrSize]: arg2 + // sp[3 * ptrSize]: address of arm64ProbeTrampoline() + // sp[4 * ptrSize]: saved x27 + // sp[5 * ptrSize]: saved x28 + // sp[6 * ptrSize]: saved lr + // sp[7 * ptrSize]: saved sp + + "mov x27, sp" "\n" + "mov x28, sp" "\n" + + "sub x28, x28, #" STRINGIZE_VALUE_OF(PROBE_SIZE_PLUS_SAVED_CALLER_SP) "\n" + + // The ARM EABI specifies that the stack needs to be 16 byte aligned. 
+ "bic x28, x28, #0xf" "\n" + "mov sp, x28" "\n" + + "str x27, [sp, #" STRINGIZE_VALUE_OF(SAVED_CALLER_SP) "]" "\n" + + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X0_OFFSET) "]" "\n" + "str x1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X1_OFFSET) "]" "\n" + "str x2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X2_OFFSET) "]" "\n" + "str x3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X3_OFFSET) "]" "\n" + "str x4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X4_OFFSET) "]" "\n" + "str x5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X5_OFFSET) "]" "\n" + "str x6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X6_OFFSET) "]" "\n" + "str x7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X7_OFFSET) "]" "\n" + "str x8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X8_OFFSET) "]" "\n" + "str x9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X9_OFFSET) "]" "\n" + "str x10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X10_OFFSET) "]" "\n" + "str x11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X11_OFFSET) "]" "\n" + "str x12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X12_OFFSET) "]" "\n" + "str x13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X13_OFFSET) "]" "\n" + "str x14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X14_OFFSET) "]" "\n" + "str x15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X15_OFFSET) "]" "\n" + "str x16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X16_OFFSET) "]" "\n" + "str x17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X17_OFFSET) "]" "\n" + "str x18, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X18_OFFSET) "]" "\n" + "str x19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X19_OFFSET) "]" "\n" + "str x20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X20_OFFSET) "]" "\n" + "str x21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X21_OFFSET) "]" "\n" + "str x22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X22_OFFSET) "]" "\n" + "str x23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X23_OFFSET) "]" "\n" + "str x24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X24_OFFSET) "]" "\n" + "str x25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X25_OFFSET) "]" "\n" + "str x26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X26_OFFSET) "]" "\n" + + "ldr x0, [x27, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X27_OFFSET) "]" "\n" + "ldr x0, [x27, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X28_OFFSET) "]" "\n" + + "str fp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FP_OFFSET) "]" "\n" + + "ldr x0, [x27, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "ldr x0, [x27, #7 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + + "mrs x0, nzcv" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_NZCV_OFFSET) "]" "\n" + "mrs x0, fpsr" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSR_OFFSET) "]" "\n" + + "ldr x0, [x27, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" + "ldr x0, [x27, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n" + "ldr x0, [x27, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n" + + "str d0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q0_OFFSET) "]" "\n" + "str d1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q1_OFFSET) "]" "\n" + "str d2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q2_OFFSET) "]" "\n" + "str d3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q3_OFFSET) "]" "\n" + "str d4, [sp, #" 
STRINGIZE_VALUE_OF(PROBE_CPU_Q4_OFFSET) "]" "\n" + "str d5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q5_OFFSET) "]" "\n" + "str d6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q6_OFFSET) "]" "\n" + "str d7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q7_OFFSET) "]" "\n" + "str d8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q8_OFFSET) "]" "\n" + "str d9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q9_OFFSET) "]" "\n" + "str d10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q10_OFFSET) "]" "\n" + "str d11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q11_OFFSET) "]" "\n" + "str d12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q12_OFFSET) "]" "\n" + "str d13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q13_OFFSET) "]" "\n" + "str d14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q14_OFFSET) "]" "\n" + "str d15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q15_OFFSET) "]" "\n" + "str d16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q16_OFFSET) "]" "\n" + "str d17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q17_OFFSET) "]" "\n" + "str d18, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q18_OFFSET) "]" "\n" + "str d19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q19_OFFSET) "]" "\n" + "str d20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q20_OFFSET) "]" "\n" + "str d21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q21_OFFSET) "]" "\n" + "str d22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q22_OFFSET) "]" "\n" + "str d23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q23_OFFSET) "]" "\n" + "str d24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q24_OFFSET) "]" "\n" + "str d25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q25_OFFSET) "]" "\n" + "str d26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q26_OFFSET) "]" "\n" + "str d27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q27_OFFSET) "]" "\n" + "str d28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q28_OFFSET) "]" "\n" + "str d29, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q29_OFFSET) "]" "\n" + "str d30, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q30_OFFSET) "]" "\n" + "str d31, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q31_OFFSET) "]" "\n" + + "mov x28, sp" "\n" // Save the ProbeContext*. + + "mov x0, sp" "\n" // the ProbeContext* arg. + "ldr x27, [x27, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "blr x27" "\n" + + "mov sp, x28" "\n" + + // To enable probes to modify register state, we copy all registers + // out of the ProbeContext before returning. That is except for x18, pc and sp. + + // x18 is "reserved for the platform. Conforming software should not make use of it." + // Hence, the JITs would not be using it, and the probe should also not be modifying it. + // See https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html. + + // We can't modify the pc, because the only way to set its value on ARM64 is via + // an indirect branch or a ret, which means we'll need a free register to do so. + // The probe mechanism is required to not perturb any registers that the caller + // may use. Hence, we don't have this free register available. + + // In order to return to the caller, we need to ret via lr. The probe mechanism will + // restore lr's value after returning to the caller by loading the restore value + // from the caller save buffer. The caller expects to access the caller save buffer via + // sp. Hence, we cannot allow sp to be modified by the probe. 
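Everything else in the ProbeContext is copied back out, so a probe may legitimately rewrite register state as long as it leaves sp, pc and x18 alone; arm64ProbeTrampoline() further down reverts any sp or pc change and logs an error. A hypothetical probe that stays within those rules (the field is assumed to accept a zero assignment, matching the pointer-sized save slots above):

    // Illustrative only: clear x0 in the probed code; sp, pc and x18 are untouched.
    static void zeroX0Probe(MacroAssemblerARM64::ProbeContext* context)
    {
        context->cpu.x0 = 0;
    }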
+ + "ldr d0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q0_OFFSET) "]" "\n" + "ldr d1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q1_OFFSET) "]" "\n" + "ldr d2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q2_OFFSET) "]" "\n" + "ldr d3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q3_OFFSET) "]" "\n" + "ldr d4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q4_OFFSET) "]" "\n" + "ldr d5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q5_OFFSET) "]" "\n" + "ldr d6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q6_OFFSET) "]" "\n" + "ldr d7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q7_OFFSET) "]" "\n" + "ldr d8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q8_OFFSET) "]" "\n" + "ldr d9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q9_OFFSET) "]" "\n" + "ldr d10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q10_OFFSET) "]" "\n" + "ldr d11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q11_OFFSET) "]" "\n" + "ldr d12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q12_OFFSET) "]" "\n" + "ldr d13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q13_OFFSET) "]" "\n" + "ldr d14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q14_OFFSET) "]" "\n" + "ldr d15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q15_OFFSET) "]" "\n" + "ldr d16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q16_OFFSET) "]" "\n" + "ldr d17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q17_OFFSET) "]" "\n" + "ldr d18, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q18_OFFSET) "]" "\n" + "ldr d19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q19_OFFSET) "]" "\n" + "ldr d20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q20_OFFSET) "]" "\n" + "ldr d21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q21_OFFSET) "]" "\n" + "ldr d22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q22_OFFSET) "]" "\n" + "ldr d23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q23_OFFSET) "]" "\n" + "ldr d24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q24_OFFSET) "]" "\n" + "ldr d25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q25_OFFSET) "]" "\n" + "ldr d26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q26_OFFSET) "]" "\n" + "ldr d27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q27_OFFSET) "]" "\n" + "ldr d28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q28_OFFSET) "]" "\n" + "ldr d29, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q29_OFFSET) "]" "\n" + "ldr d30, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q30_OFFSET) "]" "\n" + "ldr d31, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q31_OFFSET) "]" "\n" + + "ldr x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X0_OFFSET) "]" "\n" + "ldr x1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X1_OFFSET) "]" "\n" + "ldr x2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X2_OFFSET) "]" "\n" + "ldr x3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X3_OFFSET) "]" "\n" + "ldr x4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X4_OFFSET) "]" "\n" + "ldr x5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X5_OFFSET) "]" "\n" + "ldr x6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X6_OFFSET) "]" "\n" + "ldr x7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X7_OFFSET) "]" "\n" + "ldr x8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X8_OFFSET) "]" "\n" + "ldr x9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X9_OFFSET) "]" "\n" + "ldr x10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X10_OFFSET) "]" "\n" + "ldr x11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X11_OFFSET) "]" "\n" + "ldr x12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X12_OFFSET) "]" "\n" + "ldr x13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X13_OFFSET) "]" "\n" + "ldr x14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X14_OFFSET) "]" "\n" + "ldr x15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X15_OFFSET) "]" "\n" + "ldr x16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X16_OFFSET) "]" "\n" + "ldr x17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X17_OFFSET) "]" "\n" + // x18 should not be modified by the probe. See comment above for details. 
+ "ldr x19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X19_OFFSET) "]" "\n" + "ldr x20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X20_OFFSET) "]" "\n" + "ldr x21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X21_OFFSET) "]" "\n" + "ldr x22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X22_OFFSET) "]" "\n" + "ldr x23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X23_OFFSET) "]" "\n" + "ldr x24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X24_OFFSET) "]" "\n" + "ldr x25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X25_OFFSET) "]" "\n" + "ldr x26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X26_OFFSET) "]" "\n" + + "ldr x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSR_OFFSET) "]" "\n" + "msr fpsr, x27" "\n" + + "ldr x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_NZCV_OFFSET) "]" "\n" + "msr nzcv, x27" "\n" + "ldr fp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FP_OFFSET) "]" "\n" + + "ldr x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X27_OFFSET) "]" "\n" + "ldr x28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X28_OFFSET) "]" "\n" + + // There are 4 more registers left to restore: x27, x28, lr, sp, and pc. + // The JIT code's lr and sp will be restored by the caller. + + // Restore pc by loading it into lr. The ret below will put in the pc. + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + + // We need x27 as a scratch register to help with popping the ProbeContext. + // Hence, before we pop the ProbeContext, we need to copy the restore value + // for x27 from the ProbeContext to the caller save buffer. + "ldr x28, [sp, #" STRINGIZE_VALUE_OF(SAVED_CALLER_SP) "]" "\n" + "ldr x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X27_OFFSET) "]" "\n" + "str x27, [x28, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + + // Since lr is also restored by the caller, we need to copy its restore + // value to the caller save buffer too. + "ldr x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "str x27, [x28, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + + // We're now done with x28, and can restore its value. + "ldr x28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X28_OFFSET) "]" "\n" + + // We're now done with the ProbeContext, and can pop it to restore sp so that + // it points to the caller save buffer. + "ldr x27, [sp, #" STRINGIZE_VALUE_OF(SAVED_CALLER_SP) "]" "\n" + "mov sp, x27" "\n" + + // We're now done with x27, and can restore it. + "ldr x27, [sp, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + + "ret" "\n" +); +#endif // COMPILER(GCC_OR_CLANG) + +static void arm64ProbeTrampoline(MacroAssemblerARM64::ProbeContext* context) +{ + void* origSP = context->cpu.sp; + void* origPC = context->cpu.pc; + + context->probeFunction(context); + + if (context->cpu.sp != origSP) { + dataLog("MacroAssembler probe ERROR: ARM64 does not support the probe changing the SP. The change will be ignored\n"); + context->cpu.sp = origSP; + } + if (context->cpu.pc != origPC) { + dataLog("MacroAssembler probe ERROR: ARM64 does not support the probe changing the PC. The change will be ignored\n"); + context->cpu.pc = origPC; + } +} + +void MacroAssemblerARM64::probe(MacroAssemblerARM64::ProbeFunction function, void* arg1, void* arg2) +{ + sub64(TrustedImm32(8 * 8), sp); + + store64(x27, Address(sp, 4 * 8)); + store64(x28, Address(sp, 5 * 8)); + store64(lr, Address(sp, 6 * 8)); + + add64(TrustedImm32(8 * 8), sp, x28); + store64(x28, Address(sp, 7 * 8)); // Save original sp value. 
+ + move(TrustedImmPtr(reinterpret_cast<void*>(function)), x28); + store64(x28, Address(sp)); + move(TrustedImmPtr(arg1), x28); + store64(x28, Address(sp, 1 * 8)); + move(TrustedImmPtr(arg2), x28); + store64(x28, Address(sp, 2 * 8)); + move(TrustedImmPtr(reinterpret_cast<void*>(arm64ProbeTrampoline)), x28); + store64(x28, Address(sp, 3 * 8)); + + move(TrustedImmPtr(reinterpret_cast<void*>(ctiMasmProbeTrampoline)), x28); + m_assembler.blr(x28); + + // ctiMasmProbeTrampoline should have restored every register except for + // lr and the sp. + load64(Address(sp, 6 * 8), lr); + add64(TrustedImm32(8 * 8), sp); +} +#endif // ENABLE(MASM_PROBE) + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) && CPU(ARM64) + diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h new file mode 100644 index 000000000..42ac400fc --- /dev/null +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h @@ -0,0 +1,3679 @@ +/* + * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef MacroAssemblerARM64_h +#define MacroAssemblerARM64_h + +#if ENABLE(ASSEMBLER) + +#include "ARM64Assembler.h" +#include "AbstractMacroAssembler.h" +#include <wtf/MathExtras.h> +#include <wtf/Optional.h> + +namespace JSC { + +class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler, MacroAssemblerARM64> { +public: + static const RegisterID dataTempRegister = ARM64Registers::ip0; + static const RegisterID memoryTempRegister = ARM64Registers::ip1; + + RegisterID scratchRegister() + { + RELEASE_ASSERT(m_allowScratchRegister); + return getCachedDataTempRegisterIDAndInvalidate(); + } + +private: + static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31; + static const ARM64Assembler::SetFlags S = ARM64Assembler::S; + static const intptr_t maskHalfWord0 = 0xffffl; + static const intptr_t maskHalfWord1 = 0xffff0000l; + static const intptr_t maskUpperWord = 0xffffffff00000000l; + + // 4 instructions - 3 to load the function pointer, + blr. 
+ static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16; + +public: + MacroAssemblerARM64() + : m_dataMemoryTempRegister(this, dataTempRegister) + , m_cachedMemoryTempRegister(this, memoryTempRegister) + , m_makeJumpPatchable(false) + { + } + + typedef ARM64Assembler::LinkRecord LinkRecord; + typedef ARM64Assembler::JumpType JumpType; + typedef ARM64Assembler::JumpLinkType JumpLinkType; + typedef ARM64Assembler::Condition Condition; + + static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid; + static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize; + + Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); } + void* unlinkedCode() { return m_assembler.unlinkedCode(); } + static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); } + static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); } + static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); } + static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); } + static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARM64Assembler::link(record, from, to); } + + static const Scale ScalePtr = TimesEight; + + static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value) + { + // This is the largest 32-bit access allowed, aligned to 64-bit boundary. + return !(value & ~0x3ff8); + } + + enum RelationalCondition { + Equal = ARM64Assembler::ConditionEQ, + NotEqual = ARM64Assembler::ConditionNE, + Above = ARM64Assembler::ConditionHI, + AboveOrEqual = ARM64Assembler::ConditionHS, + Below = ARM64Assembler::ConditionLO, + BelowOrEqual = ARM64Assembler::ConditionLS, + GreaterThan = ARM64Assembler::ConditionGT, + GreaterThanOrEqual = ARM64Assembler::ConditionGE, + LessThan = ARM64Assembler::ConditionLT, + LessThanOrEqual = ARM64Assembler::ConditionLE + }; + + enum ResultCondition { + Overflow = ARM64Assembler::ConditionVS, + Signed = ARM64Assembler::ConditionMI, + PositiveOrZero = ARM64Assembler::ConditionPL, + Zero = ARM64Assembler::ConditionEQ, + NonZero = ARM64Assembler::ConditionNE + }; + + enum ZeroCondition { + IsZero = ARM64Assembler::ConditionEQ, + IsNonZero = ARM64Assembler::ConditionNE + }; + + enum DoubleCondition { + // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN. + DoubleEqual = ARM64Assembler::ConditionEQ, + DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! check for this & handle differently. + DoubleGreaterThan = ARM64Assembler::ConditionGT, + DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE, + DoubleLessThan = ARM64Assembler::ConditionLO, + DoubleLessThanOrEqual = ARM64Assembler::ConditionLS, + // If either operand is NaN, these conditions always evaluate to true. + DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! check for this & handle differently. 
+ DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE, + DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI, + DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS, + DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT, + DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE, + }; + + static const RegisterID stackPointerRegister = ARM64Registers::sp; + static const RegisterID framePointerRegister = ARM64Registers::fp; + static const RegisterID linkRegister = ARM64Registers::lr; + + // FIXME: Get reasonable implementations for these + static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; } + static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; } + + // Integer operations: + + void add32(RegisterID a, RegisterID b, RegisterID dest) + { + ASSERT(a != ARM64Registers::sp && b != ARM64Registers::sp); + m_assembler.add<32>(dest, a, b); + } + + void add32(RegisterID src, RegisterID dest) + { + m_assembler.add<32>(dest, dest, src); + } + + void add32(TrustedImm32 imm, RegisterID dest) + { + add32(imm, dest, dest); + } + + void add32(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + if (isUInt12(imm.m_value)) + m_assembler.add<32>(dest, src, UInt12(imm.m_value)); + else if (isUInt12(-imm.m_value)) + m_assembler.sub<32>(dest, src, UInt12(-imm.m_value)); + else { + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.add<32>(dest, src, dataTempRegister); + } + } + + void add32(TrustedImm32 imm, Address address) + { + load32(address, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) + m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + else if (isUInt12(-imm.m_value)) + m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + else { + move(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister); + } + + store32(dataTempRegister, address); + } + + void add32(TrustedImm32 imm, AbsoluteAddress address) + { + load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) { + m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + store32(dataTempRegister, address.m_ptr); + return; + } + + if (isUInt12(-imm.m_value)) { + m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + store32(dataTempRegister, address.m_ptr); + return; + } + + move(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister); + store32(dataTempRegister, address.m_ptr); + } + + void add32(Address src, RegisterID dest) + { + load32(src, getCachedDataTempRegisterIDAndInvalidate()); + add32(dataTempRegister, dest); + } + + void add64(RegisterID a, RegisterID b, RegisterID dest) + { + ASSERT(a != ARM64Registers::sp || b != ARM64Registers::sp); + if (b == ARM64Registers::sp) + std::swap(a, b); + m_assembler.add<64>(dest, a, b); + } + + void add64(RegisterID src, RegisterID dest) + { + if (src == ARM64Registers::sp) + m_assembler.add<64>(dest, src, dest); + else + m_assembler.add<64>(dest, dest, src); + } + + void add64(TrustedImm32 imm, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.add<64>(dest, dest, UInt12(imm.m_value)); + return; + } + if (isUInt12(-imm.m_value)) { + m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value)); + return; + } + + signExtend32ToPtr(imm, 
getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dest, dest, dataTempRegister); + } + + void add64(TrustedImm64 imm, RegisterID dest) + { + intptr_t immediate = imm.m_value; + + if (isUInt12(immediate)) { + m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate))); + return; + } + if (isUInt12(-immediate)) { + m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate))); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dest, dest, dataTempRegister); + } + + void add64(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.add<64>(dest, src, UInt12(imm.m_value)); + return; + } + if (isUInt12(-imm.m_value)) { + m_assembler.sub<64>(dest, src, UInt12(-imm.m_value)); + return; + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dest, src, dataTempRegister); + } + + void add64(TrustedImm32 imm, Address address) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) + m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + else if (isUInt12(-imm.m_value)) + m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + else { + signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister); + } + + store64(dataTempRegister, address); + } + + void add64(TrustedImm32 imm, AbsoluteAddress address) + { + load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) { + m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + store64(dataTempRegister, address.m_ptr); + return; + } + + if (isUInt12(-imm.m_value)) { + m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + store64(dataTempRegister, address.m_ptr); + return; + } + + signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister); + store64(dataTempRegister, address.m_ptr); + } + + void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest) + { + add64(imm, srcDest); + } + + void add64(Address src, RegisterID dest) + { + load64(src, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dest, dest, dataTempRegister); + } + + void add64(AbsoluteAddress src, RegisterID dest) + { + load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dest, dest, dataTempRegister); + } + + void and32(RegisterID src, RegisterID dest) + { + and32(dest, src, dest); + } + + void and32(RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.and_<32>(dest, op1, op2); + } + + void and32(TrustedImm32 imm, RegisterID dest) + { + and32(imm, dest, dest); + } + + void and32(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value); + + if (logicalImm.isValid()) { + m_assembler.and_<32>(dest, src, logicalImm); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.and_<32>(dest, src, dataTempRegister); + } + + void and32(Address src, RegisterID dest) + { + load32(src, dataTempRegister); + and32(dataTempRegister, dest); + } + + void and64(RegisterID src1, RegisterID src2, RegisterID dest) + { + m_assembler.and_<64>(dest, src1, src2); + } + + void and64(TrustedImm64 imm, RegisterID src, RegisterID dest) + { + 
LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value); + + if (logicalImm.isValid()) { + m_assembler.and_<64>(dest, src, logicalImm); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.and_<64>(dest, src, dataTempRegister); + } + + void and64(RegisterID src, RegisterID dest) + { + m_assembler.and_<64>(dest, dest, src); + } + + void and64(TrustedImm32 imm, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value))); + + if (logicalImm.isValid()) { + m_assembler.and_<64>(dest, dest, logicalImm); + return; + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.and_<64>(dest, dest, dataTempRegister); + } + + void and64(TrustedImmPtr imm, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create64(reinterpret_cast<uint64_t>(imm.m_value)); + + if (logicalImm.isValid()) { + m_assembler.and_<64>(dest, dest, logicalImm); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.and_<64>(dest, dest, dataTempRegister); + } + + void countLeadingZeros32(RegisterID src, RegisterID dest) + { + m_assembler.clz<32>(dest, src); + } + + void countLeadingZeros64(RegisterID src, RegisterID dest) + { + m_assembler.clz<64>(dest, src); + } + + void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + m_assembler.lsl<32>(dest, src, shiftAmount); + } + + void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f); + } + + void lshift32(RegisterID shiftAmount, RegisterID dest) + { + lshift32(dest, shiftAmount, dest); + } + + void lshift32(TrustedImm32 imm, RegisterID dest) + { + lshift32(dest, imm, dest); + } + + void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + m_assembler.lsl<64>(dest, src, shiftAmount); + } + + void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f); + } + + void lshift64(RegisterID shiftAmount, RegisterID dest) + { + lshift64(dest, shiftAmount, dest); + } + + void lshift64(TrustedImm32 imm, RegisterID dest) + { + lshift64(dest, imm, dest); + } + + void mul32(RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.mul<32>(dest, left, right); + } + + void mul32(RegisterID src, RegisterID dest) + { + m_assembler.mul<32>(dest, dest, src); + } + + void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.mul<32>(dest, src, dataTempRegister); + } + + void mul64(RegisterID src, RegisterID dest) + { + m_assembler.mul<64>(dest, dest, src); + } + + void mul64(RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.mul<64>(dest, left, right); + } + + void multiplyAdd32(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest) + { + m_assembler.madd<32>(dest, mulLeft, mulRight, summand); + } + + void multiplySub32(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest) + { + m_assembler.msub<32>(dest, mulLeft, mulRight, minuend); + } + + void multiplyNeg32(RegisterID mulLeft, RegisterID mulRight, RegisterID dest) + { + m_assembler.msub<32>(dest, mulLeft, mulRight, ARM64Registers::zr); + } + + void multiplyAdd64(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest) + { + m_assembler.madd<64>(dest, mulLeft, mulRight, summand); + } + + void 
multiplySub64(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest) + { + m_assembler.msub<64>(dest, mulLeft, mulRight, minuend); + } + + void multiplyNeg64(RegisterID mulLeft, RegisterID mulRight, RegisterID dest) + { + m_assembler.msub<64>(dest, mulLeft, mulRight, ARM64Registers::zr); + } + + void div32(RegisterID dividend, RegisterID divisor, RegisterID dest) + { + m_assembler.sdiv<32>(dest, dividend, divisor); + } + + void div64(RegisterID dividend, RegisterID divisor, RegisterID dest) + { + m_assembler.sdiv<64>(dest, dividend, divisor); + } + + void neg32(RegisterID dest) + { + m_assembler.neg<32>(dest, dest); + } + + void neg64(RegisterID dest) + { + m_assembler.neg<64>(dest, dest); + } + + void or32(RegisterID src, RegisterID dest) + { + or32(dest, src, dest); + } + + void or32(RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.orr<32>(dest, op1, op2); + } + + void or32(TrustedImm32 imm, RegisterID dest) + { + or32(imm, dest, dest); + } + + void or32(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value); + + if (logicalImm.isValid()) { + m_assembler.orr<32>(dest, src, logicalImm); + return; + } + + ASSERT(src != dataTempRegister); + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.orr<32>(dest, src, dataTempRegister); + } + + void or32(RegisterID src, AbsoluteAddress address) + { + load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.orr<32>(dataTempRegister, dataTempRegister, src); + store32(dataTempRegister, address.m_ptr); + } + + void or32(TrustedImm32 imm, AbsoluteAddress address) + { + LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value); + if (logicalImm.isValid()) { + load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.orr<32>(dataTempRegister, dataTempRegister, logicalImm); + store32(dataTempRegister, address.m_ptr); + } else { + load32(address.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate()); + or32(imm, memoryTempRegister, getCachedDataTempRegisterIDAndInvalidate()); + store32(dataTempRegister, address.m_ptr); + } + } + + void or32(TrustedImm32 imm, Address address) + { + load32(address, getCachedDataTempRegisterIDAndInvalidate()); + or32(imm, dataTempRegister, dataTempRegister); + store32(dataTempRegister, address); + } + + void or64(RegisterID src, RegisterID dest) + { + or64(dest, src, dest); + } + + void or64(RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.orr<64>(dest, op1, op2); + } + + void or64(TrustedImm32 imm, RegisterID dest) + { + or64(imm, dest, dest); + } + + void or64(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value))); + + if (logicalImm.isValid()) { + m_assembler.orr<64>(dest, src, logicalImm); + return; + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.orr<64>(dest, src, dataTempRegister); + } + + void or64(TrustedImm64 imm, RegisterID src, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value); + + if (logicalImm.isValid()) { + m_assembler.orr<64>(dest, src, logicalImm); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.orr<64>(dest, src, dataTempRegister); + } + + void or64(TrustedImm64 imm, RegisterID dest) + { + LogicalImmediate logicalImm = 
LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value))); + + if (logicalImm.isValid()) { + m_assembler.orr<64>(dest, dest, logicalImm); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.orr<64>(dest, dest, dataTempRegister); + } + + void rotateRight64(TrustedImm32 imm, RegisterID srcDst) + { + m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63); + } + + void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + m_assembler.asr<32>(dest, src, shiftAmount); + } + + void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.asr<32>(dest, src, imm.m_value & 0x1f); + } + + void rshift32(RegisterID shiftAmount, RegisterID dest) + { + rshift32(dest, shiftAmount, dest); + } + + void rshift32(TrustedImm32 imm, RegisterID dest) + { + rshift32(dest, imm, dest); + } + + void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + m_assembler.asr<64>(dest, src, shiftAmount); + } + + void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.asr<64>(dest, src, imm.m_value & 0x3f); + } + + void rshift64(RegisterID shiftAmount, RegisterID dest) + { + rshift64(dest, shiftAmount, dest); + } + + void rshift64(TrustedImm32 imm, RegisterID dest) + { + rshift64(dest, imm, dest); + } + + void sub32(RegisterID src, RegisterID dest) + { + m_assembler.sub<32>(dest, dest, src); + } + + void sub32(TrustedImm32 imm, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.sub<32>(dest, dest, UInt12(imm.m_value)); + return; + } + if (isUInt12(-imm.m_value)) { + m_assembler.add<32>(dest, dest, UInt12(-imm.m_value)); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.sub<32>(dest, dest, dataTempRegister); + } + + void sub32(TrustedImm32 imm, Address address) + { + load32(address, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) + m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + else if (isUInt12(-imm.m_value)) + m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + else { + move(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister); + } + + store32(dataTempRegister, address); + } + + void sub32(TrustedImm32 imm, AbsoluteAddress address) + { + load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) { + m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + store32(dataTempRegister, address.m_ptr); + return; + } + + if (isUInt12(-imm.m_value)) { + m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + store32(dataTempRegister, address.m_ptr); + return; + } + + move(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister); + store32(dataTempRegister, address.m_ptr); + } + + void sub32(Address src, RegisterID dest) + { + load32(src, getCachedDataTempRegisterIDAndInvalidate()); + sub32(dataTempRegister, dest); + } + + void sub64(RegisterID src, RegisterID dest) + { + m_assembler.sub<64>(dest, dest, src); + } + + void sub64(TrustedImm32 imm, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.sub<64>(dest, dest, UInt12(imm.m_value)); + return; + } + if (isUInt12(-imm.m_value)) { + m_assembler.add<64>(dest, dest, UInt12(-imm.m_value)); + return; + } + + signExtend32ToPtr(imm, 
getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.sub<64>(dest, dest, dataTempRegister); + } + + void sub64(TrustedImm64 imm, RegisterID dest) + { + intptr_t immediate = imm.m_value; + + if (isUInt12(immediate)) { + m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate))); + return; + } + if (isUInt12(-immediate)) { + m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate))); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.sub<64>(dest, dest, dataTempRegister); + } + + void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + m_assembler.lsr<32>(dest, src, shiftAmount); + } + + void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f); + } + + void urshift32(RegisterID shiftAmount, RegisterID dest) + { + urshift32(dest, shiftAmount, dest); + } + + void urshift32(TrustedImm32 imm, RegisterID dest) + { + urshift32(dest, imm, dest); + } + + void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + m_assembler.lsr<64>(dest, src, shiftAmount); + } + + void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f); + } + + void urshift64(RegisterID shiftAmount, RegisterID dest) + { + urshift64(dest, shiftAmount, dest); + } + + void urshift64(TrustedImm32 imm, RegisterID dest) + { + urshift64(dest, imm, dest); + } + + void xor32(RegisterID src, RegisterID dest) + { + xor32(dest, src, dest); + } + + void xor32(RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.eor<32>(dest, op1, op2); + } + + void xor32(TrustedImm32 imm, RegisterID dest) + { + xor32(imm, dest, dest); + } + + void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + if (imm.m_value == -1) + m_assembler.mvn<32>(dest, src); + else { + LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value); + + if (logicalImm.isValid()) { + m_assembler.eor<32>(dest, src, logicalImm); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.eor<32>(dest, src, dataTempRegister); + } + } + + void xor64(RegisterID src, Address address) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.eor<64>(dataTempRegister, dataTempRegister, src); + store64(dataTempRegister, address); + } + + void xor64(RegisterID src, RegisterID dest) + { + xor64(dest, src, dest); + } + + void xor64(RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.eor<64>(dest, op1, op2); + } + + void xor64(TrustedImm32 imm, RegisterID dest) + { + xor64(imm, dest, dest); + } + + void xor64(TrustedImm64 imm, RegisterID src, RegisterID dest) + { + if (imm.m_value == -1) + m_assembler.mvn<64>(dest, src); + else { + LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value); + + if (logicalImm.isValid()) { + m_assembler.eor<64>(dest, src, logicalImm); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.eor<64>(dest, src, dataTempRegister); + } + } + + void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + if (imm.m_value == -1) + m_assembler.mvn<64>(dest, src); + else { + LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value))); + + if (logicalImm.isValid()) { + m_assembler.eor<64>(dest, src, logicalImm); + return; + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.eor<64>(dest, src, 
dataTempRegister); + } + } + + void not32(RegisterID src, RegisterID dest) + { + m_assembler.mvn<32>(dest, src); + } + + void not64(RegisterID src, RegisterID dest) + { + m_assembler.mvn<64>(dest, src); + } + + // Memory access operations: + + void load64(ImplicitAddress address, RegisterID dest) + { + if (tryLoadWithOffset<64>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldr<64>(dest, address.base, memoryTempRegister); + } + + void load64(BaseIndex address, RegisterID dest) + { + if (!address.offset && (!address.scale || address.scale == 3)) { + m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldr<64>(dest, address.base, memoryTempRegister); + } + + void load64(const void* address, RegisterID dest) + { + load<64>(address, dest); + } + + DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest) + { + DataLabel32 label(this); + signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0); + return label; + } + + DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest) + { + ASSERT(isCompactPtrAlignedAddressOffset(address.offset)); + DataLabelCompact label(this); + m_assembler.ldr<64>(dest, address.base, address.offset); + return label; + } + + void abortWithReason(AbortReason reason) + { + move(TrustedImm32(reason), dataTempRegister); + breakpoint(); + } + + void abortWithReason(AbortReason reason, intptr_t misc) + { + move(TrustedImm64(misc), memoryTempRegister); + abortWithReason(reason); + } + + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) + { + ConvertibleLoadLabel result(this); + ASSERT(!(address.offset & ~0xff8)); + m_assembler.ldr<64>(dest, address.base, address.offset); + return result; + } + + void load32(ImplicitAddress address, RegisterID dest) + { + if (tryLoadWithOffset<32>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldr<32>(dest, address.base, memoryTempRegister); + } + + void load32(BaseIndex address, RegisterID dest) + { + if (!address.offset && (!address.scale || address.scale == 2)) { + m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldr<32>(dest, address.base, memoryTempRegister); + } + + void load32(const void* address, RegisterID dest) + { + load<32>(address, dest); + } + + DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest) + { + DataLabel32 label(this); + signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0); + return label; + } + + DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest) + { + 
ASSERT(isCompactPtrAlignedAddressOffset(address.offset)); + DataLabelCompact label(this); + m_assembler.ldr<32>(dest, address.base, address.offset); + return label; + } + + void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest) + { + load32(address, dest); + } + + void load16(ImplicitAddress address, RegisterID dest) + { + if (tryLoadWithOffset<16>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldrh(dest, address.base, memoryTempRegister); + } + + void load16(BaseIndex address, RegisterID dest) + { + if (!address.offset && (!address.scale || address.scale == 1)) { + m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldrh(dest, address.base, memoryTempRegister); + } + + void load16Unaligned(BaseIndex address, RegisterID dest) + { + load16(address, dest); + } + + void load16SignedExtendTo32(ImplicitAddress address, RegisterID dest) + { + if (tryLoadSignedWithOffset<16>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister); + } + + void load16SignedExtendTo32(BaseIndex address, RegisterID dest) + { + if (!address.offset && (!address.scale || address.scale == 1)) { + m_assembler.ldrsh<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister); + } + + void zeroExtend16To32(RegisterID src, RegisterID dest) + { + m_assembler.uxth<64>(dest, src); + } + + void signExtend16To32(RegisterID src, RegisterID dest) + { + m_assembler.sxth<64>(dest, src); + } + + void load8(ImplicitAddress address, RegisterID dest) + { + if (tryLoadWithOffset<8>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldrb(dest, address.base, memoryTempRegister); + } + + void load8(BaseIndex address, RegisterID dest) + { + if (!address.offset && !address.scale) { + m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldrb(dest, address.base, memoryTempRegister); + } + + void load8(const void* address, RegisterID dest) + { + moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister()); + m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr); + if (dest == memoryTempRegister) + cachedMemoryTempRegister().invalidate(); + } + + void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest) + { + if (tryLoadSignedWithOffset<8>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + 
m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister); + } + + void load8SignedExtendTo32(BaseIndex address, RegisterID dest) + { + if (!address.offset && !address.scale) { + m_assembler.ldrsb<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister); + } + + void zeroExtend8To32(RegisterID src, RegisterID dest) + { + m_assembler.uxtb<64>(dest, src); + } + + void signExtend8To32(RegisterID src, RegisterID dest) + { + m_assembler.sxtb<64>(dest, src); + } + + void store64(RegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<64>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<64>(src, address.base, memoryTempRegister); + } + + void store64(RegisterID src, BaseIndex address) + { + if (!address.offset && (!address.scale || address.scale == 3)) { + m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.str<64>(src, address.base, memoryTempRegister); + } + + void store64(RegisterID src, const void* address) + { + store<64>(src, address); + } + + void store64(TrustedImm32 imm, ImplicitAddress address) + { + store64(TrustedImm64(imm.m_value), address); + } + + void store64(TrustedImm64 imm, ImplicitAddress address) + { + if (!imm.m_value) { + store64(ARM64Registers::zr, address); + return; + } + + moveToCachedReg(imm, dataMemoryTempRegister()); + store64(dataTempRegister, address); + } + + void store64(TrustedImm64 imm, BaseIndex address) + { + if (!imm.m_value) { + store64(ARM64Registers::zr, address); + return; + } + + moveToCachedReg(imm, dataMemoryTempRegister()); + store64(dataTempRegister, address); + } + + DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address) + { + DataLabel32 label(this); + signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0); + return label; + } + + void store32(RegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<32>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<32>(src, address.base, memoryTempRegister); + } + + void store32(RegisterID src, BaseIndex address) + { + if (!address.offset && (!address.scale || address.scale == 2)) { + m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.str<32>(src, address.base, memoryTempRegister); + } + + void store32(RegisterID src, const void* address) + { + store<32>(src, address); + } + + void store32(TrustedImm32 imm, ImplicitAddress address) + { + if 
(!imm.m_value) { + store32(ARM64Registers::zr, address); + return; + } + + moveToCachedReg(imm, dataMemoryTempRegister()); + store32(dataTempRegister, address); + } + + void store32(TrustedImm32 imm, BaseIndex address) + { + if (!imm.m_value) { + store32(ARM64Registers::zr, address); + return; + } + + moveToCachedReg(imm, dataMemoryTempRegister()); + store32(dataTempRegister, address); + } + + void store32(TrustedImm32 imm, const void* address) + { + if (!imm.m_value) { + store32(ARM64Registers::zr, address); + return; + } + + moveToCachedReg(imm, dataMemoryTempRegister()); + store32(dataTempRegister, address); + } + + void storeZero32(ImplicitAddress address) + { + store32(ARM64Registers::zr, address); + } + + void storeZero32(BaseIndex address) + { + store32(ARM64Registers::zr, address); + } + + DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address) + { + DataLabel32 label(this); + signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0); + return label; + } + + void store16(RegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<16>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<16>(src, address.base, memoryTempRegister); + } + + void store16(RegisterID src, BaseIndex address) + { + if (!address.offset && (!address.scale || address.scale == 1)) { + m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.strh(src, address.base, memoryTempRegister); + } + + void store8(RegisterID src, BaseIndex address) + { + if (!address.offset && !address.scale) { + m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.strb(src, address.base, memoryTempRegister); + } + + void store8(RegisterID src, void* address) + { + move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.strb(src, memoryTempRegister, 0); + } + + void store8(RegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<8>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<8>(src, address.base, memoryTempRegister); + } + + void store8(TrustedImm32 imm, void* address) + { + if (!imm.m_value) { + store8(ARM64Registers::zr, address); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + store8(dataTempRegister, address); + } + + void store8(TrustedImm32 imm, ImplicitAddress address) + { + if (!imm.m_value) { + store8(ARM64Registers::zr, address); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + store8(dataTempRegister, address); + } + + // Floating-point operations: + + static bool supportsFloatingPoint() { return true; } + static bool supportsFloatingPointTruncate() { return true; } + static bool supportsFloatingPointSqrt() { 
return true; } + static bool supportsFloatingPointAbs() { return true; } + static bool supportsFloatingPointRounding() { return true; } + + enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful }; + + void absDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fabs<64>(dest, src); + } + + void absFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fabs<32>(dest, src); + } + + void addDouble(FPRegisterID src, FPRegisterID dest) + { + addDouble(dest, src, dest); + } + + void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fadd<64>(dest, op1, op2); + } + + void addDouble(Address src, FPRegisterID dest) + { + loadDouble(src, fpTempRegister); + addDouble(fpTempRegister, dest); + } + + void addDouble(AbsoluteAddress address, FPRegisterID dest) + { + loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister); + addDouble(fpTempRegister, dest); + } + + void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fadd<32>(dest, op1, op2); + } + + void ceilDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.frintp<64>(dest, src); + } + + void ceilFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.frintp<32>(dest, src); + } + + void floorDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.frintm<64>(dest, src); + } + + void floorFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.frintm<32>(dest, src); + } + + // Convert 'src' to an integer, and place the result in 'dest'. + // If the result is not representable as a 32-bit value, branch. + // May also branch for some values that are representable in 32 bits + // (specifically, in this case, 0). + void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true) + { + m_assembler.fcvtns<32, 64>(dest, src); + + // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump. + m_assembler.scvtf<64, 32>(fpTempRegister, dest); + failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister)); + + // Test for negative zero. + if (negZeroCheck) { + Jump valueIsNonZero = branchTest32(NonZero, dest); + RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate(); + m_assembler.fmov<64>(scratch, src); + failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero)); + valueIsNonZero.link(this); + } + } + + Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right) + { + m_assembler.fcmp<64>(left, right); + return jumpAfterFloatingPointCompare(cond); + } + + Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right) + { + m_assembler.fcmp<32>(left, right); + return jumpAfterFloatingPointCompare(cond); + } + + Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID) + { + m_assembler.fcmp_0<64>(reg); + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + Jump result = makeBranch(ARM64Assembler::ConditionNE); + unordered.link(this); + return result; + } + + Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID) + { + m_assembler.fcmp_0<64>(reg); + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + Jump notEqual = makeBranch(ARM64Assembler::ConditionNE); + unordered.link(this); + // We get here if either unordered or equal. 
+ Jump result = jump(); + notEqual.link(this); + return result; + } + + Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed) + { + // Truncate to a 64-bit integer in dataTempRegister, copy the low 32 bits to dest. + m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src); + zeroExtend32ToPtr(dataTempRegister, dest); + // Check that the low 32 bits, sign-extended, are equal to the full value. + m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0); + return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual)); + } + + void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fcvt<32, 64>(dest, src); + } + + void convertFloatToDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fcvt<64, 32>(dest, src); + } + + void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest) + { + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + convertInt32ToDouble(dataTempRegister, dest); + } + + void convertInt32ToDouble(RegisterID src, FPRegisterID dest) + { + m_assembler.scvtf<64, 32>(dest, src); + } + + void convertInt32ToDouble(Address address, FPRegisterID dest) + { + load32(address, getCachedDataTempRegisterIDAndInvalidate()); + convertInt32ToDouble(dataTempRegister, dest); + } + + void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest) + { + load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + convertInt32ToDouble(dataTempRegister, dest); + } + + void convertInt64ToDouble(RegisterID src, FPRegisterID dest) + { + m_assembler.scvtf<64, 64>(dest, src); + } + + void divDouble(FPRegisterID src, FPRegisterID dest) + { + divDouble(dest, src, dest); + } + + void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fdiv<64>(dest, op1, op2); + } + + void divFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fdiv<32>(dest, op1, op2); + } + + void loadDouble(ImplicitAddress address, FPRegisterID dest) + { + if (tryLoadWithOffset<64>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldr<64>(dest, address.base, memoryTempRegister); + } + + void loadDouble(BaseIndex address, FPRegisterID dest) + { + if (!address.offset && (!address.scale || address.scale == 3)) { + m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldr<64>(dest, address.base, memoryTempRegister); + } + + void loadDouble(TrustedImmPtr address, FPRegisterID dest) + { + moveToCachedReg(address, cachedMemoryTempRegister()); + m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr); + } + + void loadFloat(ImplicitAddress address, FPRegisterID dest) + { + if (tryLoadWithOffset<32>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldr<32>(dest, address.base, memoryTempRegister); + } + + void loadFloat(BaseIndex address, FPRegisterID dest) + { + if (!address.offset && (!address.scale || address.scale == 2)) { + m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, 
address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldr<32>(dest, address.base, memoryTempRegister); + } + + void moveDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fmov<64>(dest, src); + } + + void moveZeroToDouble(FPRegisterID reg) + { + m_assembler.fmov<64>(reg, ARM64Registers::zr); + } + + void moveDoubleTo64(FPRegisterID src, RegisterID dest) + { + m_assembler.fmov<64>(dest, src); + } + + void moveFloatTo32(FPRegisterID src, RegisterID dest) + { + m_assembler.fmov<32>(dest, src); + } + + void move64ToDouble(RegisterID src, FPRegisterID dest) + { + m_assembler.fmov<64>(dest, src); + } + + void move32ToFloat(RegisterID src, FPRegisterID dest) + { + m_assembler.fmov<32>(dest, src); + } + + void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.fcmp<64>(left, right); + moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest); + } + + void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.fcmp<64>(left, right); + moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest); + } + + void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.fcmp<32>(left, right); + moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest); + } + + void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.fcmp<32>(left, right); + moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest); + } + + template<int datasize> + void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID src, RegisterID dest) + { + if (cond == DoubleNotEqual) { + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionNE); + unordered.link(this); + return; + } + if (cond == DoubleEqualOrUnordered) { + // If the compare is unordered, src is copied to dest and the + // next csel has all arguments equal to src. + // If the compare is ordered, dest is unchanged and EQ decides + // what value to set. + m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionVS); + m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionEQ); + return; + } + m_assembler.csel<datasize>(dest, src, dest, ARM64Condition(cond)); + } + + template<int datasize> + void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + if (cond == DoubleNotEqual) { + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE); + unordered.link(this); + return; + } + if (cond == DoubleEqualOrUnordered) { + // If the compare is unordered, thenCase is copied to elseCase and the + // next csel has all arguments equal to thenCase. + // If the compare is ordered, dest is unchanged and EQ decides + // what value to set. 
+ m_assembler.csel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS); + m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ); + return; + } + m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + template<int datasize> + void moveDoubleConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + if (cond == DoubleNotEqual) { + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE); + unordered.link(this); + return; + } + if (cond == DoubleEqualOrUnordered) { + // If the compare is unordered, thenCase is copied to elseCase and the + // next csel has all arguments equal to thenCase. + // If the compare is ordered, dest is unchanged and EQ decides + // what value to set. + m_assembler.fcsel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS); + m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ); + return; + } + m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + m_assembler.fcmp<64>(left, right); + moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest); + } + + void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + m_assembler.fcmp<32>(left, right); + moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest); + } + + void mulDouble(FPRegisterID src, FPRegisterID dest) + { + mulDouble(dest, src, dest); + } + + void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fmul<64>(dest, op1, op2); + } + + void mulDouble(Address src, FPRegisterID dest) + { + loadDouble(src, fpTempRegister); + mulDouble(fpTempRegister, dest); + } + + void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fmul<32>(dest, op1, op2); + } + + void andDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.vand<64>(dest, op1, op2); + } + + void andFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + andDouble(op1, op2, dest); + } + + void negateDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fneg<64>(dest, src); + } + + void sqrtDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fsqrt<64>(dest, src); + } + + void sqrtFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fsqrt<32>(dest, src); + } + + void storeDouble(FPRegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<64>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<64>(src, address.base, memoryTempRegister); + } + + void storeDouble(FPRegisterID src, TrustedImmPtr address) + { + moveToCachedReg(address, cachedMemoryTempRegister()); + m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr); + } + + void storeDouble(FPRegisterID src, BaseIndex address) + { + if (!address.offset && (!address.scale || address.scale == 3)) { + m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + 
signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.str<64>(src, address.base, memoryTempRegister); + } + + void storeFloat(FPRegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<32>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<32>(src, address.base, memoryTempRegister); + } + + void storeFloat(FPRegisterID src, BaseIndex address) + { + if (!address.offset && (!address.scale || address.scale == 2)) { + m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.str<32>(src, address.base, memoryTempRegister); + } + + void subDouble(FPRegisterID src, FPRegisterID dest) + { + subDouble(dest, src, dest); + } + + void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fsub<64>(dest, op1, op2); + } + + void subDouble(Address src, FPRegisterID dest) + { + loadDouble(src, fpTempRegister); + subDouble(fpTempRegister, dest); + } + + void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fsub<32>(dest, op1, op2); + } + + // Result is undefined if the value is outside of the integer range. + void truncateDoubleToInt32(FPRegisterID src, RegisterID dest) + { + m_assembler.fcvtzs<32, 64>(dest, src); + } + + void truncateDoubleToUint32(FPRegisterID src, RegisterID dest) + { + m_assembler.fcvtzu<32, 64>(dest, src); + } + + + // Stack manipulation operations: + // + // The ABI is assumed to provide a stack abstraction to memory, + // containing machine word sized units of data. Push and pop + // operations add and remove a single register sized unit of data + // to or from the stack. These operations are not supported on + // ARM64. Peek and poke operations read or write values on the + // stack, without moving the current stack position. Additionally, + // there are popToRestore and pushToSave operations, which are + // designed just for quick-and-dirty saving and restoring of + // temporary values. These operations don't claim to have any + // ABI compatibility. 
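    // Illustrative usage sketch: the pair and save/restore helpers below
    // always move sp in whole 16-byte units, keeping the stack at the 16-byte
    // alignment the ARM64 ABI expects, while the plain push()/pop() overloads
    // deliberately CRASH(). Assuming a MacroAssemblerARM64 instance named jit
    // (registers are arbitrary placeholders), a caller might write:
    //
    //     jit.pushPair(ARM64Registers::x19, ARM64Registers::x20); // stp x19, x20, [sp, #-16]!
    //     jit.pushToSave(ARM64Registers::x21);                    // str x21, [sp, #-16]!
    //     // ... code that clobbers x19-x21 ...
    //     jit.popToRestore(ARM64Registers::x21);                  // ldr x21, [sp], #16
    //     jit.popPair(ARM64Registers::x19, ARM64Registers::x20);  // ldp x19, x20, [sp], #16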
+ + void pop(RegisterID) NO_RETURN_DUE_TO_CRASH + { + CRASH(); + } + + void push(RegisterID) NO_RETURN_DUE_TO_CRASH + { + CRASH(); + } + + void push(Address) NO_RETURN_DUE_TO_CRASH + { + CRASH(); + } + + void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH + { + CRASH(); + } + + void popPair(RegisterID dest1, RegisterID dest2) + { + m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16)); + } + + void pushPair(RegisterID src1, RegisterID src2) + { + m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16)); + } + + void popToRestore(RegisterID dest) + { + m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16)); + } + + void pushToSave(RegisterID src) + { + m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16)); + } + + void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm) + { + RegisterID reg = dataTempRegister; + pushPair(reg, reg); + move(imm, reg); + store64(reg, stackPointerRegister); + load64(Address(stackPointerRegister, 8), reg); + } + + void pushToSave(Address address) + { + load32(address, getCachedDataTempRegisterIDAndInvalidate()); + pushToSave(dataTempRegister); + } + + void pushToSave(TrustedImm32 imm) + { + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + pushToSave(dataTempRegister); + } + + void popToRestore(FPRegisterID dest) + { + loadDouble(stackPointerRegister, dest); + add64(TrustedImm32(16), stackPointerRegister); + } + + void pushToSave(FPRegisterID src) + { + sub64(TrustedImm32(16), stackPointerRegister); + storeDouble(src, stackPointerRegister); + } + + static ptrdiff_t pushToSaveByteOffset() { return 16; } + + // Register move operations: + + void move(RegisterID src, RegisterID dest) + { + if (src != dest) + m_assembler.mov<64>(dest, src); + } + + void move(TrustedImm32 imm, RegisterID dest) + { + moveInternal<TrustedImm32, int32_t>(imm, dest); + } + + void move(TrustedImmPtr imm, RegisterID dest) + { + moveInternal<TrustedImmPtr, intptr_t>(imm, dest); + } + + void move(TrustedImm64 imm, RegisterID dest) + { + moveInternal<TrustedImm64, int64_t>(imm, dest); + } + + void swap(RegisterID reg1, RegisterID reg2) + { + move(reg1, getCachedDataTempRegisterIDAndInvalidate()); + move(reg2, reg1); + move(dataTempRegister, reg2); + } + + void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest) + { + move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest); + } + + void signExtend32ToPtr(RegisterID src, RegisterID dest) + { + m_assembler.sxtw(dest, src); + } + + void zeroExtend32ToPtr(RegisterID src, RegisterID dest) + { + m_assembler.uxtw(dest, src); + } + + void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.cmp<32>(left, right); + m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond)); + } + + void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.cmp<32>(left, right); + m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest); + return; + } + } + + if (isUInt12(right.m_value)) + m_assembler.cmp<32>(left, UInt12(right.m_value)); + else if 
(isUInt12(-right.m_value)) + m_assembler.cmn<32>(left, UInt12(-right.m_value)); + else { + moveToCachedReg(right, dataMemoryTempRegister()); + m_assembler.cmp<32>(left, dataTempRegister); + } + m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.cmp<64>(left, right); + m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond)); + } + + void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.cmp<64>(left, right); + m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest); + return; + } + } + + if (isUInt12(right.m_value)) + m_assembler.cmp<64>(left, UInt12(right.m_value)); + else if (isUInt12(-right.m_value)) + m_assembler.cmn<64>(left, UInt12(-right.m_value)); + else { + moveToCachedReg(right, dataMemoryTempRegister()); + m_assembler.cmp<64>(left, dataTempRegister); + } + m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest) + { + m_assembler.tst<32>(testReg, mask); + m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond)); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.tst<32>(left, right); + m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + test32(left, right); + m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest) + { + m_assembler.tst<64>(testReg, mask); + m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond)); + } + + void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.tst<64>(left, right); + m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + m_assembler.cmp<32>(left, right); + m_assembler.fcsel<32>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + moveDoubleConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest); + return; + } + } + + if (isUInt12(right.m_value)) + m_assembler.cmp<32>(left, UInt12(right.m_value)); + else if (isUInt12(-right.m_value)) + m_assembler.cmn<32>(left, UInt12(-right.m_value)); + else { + 
moveToCachedReg(right, dataMemoryTempRegister()); + m_assembler.cmp<32>(left, dataTempRegister); + } + m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + m_assembler.cmp<64>(left, right); + m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + moveDoubleConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest); + return; + } + } + + if (isUInt12(right.m_value)) + m_assembler.cmp<64>(left, UInt12(right.m_value)); + else if (isUInt12(-right.m_value)) + m_assembler.cmn<64>(left, UInt12(-right.m_value)); + else { + moveToCachedReg(right, dataMemoryTempRegister()); + m_assembler.cmp<64>(left, dataTempRegister); + } + m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + m_assembler.tst<32>(left, right); + m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + test32(left, right); + m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + m_assembler.tst<64>(left, right); + m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + // Forwards / external control flow operations: + // + // This set of jump and conditional branch operations return a Jump + // object which may linked at a later point, allow forwards jump, + // or jumps that will require external linkage (after the code has been + // relocated). + // + // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge + // respecitvely, for unsigned comparisons the names b, a, be, and ae are + // used (representing the names 'below' and 'above'). + // + // Operands to the comparision are provided in the expected order, e.g. + // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when + // treated as a signed 32bit value, is less than or equal to 5. + // + // jz and jnz test whether the first operand is equal to zero, and take + // an optional second operand of a mask under which to perform the test. 
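    // Illustrative sketch of the forward-branch pattern described above,
    // written as it would appear inside JSC where these names are in scope
    // (the assembler instance jit and the constant 5 are placeholders):
    //
    //     // Take the slow path when x0, as a signed 32-bit value, is <= 5.
    //     Jump slowCase = jit.branch32(MacroAssembler::LessThanOrEqual, ARM64Registers::x0, TrustedImm32(5));
    //     // ... fast path ...
    //     Jump done = jit.jump();
    //     slowCase.link(&jit); // bind the branch here: slow path follows
    //     // ... slow path ...
    //     done.link(&jit);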
+ + Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right) + { + m_assembler.cmp<32>(left, right); + return Jump(makeBranch(cond)); + } + + Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) + return branchTest32(*resultCondition, left, left); + } + + if (isUInt12(right.m_value)) + m_assembler.cmp<32>(left, UInt12(right.m_value)); + else if (isUInt12(-right.m_value)) + m_assembler.cmn<32>(left, UInt12(-right.m_value)); + else { + moveToCachedReg(right, dataMemoryTempRegister()); + m_assembler.cmp<32>(left, dataTempRegister); + } + return Jump(makeBranch(cond)); + } + + Jump branch32(RelationalCondition cond, RegisterID left, Address right) + { + load32(right, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, left, memoryTempRegister); + } + + Jump branch32(RelationalCondition cond, Address left, RegisterID right) + { + load32(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right); + } + + Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right) + { + load32(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right); + } + + Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right) + { + load32(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right); + } + + Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right) + { + load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + return branch32(cond, dataTempRegister, right); + } + + Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) + { + load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right); + } + + Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right) + { + if (right == ARM64Registers::sp) { + if (cond == Equal && left != ARM64Registers::sp) { + // CMP can only use SP for the left argument, since we are testing for equality, the order + // does not matter here. 
+ std::swap(left, right); + } else { + move(right, getCachedDataTempRegisterIDAndInvalidate()); + right = dataTempRegister; + } + } + m_assembler.cmp<64>(left, right); + return Jump(makeBranch(cond)); + } + + Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) + return branchTest64(*resultCondition, left, left); + } + + if (isUInt12(right.m_value)) + m_assembler.cmp<64>(left, UInt12(right.m_value)); + else if (isUInt12(-right.m_value)) + m_assembler.cmn<64>(left, UInt12(-right.m_value)); + else { + moveToCachedReg(right, dataMemoryTempRegister()); + m_assembler.cmp<64>(left, dataTempRegister); + } + return Jump(makeBranch(cond)); + } + + Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right) + { + intptr_t immediate = right.m_value; + if (!immediate) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) + return branchTest64(*resultCondition, left, left); + } + + if (isUInt12(immediate)) + m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate))); + else if (isUInt12(-immediate)) + m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate))); + else { + moveToCachedReg(right, dataMemoryTempRegister()); + m_assembler.cmp<64>(left, dataTempRegister); + } + return Jump(makeBranch(cond)); + } + + Jump branch64(RelationalCondition cond, RegisterID left, Address right) + { + load64(right, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch64(cond, left, memoryTempRegister); + } + + Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right) + { + load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + return branch64(cond, dataTempRegister, right); + } + + Jump branch64(RelationalCondition cond, Address left, RegisterID right) + { + load64(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch64(cond, memoryTempRegister, right); + } + + Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right) + { + load64(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch64(cond, memoryTempRegister, right); + } + + Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right) + { + load64(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch64(cond, memoryTempRegister, right); + } + + Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right) + { + ASSERT(!(0xffffff00 & right.m_value)); + load8(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right); + } + + Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right) + { + ASSERT(!(0xffffff00 & right.m_value)); + load8(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right); + } + + Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) + { + ASSERT(!(0xffffff00 & right.m_value)); + load8(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right); + } + + Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask) + { + if (reg == mask && (cond == Zero || cond == NonZero)) + return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg)); + m_assembler.tst<32>(reg, mask); + return Jump(makeBranch(cond)); + } + + void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) + { + if (mask.m_value == -1) + m_assembler.tst<32>(reg, reg); + else { + 
LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value); + + if (logicalImm.isValid()) + m_assembler.tst<32>(reg, logicalImm); + else { + move(mask, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.tst<32>(reg, dataTempRegister); + } + } + } + + Jump branch(ResultCondition cond) + { + return Jump(makeBranch(cond)); + } + + Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) + { + if (mask.m_value == -1) { + if ((cond == Zero) || (cond == NonZero)) + return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg)); + m_assembler.tst<32>(reg, reg); + } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero))) + return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond))); + else { + LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value); + if (logicalImm.isValid()) { + m_assembler.tst<32>(reg, logicalImm); + return Jump(makeBranch(cond)); + } + + move(mask, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.tst<32>(reg, dataTempRegister); + } + return Jump(makeBranch(cond)); + } + + Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) + { + load32(address, getCachedMemoryTempRegisterIDAndInvalidate()); + return branchTest32(cond, memoryTempRegister, mask); + } + + Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) + { + load32(address, getCachedMemoryTempRegisterIDAndInvalidate()); + return branchTest32(cond, memoryTempRegister, mask); + } + + Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask) + { + if (reg == mask && (cond == Zero || cond == NonZero)) + return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg)); + m_assembler.tst<64>(reg, mask); + return Jump(makeBranch(cond)); + } + + Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) + { + if (mask.m_value == -1) { + if ((cond == Zero) || (cond == NonZero)) + return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg)); + m_assembler.tst<64>(reg, reg); + } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero))) + return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond))); + else { + LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value); + + if (logicalImm.isValid()) { + m_assembler.tst<64>(reg, logicalImm); + return Jump(makeBranch(cond)); + } + + signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.tst<64>(reg, dataTempRegister); + } + return Jump(makeBranch(cond)); + } + + Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask) + { + move(mask, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest64(cond, reg, dataTempRegister); + } + + Jump branchTest64(ResultCondition cond, Address address, RegisterID mask) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest64(cond, dataTempRegister, mask); + } + + Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest64(cond, dataTempRegister, mask); + } + + Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest64(cond, dataTempRegister, 
mask); + } + + Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) + { + load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest64(cond, dataTempRegister, mask); + } + + Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) + { + load8(address, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest32(cond, dataTempRegister, mask); + } + + Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) + { + load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest32(cond, dataTempRegister, mask); + } + + Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1)) + { + move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister); + return branchTest32(cond, dataTempRegister, mask); + } + + Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) + { + load8(address, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest32(cond, dataTempRegister, mask); + } + + Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right) + { + return branch32(cond, left, right); + } + + + // Arithmetic control flow operations: + // + // This set of conditional branch operations branch based + // on the result of an arithmetic operation. The operation + // is performed as normal, storing the result. + // + // * jz operations branch if the result is zero. + // * jo operations branch if the (signed) arithmetic + // operation caused an overflow to occur. 
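    // Illustrative sketch, under the same assumptions as the sketch above
    // (a MacroAssemblerARM64 named jit, placeholder registers):
    //
    //     // Add x1 into x0, branching if the signed 32-bit add overflowed.
    //     Jump overflowed = jit.branchAdd32(MacroAssembler::Overflow, ARM64Registers::x1, ARM64Registers::x0);
    //     // Subtract 1 from x2, branching when the result reaches zero.
    //     Jump reachedZero = jit.branchSub32(MacroAssembler::Zero, TrustedImm32(1), ARM64Registers::x2);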
+ + Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.add<32, S>(dest, op1, op2); + return Jump(makeBranch(cond)); + } + + Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value)); + return Jump(makeBranch(cond)); + } + if (isUInt12(-imm.m_value)) { + m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value)); + return Jump(makeBranch(cond)); + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + return branchAdd32(cond, op1, dataTempRegister, dest); + } + + Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest) + { + load32(src, getCachedDataTempRegisterIDAndInvalidate()); + return branchAdd32(cond, dest, dataTempRegister, dest); + } + + Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest) + { + return branchAdd32(cond, dest, src, dest); + } + + Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest) + { + return branchAdd32(cond, dest, imm, dest); + } + + Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address) + { + load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) { + m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + store32(dataTempRegister, address.m_ptr); + } else if (isUInt12(-imm.m_value)) { + m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + store32(dataTempRegister, address.m_ptr); + } else { + move(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister); + store32(dataTempRegister, address.m_ptr); + } + + return Jump(makeBranch(cond)); + } + + Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.add<64, S>(dest, op1, op2); + return Jump(makeBranch(cond)); + } + + Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value)); + return Jump(makeBranch(cond)); + } + if (isUInt12(-imm.m_value)) { + m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value)); + return Jump(makeBranch(cond)); + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + return branchAdd64(cond, op1, dataTempRegister, dest); + } + + Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest) + { + return branchAdd64(cond, dest, src, dest); + } + + Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest) + { + return branchAdd64(cond, dest, imm, dest); + } + + Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest) + { + ASSERT(cond != Signed); + + if (cond != Overflow) { + m_assembler.mul<32>(dest, src1, src2); + return branchTest32(cond, dest); + } + + // This is a signed multiple of two 32-bit values, producing a 64-bit result. + m_assembler.smull(dest, src1, src2); + // Copy bits 63..32 of the result to bits 31..0 of scratch1. + m_assembler.asr<64>(scratch1, dest, 32); + // Splat bit 31 of the result to bits 31..0 of scratch2. + m_assembler.asr<32>(scratch2, dest, 31); + // After a mul32 the top 32 bits of the register should be clear. + zeroExtend32ToPtr(dest, dest); + // Check that bits 31..63 of the original result were all equal. 
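        // In other words, overflow occurred iff the high 32 bits of the
        // 64-bit product (now in scratch1) differ from 32 copies of the low
        // word's sign bit (now in scratch2).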
+ return branch32(NotEqual, scratch2, scratch1); + } + + Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) + { + return branchMul32(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest); + } + + Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest) + { + return branchMul32(cond, dest, src, dest); + } + + Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) + { + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + return branchMul32(cond, dataTempRegister, src, dest); + } + + Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest) + { + ASSERT(cond != Signed); + + // This is a signed multiple of two 64-bit values, producing a 64-bit result. + m_assembler.mul<64>(dest, src1, src2); + + if (cond != Overflow) + return branchTest64(cond, dest); + + // Compute bits 127..64 of the result into scratch1. + m_assembler.smulh(scratch1, src1, src2); + // Splat bit 63 of the result to bits 63..0 of scratch2. + m_assembler.asr<64>(scratch2, dest, 63); + // Check that bits 31..63 of the original result were all equal. + return branch64(NotEqual, scratch2, scratch1); + } + + Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) + { + return branchMul64(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest); + } + + Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest) + { + return branchMul64(cond, dest, src, dest); + } + + Jump branchNeg32(ResultCondition cond, RegisterID dest) + { + m_assembler.neg<32, S>(dest, dest); + return Jump(makeBranch(cond)); + } + + Jump branchNeg64(ResultCondition cond, RegisterID srcDest) + { + m_assembler.neg<64, S>(srcDest, srcDest); + return Jump(makeBranch(cond)); + } + + Jump branchSub32(ResultCondition cond, RegisterID dest) + { + m_assembler.neg<32, S>(dest, dest); + return Jump(makeBranch(cond)); + } + + Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.sub<32, S>(dest, op1, op2); + return Jump(makeBranch(cond)); + } + + Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value)); + return Jump(makeBranch(cond)); + } + if (isUInt12(-imm.m_value)) { + m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value)); + return Jump(makeBranch(cond)); + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + return branchSub32(cond, op1, dataTempRegister, dest); + } + + Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest) + { + return branchSub32(cond, dest, src, dest); + } + + Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest) + { + return branchSub32(cond, dest, imm, dest); + } + + Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.sub<64, S>(dest, op1, op2); + return Jump(makeBranch(cond)); + } + + Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value)); + return Jump(makeBranch(cond)); + } + if (isUInt12(-imm.m_value)) { + m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value)); + return Jump(makeBranch(cond)); + } + + 
move(imm, getCachedDataTempRegisterIDAndInvalidate()); + return branchSub64(cond, op1, dataTempRegister, dest); + } + + Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest) + { + return branchSub64(cond, dest, src, dest); + } + + Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest) + { + return branchSub64(cond, dest, imm, dest); + } + + + // Jumps, calls, returns + + ALWAYS_INLINE Call call() + { + AssemblerLabel pointerLabel = m_assembler.label(); + moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate()); + invalidateAllTempRegisters(); + m_assembler.blr(dataTempRegister); + AssemblerLabel callLabel = m_assembler.label(); + ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER); + return Call(callLabel, Call::Linkable); + } + + ALWAYS_INLINE Call call(RegisterID target) + { + invalidateAllTempRegisters(); + m_assembler.blr(target); + return Call(m_assembler.label(), Call::None); + } + + ALWAYS_INLINE Call call(Address address) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + return call(dataTempRegister); + } + + ALWAYS_INLINE Jump jump() + { + AssemblerLabel label = m_assembler.label(); + m_assembler.b(); + return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition); + } + + void jump(RegisterID target) + { + m_assembler.br(target); + } + + void jump(Address address) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.br(dataTempRegister); + } + + void jump(AbsoluteAddress address) + { + move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate()); + load64(Address(dataTempRegister), dataTempRegister); + m_assembler.br(dataTempRegister); + } + + ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump) + { + oldJump.link(this); + return tailRecursiveCall(); + } + + ALWAYS_INLINE Call nearCall() + { + m_assembler.bl(); + return Call(m_assembler.label(), Call::LinkableNear); + } + + ALWAYS_INLINE Call nearTailCall() + { + AssemblerLabel label = m_assembler.label(); + m_assembler.b(); + return Call(label, Call::LinkableNearTail); + } + + ALWAYS_INLINE void ret() + { + m_assembler.ret(); + } + + ALWAYS_INLINE Call tailRecursiveCall() + { + // Like a normal call, but don't link. 
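        // It reuses the same repatchable fixed-width pointer move and
        // indirect branch sequence as call() above, but transfers control
        // with br rather than blr, so no return address is written to lr.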
+ AssemblerLabel pointerLabel = m_assembler.label(); + moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.br(dataTempRegister); + AssemblerLabel callLabel = m_assembler.label(); + ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER); + return Call(callLabel, Call::Linkable); + } + + + // Comparisons operations + + void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.cmp<32>(left, right); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest) + { + load32(left, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.cmp<32>(dataTempRegister, right); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + test32(*resultCondition, left, left, dest); + return; + } + } + + if (isUInt12(right.m_value)) + m_assembler.cmp<32>(left, UInt12(right.m_value)); + else if (isUInt12(-right.m_value)) + m_assembler.cmn<32>(left, UInt12(-right.m_value)); + else { + move(right, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.cmp<32>(left, dataTempRegister); + } + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.cmp<64>(left, right); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + test64(*resultCondition, left, left, dest); + return; + } + } + + signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.cmp<64>(left, dataTempRegister); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest) + { + load8(left, getCachedMemoryTempRegisterIDAndInvalidate()); + move(right, getCachedDataTempRegisterIDAndInvalidate()); + compare32(cond, memoryTempRegister, dataTempRegister, dest); + } + + void test32(ResultCondition cond, RegisterID src, RegisterID mask, RegisterID dest) + { + m_assembler.tst<32>(src, mask); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest) + { + test32(src, mask); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) + { + load32(address, getCachedMemoryTempRegisterIDAndInvalidate()); + test32(cond, memoryTempRegister, mask, dest); + } + + void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) + { + load8(address, getCachedMemoryTempRegisterIDAndInvalidate()); + test32(cond, memoryTempRegister, mask, dest); + } + + void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.tst<64>(op1, op2); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest) + { + if (mask.m_value == -1) + m_assembler.tst<64>(src, src); + else { + signExtend32ToPtr(mask, 
getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.tst<64>(src, dataTempRegister); + } + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void setCarry(RegisterID dest) + { + m_assembler.cset<32>(dest, ARM64Assembler::ConditionCS); + } + + // Patchable operations + + ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest) + { + DataLabel32 label(this); + moveWithFixedWidth(imm, dest); + return label; + } + + ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest) + { + DataLabelPtr label(this); + moveWithFixedWidth(imm, dest); + return label; + } + + ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) + { + dataLabel = DataLabelPtr(this); + moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate()); + return branch64(cond, left, dataTempRegister); + } + + ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) + { + dataLabel = DataLabelPtr(this); + moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate()); + return branch64(cond, left, dataTempRegister); + } + + ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + dataLabel = DataLabel32(this); + moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate()); + return branch32(cond, left, dataTempRegister); + } + + PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right) + { + m_makeJumpPatchable = true; + Jump result = branch64(cond, left, TrustedImm64(right)); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) + { + m_makeJumpPatchable = true; + Jump result = branchTest32(cond, reg, mask); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm) + { + m_makeJumpPatchable = true; + Jump result = branch32(cond, reg, imm); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm) + { + m_makeJumpPatchable = true; + Jump result = branch64(cond, reg, imm); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right) + { + m_makeJumpPatchable = true; + Jump result = branch64(cond, left, right); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) + { + m_makeJumpPatchable = true; + Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + m_makeJumpPatchable = true; + Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableJump() 
+ { + m_makeJumpPatchable = true; + Jump result = jump(); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) + { + DataLabelPtr label(this); + moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate()); + store64(dataTempRegister, address); + return label; + } + + ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) + { + return storePtrWithPatch(TrustedImmPtr(0), address); + } + + static void reemitInitialMoveWithPatch(void* address, void* value) + { + ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true); + } + + // Miscellaneous operations: + + void breakpoint(uint16_t imm = 0) + { + m_assembler.brk(imm); + } + + void nop() + { + m_assembler.nop(); + } + + void memoryFence() + { + m_assembler.dmbSY(); + } + + + // Misc helper functions. + + // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc. + static RelationalCondition invert(RelationalCondition cond) + { + return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond))); + } + + static Optional<ResultCondition> commuteCompareToZeroIntoTest(RelationalCondition cond) + { + switch (cond) { + case Equal: + return Zero; + case NotEqual: + return NonZero; + case LessThan: + return Signed; + case GreaterThanOrEqual: + return PositiveOrZero; + break; + default: + return Nullopt; + } + } + + static FunctionPtr readCallTarget(CodeLocationCall call) + { + return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation()))); + } + + static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination) + { + ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation()); + } + + static ptrdiff_t maxJumpReplacementSize() + { + return ARM64Assembler::maxJumpReplacementSize(); + } + + RegisterID scratchRegisterForBlinding() + { + // We *do not* have a scratch register for blinding. 
+ RELEASE_ASSERT_NOT_REACHED(); + return getCachedDataTempRegisterIDAndInvalidate(); + } + + static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; } + static bool canJumpReplacePatchableBranch32WithPatch() { return false; } + + static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) + { + return label.labelAtOffset(0); + } + + static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr) + { + UNREACHABLE_FOR_PLATFORM(); + return CodeLocationLabel(); + } + + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32) + { + UNREACHABLE_FOR_PLATFORM(); + return CodeLocationLabel(); + } + + static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue) + { + reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue); + } + + static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*) + { + UNREACHABLE_FOR_PLATFORM(); + } + + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t) + { + UNREACHABLE_FOR_PLATFORM(); + } + + static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) + { + ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress()); + } + + static void repatchCall(CodeLocationCall call, FunctionPtr destination) + { + ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress()); + } + +#if ENABLE(MASM_PROBE) + void probe(ProbeFunction, void* arg1, void* arg2); +#endif // ENABLE(MASM_PROBE) + +protected: + ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond) + { + m_assembler.b_cond(cond); + AssemblerLabel label = m_assembler.label(); + m_assembler.nop(); + return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond); + } + ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); } + ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); } + ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); } + + template <int dataSize> + ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg) + { + if (cond == IsZero) + m_assembler.cbz<dataSize>(reg); + else + m_assembler.cbnz<dataSize>(reg); + AssemblerLabel label = m_assembler.label(); + m_assembler.nop(); + return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg); + } + + ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond) + { + ASSERT(bit < 64); + bit &= 0x3f; + if (cond == IsZero) + m_assembler.tbz(reg, bit); + else + m_assembler.tbnz(reg, bit); + AssemblerLabel label = m_assembler.label(); + m_assembler.nop(); + return Jump(label, m_makeJumpPatchable ? 
ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg); + } + + ARM64Assembler::Condition ARM64Condition(RelationalCondition cond) + { + return static_cast<ARM64Assembler::Condition>(cond); + } + + ARM64Assembler::Condition ARM64Condition(ResultCondition cond) + { + return static_cast<ARM64Assembler::Condition>(cond); + } + + ARM64Assembler::Condition ARM64Condition(DoubleCondition cond) + { + return static_cast<ARM64Assembler::Condition>(cond); + } + +private: + ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() + { + RELEASE_ASSERT(m_allowScratchRegister); + return dataMemoryTempRegister().registerIDInvalidate(); + } + ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() + { + RELEASE_ASSERT(m_allowScratchRegister); + return cachedMemoryTempRegister().registerIDInvalidate(); + } + ALWAYS_INLINE CachedTempRegister& dataMemoryTempRegister() + { + RELEASE_ASSERT(m_allowScratchRegister); + return m_dataMemoryTempRegister; + } + ALWAYS_INLINE CachedTempRegister& cachedMemoryTempRegister() + { + RELEASE_ASSERT(m_allowScratchRegister); + return m_cachedMemoryTempRegister; + } + + ALWAYS_INLINE bool isInIntRange(intptr_t value) + { + return value == ((value << 32) >> 32); + } + + template<typename ImmediateType, typename rawType> + void moveInternal(ImmediateType imm, RegisterID dest) + { + const int dataSize = sizeof(rawType) * 8; + const int numberHalfWords = dataSize / 16; + rawType value = bitwise_cast<rawType>(imm.m_value); + uint16_t halfword[numberHalfWords]; + + // Handle 0 and ~0 here to simplify code below + if (!value) { + m_assembler.movz<dataSize>(dest, 0); + return; + } + if (!~value) { + m_assembler.movn<dataSize>(dest, 0); + return; + } + + LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value)); + + if (logicalImm.isValid()) { + m_assembler.movi<dataSize>(dest, logicalImm); + return; + } + + // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly. 
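        // Worked examples (assuming the value was not already matched as a
        // logical immediate above): 0x0000000012340000 has three zero
        // halfwords, so the vote is non-negative and a single
        // "movz dest, #0x1234, lsl #16" suffices; 0xffffffffffff1234 has
        // three 0xffff halfwords, so the vote is negative and a single
        // "movn dest, #0xedcb" (the bitwise NOT of 0x1234) produces it. Any
        // remaining non-zero (respectively non-0xffff) halfwords are then
        // patched in with movk.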
+ int zeroOrNegateVote = 0; + for (int i = 0; i < numberHalfWords; ++i) { + halfword[i] = getHalfword(value, i); + if (!halfword[i]) + zeroOrNegateVote++; + else if (halfword[i] == 0xffff) + zeroOrNegateVote--; + } + + bool needToClearRegister = true; + if (zeroOrNegateVote >= 0) { + for (int i = 0; i < numberHalfWords; i++) { + if (halfword[i]) { + if (needToClearRegister) { + m_assembler.movz<dataSize>(dest, halfword[i], 16*i); + needToClearRegister = false; + } else + m_assembler.movk<dataSize>(dest, halfword[i], 16*i); + } + } + } else { + for (int i = 0; i < numberHalfWords; i++) { + if (halfword[i] != 0xffff) { + if (needToClearRegister) { + m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i); + needToClearRegister = false; + } else + m_assembler.movk<dataSize>(dest, halfword[i], 16*i); + } + } + } + } + + template<int datasize> + ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) + { + m_assembler.ldr<datasize>(rt, rn, pimm); + } + + template<int datasize> + ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm) + { + m_assembler.ldur<datasize>(rt, rn, simm); + } + + template<int datasize> + ALWAYS_INLINE void loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) + { + loadUnsignedImmediate<datasize>(rt, rn, pimm); + } + + template<int datasize> + ALWAYS_INLINE void loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm) + { + loadUnscaledImmediate<datasize>(rt, rn, simm); + } + + template<int datasize> + ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) + { + m_assembler.str<datasize>(rt, rn, pimm); + } + + template<int datasize> + ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm) + { + m_assembler.stur<datasize>(rt, rn, simm); + } + + void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest) + { + int32_t value = imm.m_value; + m_assembler.movz<32>(dest, getHalfword(value, 0)); + m_assembler.movk<32>(dest, getHalfword(value, 1), 16); + } + + void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest) + { + intptr_t value = reinterpret_cast<intptr_t>(imm.m_value); + m_assembler.movz<64>(dest, getHalfword(value, 0)); + m_assembler.movk<64>(dest, getHalfword(value, 1), 16); + m_assembler.movk<64>(dest, getHalfword(value, 2), 32); + } + + void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest) + { + if (value >= 0) { + m_assembler.movz<32>(dest, getHalfword(value, 0)); + m_assembler.movk<32>(dest, getHalfword(value, 1), 16); + } else { + m_assembler.movn<32>(dest, ~getHalfword(value, 0)); + m_assembler.movk<32>(dest, getHalfword(value, 1), 16); + } + } + + template<int datasize> + ALWAYS_INLINE void load(const void* address, RegisterID dest) + { + intptr_t currentRegisterContents; + if (cachedMemoryTempRegister().value(currentRegisterContents)) { + intptr_t addressAsInt = reinterpret_cast<intptr_t>(address); + intptr_t addressDelta = addressAsInt - currentRegisterContents; + + if (dest == memoryTempRegister) + cachedMemoryTempRegister().invalidate(); + + if (isInIntRange(addressDelta)) { + if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) { + m_assembler.ldur<datasize>(dest, memoryTempRegister, addressDelta); + return; + } + + if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) { + m_assembler.ldr<datasize>(dest, memoryTempRegister, addressDelta); + return; + } + } + + if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) { + 
m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0); + cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address)); + m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr); + return; + } + } + + move(TrustedImmPtr(address), memoryTempRegister); + if (dest == memoryTempRegister) + cachedMemoryTempRegister().invalidate(); + else + cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address)); + m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr); + } + + template<int datasize> + ALWAYS_INLINE void store(RegisterID src, const void* address) + { + ASSERT(src != memoryTempRegister); + intptr_t currentRegisterContents; + if (cachedMemoryTempRegister().value(currentRegisterContents)) { + intptr_t addressAsInt = reinterpret_cast<intptr_t>(address); + intptr_t addressDelta = addressAsInt - currentRegisterContents; + + if (isInIntRange(addressDelta)) { + if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) { + m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta); + return; + } + + if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) { + m_assembler.str<datasize>(src, memoryTempRegister, addressDelta); + return; + } + } + + if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) { + m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0); + cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address)); + m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr); + return; + } + } + + move(TrustedImmPtr(address), memoryTempRegister); + cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address)); + m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr); + } + + template <int dataSize> + ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest) + { + intptr_t currentRegisterContents; + if (dest.value(currentRegisterContents)) { + if (currentRegisterContents == immediate) + return true; + + LogicalImmediate logicalImm = dataSize == 64 ? 
LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate)); + + if (logicalImm.isValid()) { + m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm); + dest.setValue(immediate); + return true; + } + + if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) { + if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1)) + m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16); + + if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0)) + m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0); + + dest.setValue(immediate); + return true; + } + } + + return false; + } + + void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest) + { + if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest)) + return; + + moveInternal<TrustedImm32, int32_t>(imm, dest.registerIDNoInvalidate()); + dest.setValue(imm.m_value); + } + + void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest) + { + if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest)) + return; + + moveInternal<TrustedImmPtr, intptr_t>(imm, dest.registerIDNoInvalidate()); + dest.setValue(imm.asIntptr()); + } + + void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest) + { + if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest)) + return; + + moveInternal<TrustedImm64, int64_t>(imm, dest.registerIDNoInvalidate()); + dest.setValue(imm.m_value); + } + + template<int datasize> + ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset) + { + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + loadUnscaledImmediate<datasize>(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { + loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset)); + return true; + } + return false; + } + + template<int datasize> + ALWAYS_INLINE bool tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset) + { + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + loadSignedAddressedByUnscaledImmediate<datasize>(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { + loadSignedAddressedByUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset)); + return true; + } + return false; + } + + template<int datasize> + ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset) + { + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + m_assembler.ldur<datasize>(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { + m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset)); + return true; + } + return false; + } + + template<int datasize> + ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset) + { + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + storeUnscaledImmediate<datasize>(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { + storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset)); + return true; + } + return false; + } + + template<int datasize> + ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset) + { + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + m_assembler.stur<datasize>(rt, rn, offset); + return true; + } + if 
(ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { + m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset)); + return true; + } + return false; + } + + Jump jumpAfterFloatingPointCompare(DoubleCondition cond) + { + if (cond == DoubleNotEqual) { + // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump. + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + Jump result = makeBranch(ARM64Assembler::ConditionNE); + unordered.link(this); + return result; + } + if (cond == DoubleEqualOrUnordered) { + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + Jump notEqual = makeBranch(ARM64Assembler::ConditionNE); + unordered.link(this); + // We get here if either unordered or equal. + Jump result = jump(); + notEqual.link(this); + return result; + } + return makeBranch(cond); + } + + friend class LinkBuffer; + + static void linkCall(void* code, Call call, FunctionPtr function) + { + if (!call.isFlagSet(Call::Near)) + ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value()); + else if (call.isFlagSet(Call::Tail)) + ARM64Assembler::linkJump(code, call.m_label, function.value()); + else + ARM64Assembler::linkCall(code, call.m_label, function.value()); + } + + CachedTempRegister m_dataMemoryTempRegister; + CachedTempRegister m_cachedMemoryTempRegister; + bool m_makeJumpPatchable; +}; + +// Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.ldrb(rt, rn, pimm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.ldrh(rt, rn, pimm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.ldrsb<64>(rt, rn, pimm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.ldrsh<64>(rt, rn, pimm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.ldurb(rt, rn, simm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.ldurh(rt, rn, simm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.ldursb<64>(rt, rn, simm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.ldursh<64>(rt, rn, simm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.strb(rt, rn, pimm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.strh(rt, rn, pimm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.sturb(rt, rn, simm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm) +{ + 
m_assembler.sturh(rt, rn, simm); +} + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) + +#endif // MacroAssemblerARM64_h diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp new file mode 100644 index 000000000..9b802456c --- /dev/null +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp @@ -0,0 +1,345 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) +#include "MacroAssemblerARMv7.h" + +#include <wtf/InlineASM.h> + +namespace JSC { + +#if ENABLE(MASM_PROBE) + +extern "C" void ctiMasmProbeTrampoline(); + +#if COMPILER(GCC_OR_CLANG) + +// The following are offsets for MacroAssemblerARMv7::ProbeContext fields accessed +// by the ctiMasmProbeTrampoline stub. 
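For orientation, the offset constants that follow encode the following arithmetic, using the PTR_SIZE, GPREG_SIZE and FPREG_SIZE values defined just below (this is only a worked-out restatement of those macros, not part of the patch):

    PROBE_FIRST_GPREG_OFFSET = 3 * PTR_SIZE          = 12    // probeFunction, arg1, arg2
    PROBE_CPU_PC_OFFSET      = 12 + 15 * GPREG_SIZE  = 72    // r0..r11, ip, sp, lr, pc
    PROBE_CPU_APSR_OFFSET    = 12 + 16 * GPREG_SIZE  = 76
    PROBE_CPU_FPSCR_OFFSET   = 12 + 17 * GPREG_SIZE  = 80
    PROBE_FIRST_FPREG_OFFSET = 12 + 18 * GPREG_SIZE  = 84    // d0 starts here
    PROBE_SIZE               = 84 + 32 * FPREG_SIZE  = 340   // whole ProbeContext, in bytes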
+ +#define PTR_SIZE 4 +#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE) +#define PROBE_ARG1_OFFSET (1 * PTR_SIZE) +#define PROBE_ARG2_OFFSET (2 * PTR_SIZE) + +#define PROBE_FIRST_GPREG_OFFSET (3 * PTR_SIZE) + +#define GPREG_SIZE 4 +#define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE)) +#define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE)) +#define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE)) +#define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE)) +#define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE)) +#define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE)) +#define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE)) +#define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE)) +#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE)) +#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE)) +#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE)) +#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE)) +#define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE)) +#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE)) +#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE)) +#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE)) + +#define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE)) +#define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE)) + +#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE)) + +#define FPREG_SIZE 8 +#define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE)) +#define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE)) +#define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE)) +#define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE)) +#define PROBE_CPU_D4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE)) +#define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE)) +#define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE)) +#define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE)) +#define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE)) +#define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE)) +#define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE)) +#define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE)) +#define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE)) +#define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE)) +#define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE)) +#define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE)) +#define PROBE_CPU_D16_OFFSET (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE)) +#define PROBE_CPU_D17_OFFSET (PROBE_FIRST_FPREG_OFFSET + (17 * FPREG_SIZE)) +#define PROBE_CPU_D18_OFFSET (PROBE_FIRST_FPREG_OFFSET + (18 * FPREG_SIZE)) +#define PROBE_CPU_D19_OFFSET (PROBE_FIRST_FPREG_OFFSET + (19 * FPREG_SIZE)) +#define PROBE_CPU_D20_OFFSET (PROBE_FIRST_FPREG_OFFSET + (20 * FPREG_SIZE)) +#define PROBE_CPU_D21_OFFSET (PROBE_FIRST_FPREG_OFFSET + (21 * FPREG_SIZE)) +#define PROBE_CPU_D22_OFFSET (PROBE_FIRST_FPREG_OFFSET + (22 * FPREG_SIZE)) +#define PROBE_CPU_D23_OFFSET (PROBE_FIRST_FPREG_OFFSET + (23 * FPREG_SIZE)) +#define 
PROBE_CPU_D24_OFFSET (PROBE_FIRST_FPREG_OFFSET + (24 * FPREG_SIZE)) +#define PROBE_CPU_D25_OFFSET (PROBE_FIRST_FPREG_OFFSET + (25 * FPREG_SIZE)) +#define PROBE_CPU_D26_OFFSET (PROBE_FIRST_FPREG_OFFSET + (26 * FPREG_SIZE)) +#define PROBE_CPU_D27_OFFSET (PROBE_FIRST_FPREG_OFFSET + (27 * FPREG_SIZE)) +#define PROBE_CPU_D28_OFFSET (PROBE_FIRST_FPREG_OFFSET + (28 * FPREG_SIZE)) +#define PROBE_CPU_D29_OFFSET (PROBE_FIRST_FPREG_OFFSET + (29 * FPREG_SIZE)) +#define PROBE_CPU_D30_OFFSET (PROBE_FIRST_FPREG_OFFSET + (30 * FPREG_SIZE)) +#define PROBE_CPU_D31_OFFSET (PROBE_FIRST_FPREG_OFFSET + (31 * FPREG_SIZE)) +#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (32 * FPREG_SIZE)) + +// These ASSERTs remind you that if you change the layout of ProbeContext, +// you need to change ctiMasmProbeTrampoline offsets above to match. +#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARMv7::ProbeContext, x) +COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline); + 
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d16) == PROBE_CPU_D16_OFFSET, ProbeContext_cpu_d16_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d17) == PROBE_CPU_D17_OFFSET, ProbeContext_cpu_d17_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d18) == PROBE_CPU_D18_OFFSET, ProbeContext_cpu_d18_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d19) == PROBE_CPU_D19_OFFSET, ProbeContext_cpu_d19_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d20) == PROBE_CPU_D20_OFFSET, ProbeContext_cpu_d20_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d21) == PROBE_CPU_D21_OFFSET, ProbeContext_cpu_d21_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d22) == PROBE_CPU_D22_OFFSET, ProbeContext_cpu_d22_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d23) == PROBE_CPU_D23_OFFSET, ProbeContext_cpu_d23_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d24) == PROBE_CPU_D24_OFFSET, ProbeContext_cpu_d24_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d25) == PROBE_CPU_D25_OFFSET, ProbeContext_cpu_d25_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d26) == PROBE_CPU_D26_OFFSET, ProbeContext_cpu_d26_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d27) == PROBE_CPU_D27_OFFSET, ProbeContext_cpu_d27_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d28) == 
PROBE_CPU_D28_OFFSET, ProbeContext_cpu_d28_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d29) == PROBE_CPU_D29_OFFSET, ProbeContext_cpu_d29_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d30) == PROBE_CPU_D30_OFFSET, ProbeContext_cpu_d30_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d31) == PROBE_CPU_D31_OFFSET, ProbeContext_cpu_d31_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(sizeof(MacroAssemblerARMv7::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline); + +#undef PROBE_OFFSETOF + +asm ( + ".text" "\n" + ".align 2" "\n" + ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" + HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" + ".thumb" "\n" + ".thumb_func " THUMB_FUNC_PARAM(ctiMasmProbeTrampoline) "\n" + SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" + + // MacroAssemblerARMv7::probe() has already generated code to store some values. + // The top of stack now looks like this: + // esp[0 * ptrSize]: probeFunction + // esp[1 * ptrSize]: arg1 + // esp[2 * ptrSize]: arg2 + // esp[3 * ptrSize]: saved r0 + // esp[4 * ptrSize]: saved ip + // esp[5 * ptrSize]: saved lr + // esp[6 * ptrSize]: saved sp + + "mov ip, sp" "\n" + "mov r0, sp" "\n" + "sub r0, r0, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n" + + // The ARM EABI specifies that the stack needs to be 16 byte aligned. + "bic r0, r0, #0xf" "\n" + "mov sp, r0" "\n" + + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "add lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R1_OFFSET) "\n" + "stmia lr, { r1-r11 }" "\n" + "mrs lr, APSR" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + "vmrs lr, FPSCR" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n" + + "ldr lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" + "ldr lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n" + "ldr lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n" + "ldr lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "]" "\n" + "ldr lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "ldr lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "ldr lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n" + "vstmia.64 ip, { d0-d31 }" "\n" + + "mov fp, sp" "\n" // Save the ProbeContext*. + + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" + "mov r0, sp" "\n" // the ProbeContext* arg. + "blx ip" "\n" + + "mov sp, fp" "\n" + + // To enable probes to modify register state, we copy all registers + // out of the ProbeContext before returning. 
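A probe handler is an ordinary C++ function that receives the ProbeContext* (the trampoline passes it in r0, as noted above), and because of the copy-back that follows, any field it writes becomes the new machine state when the trampoline returns. A minimal hypothetical handler, assuming ProbeFunction has the usual void (*)(ProbeContext*) shape; the function name and values below are illustrative only and not part of this patch:

    static void forceR0To42(MacroAssemblerARMv7::ProbeContext* context)
    {
        // Whatever is stored into the context here is what the restore
        // sequence below loads back into the real registers.
        context->cpu.r0 = 42;
        (void)context->arg1; // arg1/arg2 carry whatever was passed to probe()
    }

    // Emitted at JIT time, e.g.: jit.probe(forceR0To42, someTagPointer, nullptr);
    // ("jit", "someTagPointer" are placeholder names for this sketch.)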
+ + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D31_OFFSET + FPREG_SIZE) "\n" + "vldmdb.64 ip!, { d0-d31 }" "\n" + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n" + "ldmdb ip, { r0-r11 }" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n" + "vmsr FPSCR, ip" "\n" + + // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr. + // There are 2 issues that complicate the restoration of these last few + // registers: + // + // 1. Normal ARM calling convention relies on moving lr to pc to return to + // the caller. In our case, the address to return to is specified by + // ProbeContext.cpu.pc. And at that moment, we won't have any available + // scratch registers to hold the return address (lr needs to hold + // ProbeContext.cpu.lr, not the return address). + // + // The solution is to store the return address on the stack and load the + // pc from there. + // + // 2. Issue 1 means we will need to write to the stack location at + // ProbeContext.cpu.sp - 4. But if the user probe function had modified + // the value of ProbeContext.cpu.sp to point in the range between + // &ProbeContext.cpu.ip thru &ProbeContext.cpu.aspr, then the action for + // Issue 1 may trash the values to be restored before we can restore + // them. + // + // The solution is to check if ProbeContext.cpu.sp contains a value in + // the undesirable range. If so, we copy the remaining ProbeContext + // register data to a safe range (at memory lower than where + // ProbeContext.cpu.sp points) first, and restore the remaining register + // from this new range. + + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n" + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "cmp lr, ip" "\n" + "it gt" "\n" + "bgt " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n" + + // We get here because the new expected stack pointer location is lower + // than where it's supposed to be. This means the safe range of stack + // memory where we'll be copying the remaining register restore values to + // might be in a region of memory below the sp i.e. unallocated stack + // memory. This, in turn, makes it vulnerable to interrupts potentially + // trashing the copied values. To prevent that, we must first allocate the + // needed stack memory by adjusting the sp before the copying. 
+ + "sub lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE) + " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n" + + "mov ip, sp" "\n" + "mov sp, lr" "\n" + "mov lr, ip" "\n" + + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + + ".thumb_func " THUMB_FUNC_PARAM(ctiMasmProbeTrampolineEnd) "\n" + SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "sub lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n" + "str ip, [lr]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + "msr APSR, ip" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "mov lr, ip" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "ldr sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + + "pop { pc }" "\n" +); +#endif // COMPILER(GCC_OR_CLANG) + +void MacroAssemblerARMv7::probe(MacroAssemblerARMv7::ProbeFunction function, void* arg1, void* arg2) +{ + push(RegisterID::lr); + push(RegisterID::lr); + add32(TrustedImm32(8), RegisterID::sp, RegisterID::lr); + store32(RegisterID::lr, ArmAddress(RegisterID::sp, 4)); + push(RegisterID::ip); + push(RegisterID::r0); + // The following uses RegisterID::ip. So, they must come after we push ip above. + push(trustedImm32FromPtr(arg2)); + push(trustedImm32FromPtr(arg1)); + push(trustedImm32FromPtr(function)); + + move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::ip); + m_assembler.blx(RegisterID::ip); +} +#endif // ENABLE(MASM_PROBE) + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) + diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h index b7259810a..337a82e93 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009, 2010 Apple Inc. All rights reserved. + * Copyright (C) 2009-2010, 2014-2015 Apple Inc. All rights reserved. 
* Copyright (C) 2010 University of Szeged * * Redistribution and use in source and binary forms, with or without @@ -34,7 +34,7 @@ namespace JSC { -class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> { +class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler, MacroAssemblerARMv7> { static const RegisterID dataTempRegister = ARMRegisters::ip; static const RegisterID addressTempRegister = ARMRegisters::r6; @@ -50,6 +50,10 @@ public: typedef ARMv7Assembler::LinkRecord LinkRecord; typedef ARMv7Assembler::JumpType JumpType; typedef ARMv7Assembler::JumpLinkType JumpLinkType; + typedef ARMv7Assembler::Condition Condition; + + static const ARMv7Assembler::Condition DefaultCondition = ARMv7Assembler::ConditionInvalid; + static const ARMv7Assembler::JumpType DefaultJump = ARMv7Assembler::JumpNoConditionFixedSize; static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value) { @@ -58,12 +62,11 @@ public: Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); } void* unlinkedCode() { return m_assembler.unlinkedCode(); } - bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); } - JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); } - JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); } - void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); } - int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); } - void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); } + static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); } + static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); } + static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); } + static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); } + static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARMv7Assembler::link(record, from, to); } struct ArmAddress { enum AddressType { @@ -96,8 +99,6 @@ public: }; public: - typedef ARMRegisters::FPDoubleRegisterID FPRegisterID; - static const Scale ScalePtr = TimesFour; enum RelationalCondition { @@ -139,6 +140,7 @@ public: }; static const RegisterID stackPointerRegister = ARMRegisters::sp; + static const RegisterID framePointerRegister = ARMRegisters::fp; static const RegisterID linkRegister = ARMRegisters::lr; // Integer arithmetic operations: @@ -153,6 +155,11 @@ public: m_assembler.add(dest, dest, src); } + void add32(RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.add(dest, left, right); + } + void add32(TrustedImm32 imm, RegisterID dest) { add32(imm, dest, dest); @@ -167,6 +174,14 @@ public: void add32(TrustedImm32 imm, RegisterID src, RegisterID dest) { ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value); + + // For adds with stack pointer destination, moving the src first to sp is + // needed to avoid unpredictable instruction + 
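    // For illustration (register choice arbitrary): with the fixup below,
    // add32(TrustedImm32(8), r1, ARMRegisters::sp) is emitted as
    //     mov sp, r1
    //     add sp, sp, #8
    // rather than a single "add sp, r1, #8", which is the unpredictable
    // encoding the comment above is guarding against.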
if (dest == ARMRegisters::sp && src != dest) { + move(src, ARMRegisters::sp); + src = ARMRegisters::sp; + } + if (armImm.isValid()) m_assembler.add(dest, src, armImm); else { @@ -215,6 +230,11 @@ public: store32(dataTempRegister, address.m_ptr); } + void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest) + { + add32(imm, srcDest); + } + void add64(TrustedImm32 imm, AbsoluteAddress address) { move(TrustedImmPtr(address.m_ptr), addressTempRegister); @@ -326,6 +346,31 @@ public: store32(dataTempRegister, addressTempRegister); } + void or32(TrustedImm32 imm, AbsoluteAddress address) + { + ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value); + if (armImm.isValid()) { + move(TrustedImmPtr(address.m_ptr), addressTempRegister); + load32(addressTempRegister, dataTempRegister); + m_assembler.orr(dataTempRegister, dataTempRegister, armImm); + store32(dataTempRegister, addressTempRegister); + } else { + move(TrustedImmPtr(address.m_ptr), addressTempRegister); + load32(addressTempRegister, dataTempRegister); + move(imm, addressTempRegister); + m_assembler.orr(dataTempRegister, dataTempRegister, addressTempRegister); + move(TrustedImmPtr(address.m_ptr), addressTempRegister); + store32(dataTempRegister, addressTempRegister); + } + } + + void or32(TrustedImm32 imm, Address address) + { + load32(address, dataTempRegister); + or32(imm, dataTempRegister, dataTempRegister); + store32(dataTempRegister, address); + } + void or32(TrustedImm32 imm, RegisterID dest) { or32(imm, dest, dest); @@ -342,6 +387,7 @@ public: if (armImm.isValid()) m_assembler.orr(dest, src, armImm); else { + ASSERT(src != dataTempRegister); move(imm, dataTempRegister); m_assembler.orr(dest, src, dataTempRegister); } @@ -359,7 +405,10 @@ public: void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - m_assembler.asr(dest, src, imm.m_value & 0x1f); + if (!imm.m_value) + move(src, dest); + else + m_assembler.asr(dest, src, imm.m_value & 0x1f); } void rshift32(RegisterID shiftAmount, RegisterID dest) @@ -384,7 +433,10 @@ public: void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - m_assembler.lsr(dest, src, imm.m_value & 0x1f); + if (!imm.m_value) + move(src, dest); + else + m_assembler.lsr(dest, src, imm.m_value & 0x1f); } void urshift32(RegisterID shiftAmount, RegisterID dest) @@ -524,7 +576,7 @@ private: } } - void load16Signed(ArmAddress address, RegisterID dest) + void load16SignedExtendTo32(ArmAddress address, RegisterID dest) { ASSERT(address.type == ArmAddress::HasIndex); m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale); @@ -544,7 +596,7 @@ private: } } - void load8Signed(ArmAddress address, RegisterID dest) + void load8SignedExtendTo32(ArmAddress address, RegisterID dest) { ASSERT(address.type == ArmAddress::HasIndex); m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale); @@ -621,6 +673,18 @@ public: m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0)); } + void abortWithReason(AbortReason reason) + { + move(TrustedImm32(reason), dataTempRegister); + breakpoint(); + } + + void abortWithReason(AbortReason reason, intptr_t misc) + { + move(TrustedImm32(misc), addressTempRegister); + abortWithReason(reason); + } + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) { ConvertibleLoadLabel result(this); @@ -634,7 +698,7 @@ public: load8(setupArmAddress(address), dest); } - void load8Signed(ImplicitAddress, RegisterID) + void load8SignedExtendTo32(ImplicitAddress, RegisterID) { 
UNREACHABLE_FOR_PLATFORM(); } @@ -644,9 +708,15 @@ public: load8(setupArmAddress(address), dest); } - void load8Signed(BaseIndex address, RegisterID dest) + void load8SignedExtendTo32(BaseIndex address, RegisterID dest) { - load8Signed(setupArmAddress(address), dest); + load8SignedExtendTo32(setupArmAddress(address), dest); + } + + void load8(const void* address, RegisterID dest) + { + move(TrustedImmPtr(address), dest); + load8(dest, dest); } DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest) @@ -674,9 +744,9 @@ public: m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale); } - void load16Signed(BaseIndex address, RegisterID dest) + void load16SignedExtendTo32(BaseIndex address, RegisterID dest) { - load16Signed(setupArmAddress(address), dest); + load16SignedExtendTo32(setupArmAddress(address), dest); } void load16(ImplicitAddress address, RegisterID dest) @@ -690,7 +760,7 @@ public: } } - void load16Signed(ImplicitAddress, RegisterID) + void load16SignedExtendTo32(ImplicitAddress, RegisterID) { UNREACHABLE_FOR_PLATFORM(); } @@ -736,6 +806,11 @@ public: store32(dataTempRegister, address); } + void store8(RegisterID src, Address address) + { + store8(src, setupArmAddress(address)); + } + void store8(RegisterID src, BaseIndex address) { store8(src, setupArmAddress(address)); @@ -753,6 +828,12 @@ public: store8(dataTempRegister, address); } + void store8(TrustedImm32 imm, Address address) + { + move(imm, dataTempRegister); + store8(dataTempRegister, address); + } + void store16(RegisterID src, BaseIndex address) { store16(src, setupArmAddress(address)); @@ -770,7 +851,6 @@ public: m_assembler.vmov(dest, src1, src2); } -#if ENABLE(JIT_CONSTANT_BLINDING) static bool shouldBlindForSpecificArch(uint32_t value) { ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value); @@ -788,7 +868,6 @@ public: // be controlled by an attacker. 
return !immediate.isUInt12(); } -#endif // Floating-point operations: @@ -796,6 +875,7 @@ public: static bool supportsFloatingPointTruncate() { return true; } static bool supportsFloatingPointSqrt() { return true; } static bool supportsFloatingPointAbs() { return true; } + static bool supportsFloatingPointRounding() { return false; } void loadDouble(ImplicitAddress address, FPRegisterID dest) { @@ -849,9 +929,15 @@ public: m_assembler.vmov(dest, src); } - void loadDouble(const void* address, FPRegisterID dest) + void moveZeroToDouble(FPRegisterID reg) { - move(TrustedImmPtr(address), addressTempRegister); + static double zeroConstant = 0.; + loadDouble(TrustedImmPtr(&zeroConstant), reg); + } + + void loadDouble(TrustedImmPtr address, FPRegisterID dest) + { + move(address, addressTempRegister); m_assembler.vldr(dest, addressTempRegister, 0); } @@ -885,9 +971,9 @@ public: m_assembler.fsts(ARMRegisters::asSingle(src), base, offset); } - void storeDouble(FPRegisterID src, const void* address) + void storeDouble(FPRegisterID src, TrustedImmPtr address) { - move(TrustedImmPtr(address), addressTempRegister); + move(address, addressTempRegister); storeDouble(src, addressTempRegister); } @@ -925,7 +1011,7 @@ public: void addDouble(AbsoluteAddress address, FPRegisterID dest) { - loadDouble(address.m_ptr, fpTempRegister); + loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister); m_assembler.vadd(dest, dest, fpTempRegister); } @@ -986,6 +1072,18 @@ public: m_assembler.vneg(dest, src); } + NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + + NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + void convertInt32ToDouble(RegisterID src, FPRegisterID dest) { m_assembler.vmov(fpTempRegister, src, src); @@ -1067,23 +1165,6 @@ public: return failure; } - Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed) - { - m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src); - m_assembler.vmov(dest, fpTempRegisterAsSingle()); - - Jump overflow = branch32(Equal, dest, TrustedImm32(0x7fffffff)); - Jump success = branch32(GreaterThanOrEqual, dest, TrustedImm32(0)); - overflow.link(this); - - if (branchType == BranchIfTruncateSuccessful) - return success; - - Jump failure = jump(); - success.link(this); - return failure; - } - // Result is undefined if the value is outside of the integer range. void truncateDoubleToInt32(FPRegisterID src, RegisterID dest) { @@ -1148,14 +1229,12 @@ public: void pop(RegisterID dest) { - // store postindexed with writeback - m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true); + m_assembler.pop(dest); } void push(RegisterID src) { - // store preindexed with writeback - m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true); + m_assembler.push(src); } void push(Address address) @@ -1170,6 +1249,16 @@ public: push(dataTempRegister); } + void popPair(RegisterID dest1, RegisterID dest2) + { + m_assembler.pop(1 << dest1 | 1 << dest2); + } + + void pushPair(RegisterID src1, RegisterID src2) + { + m_assembler.push(1 << src1 | 1 << src2); + } + // Register move operations: // // Move values in registers. 
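On the popPair/pushPair helpers added in the hunk above: the underlying assembler push/pop take a register-list bitmask, so the pair forms simply OR the two one-hot masks together. A hypothetical call, shown only to make the mask arithmetic concrete (register choice arbitrary):

    pushPair(ARMRegisters::r4, ARMRegisters::r5);   // mask = (1 << 4) | (1 << 5) = 0x30
    popPair(ARMRegisters::r4, ARMRegisters::r5);    // same mask, restoring both registers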
@@ -1230,6 +1319,11 @@ public: m_assembler.nop(); } + void memoryFence() + { + m_assembler.dmbSY(); + } + static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination) { ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation()); @@ -1260,25 +1354,22 @@ public: private: // Should we be using TEQ for equal/not-equal? - void compare32(RegisterID left, TrustedImm32 right) + void compare32AndSetFlags(RegisterID left, TrustedImm32 right) { int32_t imm = right.m_value; - if (!imm) - m_assembler.tst(left, left); + ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm); + if (armImm.isValid()) + m_assembler.cmp(left, armImm); + else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid()) + m_assembler.cmn(left, armImm); else { - ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm); - if (armImm.isValid()) - m_assembler.cmp(left, armImm); - else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid()) - m_assembler.cmn(left, armImm); - else { - move(TrustedImm32(imm), dataTempRegister); - m_assembler.cmp(left, dataTempRegister); - } + move(TrustedImm32(imm), dataTempRegister); + m_assembler.cmp(left, dataTempRegister); } } - void test32(RegisterID reg, TrustedImm32 mask) +public: + void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) { int32_t imm = mask.m_value; @@ -1286,16 +1377,28 @@ private: m_assembler.tst(reg, reg); else { ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm); - if (armImm.isValid()) - m_assembler.tst(reg, armImm); - else { + if (armImm.isValid()) { + if (reg == ARMRegisters::sp) { + move(reg, addressTempRegister); + m_assembler.tst(addressTempRegister, armImm); + } else + m_assembler.tst(reg, armImm); + } else { move(mask, dataTempRegister); - m_assembler.tst(reg, dataTempRegister); + if (reg == ARMRegisters::sp) { + move(reg, addressTempRegister); + m_assembler.tst(addressTempRegister, dataTempRegister); + } else + m_assembler.tst(reg, dataTempRegister); } } } + + Jump branch(ResultCondition cond) + { + return Jump(makeBranch(cond)); + } -public: Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right) { m_assembler.cmp(left, right); @@ -1304,7 +1407,7 @@ public: Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right) { - compare32(left, right); + compare32AndSetFlags(left, right); return Jump(makeBranch(cond)); } @@ -1354,9 +1457,15 @@ public: return branch32(cond, addressTempRegister, right); } + Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right) + { + load32(left, dataTempRegister); + return branch32(cond, dataTempRegister, right); + } + Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right) { - compare32(left, right); + compare32AndSetFlags(left, right); return Jump(makeBranch(cond)); } @@ -1376,14 +1485,24 @@ public: return branch32(cond, addressTempRegister, right); } + Jump branch8(RelationalCondition cond, AbsoluteAddress address, TrustedImm32 right) + { + // Use addressTempRegister instead of dataTempRegister, since branch32 uses dataTempRegister. 
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister); + load8(Address(addressTempRegister), addressTempRegister); + return branch32(cond, addressTempRegister, right); + } + Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask) { + ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero); m_assembler.tst(reg, mask); return Jump(makeBranch(cond)); } Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) { + ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero); test32(reg, mask); return Jump(makeBranch(cond)); } @@ -1402,6 +1521,13 @@ public: return branchTest32(cond, addressTempRegister, mask); } + Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) + { + // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/ + load8(address, addressTempRegister); + return branchTest32(cond, addressTempRegister, mask); + } + Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) { // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/ @@ -1524,7 +1650,7 @@ public: return branchMul32(cond, src, dest, dest); } - Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest) + Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) { move(imm, dataTempRegister); return branchMul32(cond, dataTempRegister, src, dest); @@ -1597,6 +1723,12 @@ public: return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear); } + ALWAYS_INLINE Call nearTailCall() + { + moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister); + return Call(m_assembler.bx(dataTempRegister), Call::LinkableNearTail); + } + ALWAYS_INLINE Call call() { moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister); @@ -1641,7 +1773,7 @@ public: void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) { - compare32(left, right); + compare32AndSetFlags(left, right); m_assembler.it(armV7Condition(cond), false); m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1)); m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0)); @@ -1696,6 +1828,13 @@ public: return branch32(cond, addressTempRegister, dataTempRegister); } + ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + load32(left, addressTempRegister); + dataLabel = moveWithPatch(initialRightValue, dataTempRegister); + return branch32(cond, addressTempRegister, dataTempRegister); + } + PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0)) { m_makeJumpPatchable = true; @@ -1728,6 +1867,14 @@ public: return PatchableJump(result); } + PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + m_makeJumpPatchable = true; + Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + PatchableJump patchableJump() { padBeforePatch(); @@ -1760,17 +1907,13 @@ public: } - int executableOffsetFor(int location) - { - return m_assembler.executableOffsetFor(location); - } - static FunctionPtr readCallTarget(CodeLocationCall call) { return 
FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation()))); } static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; } + static bool canJumpReplacePatchableBranch32WithPatch() { return false; } static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) { @@ -1780,7 +1923,7 @@ public: static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue) { -#if OS(LINUX) || OS(QNX) +#if OS(LINUX) ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue)); #else UNUSED_PARAM(rd); @@ -1794,11 +1937,36 @@ public: return CodeLocationLabel(); } + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32) + { + UNREACHABLE_FOR_PLATFORM(); + return CodeLocationLabel(); + } + static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*) { UNREACHABLE_FOR_PLATFORM(); } + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t) + { + UNREACHABLE_FOR_PLATFORM(); + } + + static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) + { + ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); + } + + static void repatchCall(CodeLocationCall call, FunctionPtr destination) + { + ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); + } + +#if ENABLE(MASM_PROBE) + void probe(ProbeFunction, void* arg1, void* arg2); +#endif // ENABLE(MASM_PROBE) + protected: ALWAYS_INLINE Jump jump() { @@ -1892,23 +2060,32 @@ protected: private: friend class LinkBuffer; - friend class RepatchBuffer; static void linkCall(void* code, Call call, FunctionPtr function) { - ARMv7Assembler::linkCall(code, call.m_label, function.value()); + if (call.isFlagSet(Call::Tail)) + ARMv7Assembler::linkJump(code, call.m_label, function.value()); + else + ARMv7Assembler::linkCall(code, call.m_label, function.value()); } - static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) +#if ENABLE(MASM_PROBE) + inline TrustedImm32 trustedImm32FromPtr(void* ptr) { - ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); + return TrustedImm32(TrustedImmPtr(ptr)); } - static void repatchCall(CodeLocationCall call, FunctionPtr destination) + inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function) { - ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); + return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function))); } + inline TrustedImm32 trustedImm32FromPtr(void (*function)()) + { + return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function))); + } +#endif + bool m_makeJumpPatchable; }; diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h index ec16659d5..2d9738478 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h @@ -31,19 +31,18 @@ #include "LLIntData.h" #include <wtf/DataLog.h> #include <wtf/PassRefPtr.h> +#include <wtf/PrintStream.h> #include <wtf/RefPtr.h> // ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid // instruction address on the platform (for example, check any alignment requirements). 
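Concretely, the relaxed Thumb-2 assert introduced just below only requires that the pointer be non-null once the Thumb bit is ignored. For example (addresses invented for illustration):

    void* armPtr   = reinterpret_cast<void*>(0x1000);  // traditional ARM, bit 0 clear
    void* thumbPtr = reinterpret_cast<void*>(0x1001);  // Thumb code pointer, bit 0 set
    ASSERT_VALID_CODE_POINTER(armPtr);    // passes: 0x1000 & ~1 is non-zero
    ASSERT_VALID_CODE_POINTER(thumbPtr);  // passes: 0x1001 & ~1 is non-zero
    // Plain null (0x0) and decorated null (0x1) both fail, since value & ~1 == 0.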
-#if CPU(ARM_THUMB2) -// ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded -// into the processor are decorated with the bottom bit set, indicating that this is -// thumb code (as oposed to 32-bit traditional ARM). The first test checks for both -// decorated and undectorated null, and the second test ensures that the pointer is -// decorated. +#if CPU(ARM_THUMB2) && ENABLE(JIT) +// ARM instructions must be 16-bit aligned. Thumb2 code pointers to be loaded into +// into the processor are decorated with the bottom bit set, while traditional ARM has +// the lower bit clear. Since we don't know what kind of pointer, we check for both +// decorated and undecorated null. #define ASSERT_VALID_CODE_POINTER(ptr) \ - ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1); \ - ASSERT(reinterpret_cast<intptr_t>(ptr) & 1) + ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1) #define ASSERT_VALID_CODE_OFFSET(offset) \ ASSERT(!(offset & 1)) // Must be multiple of 2. #else @@ -133,6 +132,12 @@ public: ASSERT_VALID_CODE_POINTER(m_value); } + template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5, typename argType6> + FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5, argType6)) + : m_value((void*)value) + { + ASSERT_VALID_CODE_POINTER(m_value); + } // MSVC doesn't seem to treat functions with different calling conventions as // different types; these methods already defined for fastcall, below. #if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS) @@ -255,6 +260,11 @@ public: } void* value() const { return m_value; } + + void dump(PrintStream& out) const + { + out.print(RawPointer(m_value)); + } private: void* m_value; @@ -289,12 +299,10 @@ public: return result; } -#if ENABLE(LLINT) - static MacroAssemblerCodePtr createLLIntCodePtr(LLIntCode codeId) + static MacroAssemblerCodePtr createLLIntCodePtr(OpcodeID codeId) { return createFromExecutableAddress(LLInt::getCodePtr(codeId)); } -#endif explicit MacroAssemblerCodePtr(ReturnAddressPtr ra) : m_value(ra.value()) @@ -310,15 +318,65 @@ public: void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; } #endif - bool operator!() const + explicit operator bool() const { return m_value; } + + bool operator==(const MacroAssemblerCodePtr& other) const { - return !m_value; + return m_value == other.m_value; } + void dumpWithName(const char* name, PrintStream& out) const + { + if (!m_value) { + out.print(name, "(null)"); + return; + } + if (executableAddress() == dataLocation()) { + out.print(name, "(", RawPointer(executableAddress()), ")"); + return; + } + out.print(name, "(executable = ", RawPointer(executableAddress()), ", dataLocation = ", RawPointer(dataLocation()), ")"); + } + + void dump(PrintStream& out) const + { + dumpWithName("CodePtr", out); + } + + enum EmptyValueTag { EmptyValue }; + enum DeletedValueTag { DeletedValue }; + + MacroAssemblerCodePtr(EmptyValueTag) + : m_value(emptyValue()) + { + } + + MacroAssemblerCodePtr(DeletedValueTag) + : m_value(deletedValue()) + { + } + + bool isEmptyValue() const { return m_value == emptyValue(); } + bool isDeletedValue() const { return m_value == deletedValue(); } + + unsigned hash() const { return PtrHash<void*>::hash(m_value); } + private: + static void* emptyValue() { return bitwise_cast<void*>(static_cast<intptr_t>(1)); } + static void* deletedValue() { return bitwise_cast<void*>(static_cast<intptr_t>(2)); } + void* m_value; }; +struct MacroAssemblerCodePtrHash { + static 
unsigned hash(const MacroAssemblerCodePtr& ptr) { return ptr.hash(); } + static bool equal(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b) + { + return a == b; + } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + // MacroAssemblerCodeRef: // // A reference to a section of JIT generated code. A CodeRef consists of a @@ -356,13 +414,11 @@ public: return MacroAssemblerCodeRef(codePtr); } -#if ENABLE(LLINT) // Helper for creating self-managed code refs from LLInt. - static MacroAssemblerCodeRef createLLIntCodeRef(LLIntCode codeId) + static MacroAssemblerCodeRef createLLIntCodeRef(OpcodeID codeId) { return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(codeId))); } -#endif ExecutableMemoryHandle* executableMemory() const { @@ -386,7 +442,12 @@ public: return JSC::tryToDisassemble(m_codePtr, size(), prefix, WTF::dataFile()); } - bool operator!() const { return !m_codePtr; } + explicit operator bool() const { return !!m_codePtr; } + + void dump(PrintStream& out) const + { + m_codePtr.dumpWithName("CodeRef", out); + } private: MacroAssemblerCodePtr m_codePtr; @@ -395,4 +456,16 @@ private: } // namespace JSC +namespace WTF { + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::MacroAssemblerCodePtr> { + typedef JSC::MacroAssemblerCodePtrHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::MacroAssemblerCodePtr> : public CustomHashTraits<JSC::MacroAssemblerCodePtr> { }; + +} // namespace WTF + #endif // MacroAssemblerCodeRef_h diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h index fe78431bd..a1e67e262 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2014 Apple Inc. All rights reserved. * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -34,7 +34,7 @@ namespace JSC { -class MacroAssemblerMIPS : public AbstractMacroAssembler<MIPSAssembler> { +class MacroAssemblerMIPS : public AbstractMacroAssembler<MIPSAssembler, MacroAssemblerMIPS> { public: typedef MIPSRegisters::FPRegisterID FPRegisterID; @@ -55,9 +55,9 @@ public: // For storing data loaded from the memory static const RegisterID dataTempRegister = MIPSRegisters::t1; // For storing address base - static const RegisterID addrTempRegister = MIPSRegisters::t2; + static const RegisterID addrTempRegister = MIPSRegisters::t7; // For storing compare result - static const RegisterID cmpTempRegister = MIPSRegisters::t3; + static const RegisterID cmpTempRegister = MIPSRegisters::t8; // FP temp register static const FPRegisterID fpTempRegister = MIPSRegisters::f16; @@ -101,6 +101,7 @@ public: }; static const RegisterID stackPointerRegister = MIPSRegisters::sp; + static const RegisterID framePointerRegister = MIPSRegisters::fp; static const RegisterID returnAddressRegister = MIPSRegisters::ra; // Integer arithmetic operations: @@ -288,7 +289,7 @@ public: { if (!imm.m_value && !m_fixedWidth) move(MIPSRegisters::zero, dest); - else if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth) + else if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth) m_assembler.andi(dest, dest, imm.m_value); else { /* @@ -304,7 +305,7 @@ public: { if (!imm.m_value && !m_fixedWidth) move(MIPSRegisters::zero, dest); - else if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth) + else if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth) m_assembler.andi(dest, src, imm.m_value); else { move(imm, immTempRegister); @@ -312,6 +313,15 @@ public: } } + void countLeadingZeros32(RegisterID src, RegisterID dest) + { +#if WTF_MIPS_ISA_AT_LEAST(32) + m_assembler.clz(dest, src); +#else + static_assert(false, "CLZ opcode is not available for this ISA"); +#endif + } + void lshift32(RegisterID shiftAmount, RegisterID dest) { m_assembler.sllv(dest, dest, shiftAmount); @@ -375,12 +385,23 @@ public: m_assembler.orInsn(dest, op1, op2); } + void or32(TrustedImm32 imm, AbsoluteAddress dest) + { + if (!imm.m_value && !m_fixedWidth) + return; + + // TODO: Swap dataTempRegister and immTempRegister usage + load32(dest.m_ptr, immTempRegister); + or32(imm, immTempRegister); + store32(immTempRegister, dest.m_ptr); + } + void or32(TrustedImm32 imm, RegisterID dest) { if (!imm.m_value && !m_fixedWidth) return; - if (imm.m_value > 0 && imm.m_value < 65535 + if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth) { m_assembler.ori(dest, dest, imm.m_value); return; @@ -401,7 +422,7 @@ public: return; } - if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth) { + if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth) { m_assembler.ori(dest, src, imm.m_value); return; } @@ -621,9 +642,21 @@ public: m_assembler.sqrtd(dst, src); } - void absDouble(FPRegisterID src, FPRegisterID dst) + void absDouble(FPRegisterID, FPRegisterID) { - m_assembler.absd(dst, src); + RELEASE_ASSERT_NOT_REACHED(); + } + + NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + + NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); } ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) @@ -693,7 +726,22 @@ public: } } - void load8Signed(BaseIndex address, RegisterID dest) + 
ALWAYS_INLINE void load8(AbsoluteAddress address, RegisterID dest) + { + load8(address.m_ptr, dest); + } + + void load8(const void* address, RegisterID dest) + { + /* + li addrTemp, address + lbu dest, 0(addrTemp) + */ + move(TrustedImmPtr(address), addrTempRegister); + m_assembler.lbu(dest, addrTempRegister, 0); + } + + void load8SignedExtendTo32(BaseIndex address, RegisterID dest) { if (address.offset >= -32768 && address.offset <= 32767 && !m_fixedWidth) { @@ -768,53 +816,7 @@ public: void load16Unaligned(BaseIndex address, RegisterID dest) { - if (address.offset >= -32768 && address.offset <= 32766 && !m_fixedWidth) { - /* - sll addrtemp, address.index, address.scale - addu addrtemp, addrtemp, address.base - lbu immTemp, address.offset+x(addrtemp) (x=0 for LE, x=1 for BE) - lbu dest, address.offset+x(addrtemp) (x=1 for LE, x=0 for BE) - sll dest, dest, 8 - or dest, dest, immTemp - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); -#if CPU(BIG_ENDIAN) - m_assembler.lbu(immTempRegister, addrTempRegister, address.offset + 1); - m_assembler.lbu(dest, addrTempRegister, address.offset); -#else - m_assembler.lbu(immTempRegister, addrTempRegister, address.offset); - m_assembler.lbu(dest, addrTempRegister, address.offset + 1); -#endif - m_assembler.sll(dest, dest, 8); - m_assembler.orInsn(dest, dest, immTempRegister); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, address.offset >> 16 - ori immTemp, immTemp, address.offset & 0xffff - addu addrTemp, addrTemp, immTemp - lbu immTemp, x(addrtemp) (x=0 for LE, x=1 for BE) - lbu dest, x(addrtemp) (x=1 for LE, x=0 for BE) - sll dest, dest, 8 - or dest, dest, immTemp - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, address.offset >> 16); - m_assembler.ori(immTempRegister, immTempRegister, address.offset); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); -#if CPU(BIG_ENDIAN) - m_assembler.lbu(immTempRegister, addrTempRegister, 1); - m_assembler.lbu(dest, addrTempRegister, 0); -#else - m_assembler.lbu(immTempRegister, addrTempRegister, 0); - m_assembler.lbu(dest, addrTempRegister, 1); -#endif - m_assembler.sll(dest, dest, 8); - m_assembler.orInsn(dest, dest, immTempRegister); - } + load16(address, dest); } void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest) @@ -951,7 +953,7 @@ public: } } - void load16Signed(BaseIndex address, RegisterID dest) + void load16SignedExtendTo32(BaseIndex address, RegisterID dest) { if (address.offset >= -32768 && address.offset <= 32767 && !m_fixedWidth) { @@ -1024,6 +1026,12 @@ public: } } + void store8(RegisterID src, void* address) + { + move(TrustedImmPtr(address), addrTempRegister); + m_assembler.sb(src, addrTempRegister, 0); + } + void store8(TrustedImm32 imm, void* address) { /* @@ -1234,15 +1242,8 @@ public: return false; #endif } - - static bool supportsFloatingPointAbs() - { -#if WTF_MIPS_DOUBLE_FLOAT - return true; -#else - return false; -#endif - } + static bool supportsFloatingPointAbs() { return false; } + static bool supportsFloatingPointRounding() { return false; } // Stack manipulation operations: // @@ -1258,6 +1259,13 @@ public: m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 4); } + void popPair(RegisterID dest1, RegisterID dest2) + { + m_assembler.lw(dest1, 
MIPSRegisters::sp, 0); + m_assembler.lw(dest2, MIPSRegisters::sp, 4); + m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 8); + } + void push(RegisterID src) { m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -4); @@ -1276,6 +1284,13 @@ public: push(immTempRegister); } + void pushPair(RegisterID src1, RegisterID src2) + { + m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -8); + m_assembler.sw(src2, MIPSRegisters::sp, 4); + m_assembler.sw(src1, MIPSRegisters::sp, 0); + } + // Register move operations: // // Move values in registers. @@ -1348,6 +1363,15 @@ public: return branch32(cond, dataTempRegister, immTempRegister); } + Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) + { + // Make sure the immediate value is unsigned 8 bits. + ASSERT(!(right.m_value & 0xFFFFFF00)); + load8(left, dataTempRegister); + move(right, immTempRegister); + return branch32(cond, dataTempRegister, immTempRegister); + } + void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest) { // Make sure the immediate value is unsigned 8 bits. @@ -1500,6 +1524,12 @@ public: return branchTest32(cond, dataTempRegister, mask); } + Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) + { + load8(address, dataTempRegister); + return branchTest32(cond, dataTempRegister, mask); + } + Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) { load8(address, dataTempRegister); @@ -1844,7 +1874,7 @@ public: return Jump(); } - Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest) + Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) { move(imm, immTempRegister); return branchMul32(cond, immTempRegister, src, dest); @@ -2000,6 +2030,16 @@ public: return Call(m_assembler.label(), Call::LinkableNear); } + Call nearTailCall() + { + m_assembler.nop(); + m_assembler.nop(); + m_assembler.beq(MIPSRegisters::zero, MIPSRegisters::zero, 0); + m_assembler.nop(); + insertRelaxationWords(); + return Call(m_assembler.label(), Call::LinkableNearTail); + } + Call call() { m_assembler.lui(MIPSRegisters::t9, 0); @@ -2145,6 +2185,16 @@ public: return temp; } + Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + m_fixedWidth = true; + load32(left, dataTempRegister); + dataLabel = moveWithPatch(initialRightValue, immTempRegister); + Jump temp = branch32(cond, dataTempRegister, immTempRegister); + m_fixedWidth = false; + return temp; + } + DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) { m_fixedWidth = true; @@ -2293,7 +2343,7 @@ public: #endif } - void loadDouble(const void* address, FPRegisterID dest) + void loadDouble(TrustedImmPtr address, FPRegisterID dest) { #if WTF_MIPS_ISA(1) /* @@ -2301,7 +2351,7 @@ public: lwc1 dest, 0(addrTemp) lwc1 dest+1, 4(addrTemp) */ - move(TrustedImmPtr(address), addrTempRegister); + move(address, addrTempRegister); m_assembler.lwc1(dest, addrTempRegister, 0); m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4); #else @@ -2309,7 +2359,7 @@ public: li addrTemp, address ldc1 dest, 0(addrTemp) */ - move(TrustedImmPtr(address), addrTempRegister); + move(address, addrTempRegister); m_assembler.ldc1(dest, addrTempRegister, 0); #endif } @@ -2431,14 +2481,14 @@ public: #endif } - void storeDouble(FPRegisterID src, const void* address) + void 
storeDouble(FPRegisterID src, TrustedImmPtr address) { #if WTF_MIPS_ISA(1) - move(TrustedImmPtr(address), addrTempRegister); + move(address, addrTempRegister); m_assembler.swc1(src, addrTempRegister, 0); m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4); #else - move(TrustedImmPtr(address), addrTempRegister); + move(address, addrTempRegister); m_assembler.sdc1(src, addrTempRegister, 0); #endif } @@ -2449,6 +2499,11 @@ public: m_assembler.movd(dest, src); } + void moveZeroToDouble(FPRegisterID reg) + { + convertInt32ToDouble(MIPSRegisters::zero, reg); + } + void swapDouble(FPRegisterID fr1, FPRegisterID fr2) { moveDouble(fr1, fpTempRegister); @@ -2474,7 +2529,7 @@ public: void addDouble(AbsoluteAddress address, FPRegisterID dest) { - loadDouble(address.m_ptr, fpTempRegister); + loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister); m_assembler.addd(dest, dest, fpTempRegister); } @@ -2590,6 +2645,8 @@ public: Jump branchEqual(RegisterID rs, RegisterID rt) { + m_assembler.nop(); + m_assembler.nop(); m_assembler.appendJump(); m_assembler.beq(rs, rt, 0); m_assembler.nop(); @@ -2599,6 +2656,8 @@ public: Jump branchNotEqual(RegisterID rs, RegisterID rt) { + m_assembler.nop(); + m_assembler.nop(); m_assembler.appendJump(); m_assembler.bne(rs, rt, 0); m_assembler.nop(); @@ -2756,6 +2815,23 @@ public: m_assembler.nop(); } + void memoryFence() + { + m_assembler.sync(); + } + + void abortWithReason(AbortReason reason) + { + move(TrustedImm32(reason), dataTempRegister); + breakpoint(); + } + + void abortWithReason(AbortReason reason, intptr_t misc) + { + move(TrustedImm32(misc), immTempRegister); + abortWithReason(reason); + } + static FunctionPtr readCallTarget(CodeLocationCall call) { return FunctionPtr(reinterpret_cast<void(*)()>(MIPSAssembler::readCallTarget(call.dataLocation()))); @@ -2773,6 +2849,13 @@ public: } static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; } + static bool canJumpReplacePatchableBranch32WithPatch() { return false; } + + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32) + { + UNREACHABLE_FOR_PLATFORM(); + return CodeLocationLabel(); + } static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) { @@ -2790,23 +2873,14 @@ public: return CodeLocationLabel(); } - static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue) + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t) { UNREACHABLE_FOR_PLATFORM(); } - -private: - // If m_fixedWidth is true, we will generate a fixed number of instructions. - // Otherwise, we can emit any number of instructions. - bool m_fixedWidth; - - friend class LinkBuffer; - friend class RepatchBuffer; - - static void linkCall(void* code, Call call, FunctionPtr function) + static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*) { - MIPSAssembler::linkCall(code, call.m_label, function.value()); + UNREACHABLE_FOR_PLATFORM(); } static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) @@ -2819,6 +2893,21 @@ private: MIPSAssembler::relinkCall(call.dataLocation(), destination.executableAddress()); } +private: + // If m_fixedWidth is true, we will generate a fixed number of instructions. + // Otherwise, we can emit any number of instructions. 
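    // For illustration: patchable sequences depend on this flag to keep their emitted
    // instruction count stable. branch32WithPatch above, for example, brackets its
    // code with the flag:
    //
    //     m_fixedWidth = true;
    //     load32(left, dataTempRegister);
    //     dataLabel = moveWithPatch(initialRightValue, immTempRegister);
    //     Jump temp = branch32(cond, dataTempRegister, immTempRegister);
    //     m_fixedWidth = false;
    //
    // so the DataLabel32 recorded by moveWithPatch sits at a fixed offset and can be
    // repatched in place later.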
+ bool m_fixedWidth; + + friend class LinkBuffer; + + static void linkCall(void* code, Call call, FunctionPtr function) + { + if (call.isFlagSet(Call::Tail)) + MIPSAssembler::linkJump(code, call.m_label, function.value()); + else + MIPSAssembler::linkCall(code, call.m_label, function.value()); + } + }; } diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.cpp new file mode 100644 index 000000000..c6c175752 --- /dev/null +++ b/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.cpp @@ -0,0 +1,216 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "MacroAssemblerPrinter.h" + +#if ENABLE(MASM_PROBE) + +#include "MacroAssembler.h" + +namespace JSC { + +using CPUState = MacroAssembler::CPUState; +using ProbeContext = MacroAssembler::ProbeContext; +using RegisterID = MacroAssembler::RegisterID; +using FPRegisterID = MacroAssembler::FPRegisterID; + +static void printIndent(int indentation) +{ + for (; indentation > 0; indentation--) + dataLog(" "); +} + +#define INDENT printIndent(indentation) + +void printCPU(CPUState& cpu, int indentation) +{ + INDENT, dataLog("cpu: {\n"); + printCPURegisters(cpu, indentation + 1); + INDENT, dataLog("}\n"); +} + +void printCPURegisters(CPUState& cpu, int indentation) +{ +#if USE(JSVALUE32_64) + #define INTPTR_HEX_VALUE_FORMAT "0x%08lx" +#else + #define INTPTR_HEX_VALUE_FORMAT "0x%016lx" +#endif + + #define PRINT_GPREGISTER(_type, _regName) { \ + intptr_t value = reinterpret_cast<intptr_t>(cpu._regName); \ + INDENT, dataLogF("%6s: " INTPTR_HEX_VALUE_FORMAT " %ld\n", #_regName, value, value) ; \ + } + FOR_EACH_CPU_GPREGISTER(PRINT_GPREGISTER) + FOR_EACH_CPU_SPECIAL_REGISTER(PRINT_GPREGISTER) + #undef PRINT_GPREGISTER + #undef INTPTR_HEX_VALUE_FORMAT + + #define PRINT_FPREGISTER(_type, _regName) { \ + uint64_t* u = reinterpret_cast<uint64_t*>(&cpu._regName); \ + double* d = reinterpret_cast<double*>(&cpu._regName); \ + INDENT, dataLogF("%6s: 0x%016llx %.13g\n", #_regName, *u, *d); \ + } + FOR_EACH_CPU_FPREGISTER(PRINT_FPREGISTER) + #undef PRINT_FPREGISTER +} + +static void printPC(CPUState& cpu) +{ + union { + void* voidPtr; + intptr_t intptrValue; + } u; +#if CPU(X86) || CPU(X86_64) + u.voidPtr = cpu.eip; +#elif CPU(ARM_TRADITIONAL) || CPU(ARM_THUMB2) || CPU(ARM64) + u.voidPtr = cpu.pc; +#else +#error "Unsupported CPU" +#endif + dataLogF("pc:<%p %ld>", u.voidPtr, u.intptrValue); +} + +void printRegister(CPUState& cpu, RegisterID regID) +{ + const char* name = CPUState::gprName(regID); + union { + void* voidPtr; + intptr_t intptrValue; + } u; + u.voidPtr = cpu.gpr(regID); + dataLogF("%s:<%p %ld>", name, u.voidPtr, u.intptrValue); +} + +void printRegister(CPUState& cpu, FPRegisterID regID) +{ + const char* name = CPUState::fprName(regID); + union { + double doubleValue; + uint64_t uint64Value; + } u; + u.doubleValue = cpu.fpr(regID); + dataLogF("%s:<0x%016llx %.13g>", name, u.uint64Value, u.doubleValue); +} + +void printMemory(CPUState& cpu, const Memory& memory) +{ + uint8_t* ptr = nullptr; + switch (memory.addressType) { + case Memory::AddressType::Address: { + ptr = reinterpret_cast<uint8_t*>(cpu.gpr(memory.u.address.base)); + ptr += memory.u.address.offset; + break; + } + case Memory::AddressType::AbsoluteAddress: { + ptr = reinterpret_cast<uint8_t*>(const_cast<void*>(memory.u.absoluteAddress.m_ptr)); + break; + } + } + + if (memory.dumpStyle == Memory::SingleWordDump) { + if (memory.numBytes == sizeof(int8_t)) { + auto p = reinterpret_cast<int8_t*>(ptr); + dataLogF("%p:<0x%02x %d>", p, *p, *p); + return; + } + if (memory.numBytes == sizeof(int16_t)) { + auto p = reinterpret_cast<int16_t*>(ptr); + dataLogF("%p:<0x%04x %d>", p, *p, *p); + return; + } + if (memory.numBytes == sizeof(int32_t)) { + auto p = reinterpret_cast<int32_t*>(ptr); + dataLogF("%p:<0x%08x %d>", p, *p, *p); + return; + } + if (memory.numBytes == sizeof(int64_t)) { + auto p = reinterpret_cast<int64_t*>(ptr); + dataLogF("%p:<0x%016llx %lld>", p, *p, *p); + return; + } + // Else, unknown word size. Fall thru and dump in the generic way. 
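        // For illustration (hypothetical address and byte values), the generic dump
        // below formats rows of 16 bytes in 4-byte groupings, so a 20-byte dump would
        // look roughly like:
        //
        //     0x7fff5fbff800: 00010203 04050607 08090a0b 0c0d0e0f
        //     0x7fff5fbff810: 10111213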
+ } + + // Generic dump: dump rows of 16 bytes in 4 byte groupings. + size_t numBytes = memory.numBytes; + for (size_t i = 0; i < numBytes; i++) { + if (!(i % 16)) + dataLogF("%p: ", &ptr[i]); + else if (!(i % 4)) + dataLog(" "); + + dataLogF("%02x", ptr[i]); + + if (i % 16 == 15) + dataLog("\n"); + } + if (numBytes % 16 < 15) + dataLog("\n"); +} + +void MacroAssemblerPrinter::printCallback(ProbeContext* context) +{ + typedef PrintArg Arg; + PrintArgsList& argsList = + *reinterpret_cast<PrintArgsList*>(context->arg1); + for (size_t i = 0; i < argsList.size(); i++) { + auto& arg = argsList[i]; + switch (arg.type) { + case Arg::Type::AllRegisters: + printCPU(context->cpu, 1); + break; + case Arg::Type::PCRegister: + printPC(context->cpu); + break; + case Arg::Type::RegisterID: + printRegister(context->cpu, arg.u.gpRegisterID); + break; + case Arg::Type::FPRegisterID: + printRegister(context->cpu, arg.u.fpRegisterID); + break; + case Arg::Type::Memory: + printMemory(context->cpu, arg.u.memory); + break; + case Arg::Type::ConstCharPtr: + dataLog(arg.u.constCharPtr); + break; + case Arg::Type::ConstVoidPtr: + dataLogF("%p", arg.u.constVoidPtr); + break; + case Arg::Type::IntptrValue: + dataLog(arg.u.intptrValue); + break; + case Arg::Type::UintptrValue: + dataLog(arg.u.uintptrValue); + break; + } + } +} + +} // namespace JSC + +#endif // ENABLE(MASM_PROBE) diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.h b/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.h new file mode 100644 index 000000000..e25a6ea98 --- /dev/null +++ b/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.h @@ -0,0 +1,305 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef MacroAssemblerPrinter_h +#define MacroAssemblerPrinter_h + +#if ENABLE(MASM_PROBE) + +#include "MacroAssembler.h" + +namespace JSC { + +// What is MacroAssembler::print()? +// =============================== +// The MacroAsssembler::print() makes it easy to add print logging +// from JIT compiled code, and can be used to print all types of values +// at runtime e.g. CPU register values being operated on by the compiled +// code. 
+// +// print() is built on top of MacroAsssembler::probe(), and hence +// inserting logging in JIT compiled code will not perturb register values. +// The only register value that is perturbed is the PC (program counter) +// since there is now more compiled code to do the printing. +// +// How to use the MacroAssembler print()? +// ===================================== +// 1. #include "MacroAssemblerPrinter.h" in the JIT file where you want to use print(). +// +// 2. Add print() calls like these in your JIT code: +// +// jit.print("Hello world\n"); // Emits code to print the string. +// +// CodeBlock* cb = ...; +// jit.print(cb, "\n"); // Emits code to print the pointer value. +// +// RegisterID regID = ...; +// jit.print(regID, "\n"); // Emits code to print the register value (not the id). +// +// // Emits code to print all registers. Unlike other items, this prints +// // multiple lines as follows: +// // cpu { +// // eax: 0x123456789 +// // ebx: 0x000000abc +// // ... +// // } +// jit.print(AllRegisters()); +// +// jit.print(MemWord<uint8_t>(regID), "\n"); // Emits code to print a byte pointed to by the register. +// jit.print(MemWord<uint32_t>(regID), "\n"); // Emits code to print a 32-bit word pointed to by the register. +// +// jit.print(MemWord<uint8_t>(Address(regID, 23), "\n"); // Emits code to print a byte at the address. +// jit.print(MemWord<intptr_t>(AbsoluteAddress(&cb), "\n"); // Emits code to print an intptr_t sized word at the address. +// +// jit.print(Memory(reg, 100), "\n"); // Emits code to print a 100 bytes at the address pointed by the register. +// jit.print(Memory(Address(reg, 4), 100), "\n"); // Emits code to print a 100 bytes at the address. +// +// // Print multiple things at once. This incurs the probe overhead only once +// // to print all the items. +// jit.print("cb:", cb, " regID:", regID, " cpu:\n", AllRegisters()); +// +// The type of values that can be printed is encapsulated in the PrintArg struct below. +// +// Note: print() does not automatically insert a '\n' at the end of the line. +// If you want a '\n', you'll have to add it explicitly (as in the examples above). + + +// This is a marker type only used with MacroAssemblerPrinter::print(). +// See MacroAssemblerPrinter::print() below for details. 
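// A consolidated usage illustration (hypothetical helper; the names emitStoreLogging,
// jit, scratchGPR, and cb are assumptions, not part of this header): the print() calls
// shown in the examples above might be combined as:
//
//     static void emitStoreLogging(MacroAssembler& jit, MacroAssembler::RegisterID scratchGPR,
//         CodeBlock* cb)
//     {
//         jit.print("cb:", cb, " scratch:", scratchGPR, "\n");
//         jit.print(MemWord<int32_t>(MacroAssembler::Address(scratchGPR, 0)), "\n");
//         jit.print(AllRegisters());
//     }
//
// Grouping arguments into a single print() call keeps the cost to one probe for all
// the items, as noted above.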
+struct AllRegisters { }; +struct PCRegister { }; + +struct Memory { + using Address = MacroAssembler::Address; + using AbsoluteAddress = MacroAssembler::AbsoluteAddress; + using RegisterID = MacroAssembler::RegisterID; + + enum class AddressType { + Address, + AbsoluteAddress, + }; + + enum DumpStyle { + SingleWordDump, + GenericDump, + }; + + Memory(RegisterID& reg, size_t bytes, DumpStyle style = GenericDump) + : addressType(AddressType::Address) + , dumpStyle(style) + , numBytes(bytes) + { + u.address = Address(reg, 0); + } + + Memory(const Address& address, size_t bytes, DumpStyle style = GenericDump) + : addressType(AddressType::Address) + , dumpStyle(style) + , numBytes(bytes) + { + u.address = address; + } + + Memory(const AbsoluteAddress& address, size_t bytes, DumpStyle style = GenericDump) + : addressType(AddressType::AbsoluteAddress) + , dumpStyle(style) + , numBytes(bytes) + { + u.absoluteAddress = address; + } + + AddressType addressType; + DumpStyle dumpStyle; + size_t numBytes; + union UnionedAddress { + UnionedAddress() { } + + Address address; + AbsoluteAddress absoluteAddress; + } u; +}; + +template <typename IntType> +struct MemWord : public Memory { + MemWord(RegisterID& reg) + : Memory(reg, sizeof(IntType), Memory::SingleWordDump) + { } + + MemWord(const Address& address) + : Memory(address, sizeof(IntType), Memory::SingleWordDump) + { } + + MemWord(const AbsoluteAddress& address) + : Memory(address, sizeof(IntType), Memory::SingleWordDump) + { } +}; + + +class MacroAssemblerPrinter { + using CPUState = MacroAssembler::CPUState; + using ProbeContext = MacroAssembler::ProbeContext; + using RegisterID = MacroAssembler::RegisterID; + using FPRegisterID = MacroAssembler::FPRegisterID; + +public: + template<typename... Arguments> + static void print(MacroAssembler* masm, Arguments... args) + { + auto argsList = std::make_unique<PrintArgsList>(); + appendPrintArg(argsList.get(), args...); + masm->probe(printCallback, argsList.release(), 0); + } + +private: + struct PrintArg { + + enum class Type { + AllRegisters, + PCRegister, + RegisterID, + FPRegisterID, + Memory, + ConstCharPtr, + ConstVoidPtr, + IntptrValue, + UintptrValue, + }; + + PrintArg(AllRegisters&) + : type(Type::AllRegisters) + { + } + + PrintArg(PCRegister&) + : type(Type::PCRegister) + { + } + + PrintArg(RegisterID regID) + : type(Type::RegisterID) + { + u.gpRegisterID = regID; + } + + PrintArg(FPRegisterID regID) + : type(Type::FPRegisterID) + { + u.fpRegisterID = regID; + } + + PrintArg(const Memory& memory) + : type(Type::Memory) + { + u.memory = memory; + } + + PrintArg(const char* ptr) + : type(Type::ConstCharPtr) + { + u.constCharPtr = ptr; + } + + PrintArg(const void* ptr) + : type(Type::ConstVoidPtr) + { + u.constVoidPtr = ptr; + } + + PrintArg(int value) + : type(Type::IntptrValue) + { + u.intptrValue = value; + } + + PrintArg(unsigned value) + : type(Type::UintptrValue) + { + u.intptrValue = value; + } + + PrintArg(intptr_t value) + : type(Type::IntptrValue) + { + u.intptrValue = value; + } + + PrintArg(uintptr_t value) + : type(Type::UintptrValue) + { + u.uintptrValue = value; + } + + Type type; + union Value { + Value() { } + + RegisterID gpRegisterID; + FPRegisterID fpRegisterID; + Memory memory; + const char* constCharPtr; + const void* constVoidPtr; + intptr_t intptrValue; + uintptr_t uintptrValue; + } u; + }; + + typedef Vector<PrintArg> PrintArgsList; + + template<typename FirstArg, typename... Arguments> + static void appendPrintArg(PrintArgsList* argsList, FirstArg& firstArg, Arguments... 
otherArgs) + { + argsList->append(PrintArg(firstArg)); + appendPrintArg(argsList, otherArgs...); + } + + static void appendPrintArg(PrintArgsList*) { } + +private: + static void printCallback(ProbeContext*); +}; + +template<typename... Arguments> +void MacroAssembler::print(Arguments... args) +{ + MacroAssemblerPrinter::print(this, args...); +} + + +// These printers will print a block of information. That block may be +// indented with the specified indentation. +void printCPU(MacroAssembler::CPUState&, int indentation = 0); +void printCPURegisters(MacroAssembler::CPUState&, int indentation = 0); + +// These printers will print the specified information in line in the +// print stream. Hence, no indentation will be applied. +void printRegister(MacroAssembler::CPUState&, MacroAssembler::RegisterID); +void printRegister(MacroAssembler::CPUState&, MacroAssembler::FPRegisterID); +void printMemory(MacroAssembler::CPUState&, const Memory&); + +} // namespace JSC + +#endif // ENABLE(MASM_PROBE) + +#endif // MacroAssemblerPrinter_h diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h b/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h index a65614b92..75756095b 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h @@ -1,7 +1,7 @@ /* * Copyright (C) 2013 Cisco Systems, Inc. All rights reserved. * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved. - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -36,13 +36,14 @@ namespace JSC { -class MacroAssemblerSH4 : public AbstractMacroAssembler<SH4Assembler> { +class MacroAssemblerSH4 : public AbstractMacroAssembler<SH4Assembler, MacroAssemblerSH4> { public: typedef SH4Assembler::FPRegisterID FPRegisterID; static const Scale ScalePtr = TimesFour; static const FPRegisterID fscratch = SH4Registers::dr10; static const RegisterID stackPointerRegister = SH4Registers::sp; + static const RegisterID framePointerRegister = SH4Registers::fp; static const RegisterID linkRegister = SH4Registers::pr; static const RegisterID scratchReg3 = SH4Registers::r13; @@ -699,19 +700,31 @@ public: releaseScratch(scr); } + void load8(AbsoluteAddress address, RegisterID dest) + { + move(TrustedImmPtr(address.m_ptr), dest); + m_assembler.movbMemReg(dest, dest); + m_assembler.extub(dest, dest); + } + + void load8(const void* address, RegisterID dest) + { + load8(AbsoluteAddress(address), dest); + } + void load8PostInc(RegisterID base, RegisterID dest) { m_assembler.movbMemRegIn(base, dest); m_assembler.extub(dest, dest); } - void load8Signed(BaseIndex address, RegisterID dest) + void load8SignedExtendTo32(BaseIndex address, RegisterID dest) { RegisterID scr = claimScratch(); move(address.index, scr); lshift32(TrustedImm32(address.scale), scr); add32(address.base, scr); - load8Signed(scr, address.offset, dest); + load8SignedExtendTo32(scr, address.offset, dest); releaseScratch(scr); } @@ -757,7 +770,7 @@ public: releaseScratch(scr); } - void load8Signed(RegisterID base, int offset, RegisterID dest) + void load8SignedExtendTo32(RegisterID base, int offset, RegisterID dest) { if (!offset) { m_assembler.movbMemReg(base, dest); @@ -785,7 +798,7 @@ public: void load8(RegisterID base, int offset, RegisterID dest) { - load8Signed(base, offset, dest); + load8SignedExtendTo32(base, offset, dest); 
m_assembler.extub(dest, dest); } @@ -845,14 +858,14 @@ public: m_assembler.extuw(dest, dest); } - void load16Signed(RegisterID src, RegisterID dest) + void load16SignedExtendTo32(RegisterID src, RegisterID dest) { m_assembler.movwMemReg(src, dest); } void load16(BaseIndex address, RegisterID dest) { - load16Signed(address, dest); + load16SignedExtendTo32(address, dest); m_assembler.extuw(dest, dest); } @@ -862,7 +875,7 @@ public: m_assembler.extuw(dest, dest); } - void load16Signed(BaseIndex address, RegisterID dest) + void load16SignedExtendTo32(BaseIndex address, RegisterID dest) { RegisterID scr = claimScratch(); @@ -874,7 +887,7 @@ public: m_assembler.movwR0mr(scr, dest); else { add32(address.base, scr); - load16Signed(scr, dest); + load16SignedExtendTo32(scr, dest); } releaseScratch(scr); @@ -898,12 +911,34 @@ public: releaseScratch(scr); } + void store8(RegisterID src, void* address) + { + RegisterID destptr = claimScratch(); + move(TrustedImmPtr(address), destptr); + m_assembler.movbRegMem(src, destptr); + releaseScratch(destptr); + } + void store8(TrustedImm32 imm, void* address) { + ASSERT((imm.m_value >= -128) && (imm.m_value <= 127)); + RegisterID dstptr = claimScratch(); + move(TrustedImmPtr(address), dstptr); RegisterID srcval = claimScratch(); + move(imm, srcval); + m_assembler.movbRegMem(srcval, dstptr); + releaseScratch(dstptr); + releaseScratch(srcval); + } + + void store8(TrustedImm32 imm, Address address) + { + ASSERT((imm.m_value >= -128) && (imm.m_value <= 127)); RegisterID dstptr = claimScratch(); + move(address.base, dstptr); + add32(TrustedImm32(address.offset), dstptr); + RegisterID srcval = claimScratch(); move(imm, srcval); - m_assembler.loadConstant(reinterpret_cast<uint32_t>(address), dstptr); m_assembler.movbRegMem(srcval, dstptr); releaseScratch(dstptr); releaseScratch(srcval); @@ -1056,6 +1091,7 @@ public: static bool supportsFloatingPointTruncate() { return true; } static bool supportsFloatingPointSqrt() { return true; } static bool supportsFloatingPointAbs() { return true; } + static bool supportsFloatingPointRounding() { return false; } void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2) { @@ -1133,10 +1169,10 @@ public: releaseScratch(scr); } - void loadDouble(const void* address, FPRegisterID dest) + void loadDouble(TrustedImmPtr address, FPRegisterID dest) { RegisterID scr = claimScratch(); - move(TrustedImmPtr(address), scr); + move(address, scr); m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1)); m_assembler.fmovsReadrm(scr, dest); releaseScratch(scr); @@ -1182,10 +1218,10 @@ public: } } - void storeDouble(FPRegisterID src, const void* address) + void storeDouble(FPRegisterID src, TrustedImmPtr address) { RegisterID scr = claimScratch(); - m_assembler.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address)) + 8, scr); + m_assembler.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address.m_value)) + 8, scr); m_assembler.fmovsWriterndec(src, scr); m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr); releaseScratch(scr); @@ -1198,7 +1234,7 @@ public: void addDouble(AbsoluteAddress address, FPRegisterID dest) { - loadDouble(address.m_ptr, fscratch); + loadDouble(TrustedImmPtr(address.m_ptr), fscratch); addDouble(fscratch, dest); } @@ -1539,6 +1575,18 @@ public: m_assembler.dabs(dest); } + NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + + NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID) + { + 
ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) { RegisterID addressTempRegister = claimScratch(); @@ -1548,6 +1596,15 @@ public: return jmp; } + Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) + { + RegisterID addressTempRegister = claimScratch(); + load8(address, addressTempRegister); + Jump jmp = branchTest32(cond, addressTempRegister, mask); + releaseScratch(addressTempRegister); + return jmp; + } + Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) { RegisterID addressTempRegister = claimScratch(); @@ -1577,6 +1634,15 @@ public: return jmp; } + Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) + { + RegisterID addressTempRegister = claimScratch(); + load8(left, addressTempRegister); + Jump jmp = branch32(cond, addressTempRegister, right); + releaseScratch(addressTempRegister); + return jmp; + } + void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest) { RegisterID addressTempRegister = claimScratch(); @@ -1693,6 +1759,14 @@ public: return dataLabel; } + DataLabel32 moveWithPatch(TrustedImm32 initialValue, RegisterID dest) + { + m_assembler.ensureSpace(m_assembler.maxInstructionSize, sizeof(uint32_t)); + DataLabel32 dataLabel(this); + m_assembler.loadConstantUnReusable(static_cast<uint32_t>(initialValue.m_value), dest); + return dataLabel; + } + void move(RegisterID src, RegisterID dest) { if (src != dest) @@ -1761,6 +1835,26 @@ public: m_assembler.movImm8(1, dest); } + void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) + { + ASSERT((cond == Zero) || (cond == NonZero)); + + load32(address, dest); + if (mask.m_value == -1) + compare32(0, dest, static_cast<RelationalCondition>(cond)); + else + testlImm(mask.m_value, dest); + if (cond != NonZero) { + m_assembler.movt(dest); + return; + } + + m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4); + m_assembler.movImm8(0, dest); + m_assembler.branch(BT_OPCODE, 0); + m_assembler.movImm8(1, dest); + } + void loadPtrLinkReg(ImplicitAddress address) { RegisterID scr = claimScratch(); @@ -1959,7 +2053,7 @@ public: ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); if (cond == Overflow) - return branchMul32(cond, TrustedImm32(-1), srcDest, srcDest); + return branchMul32(cond, srcDest, TrustedImm32(-1), srcDest); neg32(srcDest); @@ -2163,7 +2257,7 @@ public: return (cond == NonZero) ? 
branchFalse() : branchTrue(); } - Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest) + Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) { ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); @@ -2322,6 +2416,11 @@ public: return Call(m_assembler.call(), Call::Linkable); } + Call nearTailCall() + { + return Call(m_assembler.jump(), Call::LinkableNearTail); + } + Call nearCall() { return Call(m_assembler.call(), Call::LinkableNear); @@ -2332,12 +2431,14 @@ public: return Call(m_assembler.call(target), Call::None); } - void call(Address address, RegisterID target) + void call(Address address) { + RegisterID target = claimScratch(); load32(address.base, address.offset, target); m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2); m_assembler.branch(JSR_OPCODE, target); m_assembler.nop(); + releaseScratch(target); } void breakpoint() @@ -2380,6 +2481,23 @@ public: return branchTrue(); } + Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + RegisterID scr = claimScratch(); + + m_assembler.loadConstant(left.offset, scr); + m_assembler.addlRegReg(left.base, scr); + m_assembler.movlMemReg(scr, scr); + RegisterID scr1 = claimScratch(); + m_assembler.ensureSpace(m_assembler.maxInstructionSize + 10, 2 * sizeof(uint32_t)); + dataLabel = moveWithPatch(initialRightValue, scr1); + m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond)); + releaseScratch(scr); + releaseScratch(scr1); + + return (cond == NotEqual) ? branchFalse() : branchTrue(); + } + void ret() { m_assembler.ret(); @@ -2424,6 +2542,23 @@ public: m_assembler.nop(); } + void memoryFence() + { + m_assembler.synco(); + } + + void abortWithReason(AbortReason reason) + { + move(TrustedImm32(reason), SH4Registers::r0); + breakpoint(); + } + + void abortWithReason(AbortReason reason, intptr_t misc) + { + move(TrustedImm32(misc), SH4Registers::r1); + abortWithReason(reason); + } + static FunctionPtr readCallTarget(CodeLocationCall call) { return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call.dataLocation()))); @@ -2441,6 +2576,8 @@ public: static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; } + static bool canJumpReplacePatchableBranch32WithPatch() { return false; } + static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) { return label.labelAtOffset(0); @@ -2457,7 +2594,18 @@ public: return CodeLocationLabel(); } - static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue) + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32) + { + UNREACHABLE_FOR_PLATFORM(); + return CodeLocationLabel(); + } + + static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*) + { + UNREACHABLE_FOR_PLATFORM(); + } + + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t) { UNREACHABLE_FOR_PLATFORM(); } @@ -2474,11 +2622,13 @@ protected: } private: friend class LinkBuffer; - friend class RepatchBuffer; static void linkCall(void* code, Call call, FunctionPtr function) { - SH4Assembler::linkCall(code, call.m_label, function.value()); + if (call.isFlagSet(Call::Tail)) + SH4Assembler::linkJump(code, call.m_label, function.value()); + else + SH4Assembler::linkCall(code, call.m_label, 
function.value()); } static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h index 27a030edf..f05c8cec9 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -43,6 +43,7 @@ public: using MacroAssemblerX86Common::sub32; using MacroAssemblerX86Common::or32; using MacroAssemblerX86Common::load32; + using MacroAssemblerX86Common::load8; using MacroAssemblerX86Common::store32; using MacroAssemblerX86Common::store8; using MacroAssemblerX86Common::branch32; @@ -52,6 +53,7 @@ public: using MacroAssemblerX86Common::loadDouble; using MacroAssemblerX86Common::storeDouble; using MacroAssemblerX86Common::convertInt32ToDouble; + using MacroAssemblerX86Common::branch8; using MacroAssemblerX86Common::branchTest8; void add32(TrustedImm32 imm, RegisterID src, RegisterID dest) @@ -99,6 +101,23 @@ public: { m_assembler.movl_mr(address, dest); } + + void load8(const void* address, RegisterID dest) + { + m_assembler.movzbl_mr(address, dest); + } + + void abortWithReason(AbortReason reason) + { + move(TrustedImm32(reason), X86Registers::eax); + breakpoint(); + } + + void abortWithReason(AbortReason reason, intptr_t misc) + { + move(TrustedImm32(misc), X86Registers::edx); + abortWithReason(reason); + } ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) { @@ -112,10 +131,11 @@ public: m_assembler.addsd_mr(address.m_ptr, dest); } - void storeDouble(FPRegisterID src, const void* address) + void storeDouble(FPRegisterID src, TrustedImmPtr address) { ASSERT(isSSE2Present()); - m_assembler.movsd_rm(src, address); + ASSERT(address.m_value); + m_assembler.movsd_rm(src, address.m_value); } void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest) @@ -132,6 +152,11 @@ public: { m_assembler.movl_rm(src, address); } + + void store8(RegisterID src, void* address) + { + m_assembler.movb_rm(src, address); + } void store8(TrustedImm32 imm, void* address) { @@ -139,18 +164,20 @@ public: m_assembler.movb_i8m(imm.m_value, address); } - // Possibly clobbers src. 
void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2) { - movePackedToInt32(src, dest1); - rshiftPacked(TrustedImm32(32), src); - movePackedToInt32(src, dest2); + ASSERT(isSSE2Present()); + m_assembler.pextrw_irr(3, src, dest1); + m_assembler.pextrw_irr(2, src, dest2); + lshift32(TrustedImm32(16), dest1); + or32(dest1, dest2); + moveFloatTo32(src, dest1); } void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch) { - moveInt32ToPacked(src1, dest); - moveInt32ToPacked(src2, scratch); + move32ToFloat(src1, dest); + move32ToFloat(src2, scratch); lshiftPacked(TrustedImm32(32), scratch); orPacked(scratch, dest); } @@ -208,6 +235,12 @@ public: return DataLabelPtr(this); } + Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) + { + m_assembler.cmpb_im(right.m_value, left.m_ptr); + return Jump(m_assembler.jCC(x86Condition(cond))); + } + Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) { ASSERT(mask.m_value >= -128 && mask.m_value <= 255); @@ -234,6 +267,14 @@ public: return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + padBeforePatch(); + m_assembler.cmpl_im_force32(initialRightValue.m_value, left.offset, left.base); + dataLabel = DataLabel32(this); + return Jump(m_assembler.jCC(x86Condition(cond))); + } + DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) { padBeforePatch(); @@ -242,7 +283,6 @@ public: } static bool supportsFloatingPoint() { return isSSE2Present(); } - // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate() static bool supportsFloatingPointTruncate() { return isSSE2Present(); } static bool supportsFloatingPointSqrt() { return isSSE2Present(); } static bool supportsFloatingPointAbs() { return isSSE2Present(); } @@ -254,6 +294,7 @@ public: } static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; } + static bool canJumpReplacePatchableBranch32WithPatch() { return true; } static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) { @@ -276,6 +317,17 @@ public: return label.labelAtOffset(-totalBytes); } + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label) + { + const int opcodeBytes = 1; + const int modRMBytes = 1; + const int offsetBytes = 0; + const int immediateBytes = 4; + const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes; + ASSERT(totalBytes >= maxJumpReplacementSize()); + return label.labelAtOffset(-totalBytes); + } + static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue) { X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), reg); @@ -287,13 +339,10 @@ public: X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), 0, address.base); } -private: - friend class LinkBuffer; - friend class RepatchBuffer; - - static void linkCall(void* code, Call call, FunctionPtr function) + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address address, int32_t initialValue) { - X86Assembler::linkCall(code, call.m_label, function.value()); + ASSERT(!address.offset); + 
X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), initialValue, 0, address.base); } static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) @@ -305,6 +354,17 @@ private: { X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); } + +private: + friend class LinkBuffer; + + static void linkCall(void* code, Call call, FunctionPtr function) + { + if (call.isFlagSet(Call::Tail)) + X86Assembler::linkJump(code, call.m_label, function.value()); + else + X86Assembler::linkCall(code, call.m_label, function.value()); + } }; } // namespace JSC diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp new file mode 100644 index 000000000..0cbf3a779 --- /dev/null +++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp @@ -0,0 +1,560 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64)) +#include "MacroAssemblerX86Common.h" + +#include <wtf/InlineASM.h> + +namespace JSC { + +#if ENABLE(MASM_PROBE) + +extern "C" void ctiMasmProbeTrampoline(); + +#if COMPILER(GCC_OR_CLANG) + +// The following are offsets for MacroAssemblerX86Common::ProbeContext fields accessed +// by the ctiMasmProbeTrampoline stub. 
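// As an illustration of the consumer side (hypothetical probe function; its name and
// body are assumptions, not part of this patch), a probe handler receives the
// ProbeContext that the trampoline assembles and may both read and rewrite the saved
// register state:
//
//     static void dumpAndClearEAX(MacroAssemblerX86Common::ProbeContext* context)
//     {
//         dataLogF("eip = %p, eax = %p\n", context->cpu.eip, context->cpu.eax);
//         context->cpu.eax = 0; // written back into %eax when the trampoline returns
//     }
//
// The trampoline copies every register back out of the ProbeContext before returning,
// which is why such modifications take effect in the probed code.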
+ +#if CPU(X86) +#define PTR_SIZE 4 +#else // CPU(X86_64) +#define PTR_SIZE 8 +#endif + +#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE) +#define PROBE_ARG1_OFFSET (1 * PTR_SIZE) +#define PROBE_ARG2_OFFSET (2 * PTR_SIZE) + +#define PROBE_FIRST_GPR_OFFSET (3 * PTR_SIZE) +#define PROBE_CPU_EAX_OFFSET (PROBE_FIRST_GPR_OFFSET + (0 * PTR_SIZE)) +#define PROBE_CPU_ECX_OFFSET (PROBE_FIRST_GPR_OFFSET + (1 * PTR_SIZE)) +#define PROBE_CPU_EDX_OFFSET (PROBE_FIRST_GPR_OFFSET + (2 * PTR_SIZE)) +#define PROBE_CPU_EBX_OFFSET (PROBE_FIRST_GPR_OFFSET + (3 * PTR_SIZE)) +#define PROBE_CPU_ESP_OFFSET (PROBE_FIRST_GPR_OFFSET + (4 * PTR_SIZE)) +#define PROBE_CPU_EBP_OFFSET (PROBE_FIRST_GPR_OFFSET + (5 * PTR_SIZE)) +#define PROBE_CPU_ESI_OFFSET (PROBE_FIRST_GPR_OFFSET + (6 * PTR_SIZE)) +#define PROBE_CPU_EDI_OFFSET (PROBE_FIRST_GPR_OFFSET + (7 * PTR_SIZE)) + +#if CPU(X86) +#define PROBE_FIRST_SPECIAL_OFFSET (PROBE_FIRST_GPR_OFFSET + (8 * PTR_SIZE)) +#else // CPU(X86_64) +#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPR_OFFSET + (8 * PTR_SIZE)) +#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPR_OFFSET + (9 * PTR_SIZE)) +#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPR_OFFSET + (10 * PTR_SIZE)) +#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPR_OFFSET + (11 * PTR_SIZE)) +#define PROBE_CPU_R12_OFFSET (PROBE_FIRST_GPR_OFFSET + (12 * PTR_SIZE)) +#define PROBE_CPU_R13_OFFSET (PROBE_FIRST_GPR_OFFSET + (13 * PTR_SIZE)) +#define PROBE_CPU_R14_OFFSET (PROBE_FIRST_GPR_OFFSET + (14 * PTR_SIZE)) +#define PROBE_CPU_R15_OFFSET (PROBE_FIRST_GPR_OFFSET + (15 * PTR_SIZE)) +#define PROBE_FIRST_SPECIAL_OFFSET (PROBE_FIRST_GPR_OFFSET + (16 * PTR_SIZE)) +#endif // CPU(X86_64) + +#define PROBE_CPU_EIP_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (0 * PTR_SIZE)) +#define PROBE_CPU_EFLAGS_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (1 * PTR_SIZE)) +#define PROBE_FIRST_XMM_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (2 * PTR_SIZE)) + +#define XMM_SIZE 8 +#define PROBE_CPU_XMM0_OFFSET (PROBE_FIRST_XMM_OFFSET + (0 * XMM_SIZE)) +#define PROBE_CPU_XMM1_OFFSET (PROBE_FIRST_XMM_OFFSET + (1 * XMM_SIZE)) +#define PROBE_CPU_XMM2_OFFSET (PROBE_FIRST_XMM_OFFSET + (2 * XMM_SIZE)) +#define PROBE_CPU_XMM3_OFFSET (PROBE_FIRST_XMM_OFFSET + (3 * XMM_SIZE)) +#define PROBE_CPU_XMM4_OFFSET (PROBE_FIRST_XMM_OFFSET + (4 * XMM_SIZE)) +#define PROBE_CPU_XMM5_OFFSET (PROBE_FIRST_XMM_OFFSET + (5 * XMM_SIZE)) +#define PROBE_CPU_XMM6_OFFSET (PROBE_FIRST_XMM_OFFSET + (6 * XMM_SIZE)) +#define PROBE_CPU_XMM7_OFFSET (PROBE_FIRST_XMM_OFFSET + (7 * XMM_SIZE)) + +#if CPU(X86) +#define PROBE_SIZE (PROBE_CPU_XMM7_OFFSET + XMM_SIZE) +#else // CPU(X86_64) +#define PROBE_CPU_XMM8_OFFSET (PROBE_FIRST_XMM_OFFSET + (8 * XMM_SIZE)) +#define PROBE_CPU_XMM9_OFFSET (PROBE_FIRST_XMM_OFFSET + (9 * XMM_SIZE)) +#define PROBE_CPU_XMM10_OFFSET (PROBE_FIRST_XMM_OFFSET + (10 * XMM_SIZE)) +#define PROBE_CPU_XMM11_OFFSET (PROBE_FIRST_XMM_OFFSET + (11 * XMM_SIZE)) +#define PROBE_CPU_XMM12_OFFSET (PROBE_FIRST_XMM_OFFSET + (12 * XMM_SIZE)) +#define PROBE_CPU_XMM13_OFFSET (PROBE_FIRST_XMM_OFFSET + (13 * XMM_SIZE)) +#define PROBE_CPU_XMM14_OFFSET (PROBE_FIRST_XMM_OFFSET + (14 * XMM_SIZE)) +#define PROBE_CPU_XMM15_OFFSET (PROBE_FIRST_XMM_OFFSET + (15 * XMM_SIZE)) +#define PROBE_SIZE (PROBE_CPU_XMM15_OFFSET + XMM_SIZE) +#endif // CPU(X86_64) + +// These ASSERTs remind you that if you change the layout of ProbeContext, +// you need to change ctiMasmProbeTrampoline offsets above to match. 
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerX86Common::ProbeContext, x) +COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eax) == PROBE_CPU_EAX_OFFSET, ProbeContext_cpu_eax_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ecx) == PROBE_CPU_ECX_OFFSET, ProbeContext_cpu_ecx_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.edx) == PROBE_CPU_EDX_OFFSET, ProbeContext_cpu_edx_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ebx) == PROBE_CPU_EBX_OFFSET, ProbeContext_cpu_ebx_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.esp) == PROBE_CPU_ESP_OFFSET, ProbeContext_cpu_esp_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ebp) == PROBE_CPU_EBP_OFFSET, ProbeContext_cpu_ebp_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.esi) == PROBE_CPU_ESI_OFFSET, ProbeContext_cpu_esi_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.edi) == PROBE_CPU_EDI_OFFSET, ProbeContext_cpu_edi_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eip) == PROBE_CPU_EIP_OFFSET, ProbeContext_cpu_eip_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eflags) == PROBE_CPU_EFLAGS_OFFSET, ProbeContext_cpu_eflags_offset_matches_ctiMasmProbeTrampoline); + +#if CPU(X86_64) +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r12) == PROBE_CPU_R12_OFFSET, ProbeContext_cpu_r12_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r13) == PROBE_CPU_R13_OFFSET, ProbeContext_cpu_r13_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r14) == PROBE_CPU_R14_OFFSET, ProbeContext_cpu_r14_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r15) == PROBE_CPU_R15_OFFSET, ProbeContext_cpu_r15_offset_matches_ctiMasmProbeTrampoline); +#endif // CPU(X86_64) + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm0) == PROBE_CPU_XMM0_OFFSET, ProbeContext_cpu_xmm0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm1) == PROBE_CPU_XMM1_OFFSET, ProbeContext_cpu_xmm1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm2) == PROBE_CPU_XMM2_OFFSET, ProbeContext_cpu_xmm2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm3) == PROBE_CPU_XMM3_OFFSET, ProbeContext_cpu_xmm3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm4) == PROBE_CPU_XMM4_OFFSET, ProbeContext_cpu_xmm4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm5) == PROBE_CPU_XMM5_OFFSET, ProbeContext_cpu_xmm5_offset_matches_ctiMasmProbeTrampoline); 
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm6) == PROBE_CPU_XMM6_OFFSET, ProbeContext_cpu_xmm6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm7) == PROBE_CPU_XMM7_OFFSET, ProbeContext_cpu_xmm7_offset_matches_ctiMasmProbeTrampoline); + +#if CPU(X86_64) +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm8) == PROBE_CPU_XMM8_OFFSET, ProbeContext_cpu_xmm8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm9) == PROBE_CPU_XMM9_OFFSET, ProbeContext_cpu_xmm9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm10) == PROBE_CPU_XMM10_OFFSET, ProbeContext_cpu_xmm10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm11) == PROBE_CPU_XMM11_OFFSET, ProbeContext_cpu_xmm11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm12) == PROBE_CPU_XMM12_OFFSET, ProbeContext_cpu_xmm12_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm13) == PROBE_CPU_XMM13_OFFSET, ProbeContext_cpu_xmm13_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm14) == PROBE_CPU_XMM14_OFFSET, ProbeContext_cpu_xmm14_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm15) == PROBE_CPU_XMM15_OFFSET, ProbeContext_cpu_xmm15_offset_matches_ctiMasmProbeTrampoline); +#endif // CPU(X86_64) + +COMPILE_ASSERT(sizeof(MacroAssemblerX86Common::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline); + +#undef PROBE_OFFSETOF + +#if CPU(X86) +asm ( + ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" + HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" + SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" + + "pushfd" "\n" + + // MacroAssemblerX86Common::probe() has already generated code to store some values. + // Together with the eflags pushed above, the top of stack now looks like + // this: + // esp[0 * ptrSize]: eflags + // esp[1 * ptrSize]: return address / saved eip + // esp[2 * ptrSize]: probeFunction + // esp[3 * ptrSize]: arg1 + // esp[4 * ptrSize]: arg2 + // esp[5 * ptrSize]: saved eax + // esp[6 * ptrSize]: saved esp + + "movl %esp, %eax" "\n" + "subl $" STRINGIZE_VALUE_OF(PROBE_SIZE) ", %esp" "\n" + + // The X86_64 ABI specifies that the worse case stack alignment requirement + // is 32 bytes. + "andl $~0x1f, %esp" "\n" + + "movl %ebp, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%esp)" "\n" + "movl %esp, %ebp" "\n" // Save the ProbeContext*. 
+ + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp)" "\n" + "movl %edx, " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%ebp)" "\n" + "movl %ebx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%ebp)" "\n" + "movl %esi, " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%ebp)" "\n" + "movl %edi, " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%ebp)" "\n" + + "movl 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp)" "\n" + "movl 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp)" "\n" + "movl 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%ebp)" "\n" + "movl 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "(%ebp)" "\n" + "movl 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "(%ebp)" "\n" + "movl 5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp)" "\n" + "movl 6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp)" "\n" + + "movq %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp)" "\n" + "movq %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp)" "\n" + "movq %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp)" "\n" + "movq %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp)" "\n" + "movq %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp)" "\n" + "movq %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp)" "\n" + "movq %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp)" "\n" + "movq %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp)" "\n" + + // Reserve stack space for the arg while maintaining the required stack + // pointer 32 byte alignment: + "subl $0x20, %esp" "\n" + "movl %ebp, 0(%esp)" "\n" // the ProbeContext* arg. + + "call *" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%ebp)" "\n" + + // To enable probes to modify register state, we copy all registers + // out of the ProbeContext before returning. + + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%ebp), %edx" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%ebp), %ebx" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%ebp), %esi" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%ebp), %edi" "\n" + + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp), %xmm0" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp), %xmm1" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp), %xmm2" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp), %xmm3" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp), %xmm4" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp), %xmm5" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp), %xmm6" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp), %xmm7" "\n" + + // There are 6 more registers left to restore: + // eax, ecx, ebp, esp, eip, and eflags. + // We need to handle these last few restores carefully because: + // + // 1. We need to push the return address on the stack for ret to use. + // That means we need to write to the stack. + // 2. 
The user probe function may have altered the restore value of esp to + // point to the vicinity of one of the restore values for the remaining + // registers left to be restored. + // That means, for requirement 1, we may end up writing over some of the + // restore values. We can check for this, and first copy the restore + // values to a "safe area" on the stack before commencing with the action + // for requirement 1. + // 3. For requirement 2, we need to ensure that the "safe area" is + // protected from interrupt handlers overwriting it. Hence, the esp needs + // to be adjusted to include the "safe area" before we start copying the + // the restore values. + + "movl %ebp, %eax" "\n" + "addl $" STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) ", %eax" "\n" + "cmpl %eax, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp)" "\n" + "jg " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n" + + // Locate the "safe area" at 2x sizeof(ProbeContext) below where the new + // rsp will be. This time we don't have to 32-byte align it because we're + // not using to store any xmm regs. + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %eax" "\n" + "subl $2 * " STRINGIZE_VALUE_OF(PROBE_SIZE) ", %eax" "\n" + "movl %eax, %esp" "\n" + + "subl $" STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) ", %eax" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%eax)" "\n" + "movl %eax, %ebp" "\n" + + SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %eax" "\n" + "subl $5 * " STRINGIZE_VALUE_OF(PTR_SIZE) ", %eax" "\n" + // At this point, %esp should be < %eax. + + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n" + "movl %eax, %esp" "\n" + + "popfd" "\n" + "popl %eax" "\n" + "popl %ecx" "\n" + "popl %ebp" "\n" + "ret" "\n" +); +#endif // CPU(X86) + +#if CPU(X86_64) +asm ( + ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" + HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" + SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" + + "pushfq" "\n" + + // MacroAssemblerX86Common::probe() has already generated code to store some values. 
+ // Together with the rflags pushed above, the top of stack now looks like + // this: + // rsp[0 * ptrSize]: rflags + // rsp[1 * ptrSize]: return address / saved rip + // rsp[2 * ptrSize]: probeFunction + // rsp[3 * ptrSize]: arg1 + // rsp[4 * ptrSize]: arg2 + // rsp[5 * ptrSize]: saved rax + // rsp[6 * ptrSize]: saved rsp + + "movq %rsp, %rax" "\n" + "subq $" STRINGIZE_VALUE_OF(PROBE_SIZE) ", %rsp" "\n" + + // The X86_64 ABI specifies that the worst case stack alignment requirement + // is 32 bytes. + "andq $~0x1f, %rsp" "\n" + + "movq %rbp, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rsp)" "\n" + "movq %rsp, %rbp" "\n" // Save the ProbeContext*. + + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp)" "\n" + "movq %rdx, " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%rbp)" "\n" + "movq %rbx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%rbp)" "\n" + "movq %rsi, " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%rbp)" "\n" + "movq %rdi, " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%rbp)" "\n" + + "movq 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp)" "\n" + "movq 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp)" "\n" + "movq 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%rbp)" "\n" + "movq 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "(%rbp)" "\n" + "movq 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "(%rbp)" "\n" + "movq 5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp)" "\n" + "movq 6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp)" "\n" + + "movq %r8, " STRINGIZE_VALUE_OF(PROBE_CPU_R8_OFFSET) "(%rbp)" "\n" + "movq %r9, " STRINGIZE_VALUE_OF(PROBE_CPU_R9_OFFSET) "(%rbp)" "\n" + "movq %r10, " STRINGIZE_VALUE_OF(PROBE_CPU_R10_OFFSET) "(%rbp)" "\n" + "movq %r11, " STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET) "(%rbp)" "\n" + "movq %r12, " STRINGIZE_VALUE_OF(PROBE_CPU_R12_OFFSET) "(%rbp)" "\n" + "movq %r13, " STRINGIZE_VALUE_OF(PROBE_CPU_R13_OFFSET) "(%rbp)" "\n" + "movq %r14, " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp)" "\n" + "movq %r15, " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp)" "\n" + + "movq %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp)" "\n" + "movq %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp)" "\n" + "movq %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp)" "\n" + "movq %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp)" "\n" + "movq %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp)" "\n" + "movq %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp)" "\n" + "movq %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp)" "\n" + "movq %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp)" "\n" + "movq %xmm8, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM8_OFFSET) "(%rbp)" "\n" + "movq %xmm9, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM9_OFFSET) "(%rbp)" "\n" + "movq %xmm10, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM10_OFFSET) "(%rbp)" "\n" + "movq %xmm11, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM11_OFFSET) "(%rbp)" "\n" + "movq %xmm12, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM12_OFFSET) "(%rbp)" "\n" + "movq %xmm13, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM13_OFFSET) "(%rbp)"
"\n" + "movq %xmm14, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM14_OFFSET) "(%rbp)" "\n" + "movq %xmm15, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM15_OFFSET) "(%rbp)" "\n" + + "movq %rbp, %rdi" "\n" // the ProbeContext* arg. + "call *" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%rbp)" "\n" + + // To enable probes to modify register state, we copy all registers + // out of the ProbeContext before returning. + + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%rbp), %rdx" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%rbp), %rbx" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%rbp), %rsi" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%rbp), %rdi" "\n" + + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R8_OFFSET) "(%rbp), %r8" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R9_OFFSET) "(%rbp), %r9" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R10_OFFSET) "(%rbp), %r10" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET) "(%rbp), %r11" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R12_OFFSET) "(%rbp), %r12" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R13_OFFSET) "(%rbp), %r13" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp), %r14" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp), %r15" "\n" + + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp), %xmm0" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp), %xmm1" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp), %xmm2" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp), %xmm3" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp), %xmm4" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp), %xmm5" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp), %xmm6" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp), %xmm7" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM8_OFFSET) "(%rbp), %xmm8" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM9_OFFSET) "(%rbp), %xmm9" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM10_OFFSET) "(%rbp), %xmm10" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM11_OFFSET) "(%rbp), %xmm11" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM12_OFFSET) "(%rbp), %xmm12" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM13_OFFSET) "(%rbp), %xmm13" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM14_OFFSET) "(%rbp), %xmm14" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM15_OFFSET) "(%rbp), %xmm15" "\n" + + // There are 6 more registers left to restore: + // rax, rcx, rbp, rsp, rip, and rflags. + // We need to handle these last few restores carefully because: + // + // 1. We need to push the return address on the stack for ret to use + // That means we need to write to the stack. + // 2. The user probe function may have altered the restore value of esp to + // point to the vicinity of one of the restore values for the remaining + // registers left to be restored. + // That means, for requirement 1, we may end up writing over some of the + // restore values. We can check for this, and first copy the restore + // values to a "safe area" on the stack before commencing with the action + // for requirement 1. + // 3. For both requirement 2, we need to ensure that the "safe area" is + // protected from interrupt handlers overwriting it. Hence, the esp needs + // to be adjusted to include the "safe area" before we start copying the + // the restore values. 
+ + "movq %rbp, %rax" "\n" + "addq $" STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) ", %rax" "\n" + "cmpq %rax, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp)" "\n" + "jg " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n" + + // Locate the "safe area" at 2x sizeof(ProbeContext) below where the new + // rsp will be. This time we don't have to 32-byte align it because we're + // not using to store any xmm regs. + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rax" "\n" + "subq $2 * " STRINGIZE_VALUE_OF(PROBE_SIZE) ", %rax" "\n" + "movq %rax, %rsp" "\n" + + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rax)" "\n" + "movq %rax, %rbp" "\n" + + SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rax" "\n" + "subq $5 * " STRINGIZE_VALUE_OF(PTR_SIZE) ", %rax" "\n" + // At this point, %rsp should be < %rax. + + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n" + "movq %rax, %rsp" "\n" + + "popfq" "\n" + "popq %rax" "\n" + "popq %rcx" "\n" + "popq %rbp" "\n" + "ret" "\n" +); +#endif // CPU(X86_64) + +#endif // COMPILER(GCC_OR_CLANG) + +// What code is emitted for the probe? +// ================================== +// We want to keep the size of the emitted probe invocation code as compact as +// possible to minimize the perturbation to the JIT generated code. However, +// we also need to preserve the CPU registers and set up the ProbeContext to be +// passed to the user probe function. +// +// Hence, we do only the minimum here to preserve a scratch register (i.e. rax +// in this case) and the stack pointer (i.e. rsp), and pass the probe arguments. +// We'll let the ctiMasmProbeTrampoline handle the rest of the probe invocation +// work i.e. saving the CPUState (and setting up the ProbeContext), calling the +// user probe function, and restoring the CPUState before returning to JIT +// generated code. +// +// What registers need to be saved? +// =============================== +// The registers are saved for 2 reasons: +// 1. To preserve their state in the JITted code. 
This means that all registers +// that are not callee saved needs to be saved. We also need to save the +// condition code registers because the probe can be inserted between a test +// and a branch. +// 2. To allow the probe to inspect the values of the registers for debugging +// purposes. This means all registers need to be saved. +// +// In summary, save everything. But for reasons stated above, we should do the +// minimum here and let ctiMasmProbeTrampoline do the heavy lifting to save the +// full set. +// +// What values are in the saved registers? +// ====================================== +// Conceptually, the saved registers should contain values as if the probe +// is not present in the JIT generated code. Hence, they should contain values +// that are expected at the start of the instruction immediately following the +// probe. +// +// Specifically, the saved stack pointer register will point to the stack +// position before we push the ProbeContext frame. The saved rip will point to +// the address of the instruction immediately following the probe. + +void MacroAssemblerX86Common::probe(MacroAssemblerX86Common::ProbeFunction function, void* arg1, void* arg2) +{ + push(RegisterID::esp); + push(RegisterID::eax); + move(TrustedImmPtr(arg2), RegisterID::eax); + push(RegisterID::eax); + move(TrustedImmPtr(arg1), RegisterID::eax); + push(RegisterID::eax); + move(TrustedImmPtr(reinterpret_cast<void*>(function)), RegisterID::eax); + push(RegisterID::eax); + move(TrustedImmPtr(reinterpret_cast<void*>(ctiMasmProbeTrampoline)), RegisterID::eax); + call(RegisterID::eax); +} + +#endif // ENABLE(MASM_PROBE) + +#if CPU(X86) && !OS(MAC_OS_X) +MacroAssemblerX86Common::SSE2CheckState MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2; +#endif + +MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_sse4_1CheckState = CPUIDCheckState::NotChecked; +MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_lzcntCheckState = CPUIDCheckState::NotChecked; + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64)) diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h index f171dc408..f502a2551 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2014-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,21 +30,35 @@ #include "X86Assembler.h" #include "AbstractMacroAssembler.h" +#include <wtf/Optional.h> + +#if COMPILER(MSVC) +#include <intrin.h> +#endif namespace JSC { -class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> { -protected: +class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler, MacroAssemblerX86Common> { +public: #if CPU(X86_64) - static const X86Registers::RegisterID scratchRegister = X86Registers::r11; + // Use this directly only if you're not generating code with it. + static const X86Registers::RegisterID s_scratchRegister = X86Registers::r11; + + // Use this when generating code so that we get enforcement of the disallowing of scratch register + // usage. 
+ X86Registers::RegisterID scratchRegister() + { + RELEASE_ASSERT(m_allowScratchRegister); + return s_scratchRegister; + } #endif +protected: static const int DoubleConditionBitInvert = 0x10; static const int DoubleConditionBitSpecial = 0x20; static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial; public: - typedef X86Assembler::FPRegisterID FPRegisterID; typedef X86Assembler::XMMRegisterID XMMRegisterID; static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value) @@ -73,6 +87,7 @@ public: NonZero = X86Assembler::ConditionNE }; + // FIXME: it would be neat to rename this to FloatingPointCondition in every assembler. enum DoubleCondition { // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN. DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial, @@ -94,16 +109,11 @@ public: DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes); static const RegisterID stackPointerRegister = X86Registers::esp; - -#if ENABLE(JIT_CONSTANT_BLINDING) + static const RegisterID framePointerRegister = X86Registers::ebp; + + static bool canBlind() { return true; } static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; } -#if CPU(X86_64) static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; } -#if OS(DARWIN) // On 64-bit systems other than DARWIN uint64_t and uintptr_t are the same type so overload is prohibited. - static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; } -#endif -#endif -#endif // Integer arithmetic operations: // @@ -122,9 +132,37 @@ public: m_assembler.addl_im(imm.m_value, address.offset, address.base); } + void add32(TrustedImm32 imm, BaseIndex address) + { + m_assembler.addl_im(imm.m_value, address.offset, address.base, address.index, address.scale); + } + + void add8(TrustedImm32 imm, Address address) + { + m_assembler.addb_im(imm.m_value, address.offset, address.base); + } + + void add8(TrustedImm32 imm, BaseIndex address) + { + m_assembler.addb_im(imm.m_value, address.offset, address.base, address.index, address.scale); + } + + void add16(TrustedImm32 imm, Address address) + { + m_assembler.addw_im(imm.m_value, address.offset, address.base); + } + + void add16(TrustedImm32 imm, BaseIndex address) + { + m_assembler.addw_im(imm.m_value, address.offset, address.base, address.index, address.scale); + } + void add32(TrustedImm32 imm, RegisterID dest) { - m_assembler.addl_ir(imm.m_value, dest); + if (imm.m_value == 1) + m_assembler.inc_r(dest); + else + m_assembler.addl_ir(imm.m_value, dest); } void add32(Address src, RegisterID dest) @@ -137,10 +175,65 @@ public: m_assembler.addl_rm(src, dest.offset, dest.base); } + void add32(RegisterID src, BaseIndex dest) + { + m_assembler.addl_rm(src, dest.offset, dest.base, dest.index, dest.scale); + } + + void add8(RegisterID src, Address dest) + { + m_assembler.addb_rm(src, dest.offset, dest.base); + } + + void add8(RegisterID src, BaseIndex dest) + { + m_assembler.addb_rm(src, dest.offset, dest.base, dest.index, dest.scale); + } + + void add16(RegisterID src, Address dest) + { + m_assembler.addw_rm(src, dest.offset, dest.base); + } + + void add16(RegisterID src, BaseIndex dest) + { + m_assembler.addw_rm(src, dest.offset, dest.base, dest.index, dest.scale); + } + void add32(TrustedImm32 imm, RegisterID src, RegisterID dest) { + if (!imm.m_value) { + zeroExtend32ToPtr(src, dest); + return; + } + + if (src == dest) { + add32(imm, dest); + 
return; + } + m_assembler.leal_mr(imm.m_value, src, dest); } + + void add32(RegisterID a, RegisterID b, RegisterID dest) + { + x86Lea32(BaseIndex(a, b, TimesOne), dest); + } + + void x86Lea32(BaseIndex index, RegisterID dest) + { + if (!index.scale && !index.offset) { + if (index.base == dest) { + add32(index.index, dest); + return; + } + if (index.index == dest) { + add32(index.base, dest); + return; + } + } + m_assembler.leal_mr(index.offset, index.base, index.index, index.scale, dest); + } void and32(RegisterID src, RegisterID dest) { @@ -174,24 +267,55 @@ public: else if (op1 == dest) and32(op2, dest); else { - move(op2, dest); + move32IfNeeded(op2, dest); and32(op1, dest); } } + void and32(Address op1, RegisterID op2, RegisterID dest) + { + move32IfNeeded(op2, dest); + and32(op1, dest); + } + + void and32(RegisterID op1, Address op2, RegisterID dest) + { + move32IfNeeded(op1, dest); + and32(op2, dest); + } + void and32(TrustedImm32 imm, RegisterID src, RegisterID dest) { - move(src, dest); + move32IfNeeded(src, dest); and32(imm, dest); } - void lshift32(RegisterID shift_amount, RegisterID dest) + void countLeadingZeros32(RegisterID src, RegisterID dst) { - ASSERT(shift_amount != dest); + if (supportsLZCNT()) { + m_assembler.lzcnt_rr(src, dst); + return; + } + m_assembler.bsr_rr(src, dst); + clz32AfterBsr(dst); + } + void countLeadingZeros32(Address src, RegisterID dst) + { + if (supportsLZCNT()) { + m_assembler.lzcnt_mr(src.offset, src.base, dst); + return; + } + m_assembler.bsr_mr(src.offset, src.base, dst); + clz32AfterBsr(dst); + } + + void lshift32(RegisterID shift_amount, RegisterID dest) + { if (shift_amount == X86Registers::ecx) m_assembler.shll_CLr(dest); else { + ASSERT(shift_amount != dest); // On x86 we can only shift by ecx; if asked to shift by another register we'll // need rejig the shift amount into ecx first, and restore the registers afterwards. // If we dest is ecx, then shift the swapped register! 
@@ -205,8 +329,7 @@ public: { ASSERT(shift_amount != dest); - if (src != dest) - move(src, dest); + move32IfNeeded(src, dest); lshift32(shift_amount, dest); } @@ -217,8 +340,7 @@ public: void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - if (src != dest) - move(src, dest); + move32IfNeeded(src, dest); lshift32(imm, dest); } @@ -227,16 +349,62 @@ public: m_assembler.imull_rr(src, dest); } + void mul32(RegisterID src1, RegisterID src2, RegisterID dest) + { + if (src2 == dest) { + m_assembler.imull_rr(src1, dest); + return; + } + move32IfNeeded(src1, dest); + m_assembler.imull_rr(src2, dest); + } + void mul32(Address src, RegisterID dest) { m_assembler.imull_mr(src.offset, src.base, dest); } + + void mul32(Address src1, RegisterID src2, RegisterID dest) + { + move32IfNeeded(src2, dest); + mul32(src1, dest); + } + + void mul32(RegisterID src1, Address src2, RegisterID dest) + { + move32IfNeeded(src1, dest); + mul32(src2, dest); + } void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest) { m_assembler.imull_i32r(src, imm.m_value, dest); } + void x86ConvertToDoubleWord32() + { + m_assembler.cdq(); + } + + void x86ConvertToDoubleWord32(RegisterID eax, RegisterID edx) + { + ASSERT_UNUSED(eax, eax == X86Registers::eax); + ASSERT_UNUSED(edx, edx == X86Registers::edx); + x86ConvertToDoubleWord32(); + } + + void x86Div32(RegisterID denominator) + { + m_assembler.idivl_r(denominator); + } + + void x86Div32(RegisterID eax, RegisterID edx, RegisterID denominator) + { + ASSERT_UNUSED(eax, eax == X86Registers::eax); + ASSERT_UNUSED(edx, edx == X86Registers::edx); + x86Div32(denominator); + } + void neg32(RegisterID srcDest) { m_assembler.negl_r(srcDest); @@ -279,24 +447,36 @@ public: else if (op1 == dest) or32(op2, dest); else { - move(op2, dest); + move32IfNeeded(op2, dest); or32(op1, dest); } } + void or32(Address op1, RegisterID op2, RegisterID dest) + { + move32IfNeeded(op2, dest); + or32(op1, dest); + } + + void or32(RegisterID op1, Address op2, RegisterID dest) + { + move32IfNeeded(op1, dest); + or32(op2, dest); + } + void or32(TrustedImm32 imm, RegisterID src, RegisterID dest) { - move(src, dest); + move32IfNeeded(src, dest); or32(imm, dest); } void rshift32(RegisterID shift_amount, RegisterID dest) { - ASSERT(shift_amount != dest); - if (shift_amount == X86Registers::ecx) m_assembler.sarl_CLr(dest); else { + ASSERT(shift_amount != dest); + // On x86 we can only shift by ecx; if asked to shift by another register we'll // need rejig the shift amount into ecx first, and restore the registers afterwards. // If we dest is ecx, then shift the swapped register! @@ -310,8 +490,7 @@ public: { ASSERT(shift_amount != dest); - if (src != dest) - move(src, dest); + move32IfNeeded(src, dest); rshift32(shift_amount, dest); } @@ -322,18 +501,17 @@ public: void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - if (src != dest) - move(src, dest); + move32IfNeeded(src, dest); rshift32(imm, dest); } void urshift32(RegisterID shift_amount, RegisterID dest) { - ASSERT(shift_amount != dest); - if (shift_amount == X86Registers::ecx) m_assembler.shrl_CLr(dest); else { + ASSERT(shift_amount != dest); + // On x86 we can only shift by ecx; if asked to shift by another register we'll // need rejig the shift amount into ecx first, and restore the registers afterwards. // If we dest is ecx, then shift the swapped register! 
@@ -347,8 +525,7 @@ public: { ASSERT(shift_amount != dest); - if (src != dest) - move(src, dest); + move32IfNeeded(src, dest); urshift32(shift_amount, dest); } @@ -359,8 +536,7 @@ public: void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - if (src != dest) - move(src, dest); + move32IfNeeded(src, dest); urshift32(imm, dest); } @@ -371,7 +547,10 @@ public: void sub32(TrustedImm32 imm, RegisterID dest) { - m_assembler.subl_ir(imm.m_value, dest); + if (imm.m_value == 1) + m_assembler.dec_r(dest); + else + m_assembler.subl_ir(imm.m_value, dest); } void sub32(TrustedImm32 imm, Address address) @@ -405,9 +584,9 @@ public: void xor32(TrustedImm32 imm, RegisterID dest) { if (imm.m_value == -1) - m_assembler.notl_r(dest); + m_assembler.notl_r(dest); else - m_assembler.xorl_ir(imm.m_value, dest); + m_assembler.xorl_ir(imm.m_value, dest); } void xor32(RegisterID src, Address dest) @@ -427,27 +606,64 @@ public: else if (op1 == dest) xor32(op2, dest); else { - move(op2, dest); + move32IfNeeded(op2, dest); xor32(op1, dest); } } + void xor32(Address op1, RegisterID op2, RegisterID dest) + { + move32IfNeeded(op2, dest); + xor32(op1, dest); + } + + void xor32(RegisterID op1, Address op2, RegisterID dest) + { + move32IfNeeded(op1, dest); + xor32(op2, dest); + } + void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest) { - move(src, dest); + move32IfNeeded(src, dest); xor32(imm, dest); } + void not32(RegisterID srcDest) + { + m_assembler.notl_r(srcDest); + } + + void not32(Address dest) + { + m_assembler.notl_m(dest.offset, dest.base); + } + void sqrtDouble(FPRegisterID src, FPRegisterID dst) { m_assembler.sqrtsd_rr(src, dst); } + void sqrtDouble(Address src, FPRegisterID dst) + { + m_assembler.sqrtsd_mr(src.offset, src.base, dst); + } + + void sqrtFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.sqrtss_rr(src, dst); + } + + void sqrtFloat(Address src, FPRegisterID dst) + { + m_assembler.sqrtss_mr(src.offset, src.base, dst); + } + void absDouble(FPRegisterID src, FPRegisterID dst) { ASSERT(src != dst); static const double negativeZeroConstant = -0.0; - loadDouble(&negativeZeroConstant, dst); + loadDouble(TrustedImmPtr(&negativeZeroConstant), dst); m_assembler.andnpd_rr(src, dst); } @@ -455,10 +671,49 @@ public: { ASSERT(src != dst); static const double negativeZeroConstant = -0.0; - loadDouble(&negativeZeroConstant, dst); + loadDouble(TrustedImmPtr(&negativeZeroConstant), dst); m_assembler.xorpd_rr(src, dst); } + void ceilDouble(FPRegisterID src, FPRegisterID dst) + { + m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardInfiniti); + } + + void ceilDouble(Address src, FPRegisterID dst) + { + m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardInfiniti); + } + + void ceilFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardInfiniti); + } + + void ceilFloat(Address src, FPRegisterID dst) + { + m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardInfiniti); + } + + void floorDouble(FPRegisterID src, FPRegisterID dst) + { + m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardNegativeInfiniti); + } + + void floorDouble(Address src, FPRegisterID dst) + { + m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardNegativeInfiniti); + } + + void floorFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardNegativeInfiniti); + } + + void 
floorFloat(Address src, FPRegisterID dst) + { + m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardNegativeInfiniti); + } // Memory access operations: // @@ -524,15 +779,25 @@ public: m_assembler.movzbl_mr(address.offset, address.base, dest); } - void load8Signed(BaseIndex address, RegisterID dest) + void load8SignedExtendTo32(BaseIndex address, RegisterID dest) { m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest); } - void load8Signed(ImplicitAddress address, RegisterID dest) + void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest) { m_assembler.movsbl_mr(address.offset, address.base, dest); } + + void zeroExtend8To32(RegisterID src, RegisterID dest) + { + m_assembler.movzbl_rr(src, dest); + } + + void signExtend8To32(RegisterID src, RegisterID dest) + { + m_assembler.movsbl_rr(src, dest); + } void load16(BaseIndex address, RegisterID dest) { @@ -544,16 +809,26 @@ public: m_assembler.movzwl_mr(address.offset, address.base, dest); } - void load16Signed(BaseIndex address, RegisterID dest) + void load16SignedExtendTo32(BaseIndex address, RegisterID dest) { m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest); } - void load16Signed(Address address, RegisterID dest) + void load16SignedExtendTo32(Address address, RegisterID dest) { m_assembler.movswl_mr(address.offset, address.base, dest); } + void zeroExtend16To32(RegisterID src, RegisterID dest) + { + m_assembler.movzwl_rr(src, dest); + } + + void signExtend16To32(RegisterID src, RegisterID dest) + { + m_assembler.movswl_rr(src, dest); + } + DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address) { padBeforePatch(); @@ -581,18 +856,47 @@ public: m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale); } + void storeZero32(ImplicitAddress address) + { + store32(TrustedImm32(0), address); + } + + void storeZero32(BaseIndex address) + { + store32(TrustedImm32(0), address); + } + void store8(TrustedImm32 imm, Address address) { - ASSERT(-128 <= imm.m_value && imm.m_value < 128); - m_assembler.movb_i8m(imm.m_value, address.offset, address.base); + m_assembler.movb_i8m(static_cast<int8_t>(imm.m_value), address.offset, address.base); } void store8(TrustedImm32 imm, BaseIndex address) { - ASSERT(-128 <= imm.m_value && imm.m_value < 128); - m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale); + m_assembler.movb_i8m(static_cast<int8_t>(imm.m_value), address.offset, address.base, address.index, address.scale); } - + + static ALWAYS_INLINE RegisterID getUnusedRegister(BaseIndex address) + { + if (address.base != X86Registers::eax && address.index != X86Registers::eax) + return X86Registers::eax; + + if (address.base != X86Registers::ebx && address.index != X86Registers::ebx) + return X86Registers::ebx; + + ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx); + return X86Registers::ecx; + } + + static ALWAYS_INLINE RegisterID getUnusedRegister(Address address) + { + if (address.base != X86Registers::eax) + return X86Registers::eax; + + ASSERT(address.base != X86Registers::edx); + return X86Registers::edx; + } + void store8(RegisterID src, BaseIndex address) { #if CPU(X86) @@ -600,15 +904,7 @@ public: // esp..edi are mapped to the 'h' registers! if (src >= 4) { // Pick a temporary register. 
- RegisterID temp; - if (address.base != X86Registers::eax && address.index != X86Registers::eax) - temp = X86Registers::eax; - else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx) - temp = X86Registers::ebx; - else { - ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx); - temp = X86Registers::ecx; - } + RegisterID temp = getUnusedRegister(address); // Swap to the temporary register to perform the store. swap(src, temp); @@ -619,6 +915,25 @@ public: #endif m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale); } + + void store8(RegisterID src, Address address) + { +#if CPU(X86) + // On 32-bit x86 we can only store from the first 4 registers; + // esp..edi are mapped to the 'h' registers! + if (src >= 4) { + // Pick a temporary register. + RegisterID temp = getUnusedRegister(address); + + // Swap to the temporary register to perform the store. + swap(src, temp); + m_assembler.movb_rm(temp, address.offset, address.base); + swap(src, temp); + return; + } +#endif + m_assembler.movb_rm(src, address.offset, address.base); + } void store16(RegisterID src, BaseIndex address) { @@ -627,16 +942,8 @@ public: // esp..edi are mapped to the 'h' registers! if (src >= 4) { // Pick a temporary register. - RegisterID temp; - if (address.base != X86Registers::eax && address.index != X86Registers::eax) - temp = X86Registers::eax; - else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx) - temp = X86Registers::ebx; - else { - ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx); - temp = X86Registers::ecx; - } - + RegisterID temp = getUnusedRegister(address); + // Swap to the temporary register to perform the store. swap(src, temp); m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale); @@ -647,6 +954,25 @@ public: m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale); } + void store16(RegisterID src, Address address) + { +#if CPU(X86) + // On 32-bit x86 we can only store from the first 4 registers; + // esp..edi are mapped to the 'h' registers! + if (src >= 4) { + // Pick a temporary register. + RegisterID temp = getUnusedRegister(address); + + // Swap to the temporary register to perform the store. 
+ swap(src, temp); + m_assembler.movw_rm(temp, address.offset, address.base); + swap(src, temp); + return; + } +#endif + m_assembler.movw_rm(src, address.offset, address.base); + } + // Floating-point operation: // @@ -656,17 +982,17 @@ public: { ASSERT(isSSE2Present()); if (src != dest) - m_assembler.movsd_rr(src, dest); + m_assembler.movaps_rr(src, dest); } - void loadDouble(const void* address, FPRegisterID dest) + void loadDouble(TrustedImmPtr address, FPRegisterID dest) { #if CPU(X86) ASSERT(isSSE2Present()); - m_assembler.movsd_mr(address, dest); + m_assembler.movsd_mr(address.m_value, dest); #else - move(TrustedImmPtr(address), scratchRegister); - loadDouble(scratchRegister, dest); + move(address, scratchRegister()); + loadDouble(scratchRegister(), dest); #endif } @@ -675,12 +1001,19 @@ public: ASSERT(isSSE2Present()); m_assembler.movsd_mr(address.offset, address.base, dest); } - + void loadDouble(BaseIndex address, FPRegisterID dest) { ASSERT(isSSE2Present()); m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest); } + + void loadFloat(ImplicitAddress address, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + m_assembler.movss_mr(address.offset, address.base, dest); + } + void loadFloat(BaseIndex address, FPRegisterID dest) { ASSERT(isSSE2Present()); @@ -698,7 +1031,13 @@ public: ASSERT(isSSE2Present()); m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale); } - + + void storeFloat(FPRegisterID src, ImplicitAddress address) + { + ASSERT(isSSE2Present()); + m_assembler.movss_rm(src, address.offset, address.base); + } + void storeFloat(FPRegisterID src, BaseIndex address) { ASSERT(isSSE2Present()); @@ -711,12 +1050,24 @@ public: m_assembler.cvtsd2ss_rr(src, dst); } + void convertDoubleToFloat(Address address, FPRegisterID dst) + { + ASSERT(isSSE2Present()); + m_assembler.cvtsd2ss_mr(address.offset, address.base, dst); + } + void convertFloatToDouble(FPRegisterID src, FPRegisterID dst) { ASSERT(isSSE2Present()); m_assembler.cvtss2sd_rr(src, dst); } + void convertFloatToDouble(Address address, FPRegisterID dst) + { + ASSERT(isSSE2Present()); + m_assembler.cvtss2sd_mr(address.offset, address.base, dst); + } + void addDouble(FPRegisterID src, FPRegisterID dest) { ASSERT(isSSE2Present()); @@ -740,6 +1091,77 @@ public: m_assembler.addsd_mr(src.offset, src.base, dest); } + void addDouble(Address op1, FPRegisterID op2, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + if (op2 == dest) { + addDouble(op1, dest); + return; + } + + loadDouble(op1, dest); + addDouble(op2, dest); + } + + void addDouble(FPRegisterID op1, Address op2, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + if (op1 == dest) { + addDouble(op2, dest); + return; + } + + loadDouble(op2, dest); + addDouble(op1, dest); + } + + void addFloat(FPRegisterID src, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + m_assembler.addss_rr(src, dest); + } + + void addFloat(Address src, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + m_assembler.addss_mr(src.offset, src.base, dest); + } + + void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + if (op1 == dest) + addFloat(op2, dest); + else { + moveDouble(op2, dest); + addFloat(op1, dest); + } + } + + void addFloat(Address op1, FPRegisterID op2, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + if (op2 == dest) { + addFloat(op1, dest); + return; + } + + loadFloat(op1, dest); + addFloat(op2, dest); + } + + void addFloat(FPRegisterID op1, Address op2, FPRegisterID dest) 
+ { + ASSERT(isSSE2Present()); + if (op1 == dest) { + addFloat(op2, dest); + return; + } + + loadFloat(op2, dest); + addFloat(op1, dest); + } + void divDouble(FPRegisterID src, FPRegisterID dest) { ASSERT(isSSE2Present()); @@ -761,6 +1183,18 @@ public: m_assembler.divsd_mr(src.offset, src.base, dest); } + void divFloat(FPRegisterID src, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + m_assembler.divss_rr(src, dest); + } + + void divFloat(Address src, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + m_assembler.divss_mr(src.offset, src.base, dest); + } + void subDouble(FPRegisterID src, FPRegisterID dest) { ASSERT(isSSE2Present()); @@ -782,6 +1216,18 @@ public: m_assembler.subsd_mr(src.offset, src.base, dest); } + void subFloat(FPRegisterID src, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + m_assembler.subss_rr(src, dest); + } + + void subFloat(Address src, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + m_assembler.subss_mr(src.offset, src.base, dest); + } + void mulDouble(FPRegisterID src, FPRegisterID dest) { ASSERT(isSSE2Present()); @@ -805,6 +1251,134 @@ public: m_assembler.mulsd_mr(src.offset, src.base, dest); } + void mulDouble(Address op1, FPRegisterID op2, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + if (op2 == dest) { + mulDouble(op1, dest); + return; + } + loadDouble(op1, dest); + mulDouble(op2, dest); + } + + void mulDouble(FPRegisterID op1, Address op2, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + if (op1 == dest) { + mulDouble(op2, dest); + return; + } + loadDouble(op2, dest); + mulDouble(op1, dest); + } + + void mulFloat(FPRegisterID src, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + m_assembler.mulss_rr(src, dest); + } + + void mulFloat(Address src, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + m_assembler.mulss_mr(src.offset, src.base, dest); + } + + void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + if (op1 == dest) + mulFloat(op2, dest); + else { + moveDouble(op2, dest); + mulFloat(op1, dest); + } + } + + void mulFloat(Address op1, FPRegisterID op2, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + if (op2 == dest) { + mulFloat(op1, dest); + return; + } + loadFloat(op1, dest); + mulFloat(op2, dest); + } + + void mulFloat(FPRegisterID op1, Address op2, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + if (op1 == dest) { + mulFloat(op2, dest); + return; + } + loadFloat(op2, dest); + mulFloat(op1, dest); + } + + void andDouble(FPRegisterID src, FPRegisterID dst) + { + // ANDPS is defined on 128bits and is shorter than ANDPD. 
+ m_assembler.andps_rr(src, dst); + } + + void andDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst) + { + if (src1 == dst) + andDouble(src2, dst); + else { + moveDouble(src2, dst); + andDouble(src1, dst); + } + } + + void andFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.andps_rr(src, dst); + } + + void andFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst) + { + if (src1 == dst) + andFloat(src2, dst); + else { + moveDouble(src2, dst); + andFloat(src1, dst); + } + } + + void xorDouble(FPRegisterID src, FPRegisterID dst) + { + m_assembler.xorps_rr(src, dst); + } + + void xorDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst) + { + if (src1 == dst) + xorDouble(src2, dst); + else { + moveDouble(src2, dst); + xorDouble(src1, dst); + } + } + + void xorFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.xorps_rr(src, dst); + } + + void xorFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst) + { + if (src1 == dst) + xorFloat(src2, dst); + else { + moveDouble(src2, dst); + xorFloat(src1, dst); + } + } + void convertInt32ToDouble(RegisterID src, FPRegisterID dest) { ASSERT(isSSE2Present()); @@ -825,27 +1399,18 @@ public: m_assembler.ucomisd_rr(left, right); else m_assembler.ucomisd_rr(right, left); + return jumpAfterFloatingPointCompare(cond, left, right); + } - if (cond == DoubleEqual) { - if (left == right) - return Jump(m_assembler.jnp()); - Jump isUnordered(m_assembler.jp()); - Jump result = Jump(m_assembler.je()); - isUnordered.link(this); - return result; - } else if (cond == DoubleNotEqualOrUnordered) { - if (left == right) - return Jump(m_assembler.jp()); - Jump isUnordered(m_assembler.jp()); - Jump isEqual(m_assembler.je()); - isUnordered.link(this); - Jump result = jump(); - isEqual.link(this); - return result; - } + Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right) + { + ASSERT(isSSE2Present()); - ASSERT(!(cond & DoubleConditionBitSpecial)); - return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits))); + if (cond & DoubleConditionBitInvert) + m_assembler.ucomiss_rr(left, right); + else + m_assembler.ucomiss_rr(right, left); + return jumpAfterFloatingPointCompare(cond, left, right); } // Truncates 'src' to an integer, and places the resulting 'dest'. @@ -860,13 +1425,6 @@ public: return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000)); } - Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed) - { - ASSERT(isSSE2Present()); - m_assembler.cvttsd2si_rr(src, dest); - return branch32(branchType ? GreaterThanOrEqual : LessThan, dest, TrustedImm32(0)); - } - void truncateDoubleToInt32(FPRegisterID src, RegisterID dest) { ASSERT(isSSE2Present()); @@ -891,8 +1449,17 @@ public: m_assembler.cvttsd2si_rr(src, dest); // If the result is zero, it might have been -0.0, and the double comparison won't catch this! +#if CPU(X86_64) + if (negZeroCheck) { + Jump valueIsNonZero = branchTest32(NonZero, dest); + m_assembler.movmskpd_rr(src, scratchRegister()); + failureCases.append(branchTest32(NonZero, scratchRegister(), TrustedImm32(1))); + valueIsNonZero.link(this); + } +#else if (negZeroCheck) failureCases.append(branchTest32(Zero, dest)); +#endif // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump. 
convertInt32ToDouble(dest, fpTemp); @@ -901,6 +1468,11 @@ public: failureCases.append(m_assembler.jne()); } + void moveZeroToDouble(FPRegisterID reg) + { + m_assembler.xorps_rr(reg, reg); + } + Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch) { ASSERT(isSSE2Present()); @@ -933,13 +1505,13 @@ public: m_assembler.por_rr(src, dst); } - void moveInt32ToPacked(RegisterID src, XMMRegisterID dst) + void move32ToFloat(RegisterID src, XMMRegisterID dst) { ASSERT(isSSE2Present()); m_assembler.movd_rr(src, dst); } - void movePackedToInt32(XMMRegisterID src, RegisterID dst) + void moveFloatTo32(XMMRegisterID src, RegisterID dst) { ASSERT(isSSE2Present()); m_assembler.movd_rr(src, dst); @@ -999,20 +1571,104 @@ public: void move(TrustedImmPtr imm, RegisterID dest) { - m_assembler.movq_i64r(imm.asIntptr(), dest); + if (!imm.m_value) + m_assembler.xorq_rr(dest, dest); + else + m_assembler.movq_i64r(imm.asIntptr(), dest); } void move(TrustedImm64 imm, RegisterID dest) { - m_assembler.movq_i64r(imm.m_value, dest); + if (!imm.m_value) + m_assembler.xorq_rr(dest, dest); + else + m_assembler.movq_i64r(imm.m_value, dest); } + void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest) + { + ASSERT(isSSE2Present()); + + if (cond & DoubleConditionBitInvert) + m_assembler.ucomisd_rr(left, right); + else + m_assembler.ucomisd_rr(right, left); + moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest); + } + + void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + ASSERT(isSSE2Present()); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + RegisterID src; + if (elseCase == dest) + src = thenCase; + else { + cond = invert(cond); + src = elseCase; + } + + if (cond & DoubleConditionBitInvert) + m_assembler.ucomisd_rr(left, right); + else + m_assembler.ucomisd_rr(right, left); + moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest); + } + + void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest) + { + ASSERT(isSSE2Present()); + + if (cond & DoubleConditionBitInvert) + m_assembler.ucomiss_rr(left, right); + else + m_assembler.ucomiss_rr(right, left); + moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest); + } + + void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + ASSERT(isSSE2Present()); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + RegisterID src; + if (elseCase == dest) + src = thenCase; + else { + cond = invert(cond); + src = elseCase; + } + + if (cond & DoubleConditionBitInvert) + m_assembler.ucomiss_rr(left, right); + else + m_assembler.ucomiss_rr(right, left); + moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest); + } + void swap(RegisterID reg1, RegisterID reg2) { if (reg1 != reg2) m_assembler.xchgq_rr(reg1, reg2); } + void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest) + { + if (!imm.m_value) + m_assembler.xorq_rr(dest, dest); + else + m_assembler.mov_i32r(imm.m_value, dest); + } + void signExtend32ToPtr(RegisterID src, RegisterID dest) { m_assembler.movsxd_rr(src, dest); @@ -1022,6 +1678,11 @@ public: { m_assembler.movl_rr(src, dest); } + + void zeroExtend32ToPtr(TrustedImm32 src, RegisterID 
dest) + { + m_assembler.movl_i32r(src.m_value, dest); + } #else void move(RegisterID src, RegisterID dest) { @@ -1031,7 +1692,46 @@ public: void move(TrustedImmPtr imm, RegisterID dest) { - m_assembler.movl_i32r(imm.asIntptr(), dest); + if (!imm.m_value) + m_assembler.xorl_rr(dest, dest); + else + m_assembler.movl_i32r(imm.asIntptr(), dest); + } + + void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest) + { + ASSERT(isSSE2Present()); + + if (cond & DoubleConditionBitInvert) + m_assembler.ucomisd_rr(left, right); + else + m_assembler.ucomisd_rr(right, left); + + if (cond == DoubleEqual) { + if (left == right) { + m_assembler.cmovnpl_rr(src, dest); + return; + } + + Jump isUnordered(m_assembler.jp()); + m_assembler.cmovel_rr(src, dest); + isUnordered.link(this); + return; + } + + if (cond == DoubleNotEqualOrUnordered) { + if (left == right) { + m_assembler.cmovpl_rr(src, dest); + return; + } + + m_assembler.cmovpl_rr(src, dest); + m_assembler.cmovnel_rr(src, dest); + return; + } + + ASSERT(!(cond & DoubleConditionBitSpecial)); + m_assembler.cmovl_rr(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), src, dest); } void swap(RegisterID reg1, RegisterID reg2) @@ -1051,6 +1751,190 @@ public: } #endif + void swap32(RegisterID src, RegisterID dest) + { + m_assembler.xchgl_rr(src, dest); + } + + void swap32(RegisterID src, Address dest) + { + m_assembler.xchgl_rm(src, dest.offset, dest.base); + } + + void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.cmpl_rr(right, left); + cmov(x86Condition(cond), src, dest); + } + + void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.cmpl_rr(right, left); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest); + return; + } + } + + m_assembler.cmpl_ir(right.m_value, left); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest) + { + m_assembler.testl_rr(testReg, mask); + cmov(x86Condition(cond), src, dest); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + ASSERT(isInvertible(cond)); + ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag."); + + m_assembler.testl_rr(right, left); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID 
testReg, TrustedImm32 mask, RegisterID src, RegisterID dest) + { + test32(testReg, mask); + cmov(x86Condition(cond), src, dest); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + ASSERT(isInvertible(cond)); + ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag."); + + test32(testReg, mask); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + template<typename LeftType, typename RightType> + void moveDoubleConditionally32(RelationalCondition cond, LeftType left, RightType right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + static_assert(!std::is_same<LeftType, FPRegisterID>::value && !std::is_same<RightType, FPRegisterID>::value, "One of the tested argument could be aliased on dest. Use moveDoubleConditionallyDouble()."); + + if (thenCase != dest && elseCase != dest) { + moveDouble(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) { + Jump falseCase = branch32(invert(cond), left, right); + moveDouble(thenCase, dest); + falseCase.link(this); + } else { + Jump trueCase = branch32(cond, left, right); + moveDouble(elseCase, dest); + trueCase.link(this); + } + } + + template<typename TestType, typename MaskType> + void moveDoubleConditionallyTest32(ResultCondition cond, TestType test, MaskType mask, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + static_assert(!std::is_same<TestType, FPRegisterID>::value && !std::is_same<MaskType, FPRegisterID>::value, "One of the tested argument could be aliased on dest. 
Use moveDoubleConditionallyDouble()."); + + if (elseCase == dest && isInvertible(cond)) { + Jump falseCase = branchTest32(invert(cond), test, mask); + moveDouble(thenCase, dest); + falseCase.link(this); + } else if (thenCase == dest) { + Jump trueCase = branchTest32(cond, test, mask); + moveDouble(elseCase, dest); + trueCase.link(this); + } + + Jump trueCase = branchTest32(cond, test, mask); + moveDouble(elseCase, dest); + Jump falseCase = jump(); + trueCase.link(this); + moveDouble(thenCase, dest); + falseCase.link(this); + } + + void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + if (elseCase == dest) { + Jump falseCase = branchDouble(invert(cond), left, right); + moveDouble(thenCase, dest); + falseCase.link(this); + } else if (thenCase == dest) { + Jump trueCase = branchDouble(cond, left, right); + moveDouble(elseCase, dest); + trueCase.link(this); + } else { + Jump trueCase = branchDouble(cond, left, right); + moveDouble(elseCase, dest); + Jump falseCase = jump(); + trueCase.link(this); + moveDouble(thenCase, dest); + falseCase.link(this); + } + } + + void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + if (elseCase == dest) { + Jump falseCase = branchFloat(invert(cond), left, right); + moveDouble(thenCase, dest); + falseCase.link(this); + } else if (thenCase == dest) { + Jump trueCase = branchFloat(cond, left, right); + moveDouble(elseCase, dest); + trueCase.link(this); + } else { + Jump trueCase = branchFloat(cond, left, right); + moveDouble(elseCase, dest); + Jump falseCase = jump(); + trueCase.link(this); + moveDouble(thenCase, dest); + falseCase.link(this); + } + } // Forwards / external control flow operations: // @@ -1085,10 +1969,12 @@ public: Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right) { - if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) - m_assembler.testl_rr(left, left); - else - m_assembler.cmpl_ir(right.m_value, left); + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) + return branchTest32(*resultCondition, left, left); + } + + m_assembler.cmpl_ir(right.m_value, left); return Jump(m_assembler.jCC(x86Condition(cond))); } @@ -1127,22 +2013,33 @@ public: return Jump(m_assembler.jCC(x86Condition(cond))); } - Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) + void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) { - // if we are only interested in the low seven bits, this can be tested with a testb if (mask.m_value == -1) m_assembler.testl_rr(reg, reg); - else + else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) { // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah. 
+ if (mask.m_value == 0xff) + m_assembler.testb_rr(reg, reg); + else + m_assembler.testb_i8r(mask.m_value, reg); + } else m_assembler.testl_i32r(mask.m_value, reg); + } + + Jump branch(ResultCondition cond) + { return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) + { + test32(reg, mask); + return branch(cond); + } + Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) { - if (mask.m_value == -1) - m_assembler.cmpl_im(0, address.offset, address.base); - else - m_assembler.testl_i32m(mask.m_value, address.offset, address.base); + generateTest32(address, mask); return Jump(m_assembler.jCC(x86Condition(cond))); } @@ -1179,7 +2076,7 @@ public: Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right) { - ASSERT(!(right.m_value & 0xFFFFFF00)); + ASSERT(std::numeric_limits<int8_t>::min() <= right.m_value && right.m_value <= std::numeric_limits<int8_t>::max()); m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale); return Jump(m_assembler.jCC(x86Condition(cond))); @@ -1246,13 +2143,25 @@ public: { if (src1 == dest) return branchAdd32(cond, src2, dest); - move(src2, dest); + move32IfNeeded(src2, dest); + return branchAdd32(cond, src1, dest); + } + + Jump branchAdd32(ResultCondition cond, Address src1, RegisterID src2, RegisterID dest) + { + move32IfNeeded(src2, dest); return branchAdd32(cond, src1, dest); } + Jump branchAdd32(ResultCondition cond, RegisterID src1, Address src2, RegisterID dest) + { + move32IfNeeded(src1, dest); + return branchAdd32(cond, src2, dest); + } + Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) { - move(src, dest); + move32IfNeeded(src, dest); return branchAdd32(cond, imm, dest); } @@ -1272,7 +2181,7 @@ public: return Jump(m_assembler.jCC(x86Condition(cond))); } - Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest) + Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) { mul32(imm, src, dest); if (cond != Overflow) @@ -1284,7 +2193,7 @@ public: { if (src1 == dest) return branchMul32(cond, src2, dest); - move(src2, dest); + move32IfNeeded(src2, dest); return branchMul32(cond, src1, dest); } @@ -1323,13 +2232,13 @@ public: // B := A - B is invalid. 
ASSERT(src1 == dest || src2 != dest); - move(src1, dest); + move32IfNeeded(src1, dest); return branchSub32(cond, src2, dest); } Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest) { - move(src1, dest); + move32IfNeeded(src1, dest); return branchSub32(cond, src2, dest); } @@ -1353,6 +2262,11 @@ public: m_assembler.int3(); } + Call nearTailCall() + { + return Call(m_assembler.jmp(), Call::LinkableNearTail); + } + Call nearCall() { return Call(m_assembler.call(), Call::LinkableNear); @@ -1387,10 +2301,14 @@ public: void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) { - if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) - m_assembler.testl_rr(left, left); - else - m_assembler.cmpl_ir(right.m_value, left); + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + test32(*resultCondition, left, left, dest); + return; + } + } + + m_assembler.cmpl_ir(right.m_value, left); set32(x86Condition(cond), dest); } @@ -1410,23 +2328,121 @@ public: void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) { - if (mask.m_value == -1) - m_assembler.cmpl_im(0, address.offset, address.base); - else - m_assembler.testl_i32m(mask.m_value, address.offset, address.base); + generateTest32(address, mask); set32(x86Condition(cond), dest); } + void test32(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest) + { + m_assembler.testl_rr(reg, mask); + set32(x86Condition(cond), dest); + } + + void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest) + { + test32(reg, mask); + set32(x86Condition(cond), dest); + } + + void setCarry(RegisterID dest) + { + set32(X86Assembler::ConditionC, dest); + } + // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc. static RelationalCondition invert(RelationalCondition cond) { return static_cast<RelationalCondition>(cond ^ 1); } + static DoubleCondition invert(DoubleCondition cond) + { + switch (cond) { + case DoubleEqual: + return DoubleNotEqualOrUnordered; + case DoubleNotEqual: + return DoubleEqualOrUnordered; + case DoubleGreaterThan: + return DoubleLessThanOrEqualOrUnordered; + case DoubleGreaterThanOrEqual: + return DoubleLessThanOrUnordered; + case DoubleLessThan: + return DoubleGreaterThanOrEqualOrUnordered; + case DoubleLessThanOrEqual: + return DoubleGreaterThanOrUnordered; + case DoubleEqualOrUnordered: + return DoubleNotEqual; + case DoubleNotEqualOrUnordered: + return DoubleEqual; + case DoubleGreaterThanOrUnordered: + return DoubleLessThanOrEqual; + case DoubleGreaterThanOrEqualOrUnordered: + return DoubleLessThan; + case DoubleLessThanOrUnordered: + return DoubleGreaterThanOrEqual; + case DoubleLessThanOrEqualOrUnordered: + return DoubleGreaterThan; + } + RELEASE_ASSERT_NOT_REACHED(); + return DoubleEqual; // make compiler happy + } + + static bool isInvertible(ResultCondition cond) + { + switch (cond) { + case Zero: + case NonZero: + case Signed: + case PositiveOrZero: + return true; + default: + return false; + } + } + + static ResultCondition invert(ResultCondition cond) + { + switch (cond) { + case Zero: + return NonZero; + case NonZero: + return Zero; + case Signed: + return PositiveOrZero; + case PositiveOrZero: + return Signed; + default: + RELEASE_ASSERT_NOT_REACHED(); + return Zero; // Make compiler happy for release builds. 
+ } + } + + static Optional<ResultCondition> commuteCompareToZeroIntoTest(RelationalCondition cond) + { + switch (cond) { + case Equal: + return Zero; + case NotEqual: + return NonZero; + case LessThan: + return Signed; + case GreaterThanOrEqual: + return PositiveOrZero; + break; + default: + return Nullopt; + } + } + void nop() { m_assembler.nop(); } + + void memoryFence() + { + m_assembler.mfence(); + } static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination) { @@ -1438,6 +2454,46 @@ public: return X86Assembler::maxJumpReplacementSize(); } + static bool supportsFloatingPointRounding() + { + if (s_sse4_1CheckState == CPUIDCheckState::NotChecked) { + int flags = 0; +#if COMPILER(MSVC) + int cpuInfo[4]; + __cpuid(cpuInfo, 0x1); + flags = cpuInfo[2]; +#elif COMPILER(GCC_OR_CLANG) +#if CPU(X86_64) + asm ( + "movl $0x1, %%eax;" + "cpuid;" + "movl %%ecx, %0;" + : "=g" (flags) + : + : "%eax", "%ebx", "%ecx", "%edx" + ); +#else + asm ( + "movl $0x1, %%eax;" + "pushl %%ebx;" + "cpuid;" + "popl %%ebx;" + "movl %%ecx, %0;" + : "=g" (flags) + : + : "%eax", "%ecx", "%edx" + ); +#endif +#endif // COMPILER(GCC_OR_CLANG) + s_sse4_1CheckState = (flags & (1 << 19)) ? CPUIDCheckState::Set : CPUIDCheckState::Clear; + } + return s_sse4_1CheckState == CPUIDCheckState::Set; + } + +#if ENABLE(MASM_PROBE) + void probe(ProbeFunction, void* arg1, void* arg2); +#endif // ENABLE(MASM_PROBE) + protected: X86Assembler::Condition x86Condition(RelationalCondition cond) { @@ -1466,11 +2522,150 @@ protected: m_assembler.movzbl_rr(dest, dest); } + void cmov(X86Assembler::Condition cond, RegisterID src, RegisterID dest) + { +#if CPU(X86_64) + m_assembler.cmovq_rr(cond, src, dest); +#else + m_assembler.cmovl_rr(cond, src, dest); +#endif + } + + static bool supportsLZCNT() + { + if (s_lzcntCheckState == CPUIDCheckState::NotChecked) { + int flags = 0; +#if COMPILER(MSVC) + int cpuInfo[4]; + __cpuid(cpuInfo, 0x80000001); + flags = cpuInfo[2]; +#elif COMPILER(GCC_OR_CLANG) +#if CPU(X86_64) + asm ( + "movl $0x80000001, %%eax;" + "cpuid;" + "movl %%ecx, %0;" + : "=g" (flags) + : + : "%eax", "%ebx", "%ecx", "%edx" + ); +#else + asm ( + "movl $0x80000001, %%eax;" + "pushl %%ebx;" + "cpuid;" + "popl %%ebx;" + "movl %%ecx, %0;" + : "=g" (flags) + : + : "%eax", "%ecx", "%edx" + ); +#endif +#endif // COMPILER(GCC_OR_CLANG) + s_lzcntCheckState = (flags & 0x20) ? CPUIDCheckState::Set : CPUIDCheckState::Clear; + } + return s_lzcntCheckState == CPUIDCheckState::Set; + } + private: // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'. friend class MacroAssemblerX86; + ALWAYS_INLINE void generateTest32(Address address, TrustedImm32 mask = TrustedImm32(-1)) + { + if (mask.m_value == -1) + m_assembler.cmpl_im(0, address.offset, address.base); + else if (!(mask.m_value & ~0xff)) + m_assembler.testb_im(mask.m_value, address.offset, address.base); + else if (!(mask.m_value & ~0xff00)) + m_assembler.testb_im(mask.m_value >> 8, address.offset + 1, address.base); + else if (!(mask.m_value & ~0xff0000)) + m_assembler.testb_im(mask.m_value >> 16, address.offset + 2, address.base); + else if (!(mask.m_value & ~0xff000000)) + m_assembler.testb_im(mask.m_value >> 24, address.offset + 3, address.base); + else + m_assembler.testl_i32m(mask.m_value, address.offset, address.base); + } + + // If lzcnt is not available, use this after BSR + // to count the leading zeros. 
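// Standalone sketch (not JSC code) of the equivalence the generateTest32 helper above
// relies on: when the mask covers only one byte, testing that single byte of a
// little-endian 32-bit word (with the mask shifted down and the displacement bumped by
// the byte index) gives the same zero/non-zero outcome as testing the whole word, which
// lets the shorter testb encoding replace testl with a 4-byte immediate.
#include <cassert>
#include <cstdint>
#include <cstring>

static bool testWord(uint32_t word, uint32_t mask) { return (word & mask) != 0; }

static bool testSingleByte(uint32_t word, uint32_t mask, int byteIndex)
{
    uint8_t bytes[4];
    std::memcpy(bytes, &word, sizeof(word)); // little-endian assumed, as on x86
    return (bytes[byteIndex] & (mask >> (8 * byteIndex))) != 0;
}

int main()
{
    const uint32_t samples[] = { 0, 1, 0x80, 0xff00, 0xab0000, 0x12345678, 0xffffffff };
    for (uint32_t word : samples) {
        assert(testWord(word, 0x000000ff) == testSingleByte(word, 0x000000ff, 0));
        assert(testWord(word, 0x0000ff00) == testSingleByte(word, 0x0000ff00, 1));
        assert(testWord(word, 0x00ff0000) == testSingleByte(word, 0x00ff0000, 2));
        assert(testWord(word, 0xff000000) == testSingleByte(word, 0xff000000, 3));
    }
    return 0;
}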
+ void clz32AfterBsr(RegisterID dst) + { + Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero)); + move(TrustedImm32(32), dst); + + Jump skipNonZeroCase = jump(); + srcIsNonZero.link(this); + xor32(TrustedImm32(0x1f), dst); + skipNonZeroCase.link(this); + } + + Jump jumpAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID left, FPRegisterID right) + { + if (cond == DoubleEqual) { + if (left == right) + return Jump(m_assembler.jnp()); + Jump isUnordered(m_assembler.jp()); + Jump result = Jump(m_assembler.je()); + isUnordered.link(this); + return result; + } + if (cond == DoubleNotEqualOrUnordered) { + if (left == right) + return Jump(m_assembler.jp()); + Jump isUnordered(m_assembler.jp()); + Jump isEqual(m_assembler.je()); + isUnordered.link(this); + Jump result = jump(); + isEqual.link(this); + return result; + } + + ASSERT(!(cond & DoubleConditionBitSpecial)); + return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits))); + } + + // The 32bit Move does not need the REX byte for low registers, making it shorter. + // Use this if the top bits are irrelevant because they will be reset by the next instruction. + void move32IfNeeded(RegisterID src, RegisterID dest) + { + if (src == dest) + return; + m_assembler.movl_rr(src, dest); + } + +#if CPU(X86_64) + void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest) + { + if (cond == DoubleEqual) { + if (left == right) { + m_assembler.cmovnpq_rr(src, dest); + return; + } + + Jump isUnordered(m_assembler.jp()); + m_assembler.cmoveq_rr(src, dest); + isUnordered.link(this); + return; + } + + if (cond == DoubleNotEqualOrUnordered) { + if (left == right) { + m_assembler.cmovpq_rr(src, dest); + return; + } + + m_assembler.cmovpq_rr(src, dest); + m_assembler.cmovneq_rr(src, dest); + return; + } + + ASSERT(!(cond & DoubleConditionBitSpecial)); + cmov(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), src, dest); + } +#endif + #if CPU(X86) #if OS(MAC_OS_X) @@ -1500,7 +2695,7 @@ private: cpuid; mov flags, edx; } -#elif COMPILER(GCC) +#elif COMPILER(GCC_OR_CLANG) asm ( "movl $0x1, %%eax;" "pushl %%ebx;" @@ -1534,6 +2729,14 @@ private: } #endif + + enum class CPUIDCheckState { + NotChecked, + Clear, + Set + }; + static CPUIDCheckState s_sse4_1CheckState; + static CPUIDCheckState s_lzcntCheckState; }; } // namespace JSC diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h index c711e6f8d..7783d940a 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012, 2014-2016 Apple Inc. All rights reserved. 
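// Standalone sketch (not JSC code, GCC/Clang only) of the cached CPUID probes used by
// supportsFloatingPointRounding() and supportsLZCNT() above: ask the CPU once, remember
// the answer in a tri-state, exactly as the CPUIDCheckState statics do. The bit
// positions mirror the ones tested in the diff (SSE4.1 = ECX bit 19 of leaf 0x1,
// LZCNT/ABM = ECX bit 5 of leaf 0x80000001); <cpuid.h>'s __get_cpuid is an assumption
// of this sketch, the real code uses __cpuid on MSVC and hand-written inline asm elsewhere.
#include <cpuid.h>
#include <cstdio>

enum class CheckState { NotChecked, Clear, Set };

static bool cpuidEcxBit(unsigned leaf, unsigned bit, CheckState& state)
{
    if (state == CheckState::NotChecked) {
        unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
        __get_cpuid(leaf, &eax, &ebx, &ecx, &edx);
        state = (ecx & (1u << bit)) ? CheckState::Set : CheckState::Clear;
    }
    return state == CheckState::Set;
}

static bool supportsSSE41() { static CheckState s = CheckState::NotChecked; return cpuidEcxBit(0x1, 19, s); }
static bool supportsLZCNT() { static CheckState s = CheckState::NotChecked; return cpuidEcxBit(0x80000001, 5, s); }

int main()
{
    std::printf("SSE4.1: %d, LZCNT: %d\n", supportsSSE41(), supportsLZCNT());
    return 0;
}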
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,7 +30,9 @@ #include "MacroAssemblerX86Common.h" -#define REPTACH_OFFSET_CALL_R11 3 +#define REPATCH_OFFSET_CALL_R11 3 + +inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; } namespace JSC { @@ -43,6 +45,7 @@ public: using MacroAssemblerX86Common::branchAdd32; using MacroAssemblerX86Common::or32; using MacroAssemblerX86Common::sub32; + using MacroAssemblerX86Common::load8; using MacroAssemblerX86Common::load32; using MacroAssemblerX86Common::store32; using MacroAssemblerX86Common::store8; @@ -54,38 +57,44 @@ public: void add32(TrustedImm32 imm, AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - add32(imm, Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + add32(imm, Address(scratchRegister())); } void and32(TrustedImm32 imm, AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - and32(imm, Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + and32(imm, Address(scratchRegister())); } void add32(AbsoluteAddress address, RegisterID dest) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - add32(Address(scratchRegister), dest); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + add32(Address(scratchRegister()), dest); } void or32(TrustedImm32 imm, AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - or32(imm, Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + or32(imm, Address(scratchRegister())); } void or32(RegisterID reg, AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - or32(reg, Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + or32(reg, Address(scratchRegister())); } void sub32(TrustedImm32 imm, AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - sub32(imm, Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + sub32(imm, Address(scratchRegister())); + } + + void load8(const void* address, RegisterID dest) + { + move(TrustedImmPtr(address), dest); + load8(dest, dest); } void load32(const void* address, RegisterID dest) @@ -100,64 +109,144 @@ public: void addDouble(AbsoluteAddress address, FPRegisterID dest) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - m_assembler.addsd_mr(0, scratchRegister, dest); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + m_assembler.addsd_mr(0, scratchRegister(), dest); } void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest) { - move(imm, scratchRegister); - m_assembler.cvtsi2sd_rr(scratchRegister, dest); + move(imm, scratchRegister()); + m_assembler.cvtsi2sd_rr(scratchRegister(), dest); } void store32(TrustedImm32 imm, void* address) { - move(TrustedImmPtr(address), scratchRegister); - store32(imm, scratchRegister); + move(TrustedImmPtr(address), scratchRegister()); + store32(imm, scratchRegister()); + } + + void store32(RegisterID source, void* address) + { + if (source == X86Registers::eax) + m_assembler.movl_EAXm(address); + else { + move(TrustedImmPtr(address), scratchRegister()); + store32(source, scratchRegister()); + } } void store8(TrustedImm32 imm, void* address) { - move(TrustedImmPtr(address), scratchRegister); - store8(imm, Address(scratchRegister)); + move(TrustedImmPtr(address), 
scratchRegister()); + store8(imm, Address(scratchRegister())); + } + + void store8(RegisterID reg, void* address) + { + move(TrustedImmPtr(address), scratchRegister()); + store8(reg, Address(scratchRegister())); + } + +#if OS(WINDOWS) + Call callWithSlowPathReturnType() + { + // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value. + // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right, + // rdx should contain the first argument, r8 should contain the second argument, and r9 should contain the third argument. + // On return, rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx. + // We then need to copy the 16 byte return value into rax and rdx, since JIT expects the return value to be split between the two. + // It is assumed that the parameters are already shifted to the right, when entering this method. + // Note: this implementation supports up to 3 parameters. + + // JIT relies on the CallerFrame (frame pointer) being put on the stack, + // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit. + // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer. + store64(X86Registers::ebp, Address(X86Registers::esp, -16)); + + // We also need to allocate the shadow space on the stack for the 4 parameter registers. + // In addition, we need to allocate 16 bytes for the return value. + // Also, we should allocate 16 bytes for the frame pointer, and return address (not populated). + sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); + + // The first parameter register should contain a pointer to the stack allocated space for the return value. + move(X86Registers::esp, X86Registers::ecx); + add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx); + + DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister()); + Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable); + + add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); + + // Copy the return value into rax and rdx. + load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx); + load64(Address(X86Registers::eax), X86Registers::eax); + + ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11); + return result; } +#endif Call call() { - DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister); - Call result = Call(m_assembler.call(scratchRegister), Call::Linkable); - ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11); +#if OS(WINDOWS) + // JIT relies on the CallerFrame (frame pointer) being put on the stack, + // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit. + // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer. + store64(X86Registers::ebp, Address(X86Registers::esp, -16)); + + // On Windows we need to copy the arguments that don't fit in registers to the stack location where the callee expects to find them. + // We don't know the number of arguments at this point, so the arguments (5, 6, ...) should always be copied. 
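// Standalone sketch (not JSC code): the byte budget behind the
// "sub64(TrustedImm32(8 * sizeof(int64_t)), esp)" adjustments in the Win64 call paths
// here, spelled out with names of my own choosing. The individual pieces are the ones
// the comments above enumerate: the 32-byte shadow space for the four register
// parameters, 16 bytes for an oversized return value (or, in call(), for arguments 5
// and 6), and 16 bytes for the copied frame pointer plus the unpopulated return slot.
#include <cstddef>
#include <cstdint>

constexpr std::size_t shadowSpaceForRegisterParams = 4 * sizeof(int64_t); // rcx/rdx/r8/r9 homes
constexpr std::size_t oversizedReturnOrExtraArgs   = 2 * sizeof(int64_t); // 16-byte return value, or args 5 and 6
constexpr std::size_t savedFramePointerAndReturn   = 2 * sizeof(int64_t); // rbp copy + (unpopulated) return address

constexpr std::size_t total = shadowSpaceForRegisterParams
                            + oversizedReturnOrExtraArgs
                            + savedFramePointerAndReturn;

static_assert(total == 8 * sizeof(int64_t), "matches the 64-byte stack adjustment in the diff");
static_assert(total % 16 == 0, "the adjustment is a multiple of 16, so stack alignment is preserved");

int main() { return 0; }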
+ + // Copy argument 5 + load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister()); + store64(scratchRegister(), Address(X86Registers::esp, -4 * static_cast<int32_t>(sizeof(int64_t)))); + + // Copy argument 6 + load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister()); + store64(scratchRegister(), Address(X86Registers::esp, -3 * static_cast<int32_t>(sizeof(int64_t)))); + + // We also need to allocate the shadow space on the stack for the 4 parameter registers. + // Also, we should allocate 16 bytes for the frame pointer, and return address (not populated). + // In addition, we need to allocate 16 bytes for two more parameters, since the call can have up to 6 parameters. + sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); +#endif + DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister()); + Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable); +#if OS(WINDOWS) + add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); +#endif + ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11); return result; } // Address is a memory location containing the address to jump to void jump(AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - jump(Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + jump(Address(scratchRegister())); } Call tailRecursiveCall() { - DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister); - Jump newJump = Jump(m_assembler.jmp_r(scratchRegister)); - ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11); + DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister()); + Jump newJump = Jump(m_assembler.jmp_r(scratchRegister())); + ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11); return Call::fromTailJump(newJump); } Call makeTailRecursiveCall(Jump oldJump) { oldJump.link(this); - DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister); - Jump newJump = Jump(m_assembler.jmp_r(scratchRegister)); - ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11); + DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister()); + Jump newJump = Jump(m_assembler.jmp_r(scratchRegister())); + ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11); return Call::fromTailJump(newJump); } Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest) { - move(TrustedImmPtr(dest.m_ptr), scratchRegister); - add32(src, Address(scratchRegister)); + move(TrustedImmPtr(dest.m_ptr), scratchRegister()); + add32(src, Address(scratchRegister())); return Jump(m_assembler.jCC(x86Condition(cond))); } @@ -171,21 +260,33 @@ public: m_assembler.addq_mr(src.offset, src.base, dest); } + void add64(RegisterID src, Address dest) + { + m_assembler.addq_rm(src, dest.offset, dest.base); + } + void add64(AbsoluteAddress src, RegisterID dest) { - move(TrustedImmPtr(src.m_ptr), scratchRegister); - add64(Address(scratchRegister), dest); + move(TrustedImmPtr(src.m_ptr), scratchRegister()); + add64(Address(scratchRegister()), dest); } void add64(TrustedImm32 imm, RegisterID srcDest) { - m_assembler.addq_ir(imm.m_value, srcDest); + if (imm.m_value == 1) + m_assembler.incq_r(srcDest); + else + m_assembler.addq_ir(imm.m_value, srcDest); } void add64(TrustedImm64 imm, RegisterID dest) { - move(imm, scratchRegister); - add64(scratchRegister, dest); + if (imm.m_value == 1) + 
m_assembler.incq_r(dest); + else { + move(imm, scratchRegister()); + add64(scratchRegister(), dest); + } } void add64(TrustedImm32 imm, RegisterID src, RegisterID dest) @@ -195,13 +296,41 @@ public: void add64(TrustedImm32 imm, Address address) { - m_assembler.addq_im(imm.m_value, address.offset, address.base); + if (imm.m_value == 1) + m_assembler.incq_m(address.offset, address.base); + else + m_assembler.addq_im(imm.m_value, address.offset, address.base); } void add64(TrustedImm32 imm, AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - add64(imm, Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + add64(imm, Address(scratchRegister())); + } + + void add64(RegisterID a, RegisterID b, RegisterID dest) + { + x86Lea64(BaseIndex(a, b, TimesOne), dest); + } + + void x86Lea64(BaseIndex index, RegisterID dest) + { + if (!index.scale && !index.offset) { + if (index.base == dest) { + add64(index.index, dest); + return; + } + if (index.index == dest) { + add64(index.base, dest); + return; + } + } + m_assembler.leaq_mr(index.offset, index.base, index.index, index.scale, dest); + } + + void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest) + { + m_assembler.leaq_mr(imm.m_value, srcDest, srcDest); } void and64(RegisterID src, RegisterID dest) @@ -213,7 +342,147 @@ public: { m_assembler.andq_ir(imm.m_value, srcDest); } + + void and64(TrustedImmPtr imm, RegisterID srcDest) + { + intptr_t intValue = imm.asIntptr(); + if (intValue <= std::numeric_limits<int32_t>::max() + && intValue >= std::numeric_limits<int32_t>::min()) { + and64(TrustedImm32(static_cast<int32_t>(intValue)), srcDest); + return; + } + move(imm, scratchRegister()); + and64(scratchRegister(), srcDest); + } + + void and64(RegisterID op1, RegisterID op2, RegisterID dest) + { + if (op1 == op2 && op1 != dest && op2 != dest) + move(op1, dest); + else if (op1 == dest) + and64(op2, dest); + else { + move(op2, dest); + and64(op1, dest); + } + } + + void countLeadingZeros64(RegisterID src, RegisterID dst) + { + if (supportsLZCNT()) { + m_assembler.lzcntq_rr(src, dst); + return; + } + m_assembler.bsrq_rr(src, dst); + clz64AfterBsr(dst); + } + + void countLeadingZeros64(Address src, RegisterID dst) + { + if (supportsLZCNT()) { + m_assembler.lzcntq_mr(src.offset, src.base, dst); + return; + } + m_assembler.bsrq_mr(src.offset, src.base, dst); + clz64AfterBsr(dst); + } + + void lshift64(TrustedImm32 imm, RegisterID dest) + { + m_assembler.shlq_i8r(imm.m_value, dest); + } + void lshift64(RegisterID src, RegisterID dest) + { + if (src == X86Registers::ecx) + m_assembler.shlq_CLr(dest); + else { + ASSERT(src != dest); + + // Can only shift by ecx, so we do some swapping if we see anything else. + swap(src, X86Registers::ecx); + m_assembler.shlq_CLr(dest); + swap(src, X86Registers::ecx); + } + } + + void rshift64(TrustedImm32 imm, RegisterID dest) + { + m_assembler.sarq_i8r(imm.m_value, dest); + } + + void rshift64(RegisterID src, RegisterID dest) + { + if (src == X86Registers::ecx) + m_assembler.sarq_CLr(dest); + else { + ASSERT(src != dest); + + // Can only shift by ecx, so we do some swapping if we see anything else. 
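// Standalone sketch (not JSC code) of the range check used by and64(TrustedImmPtr)
// above and by the CAN_SIGN_EXTEND_32_64 helper near the top of this file's diff:
// a 64-bit constant may use the short sign-extended imm32 encoding only if
// sign-extending its low 32 bits reproduces the full value; otherwise it must be
// materialized in the scratch register first.
#include <cassert>
#include <cstdint>
#include <limits>

static bool fitsInSignExtendedImm32(int64_t value)
{
    return value >= std::numeric_limits<int32_t>::min()
        && value <= std::numeric_limits<int32_t>::max();
}

int main()
{
    // The equivalent round-trip formulation used by CAN_SIGN_EXTEND_32_64.
    auto roundTrips = [](int64_t v) { return v == static_cast<int64_t>(static_cast<int32_t>(v)); };

    const int64_t samples[] = { 0, -1, 0x7fffffff, -0x80000000LL, 0x80000000LL, 0x123456789LL };
    for (int64_t v : samples)
        assert(fitsInSignExtendedImm32(v) == roundTrips(v));

    assert(fitsInSignExtendedImm32(-1));            // usable as a sign-extended imm32
    assert(!fitsInSignExtendedImm32(0x80000000LL)); // would sign-extend to 0xFFFFFFFF80000000
    return 0;
}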
+ swap(src, X86Registers::ecx); + m_assembler.sarq_CLr(dest); + swap(src, X86Registers::ecx); + } + } + + void urshift64(TrustedImm32 imm, RegisterID dest) + { + m_assembler.shrq_i8r(imm.m_value, dest); + } + + void urshift64(RegisterID src, RegisterID dest) + { + if (src == X86Registers::ecx) + m_assembler.shrq_CLr(dest); + else { + ASSERT(src != dest); + + // Can only shift by ecx, so we do some swapping if we see anything else. + swap(src, X86Registers::ecx); + m_assembler.shrq_CLr(dest); + swap(src, X86Registers::ecx); + } + } + + void mul64(RegisterID src, RegisterID dest) + { + m_assembler.imulq_rr(src, dest); + } + + void mul64(RegisterID src1, RegisterID src2, RegisterID dest) + { + if (src2 == dest) { + m_assembler.imulq_rr(src1, dest); + return; + } + move(src1, dest); + m_assembler.imulq_rr(src2, dest); + } + + void x86ConvertToQuadWord64() + { + m_assembler.cqo(); + } + + void x86ConvertToQuadWord64(RegisterID rax, RegisterID rdx) + { + ASSERT_UNUSED(rax, rax == X86Registers::eax); + ASSERT_UNUSED(rdx, rdx == X86Registers::edx); + x86ConvertToQuadWord64(); + } + + void x86Div64(RegisterID denominator) + { + m_assembler.idivq_r(denominator); + } + + void x86Div64(RegisterID rax, RegisterID rdx, RegisterID denominator) + { + ASSERT_UNUSED(rax, rax == X86Registers::eax); + ASSERT_UNUSED(rdx, rdx == X86Registers::edx); + x86Div64(denominator); + } + void neg64(RegisterID dest) { m_assembler.negq_r(dest); @@ -224,10 +493,15 @@ public: m_assembler.orq_rr(src, dest); } - void or64(TrustedImm64 imm, RegisterID dest) + void or64(TrustedImm64 imm, RegisterID srcDest) { - move(imm, scratchRegister); - or64(scratchRegister, dest); + if (imm.m_value <= std::numeric_limits<int32_t>::max() + && imm.m_value >= std::numeric_limits<int32_t>::min()) { + or64(TrustedImm32(static_cast<int32_t>(imm.m_value)), srcDest); + return; + } + move(imm, scratchRegister()); + or64(scratchRegister(), srcDest); } void or64(TrustedImm32 imm, RegisterID dest) @@ -265,19 +539,53 @@ public: void sub64(TrustedImm32 imm, RegisterID dest) { - m_assembler.subq_ir(imm.m_value, dest); + if (imm.m_value == 1) + m_assembler.decq_r(dest); + else + m_assembler.subq_ir(imm.m_value, dest); } void sub64(TrustedImm64 imm, RegisterID dest) { - move(imm, scratchRegister); - sub64(scratchRegister, dest); + if (imm.m_value == 1) + m_assembler.decq_r(dest); + else { + move(imm, scratchRegister()); + sub64(scratchRegister(), dest); + } + } + + void sub64(TrustedImm32 imm, Address address) + { + m_assembler.subq_im(imm.m_value, address.offset, address.base); + } + + void sub64(Address src, RegisterID dest) + { + m_assembler.subq_mr(src.offset, src.base, dest); + } + + void sub64(RegisterID src, Address dest) + { + m_assembler.subq_rm(src, dest.offset, dest.base); } void xor64(RegisterID src, RegisterID dest) { m_assembler.xorq_rr(src, dest); } + + void xor64(RegisterID op1, RegisterID op2, RegisterID dest) + { + if (op1 == op2) + move(TrustedImm32(0), dest); + else if (op1 == dest) + xor64(op2, dest); + else { + move(op2, dest); + xor64(op1, dest); + } + } void xor64(RegisterID src, Address dest) { @@ -289,6 +597,16 @@ public: m_assembler.xorq_ir(imm.m_value, srcDest); } + void not64(RegisterID srcDest) + { + m_assembler.notq_r(srcDest); + } + + void not64(Address dest) + { + m_assembler.notq_m(dest.offset, dest.base); + } + void load64(ImplicitAddress address, RegisterID dest) { m_assembler.movq_mr(address.offset, address.base, dest); @@ -338,21 +656,31 @@ public: if (src == X86Registers::eax) m_assembler.movq_EAXm(address); else { - 
move(TrustedImmPtr(address), scratchRegister); - store64(src, scratchRegister); + move(TrustedImmPtr(address), scratchRegister()); + store64(src, scratchRegister()); } } + void store64(TrustedImm32 imm, ImplicitAddress address) + { + m_assembler.movq_i32m(imm.m_value, address.offset, address.base); + } + void store64(TrustedImm64 imm, ImplicitAddress address) { - move(imm, scratchRegister); - store64(scratchRegister, address); + if (CAN_SIGN_EXTEND_32_64(imm.m_value)) { + store64(TrustedImm32(static_cast<int32_t>(imm.m_value)), address); + return; + } + + move(imm, scratchRegister()); + store64(scratchRegister(), address); } void store64(TrustedImm64 imm, BaseIndex address) { - move(imm, scratchRegister); - m_assembler.movq_rm(scratchRegister, address.offset, address.base, address.index, address.scale); + move(imm, scratchRegister()); + m_assembler.movq_rm(scratchRegister(), address.offset, address.base, address.index, address.scale); } DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address) @@ -362,6 +690,16 @@ public: return DataLabel32(this); } + void swap64(RegisterID src, RegisterID dest) + { + m_assembler.xchgq_rr(src, dest); + } + + void swap64(RegisterID src, Address dest) + { + m_assembler.xchgq_rm(src, dest.offset, dest.base); + } + void move64ToDouble(RegisterID src, FPRegisterID dest) { m_assembler.movq_rr(src, dest); @@ -374,35 +712,81 @@ public: void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) { - if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) - m_assembler.testq_rr(left, left); - else - m_assembler.cmpq_ir(right.m_value, left); - m_assembler.setCC_r(x86Condition(cond), dest); - m_assembler.movzbl_rr(dest, dest); + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + test64(*resultCondition, left, left, dest); + return; + } + } + + m_assembler.cmpq_ir(right.m_value, left); + set32(x86Condition(cond), dest); } void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) { m_assembler.cmpq_rr(right, left); - m_assembler.setCC_r(x86Condition(cond), dest); - m_assembler.movzbl_rr(dest, dest); + set32(x86Condition(cond), dest); } - + + void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest) + { + if (cond & DoubleConditionBitInvert) + m_assembler.ucomisd_rr(left, right); + else + m_assembler.ucomisd_rr(right, left); + + if (cond == DoubleEqual) { + if (left == right) { + m_assembler.setnp_r(dest); + return; + } + + Jump isUnordered(m_assembler.jp()); + m_assembler.sete_r(dest); + isUnordered.link(this); + return; + } + + if (cond == DoubleNotEqualOrUnordered) { + if (left == right) { + m_assembler.setp_r(dest); + return; + } + + m_assembler.setp_r(dest); + m_assembler.setne_r(dest); + return; + } + + ASSERT(!(cond & DoubleConditionBitSpecial)); + m_assembler.setCC_r(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), dest); + } + Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right) { m_assembler.cmpq_rr(right, left); return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) + return branchTest64(*resultCondition, left, left); + } + m_assembler.cmpq_ir(right.m_value, left); + return Jump(m_assembler.jCC(x86Condition(cond))); + } + Jump branch64(RelationalCondition cond, RegisterID left, 
TrustedImm64 right) { if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) { m_assembler.testq_rr(left, left); return Jump(m_assembler.jCC(x86Condition(cond))); } - move(right, scratchRegister); - return branch64(cond, left, scratchRegister); + move(right, scratchRegister()); + return branch64(cond, left, scratchRegister()); } Jump branch64(RelationalCondition cond, RegisterID left, Address right) @@ -413,8 +797,8 @@ public: Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right) { - move(TrustedImmPtr(left.m_ptr), scratchRegister); - return branch64(cond, Address(scratchRegister), right); + move(TrustedImmPtr(left.m_ptr), scratchRegister()); + return branch64(cond, Address(scratchRegister()), right); } Jump branch64(RelationalCondition cond, Address left, RegisterID right) @@ -423,10 +807,33 @@ public: return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branch64(RelationalCondition cond, Address left, TrustedImm32 right) + { + m_assembler.cmpq_im(right.m_value, left.offset, left.base); + return Jump(m_assembler.jCC(x86Condition(cond))); + } + Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right) { - move(right, scratchRegister); - return branch64(cond, left, scratchRegister); + move(right, scratchRegister()); + return branch64(cond, left, scratchRegister()); + } + + Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right) + { + m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale); + return Jump(m_assembler.jCC(x86Condition(cond))); + } + + Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right) + { + return branch64(cond, left, right); + } + + Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right) + { + move(right, scratchRegister()); + return branchPtr(cond, left, scratchRegister()); } Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask) @@ -447,6 +854,12 @@ public: return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask) + { + move(mask, scratchRegister()); + return branchTest64(cond, reg, scratchRegister()); + } + void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest) { if (mask.m_value == -1) @@ -466,8 +879,8 @@ public: Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) { - load64(address.m_ptr, scratchRegister); - return branchTest64(cond, scratchRegister, mask); + load64(address.m_ptr, scratchRegister()); + return branchTest64(cond, scratchRegister(), mask); } Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) @@ -501,12 +914,54 @@ public: return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branchAdd64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) + { + if (src1 == dest) + return branchAdd64(cond, src2, dest); + move(src2, dest); + return branchAdd64(cond, src1, dest); + } + + Jump branchAdd64(ResultCondition cond, Address src1, RegisterID src2, RegisterID dest) + { + move(src2, dest); + return branchAdd64(cond, src1, dest); + } + + Jump branchAdd64(ResultCondition cond, RegisterID src1, Address src2, RegisterID dest) + { + move(src1, dest); + return branchAdd64(cond, src2, dest); + } + Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest) { add64(src, dest); return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branchAdd64(ResultCondition cond, Address src, 
RegisterID dest) + { + add64(src, dest); + return Jump(m_assembler.jCC(x86Condition(cond))); + } + + Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest) + { + mul64(src, dest); + if (cond != Overflow) + m_assembler.testq_rr(dest, dest); + return Jump(m_assembler.jCC(x86Condition(cond))); + } + + Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) + { + if (src1 == dest) + return branchMul64(cond, src2, dest); + move(src2, dest); + return branchMul64(cond, src1, dest); + } + Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest) { sub64(imm, dest); @@ -525,6 +980,170 @@ public: return branchSub64(cond, src2, dest); } + Jump branchNeg64(ResultCondition cond, RegisterID srcDest) + { + neg64(srcDest); + return Jump(m_assembler.jCC(x86Condition(cond))); + } + + void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.cmpq_rr(right, left); + cmov(x86Condition(cond), src, dest); + } + + void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.cmpq_rr(right, left); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest); + return; + } + } + + m_assembler.cmpq_ir(right.m_value, left); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest) + { + m_assembler.testq_rr(testReg, mask); + cmov(x86Condition(cond), src, dest); + } + + void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + ASSERT(isInvertible(cond)); + ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag."); + + m_assembler.testq_rr(right, left); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID src, RegisterID dest) + { + // if we are only interested in the low seven bits, this can be tested with a testb + if (mask.m_value == -1) + m_assembler.testq_rr(testReg, testReg); + else if ((mask.m_value & ~0x7f) == 0) + m_assembler.testb_i8r(mask.m_value, testReg); + else + m_assembler.testq_i32r(mask.m_value, testReg); + cmov(x86Condition(cond), src, dest); + } + + void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + ASSERT(isInvertible(cond)); + ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the 
Overflow Flag."); + + if (mask.m_value == -1) + m_assembler.testq_rr(testReg, testReg); + else if (!(mask.m_value & ~0x7f)) + m_assembler.testb_i8r(mask.m_value, testReg); + else + m_assembler.testq_i32r(mask.m_value, testReg); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + template<typename LeftType, typename RightType> + void moveDoubleConditionally64(RelationalCondition cond, LeftType left, RightType right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + static_assert(!std::is_same<LeftType, FPRegisterID>::value && !std::is_same<RightType, FPRegisterID>::value, "One of the tested argument could be aliased on dest. Use moveDoubleConditionallyDouble()."); + + if (thenCase != dest && elseCase != dest) { + moveDouble(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) { + Jump falseCase = branch64(invert(cond), left, right); + moveDouble(thenCase, dest); + falseCase.link(this); + } else { + Jump trueCase = branch64(cond, left, right); + moveDouble(elseCase, dest); + trueCase.link(this); + } + } + + template<typename TestType, typename MaskType> + void moveDoubleConditionallyTest64(ResultCondition cond, TestType test, MaskType mask, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + static_assert(!std::is_same<TestType, FPRegisterID>::value && !std::is_same<MaskType, FPRegisterID>::value, "One of the tested argument could be aliased on dest. Use moveDoubleConditionallyDouble()."); + + if (elseCase == dest && isInvertible(cond)) { + Jump falseCase = branchTest64(invert(cond), test, mask); + moveDouble(thenCase, dest); + falseCase.link(this); + } else if (thenCase == dest) { + Jump trueCase = branchTest64(cond, test, mask); + moveDouble(elseCase, dest); + trueCase.link(this); + } + + Jump trueCase = branchTest64(cond, test, mask); + moveDouble(elseCase, dest); + Jump falseCase = jump(); + trueCase.link(this); + moveDouble(thenCase, dest); + falseCase.link(this); + } + + void abortWithReason(AbortReason reason) + { + move(TrustedImm32(reason), X86Registers::r11); + breakpoint(); + } + + void abortWithReason(AbortReason reason, intptr_t misc) + { + move(TrustedImm64(misc), X86Registers::r10); + abortWithReason(reason); + } + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) { ConvertibleLoadLabel result = ConvertibleLoadLabel(this); @@ -539,53 +1158,91 @@ public: return DataLabelPtr(this); } + DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest) + { + padBeforePatch(); + m_assembler.movq_i64r(initialValue.m_value, dest); + return DataLabelPtr(this); + } + Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) { - dataLabel = moveWithPatch(initialRightValue, scratchRegister); - return branch64(cond, left, scratchRegister); + dataLabel = moveWithPatch(initialRightValue, scratchRegister()); + return branch64(cond, left, scratchRegister()); } Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) { - dataLabel = moveWithPatch(initialRightValue, scratchRegister); - return branch64(cond, left, scratchRegister); + dataLabel = moveWithPatch(initialRightValue, scratchRegister()); + return branch64(cond, left, scratchRegister()); + } + + Jump 
branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + padBeforePatch(); + m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister()); + dataLabel = DataLabel32(this); + return branch32(cond, left, scratchRegister()); } DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) { - DataLabelPtr label = moveWithPatch(initialValue, scratchRegister); - store64(scratchRegister, address); + DataLabelPtr label = moveWithPatch(initialValue, scratchRegister()); + store64(scratchRegister(), address); return label; } + + PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm) + { + return PatchableJump(branch64(cond, reg, imm)); + } + + PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right) + { + return PatchableJump(branch64(cond, left, right)); + } + + using MacroAssemblerX86Common::branch8; + Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) + { + MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister()); + return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister()), right); + } using MacroAssemblerX86Common::branchTest8; Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1)) { TrustedImmPtr addr(reinterpret_cast<void*>(address.offset)); - MacroAssemblerX86Common::move(addr, scratchRegister); - return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask); + MacroAssemblerX86Common::move(addr, scratchRegister()); + return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister(), address.base, TimesOne), mask); } Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) { - MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister); - return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister), mask); + MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister()); + return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister()), mask); + } + + void convertInt64ToDouble(RegisterID src, FPRegisterID dest) + { + m_assembler.cvtsi2sdq_rr(src, dest); } static bool supportsFloatingPoint() { return true; } - // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate() static bool supportsFloatingPointTruncate() { return true; } static bool supportsFloatingPointSqrt() { return true; } static bool supportsFloatingPointAbs() { return true; } static FunctionPtr readCallTarget(CodeLocationCall call) { - return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation())); + return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation())); } - static RegisterID scratchRegisterForBlinding() { return scratchRegister; } + bool haveScratchRegisterForBlinding() { return m_allowScratchRegister; } + RegisterID scratchRegisterForBlinding() { return scratchRegister(); } static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; } + static bool canJumpReplacePatchableBranch32WithPatch() { return true; } static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) { @@ -597,43 +1254,76 @@ public: return label.labelAtOffset(-totalBytes); } + static CodeLocationLabel 
startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32 label) + { + const int rexBytes = 1; + const int opcodeBytes = 1; + const int immediateBytes = 4; + const int totalBytes = rexBytes + opcodeBytes + immediateBytes; + ASSERT(totalBytes >= maxJumpReplacementSize()); + return label.labelAtOffset(-totalBytes); + } + static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label) { return startOfBranchPtrWithPatchOnRegister(label); } + + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label) + { + return startOfBranch32WithPatchOnRegister(label); + } static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue) { - X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister); + X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister); } - static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue) + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address, int32_t initialValue) { - X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister); + X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, s_scratchRegister); } -private: - friend class LinkBuffer; - friend class RepatchBuffer; - - static void linkCall(void* code, Call call, FunctionPtr function) + static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue) { - if (!call.isFlagSet(Call::Near)) - X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPTACH_OFFSET_CALL_R11), function.value()); - else - X86Assembler::linkCall(code, call.m_label, function.value()); + X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister); } static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) { - X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress()); + X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress()); } static void repatchCall(CodeLocationCall call, FunctionPtr destination) { - X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress()); + X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress()); + } + +private: + // If lzcnt is not available, use this after BSR + // to count the leading zeros. 
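// Standalone sketch (not JSC code) modelling, with plain variables, the register-
// aliasing logic shared by the moveConditionally64/moveConditionallyTest64 overloads
// earlier in this file's diff: cmov only overwrites dest when its condition holds, so
// the code pre-loads the else value into dest (or cmovs on the inverted condition)
// depending on which input already lives in dest.
#include <cassert>

enum Reg { R0, R1, R2 };

static void emitMoveConditionally(bool cond, int regs[3], Reg thenCase, Reg elseCase, Reg dest)
{
    auto cmovIf = [&](bool c, Reg src) { if (c) regs[dest] = regs[src]; };

    if (thenCase != dest && elseCase != dest) {
        regs[dest] = regs[elseCase]; // move(elseCase, dest)
        elseCase = dest;
    }
    if (elseCase == dest)
        cmovIf(cond, thenCase);      // cmov on cond
    else                             // here thenCase == dest
        cmovIf(!cond, elseCase);     // cmov on the inverted cond
}

int main()
{
    for (int thenReg = 0; thenReg < 3; ++thenReg)
        for (int elseReg = 0; elseReg < 3; ++elseReg)
            for (int destReg = 0; destReg < 3; ++destReg)
                for (bool cond : { false, true }) {
                    if (thenReg == elseReg)
                        continue; // distinct sources keep the check meaningful
                    int regs[3] = { 10, 20, 30 };
                    const int expected = cond ? regs[thenReg] : regs[elseReg];
                    emitMoveConditionally(cond, regs, Reg(thenReg), Reg(elseReg), Reg(destReg));
                    assert(regs[destReg] == expected);
                }
    return 0;
}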
+ void clz64AfterBsr(RegisterID dst) + { + Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero)); + move(TrustedImm32(64), dst); + + Jump skipNonZeroCase = jump(); + srcIsNonZero.link(this); + xor64(TrustedImm32(0x3f), dst); + skipNonZeroCase.link(this); } + friend class LinkBuffer; + + static void linkCall(void* code, Call call, FunctionPtr function) + { + if (!call.isFlagSet(Call::Near)) + X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPATCH_OFFSET_CALL_R11), function.value()); + else if (call.isFlagSet(Call::Tail)) + X86Assembler::linkJump(code, call.m_label, function.value()); + else + X86Assembler::linkCall(code, call.m_label, function.value()); + } }; } // namespace JSC diff --git a/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h b/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h new file mode 100644 index 000000000..39ed6fac5 --- /dev/null +++ b/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef MaxFrameExtentForSlowPathCall_h +#define MaxFrameExtentForSlowPathCall_h + +#include "JSStack.h" +#include "Register.h" +#include "StackAlignment.h" +#include <wtf/Assertions.h> + +namespace JSC { + +// The maxFrameExtentForSlowPathCall is the max amount of stack space (in bytes) +// that can be used for outgoing args when calling a slow path C function +// from JS code. + +#if !ENABLE(JIT) +static const size_t maxFrameExtentForSlowPathCall = 0; + +#elif CPU(X86_64) && OS(WINDOWS) +// 4 args in registers, but stack space needs to be allocated for all args. +static const size_t maxFrameExtentForSlowPathCall = 64; + +#elif CPU(X86_64) +// All args in registers. +static const size_t maxFrameExtentForSlowPathCall = 0; + +#elif CPU(X86) +// 7 args on stack (28 bytes). +static const size_t maxFrameExtentForSlowPathCall = 40; + +#elif CPU(ARM64) +// All args in registers. +static const size_t maxFrameExtentForSlowPathCall = 0; + +#elif CPU(ARM) +// First four args in registers, remaining 4 args on stack. +static const size_t maxFrameExtentForSlowPathCall = 24; + +#elif CPU(SH4) +// First four args in registers, remaining 4 args on stack. 
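// Standalone sketch (not JSC code) of the BSR fallback used by clz64AfterBsr above
// (and by the 32-bit clz32AfterBsr with xor 0x1f): BSR yields the index of the highest
// set bit, the leading-zero count is 63 - index, and for an index in [0, 63] that
// subtraction equals index ^ 0x3f. Zero is special-cased because BSR leaves its result
// undefined for a zero input, which is what the "move 64 into dst" branch handles.
#include <cassert>
#include <cstdint>

static int highestSetBitIndex(uint64_t x) // what BSR computes, for x != 0
{
    int i = 0;
    while (x >>= 1)
        ++i;
    return i;
}

static int clz64(uint64_t x)
{
    if (!x)
        return 64;
    return highestSetBitIndex(x) ^ 0x3f; // same as 63 - index
}

int main()
{
    for (int i = 0; i < 64; ++i)
        assert((63 - i) == (i ^ 0x3f));
    assert(clz64(1) == 63);
    assert(clz64(0x8000000000000000ull) == 0);
    assert(clz64(0x00f0000000000000ull) == 8);
    assert(clz64(0) == 64);
    return 0;
}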
+static const size_t maxFrameExtentForSlowPathCall = 24; + +#elif CPU(MIPS) +// Though args are in registers, there need to be space on the stack for all args. +static const size_t maxFrameExtentForSlowPathCall = 40; + +#else +#error "Unsupported CPU: need value for maxFrameExtentForSlowPathCall" + +#endif + +COMPILE_ASSERT(!(maxFrameExtentForSlowPathCall % sizeof(Register)), extent_must_be_in_multiples_of_registers); + +#if ENABLE(JIT) +// Make sure that cfr - maxFrameExtentForSlowPathCall bytes will make the stack pointer aligned +COMPILE_ASSERT((maxFrameExtentForSlowPathCall % 16) == 16 - sizeof(CallerFrameAndPC), extent_must_align_stack_from_callframe_pointer); +#endif + +static const size_t maxFrameExtentForSlowPathCallInRegisters = maxFrameExtentForSlowPathCall / sizeof(Register); + +} // namespace JSC + +#endif // MaxFrameExtentForSlowPathCall_h + diff --git a/Source/JavaScriptCore/assembler/RepatchBuffer.h b/Source/JavaScriptCore/assembler/RepatchBuffer.h deleted file mode 100644 index dbb56f9ad..000000000 --- a/Source/JavaScriptCore/assembler/RepatchBuffer.h +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright (C) 2009 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef RepatchBuffer_h -#define RepatchBuffer_h - -#if ENABLE(JIT) - -#include "CodeBlock.h" -#include <MacroAssembler.h> -#include <wtf/Noncopyable.h> - -namespace JSC { - -// RepatchBuffer: -// -// This class is used to modify code after code generation has been completed, -// and after the code has potentially already been executed. This mechanism is -// used to apply optimizations to the code. 
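// Standalone sketch (not JSC code) of the alignment condition the COMPILE_ASSERTs
// above enforce, with the per-CPU values from the table plugged in. It assumes Register
// is one machine word and CallerFrameAndPC is two (saved frame pointer + return PC);
// that layout is an assumption of this sketch, not something the header itself states.
#include <cstddef>

template<std::size_t wordSize, std::size_t extent>
struct CheckExtent {
    static constexpr std::size_t registerSize = wordSize;
    static constexpr std::size_t callerFrameAndPCSize = 2 * wordSize;
    static_assert(extent % registerSize == 0, "a whole number of Registers");
    static_assert(extent % 16 == 16 - callerFrameAndPCSize,
                  "cfr - extent leaves the stack pointer 16-byte aligned");
};

// 64-bit targets: CallerFrameAndPC is already 16 bytes, so the extent must be 0 mod 16.
template struct CheckExtent<8, 0>;  // X86_64, ARM64
template struct CheckExtent<8, 64>; // X86_64 on Windows
// 32-bit targets: CallerFrameAndPC is 8 bytes, so the extent must be 8 mod 16.
template struct CheckExtent<4, 40>; // X86, MIPS
template struct CheckExtent<4, 24>; // ARM, SH4

int main() { return 0; }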
-// -class RepatchBuffer { - typedef MacroAssemblerCodePtr CodePtr; - -public: - RepatchBuffer(CodeBlock* codeBlock) - { - JITCode& code = codeBlock->getJITCode(); - m_start = code.start(); - m_size = code.size(); - - ExecutableAllocator::makeWritable(m_start, m_size); - } - - ~RepatchBuffer() - { - ExecutableAllocator::makeExecutable(m_start, m_size); - } - - void relink(CodeLocationJump jump, CodeLocationLabel destination) - { - MacroAssembler::repatchJump(jump, destination); - } - - void relink(CodeLocationCall call, CodeLocationLabel destination) - { - MacroAssembler::repatchCall(call, destination); - } - - void relink(CodeLocationCall call, FunctionPtr destination) - { - MacroAssembler::repatchCall(call, destination); - } - - void relink(CodeLocationNearCall nearCall, CodePtr destination) - { - MacroAssembler::repatchNearCall(nearCall, CodeLocationLabel(destination)); - } - - void relink(CodeLocationNearCall nearCall, CodeLocationLabel destination) - { - MacroAssembler::repatchNearCall(nearCall, destination); - } - - void repatch(CodeLocationDataLabel32 dataLabel32, int32_t value) - { - MacroAssembler::repatchInt32(dataLabel32, value); - } - - void repatch(CodeLocationDataLabelCompact dataLabelCompact, int32_t value) - { - MacroAssembler::repatchCompact(dataLabelCompact, value); - } - - void repatch(CodeLocationDataLabelPtr dataLabelPtr, void* value) - { - MacroAssembler::repatchPointer(dataLabelPtr, value); - } - - void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label) - { - relink(CodeLocationCall(CodePtr(returnAddress)), label); - } - - void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction) - { - relinkCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction)); - } - - void relinkCallerToFunction(ReturnAddressPtr returnAddress, FunctionPtr function) - { - relink(CodeLocationCall(CodePtr(returnAddress)), function); - } - - void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label) - { - relink(CodeLocationNearCall(CodePtr(returnAddress)), label); - } - - void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction) - { - relinkNearCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction)); - } - - void replaceWithLoad(CodeLocationConvertibleLoad label) - { - MacroAssembler::replaceWithLoad(label); - } - - void replaceWithAddressComputation(CodeLocationConvertibleLoad label) - { - MacroAssembler::replaceWithAddressComputation(label); - } - - void setLoadInstructionIsActive(CodeLocationConvertibleLoad label, bool isActive) - { - if (isActive) - replaceWithLoad(label); - else - replaceWithAddressComputation(label); - } - - static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) - { - return MacroAssembler::startOfBranchPtrWithPatchOnRegister(label); - } - - static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label) - { - return MacroAssembler::startOfPatchableBranchPtrWithPatchOnAddress(label); - } - - void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination) - { - MacroAssembler::replaceWithJump(instructionStart, destination); - } - - // This is a *bit* of a silly API, since we currently always also repatch the - // immediate after calling this. But I'm fine with that, since this just feels - // less yucky. 
- void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, MacroAssembler::RegisterID reg, void* value) - { - MacroAssembler::revertJumpReplacementToBranchPtrWithPatch(instructionStart, reg, value); - } - - void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, MacroAssembler::Address address, void* value) - { - MacroAssembler::revertJumpReplacementToPatchableBranchPtrWithPatch(instructionStart, address, value); - } - -private: - void* m_start; - size_t m_size; -}; - -} // namespace JSC - -#endif // ENABLE(ASSEMBLER) - -#endif // RepatchBuffer_h diff --git a/Source/JavaScriptCore/assembler/SH4Assembler.h b/Source/JavaScriptCore/assembler/SH4Assembler.h index f9f70ad26..3f102178e 100644 --- a/Source/JavaScriptCore/assembler/SH4Assembler.h +++ b/Source/JavaScriptCore/assembler/SH4Assembler.h @@ -33,6 +33,7 @@ #include "AssemblerBuffer.h" #include "AssemblerBufferWithConstantPool.h" #include "JITCompilationEffort.h" +#include <limits.h> #include <stdarg.h> #include <stdint.h> #include <stdio.h> @@ -182,6 +183,7 @@ enum { FSQRT_OPCODE = 0xf06d, FSCHG_OPCODE = 0xf3fd, CLRT_OPCODE = 8, + SYNCO_OPCODE = 0x00ab, }; namespace SH4Registers { @@ -325,6 +327,12 @@ public: static const RegisterID scratchReg2 = SH4Registers::r11; static const uint32_t maxInstructionSize = 16; + static constexpr RegisterID firstRegister() { return SH4Registers::r0; } + static constexpr RegisterID lastRegister() { return SH4Registers::r15; } + + static constexpr FPRegisterID firstFPRegister() { return SH4Registers::dr0; } + static constexpr FPRegisterID lastFPRegister() { return SH4Registers::dr14; } + enum { padForAlign8 = 0x00, padForAlign16 = 0x0009, @@ -343,6 +351,8 @@ public: { } + SH4Buffer& buffer() { return m_buffer; } + // SH4 condition codes typedef enum { EQ = 0x0, // Equal @@ -459,7 +469,7 @@ public: void andlImm8r(int imm8, RegisterID dst) { ASSERT((imm8 <= 255) && (imm8 >= 0)); - ASSERT(dst == SH4Registers::r0); + ASSERT_UNUSED(dst, dst == SH4Registers::r0); uint16_t opc = getOpcodeGroup5(ANDIMM_OPCODE, imm8); oneShortOp(opc); @@ -492,7 +502,7 @@ public: void orlImm8r(int imm8, RegisterID dst) { ASSERT((imm8 <= 255) && (imm8 >= 0)); - ASSERT(dst == SH4Registers::r0); + ASSERT_UNUSED(dst, dst == SH4Registers::r0); uint16_t opc = getOpcodeGroup5(ORIMM_OPCODE, imm8); oneShortOp(opc); @@ -519,7 +529,7 @@ public: void xorlImm8r(int imm8, RegisterID dst) { ASSERT((imm8 <= 255) && (imm8 >= 0)); - ASSERT(dst == SH4Registers::r0); + ASSERT_UNUSED(dst, dst == SH4Registers::r0); uint16_t opc = getOpcodeGroup5(XORIMM_OPCODE, imm8); oneShortOp(opc); @@ -687,6 +697,7 @@ public: void cmpEqImmR0(int imm, RegisterID dst) { + ASSERT_UNUSED(dst, dst == SH4Registers::r0); uint16_t opc = getOpcodeGroup5(CMPEQIMM_OPCODE, imm); oneShortOp(opc); } @@ -699,7 +710,8 @@ public: void testlImm8r(int imm, RegisterID dst) { - ASSERT((dst == SH4Registers::r0) && (imm <= 255) && (imm >= 0)); + ASSERT((imm <= 255) && (imm >= 0)); + ASSERT_UNUSED(dst, dst == SH4Registers::r0); uint16_t opc = getOpcodeGroup5(TSTIMM_OPCODE, imm); oneShortOp(opc); @@ -710,6 +722,11 @@ public: oneShortOp(NOP_OPCODE, false); } + void synco() + { + oneShortOp(SYNCO_OPCODE); + } + void sett() { oneShortOp(SETT_OPCODE); @@ -1062,7 +1079,7 @@ public: void movwPCReg(int offset, RegisterID base, RegisterID dst) { - ASSERT(base == SH4Registers::pc); + ASSERT_UNUSED(base, base == SH4Registers::pc); ASSERT((offset <= 255) && (offset >= 0)); uint16_t opc = getOpcodeGroup3(MOVW_READ_OFFPC_OPCODE, dst, offset); @@ 
-1071,7 +1088,7 @@ public: void movwMemReg(int offset, RegisterID base, RegisterID dst) { - ASSERT(dst == SH4Registers::r0); + ASSERT_UNUSED(dst, dst == SH4Registers::r0); uint16_t opc = getOpcodeGroup11(MOVW_READ_OFFRM_OPCODE, base, offset); oneShortOp(opc); @@ -1137,7 +1154,7 @@ public: void movbMemReg(int offset, RegisterID base, RegisterID dst) { - ASSERT(dst == SH4Registers::r0); + ASSERT_UNUSED(dst, dst == SH4Registers::r0); uint16_t opc = getOpcodeGroup11(MOVB_READ_OFFRM_OPCODE, base, offset); oneShortOp(opc); @@ -1457,7 +1474,7 @@ public: ASSERT(value >= 0); ASSERT(value <= 60); - // Handle the uncommon case where a flushConstantPool occurred in movlMemRegCompact. + // Handle the uncommon case where a flushConstantPool occured in movlMemRegCompact. if ((instructionPtr[0] & 0xf000) == BRA_OPCODE) instructionPtr += (instructionPtr[0] & 0x0fff) + 2; @@ -1615,11 +1632,6 @@ public: return reinterpret_cast<void*>(readPCrelativeAddress((*instructionPtr & 0xff), instructionPtr)); } - PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort) - { - return m_buffer.executableCopy(vm, ownerUID, effort); - } - static void cacheFlush(void* code, size_t size) { #if OS(LINUX) @@ -2171,8 +2183,8 @@ public: printfStdoutInstr(">> end repatch\n"); } #else - static void printInstr(uint16_t opc, unsigned size, bool isdoubleInst = true) { }; - static void printBlockInstr(uint16_t* first, unsigned offset, int nbInstr) { }; + static void printInstr(uint16_t, unsigned, bool = true) { }; + static void printBlockInstr(uint16_t*, unsigned, int) { }; #endif static void replaceWithLoad(void* instructionStart) diff --git a/Source/JavaScriptCore/assembler/X86Assembler.h b/Source/JavaScriptCore/assembler/X86Assembler.h index 2883e0a4c..857d22ba6 100644 --- a/Source/JavaScriptCore/assembler/X86Assembler.h +++ b/Source/JavaScriptCore/assembler/X86Assembler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,7 +29,9 @@ #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64)) #include "AssemblerBuffer.h" +#include "AssemblerCommon.h" #include "JITCompilationEffort.h" +#include <limits.h> #include <stdint.h> #include <wtf/Assertions.h> #include <wtf/Vector.h> @@ -39,45 +41,111 @@ namespace JSC { inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; } namespace X86Registers { - typedef enum { - eax, - ecx, - edx, - ebx, - esp, - ebp, - esi, - edi, -#if CPU(X86_64) - r8, - r9, - r10, - r11, - r12, - r13, - r14, - r15, -#endif - } RegisterID; +#define FOR_EACH_CPU_REGISTER(V) \ + FOR_EACH_CPU_GPREGISTER(V) \ + FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + FOR_EACH_CPU_FPREGISTER(V) + +// The following are defined as pairs of the following value: +// 1. type of the storage needed to save the register value by the JIT probe. +// 2. name of the register. +#define FOR_EACH_CPU_GPREGISTER(V) \ + V(void*, eax) \ + V(void*, ecx) \ + V(void*, edx) \ + V(void*, ebx) \ + V(void*, esp) \ + V(void*, ebp) \ + V(void*, esi) \ + V(void*, edi) \ + FOR_EACH_X86_64_CPU_GPREGISTER(V) + +#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + V(void*, eip) \ + V(void*, eflags) \ + +// Note: the JITs only stores double values in the FP registers. 
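// Illustrative sketch, not from the original patch: the (type, name) pairs above are meant to be
// consumed through an X-macro callback. The names SAMPLE_FOR_EACH_GPREGISTER, DECLARE_FIELD and
// SampleCPUState below are hypothetical, standing in for the real FOR_EACH_CPU_GPREGISTER list.
#define SAMPLE_FOR_EACH_GPREGISTER(V) \
    V(void*, eax) \
    V(void*, ecx)

struct SampleCPUState {
#define DECLARE_FIELD(type, name) type name; // expands each pair into a correctly typed member
    SAMPLE_FOR_EACH_GPREGISTER(DECLARE_FIELD)
#undef DECLARE_FIELD
};
// SampleCPUState ends up with one field per register (void* eax; void* ecx;), which is how a
// JIT probe can declare storage for saved register values.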
+#define FOR_EACH_CPU_FPREGISTER(V) \ + V(double, xmm0) \ + V(double, xmm1) \ + V(double, xmm2) \ + V(double, xmm3) \ + V(double, xmm4) \ + V(double, xmm5) \ + V(double, xmm6) \ + V(double, xmm7) \ + FOR_EACH_X86_64_CPU_FPREGISTER(V) - typedef enum { - xmm0, - xmm1, - xmm2, - xmm3, - xmm4, - xmm5, - xmm6, - xmm7, - } XMMRegisterID; -} +#if CPU(X86) + +#define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add. +#define FOR_EACH_X86_64_CPU_FPREGISTER(V) // Nothing to add. + +#elif CPU(X86_64) + +#define FOR_EACH_X86_64_CPU_GPREGISTER(V) \ + V(void*, r8) \ + V(void*, r9) \ + V(void*, r10) \ + V(void*, r11) \ + V(void*, r12) \ + V(void*, r13) \ + V(void*, r14) \ + V(void*, r15) + +#define FOR_EACH_X86_64_CPU_FPREGISTER(V) \ + V(double, xmm8) \ + V(double, xmm9) \ + V(double, xmm10) \ + V(double, xmm11) \ + V(double, xmm12) \ + V(double, xmm13) \ + V(double, xmm14) \ + V(double, xmm15) + +#endif // CPU(X86_64) + +typedef enum { + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER +} RegisterID; + +typedef enum { + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER +} XMMRegisterID; + +} // namespace X86Register class X86Assembler { public: typedef X86Registers::RegisterID RegisterID; + + static constexpr RegisterID firstRegister() { return X86Registers::eax; } + static constexpr RegisterID lastRegister() + { +#if CPU(X86_64) + return X86Registers::r15; +#else + return X86Registers::edi; +#endif + } + typedef X86Registers::XMMRegisterID XMMRegisterID; typedef XMMRegisterID FPRegisterID; + + static constexpr FPRegisterID firstFPRegister() { return X86Registers::xmm0; } + static constexpr FPRegisterID lastFPRegister() + { +#if CPU(X86_64) + return X86Registers::xmm15; +#else + return X86Registers::xmm7; +#endif + } typedef enum { ConditionO, @@ -102,21 +170,43 @@ public: } Condition; private: + // OneByteOpcodeID defines the bytecode for 1 byte instruction. It also contains the prefixes + // for two bytes instructions. + // TwoByteOpcodeID, ThreeByteOpcodeID define the opcodes for the multibytes instructions. + // + // The encoding for each instruction can be found in the Intel Architecture Manual in the appendix + // "Opcode Map." + // + // Each opcode can have a suffix describing the type of argument. The full list of suffixes is + // in the "Key to Abbreviations" section of the "Opcode Map". + // The most common argument types are: + // -E: The argument is either a GPR or a memory address. + // -G: The argument is a GPR. + // -I: The argument is an immediate. + // The most common sizes are: + // -v: 32 or 64bit depending on the operand-size attribute. + // -z: 32bit in both 32bit and 64bit mode. Common for immediate values. 
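// Illustrative examples of the naming scheme described above (editorial, not from the original patch):
//   OP_ADD_EvGv  (0x01) is "ADD Ev, Gv"  - add a GPR into a register-or-memory operand;
//   OP_ADD_GvEv  (0x03) is "ADD Gv, Ev"  - the reverse direction;
//   OP_ADD_EAXIv (0x05) is the short form "ADD eAX, Iz", which needs no ModRM byte.
// With a register-direct ModRM byte (mod = 11), the two bytes 0x01 0xC8 encode "add %ecx, %eax":
// the reg field (001) names the Gv source %ecx and the r/m field (000) names the Ev destination
// %eax. Under a REX.W prefix the same opcodes operate on 64-bit registers, while a "z" immediate
// stays 32 bits and is sign-extended.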
typedef enum { + OP_ADD_EbGb = 0x00, OP_ADD_EvGv = 0x01, OP_ADD_GvEv = 0x03, + OP_ADD_EAXIv = 0x05, OP_OR_EvGv = 0x09, OP_OR_GvEv = 0x0B, + OP_OR_EAXIv = 0x0D, OP_2BYTE_ESCAPE = 0x0F, OP_AND_EvGv = 0x21, OP_AND_GvEv = 0x23, OP_SUB_EvGv = 0x29, OP_SUB_GvEv = 0x2B, + OP_SUB_EAXIv = 0x2D, PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E, OP_XOR_EvGv = 0x31, OP_XOR_GvEv = 0x33, + OP_XOR_EAXIv = 0x35, OP_CMP_EvGv = 0x39, OP_CMP_GvEv = 0x3B, + OP_CMP_EAXIv = 0x3D, #if CPU(X86_64) PRE_REX = 0x40, #endif @@ -141,9 +231,12 @@ private: OP_LEA = 0x8D, OP_GROUP1A_Ev = 0x8F, OP_NOP = 0x90, + OP_XCHG_EAX = 0x90, OP_CDQ = 0x99, OP_MOV_EAXOv = 0xA1, OP_MOV_OvEAX = 0xA3, + OP_TEST_ALIb = 0xA8, + OP_TEST_EAXIv = 0xA9, OP_MOV_EAXIv = 0xB8, OP_GROUP2_EvIb = 0xC1, OP_RET = 0xC3, @@ -152,6 +245,7 @@ private: OP_INT3 = 0xCC, OP_GROUP2_Ev1 = 0xD1, OP_GROUP2_EvCL = 0xD3, + OP_ESCAPE_D9 = 0xD9, OP_ESCAPE_DD = 0xDD, OP_CALL_rel32 = 0xE8, OP_JMP_rel32 = 0xE9, @@ -169,24 +263,33 @@ private: OP2_MOVSD_WsdVsd = 0x11, OP2_MOVSS_VsdWsd = 0x10, OP2_MOVSS_WsdVsd = 0x11, + OP2_MOVAPD_VpdWpd = 0x28, + OP2_MOVAPS_VpdWpd = 0x28, OP2_CVTSI2SD_VsdEd = 0x2A, OP2_CVTTSD2SI_GdWsd = 0x2C, OP2_UCOMISD_VsdWsd = 0x2E, + OP2_3BYTE_ESCAPE_3A = 0x3A, + OP2_CMOVCC = 0x40, OP2_ADDSD_VsdWsd = 0x58, OP2_MULSD_VsdWsd = 0x59, OP2_CVTSD2SS_VsdWsd = 0x5A, OP2_CVTSS2SD_VsdWsd = 0x5A, OP2_SUBSD_VsdWsd = 0x5C, OP2_DIVSD_VsdWsd = 0x5E, + OP2_MOVMSKPD_VdEd = 0x50, OP2_SQRTSD_VsdWsd = 0x51, + OP2_ANDPS_VpdWpd = 0x54, OP2_ANDNPD_VpdWpd = 0x55, OP2_XORPD_VpdWpd = 0x57, OP2_MOVD_VdEd = 0x6E, OP2_MOVD_EdVd = 0x7E, OP2_JCC_rel32 = 0x80, OP_SETCC = 0x90, + OP2_3BYTE_ESCAPE_AE = 0xAE, OP2_IMUL_GvEv = 0xAF, OP2_MOVZX_GvEb = 0xB6, + OP2_BSR = 0xBD, + OP2_LZCNT = 0xBD, OP2_MOVSX_GvEb = 0xBE, OP2_MOVZX_GvEw = 0xB7, OP2_MOVSX_GvEw = 0xBF, @@ -195,6 +298,18 @@ private: OP2_PSRLQ_UdqIb = 0x73, OP2_POR_VdqWdq = 0XEB, } TwoByteOpcodeID; + + typedef enum { + OP3_ROUNDSS_VssWssIb = 0x0A, + OP3_ROUNDSD_VsdWsdIb = 0x0B, + OP3_MFENCE = 0xF0, + } ThreeByteOpcodeID; + + + TwoByteOpcodeID cmovcc(Condition cond) + { + return (TwoByteOpcodeID)(OP2_CMOVCC + cond); + } TwoByteOpcodeID jccRel32(Condition cond) { @@ -229,6 +344,7 @@ private: GROUP3_OP_TEST = 0, GROUP3_OP_NOT = 2, GROUP3_OP_NEG = 3, + GROUP3_OP_DIV = 6, GROUP3_OP_IDIV = 7, GROUP5_OP_CALLN = 2, @@ -240,6 +356,7 @@ private: GROUP14_OP_PSLLQ = 6, GROUP14_OP_PSRLQ = 2, + ESCAPE_D9_FSTP_singleReal = 3, ESCAPE_DD_FSTP_doubleReal = 3, } GroupOpcodeID; @@ -251,6 +368,8 @@ public: , m_indexOfTailOfLastWatchpoint(INT_MIN) { } + + AssemblerBuffer& buffer() { return m_formatter.m_buffer; } // Stack operations: @@ -317,13 +436,43 @@ public: m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset); } + void addl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) + { + m_formatter.oneByteOp(OP_ADD_EvGv, src, base, index, scale, offset); + } + + void addb_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.oneByteOp8(OP_ADD_EbGb, src, base, offset); + } + + void addb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) + { + m_formatter.oneByteOp8(OP_ADD_EbGb, src, base, index, scale, offset); + } + + void addw_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.prefix(PRE_OPERAND_SIZE); + m_formatter.oneByteOp8(OP_ADD_EvGv, src, base, offset); + } + + void addw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) + { + m_formatter.prefix(PRE_OPERAND_SIZE); + m_formatter.oneByteOp8(OP_ADD_EvGv, src, base, index, scale, offset); + } + void 
addl_ir(int imm, RegisterID dst) { if (CAN_SIGN_EXTEND_8_32(imm)) { m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_ADD_EAXIv); + else + m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst); m_formatter.immediate32(imm); } } @@ -339,6 +488,53 @@ public: } } + void addl_im(int imm, int offset, RegisterID base, RegisterID index, int scale) + { + if (CAN_SIGN_EXTEND_8_32(imm)) { + m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, index, scale, offset); + m_formatter.immediate8(imm); + } else { + m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, index, scale, offset); + m_formatter.immediate32(imm); + } + } + + void addb_im(int imm, int offset, RegisterID base) + { + m_formatter.oneByteOp8(OP_GROUP1_EbIb, GROUP1_OP_ADD, base, offset); + m_formatter.immediate8(imm); + } + + void addb_im(int imm, int offset, RegisterID base, RegisterID index, int scale) + { + m_formatter.oneByteOp8(OP_GROUP1_EbIb, GROUP1_OP_ADD, base, index, scale, offset); + m_formatter.immediate8(imm); + } + + void addw_im(int imm, int offset, RegisterID base) + { + m_formatter.prefix(PRE_OPERAND_SIZE); + if (CAN_SIGN_EXTEND_8_32(imm)) { + m_formatter.oneByteOp8(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset); + m_formatter.immediate8(imm); + } else { + m_formatter.oneByteOp8(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset); + m_formatter.immediate16(imm); + } + } + + void addw_im(int imm, int offset, RegisterID base, RegisterID index, int scale) + { + m_formatter.prefix(PRE_OPERAND_SIZE); + if (CAN_SIGN_EXTEND_8_32(imm)) { + m_formatter.oneByteOp8(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, index, scale, offset); + m_formatter.immediate8(imm); + } else { + m_formatter.oneByteOp8(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, index, scale, offset); + m_formatter.immediate16(imm); + } + } + #if CPU(X86_64) void addq_rr(RegisterID src, RegisterID dst) { @@ -350,13 +546,21 @@ public: m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset); } + void addq_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.oneByteOp64(OP_ADD_EvGv, src, base, offset); + } + void addq_ir(int imm, RegisterID dst) { if (CAN_SIGN_EXTEND_8_32(imm)) { m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp64(OP_ADD_EAXIv); + else + m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst); m_formatter.immediate32(imm); } } @@ -450,6 +654,35 @@ public: } #endif + void dec_r(RegisterID dst) + { + m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_OR, dst); + } + +#if CPU(X86_64) + void decq_r(RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_OR, dst); + } +#endif // CPU(X86_64) + + void inc_r(RegisterID dst) + { + m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_ADD, dst); + } + +#if CPU(X86_64) + void incq_r(RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, dst); + } + + void incq_m(int offset, RegisterID base) + { + m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, base, offset); + } +#endif // CPU(X86_64) + void negl_r(RegisterID dst) { m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst); @@ -477,6 +710,18 @@ public: m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset); } +#if CPU(X86_64) + void notq_r(RegisterID dst) + { + 
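// Editorial note, not from the original patch: OP_GROUP3_Ev (opcode 0xF7) uses the ModRM reg
// field as an opcode extension, and GROUP3_OP_NOT selects /2, so the oneByteOp64 call below
// emits "REX.W F7 /2". For example, notq %rax assembles to 48 F7 D0 (REX.W, opcode, then a
// ModRM byte with mod=11, reg=2, rm=0).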
m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NOT, dst); + } + + void notq_m(int offset, RegisterID base) + { + m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset); + } +#endif + void orl_rr(RegisterID src, RegisterID dst) { m_formatter.oneByteOp(OP_OR_EvGv, src, dst); @@ -498,7 +743,10 @@ public: m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_OR_EAXIv); + else + m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst); m_formatter.immediate32(imm); } } @@ -526,7 +774,10 @@ public: m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp64(OP_OR_EAXIv); + else + m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst); m_formatter.immediate32(imm); } } @@ -569,7 +820,10 @@ public: m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_SUB_EAXIv); + else + m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst); m_formatter.immediate32(imm); } } @@ -591,13 +845,37 @@ public: m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst); } + void subq_mr(int offset, RegisterID base, RegisterID dst) + { + m_formatter.oneByteOp64(OP_SUB_GvEv, dst, base, offset); + } + + void subq_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.oneByteOp64(OP_SUB_EvGv, src, base, offset); + } + void subq_ir(int imm, RegisterID dst) { if (CAN_SIGN_EXTEND_8_32(imm)) { m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp64(OP_SUB_EAXIv); + else + m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst); + m_formatter.immediate32(imm); + } + } + + void subq_im(int imm, int offset, RegisterID base) + { + if (CAN_SIGN_EXTEND_8_32(imm)) { + m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset); + m_formatter.immediate8(imm); + } else { + m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset); m_formatter.immediate32(imm); } } @@ -646,7 +924,10 @@ public: m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_XOR_EAXIv); + else + m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst); m_formatter.immediate32(imm); } } @@ -663,7 +944,10 @@ public: m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp64(OP_XOR_EAXIv); + else + m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst); m_formatter.immediate32(imm); } } @@ -685,6 +969,54 @@ public: #endif + void lzcnt_rr(RegisterID src, RegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_LZCNT, dst, src); + } + + void lzcnt_mr(int offset, RegisterID base, RegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_LZCNT, dst, base, offset); + } + +#if CPU(X86_64) + void lzcntq_rr(RegisterID src, 
RegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp64(OP2_LZCNT, dst, src); + } + + void lzcntq_mr(int offset, RegisterID base, RegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp64(OP2_LZCNT, dst, base, offset); + } +#endif + + void bsr_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp(OP2_BSR, dst, src); + } + + void bsr_mr(int offset, RegisterID base, RegisterID dst) + { + m_formatter.twoByteOp(OP2_BSR, dst, base, offset); + } + +#if CPU(X86_64) + void bsrq_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(OP2_BSR, dst, src); + } + + void bsrq_mr(int offset, RegisterID base, RegisterID dst) + { + m_formatter.twoByteOp64(OP2_BSR, dst, base, offset); + } +#endif + void sarl_i8r(int imm, RegisterID dst) { if (imm == 1) @@ -745,13 +1077,50 @@ public: m_formatter.immediate8(imm); } } -#endif + + void shrq_i8r(int imm, RegisterID dst) + { + if (imm == 1) + m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst); + else { + m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst); + m_formatter.immediate8(imm); + } + } + + void shrq_CLr(RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst); + } + + void shlq_i8r(int imm, RegisterID dst) + { + if (imm == 1) + m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst); + else { + m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst); + m_formatter.immediate8(imm); + } + } + + void shlq_CLr(RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst); + } +#endif // CPU(X86_64) void imull_rr(RegisterID src, RegisterID dst) { m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src); } +#if CPU(X86_64) + void imulq_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(OP2_IMUL_GvEv, dst, src); + } +#endif // CPU(X86_64) + void imull_mr(int offset, RegisterID base, RegisterID dst) { m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset); @@ -763,11 +1132,23 @@ public: m_formatter.immediate32(value); } + void divl_r(RegisterID dst) + { + m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_DIV, dst); + } + void idivl_r(RegisterID dst) { m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst); } +#if CPU(X86_64) + void idivq_r(RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst); + } +#endif // CPU(X86_64) + // Comparisons: void cmpl_rr(RegisterID src, RegisterID dst) @@ -791,7 +1172,10 @@ public: m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_CMP_EAXIv); + else + m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst); m_formatter.immediate32(imm); } } @@ -861,6 +1245,11 @@ public: m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset); } + void cmpq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) + { + m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, index, scale, offset); + } + void cmpq_mr(int offset, RegisterID base, RegisterID src) { m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset); @@ -872,7 +1261,10 @@ public: m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp64(OP_CMP_EAXIv); + else + m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst); m_formatter.immediate32(imm); } } @@ -955,7 +1347,10 @@ public: void 
testl_i32r(int imm, RegisterID dst) { - m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_TEST_EAXIv); + else + m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst); m_formatter.immediate32(imm); } @@ -1009,7 +1404,10 @@ public: void testq_i32r(int imm, RegisterID dst) { - m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp64(OP_TEST_EAXIv); + else + m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst); m_formatter.immediate32(imm); } @@ -1034,7 +1432,10 @@ public: void testb_i8r(int imm, RegisterID dst) { - m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_TEST_ALIb); + else + m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst); m_formatter.immediate8(imm); } @@ -1063,6 +1464,16 @@ public: setne_r(dst); } + void setnp_r(RegisterID dst) + { + m_formatter.twoByteOp8(setccOpcode(ConditionNP), (GroupOpcodeID)0, dst); + } + + void setp_r(RegisterID dst) + { + m_formatter.twoByteOp8(setccOpcode(ConditionP), (GroupOpcodeID)0, dst); + } + // Various move ops: void cdq() @@ -1070,6 +1481,18 @@ public: m_formatter.oneByteOp(OP_CDQ); } +#if CPU(X86_64) + void cqo() + { + m_formatter.oneByteOp64(OP_CDQ); + } +#endif + + void fstps(int offset, RegisterID base) + { + m_formatter.oneByteOp(OP_ESCAPE_D9, ESCAPE_D9_FSTP_singleReal, base, offset); + } + void fstpl(int offset, RegisterID base) { m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset); @@ -1077,13 +1500,33 @@ public: void xchgl_rr(RegisterID src, RegisterID dst) { - m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst); + if (src == X86Registers::eax) + m_formatter.oneByteOp(OP_XCHG_EAX, dst); + else if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_XCHG_EAX, src); + else + m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst); + } + + void xchgl_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.oneByteOp(OP_XCHG_EvGv, src, base, offset); } #if CPU(X86_64) void xchgq_rr(RegisterID src, RegisterID dst) { - m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst); + if (src == X86Registers::eax) + m_formatter.oneByteOp64(OP_XCHG_EAX, dst); + else if (dst == X86Registers::eax) + m_formatter.oneByteOp64(OP_XCHG_EAX, src); + else + m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst); + } + + void xchgq_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.oneByteOp64(OP_XCHG_EvGv, src, base, offset); } #endif @@ -1177,12 +1620,33 @@ public: m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset); m_formatter.immediate8(imm); } + +#if !CPU(X86_64) + void movb_rm(RegisterID src, const void* addr) + { + m_formatter.oneByteOp(OP_MOV_EbGb, src, addr); + } +#endif + + void movb_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, offset); + } void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) { m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset); } - + + void movw_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.prefix(PRE_OPERAND_SIZE); + + // FIXME: We often use oneByteOp8 for 16-bit operations. It's not clear that this is + // necessary. 
https://bugs.webkit.org/show_bug.cgi?id=153433 + m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, offset); + } + void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) { m_formatter.prefix(PRE_OPERAND_SIZE); @@ -1263,6 +1727,12 @@ public: m_formatter.oneByteOp64(OP_MOV_EAXIv, dst); m_formatter.immediate64(imm); } + + void mov_i32r(int32_t imm, RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, dst); + m_formatter.immediate32(imm); + } void movsxd_rr(RegisterID src, RegisterID dst) { @@ -1324,6 +1794,13 @@ public: m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset); } +#if !CPU(X86_64) + void movzbl_mr(const void* address, RegisterID dst) + { + m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, address); + } +#endif + void movsbl_mr(int offset, RegisterID base, RegisterID dst) { m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset); @@ -1342,15 +1819,118 @@ public: m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src); } + void movsbl_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp8(OP2_MOVSX_GvEb, dst, src); + } + + void movzwl_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp8(OP2_MOVZX_GvEw, dst, src); + } + + void movswl_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp8(OP2_MOVSX_GvEw, dst, src); + } + + void cmovl_rr(Condition cond, RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(cond), dst, src); + } + + void cmovl_mr(Condition cond, int offset, RegisterID base, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(cond), dst, base, offset); + } + + void cmovl_mr(Condition cond, int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(cond), dst, base, index, scale, offset); + } + + void cmovel_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(ConditionE), dst, src); + } + + void cmovnel_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(ConditionNE), dst, src); + } + + void cmovpl_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(ConditionP), dst, src); + } + + void cmovnpl_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(ConditionNP), dst, src); + } + +#if CPU(X86_64) + void cmovq_rr(Condition cond, RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(cond), dst, src); + } + + void cmovq_mr(Condition cond, int offset, RegisterID base, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(cond), dst, base, offset); + } + + void cmovq_mr(Condition cond, int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(cond), dst, base, index, scale, offset); + } + + void cmoveq_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(ConditionE), dst, src); + } + + void cmovneq_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(ConditionNE), dst, src); + } + + void cmovpq_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(ConditionP), dst, src); + } + + void cmovnpq_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(ConditionNP), dst, src); + } +#else + void cmovl_mr(Condition cond, const void* addr, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(cond), dst, addr); + } +#endif + void leal_mr(int offset, RegisterID base, RegisterID dst) { m_formatter.oneByteOp(OP_LEA, dst, base, offset); } + + void leal_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) + { + 
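// Editorial note, not from the original patch: LEA (OP_LEA, 0x8D) computes base + index * scale
// + offset and writes the resulting address into dst without touching memory, which is why this
// scaled-index form is handy for three-operand adds. The index operand is encoded with a SIB
// byte; for example "leal 8(%ebx,%ecx,4), %eax" assembles to 8D 44 8B 08.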
m_formatter.oneByteOp(OP_LEA, dst, base, index, scale, offset); + } + #if CPU(X86_64) void leaq_mr(int offset, RegisterID base, RegisterID dst) { m_formatter.oneByteOp64(OP_LEA, dst, base, offset); } + + void leaq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) + { + m_formatter.oneByteOp64(OP_LEA, dst, base, index, scale, offset); + } #endif // Flow control: @@ -1513,6 +2093,18 @@ public: m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset); } + void addss_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src); + } + + void addss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset); + } + #if !CPU(X86_64) void addsd_mr(const void* address, XMMRegisterID dst) { @@ -1527,6 +2119,14 @@ public: m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src); } +#if CPU(X86_64) + void cvtsi2sdq_rr(RegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F2); + m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src); + } +#endif + void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F2); @@ -1553,12 +2153,24 @@ public: m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src); } + void cvtsd2ss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F2); + m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, base, offset); + } + void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F3); m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src); } - + + void cvtss2sd_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, base, offset); + } + #if CPU(X86_64) void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst) { @@ -1580,6 +2192,12 @@ public: } #if CPU(X86_64) + void movmskpd_rr(XMMRegisterID src, RegisterID dst) + { + m_formatter.prefix(PRE_SSE_66); + m_formatter.twoByteOp64(OP2_MOVMSKPD_VdEd, dst, (RegisterID)src); + } + void movq_rr(XMMRegisterID src, RegisterID dst) { m_formatter.prefix(PRE_SSE_66); @@ -1593,6 +2211,17 @@ public: } #endif + void movapd_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_66); + m_formatter.twoByteOp(OP2_MOVAPD_VpdWpd, (RegisterID)dst, (RegisterID)src); + } + + void movaps_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.twoByteOp(OP2_MOVAPS_VpdWpd, (RegisterID)dst, (RegisterID)src); + } + void movsd_rr(XMMRegisterID src, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F2); @@ -1610,6 +2239,12 @@ public: m_formatter.prefix(PRE_SSE_F2); m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset); } + + void movss_rm(XMMRegisterID src, int offset, RegisterID base) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset); + } void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale) { @@ -1628,7 +2263,13 @@ public: m_formatter.prefix(PRE_SSE_F2); m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset); } - + + void movss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset); + } + void movss_mr(int offset, RegisterID base, RegisterID 
index, int scale, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F3); @@ -1660,6 +2301,18 @@ public: m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset); } + void mulss_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src); + } + + void mulss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset); + } + void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst) { m_formatter.prefix(PRE_SSE_66); @@ -1699,6 +2352,18 @@ public: m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset); } + void subss_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src); + } + + void subss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset); + } + void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_66); @@ -1711,6 +2376,16 @@ public: m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset); } + void ucomiss_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src); + } + + void ucomiss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset); + } + void divsd_rr(XMMRegisterID src, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F2); @@ -1723,8 +2398,34 @@ public: m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset); } + void divss_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src); + } + + void divss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset); + } + + void andps_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.twoByteOp(OP2_ANDPS_VpdWpd, (RegisterID)dst, (RegisterID)src); + } + + void xorps_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src); + } + void xorpd_rr(XMMRegisterID src, XMMRegisterID dst) { + if (src == dst) { + xorps_rr(src, dst); + return; + } m_formatter.prefix(PRE_SSE_66); m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src); } @@ -1741,6 +2442,59 @@ public: m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src); } + void sqrtsd_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F2); + m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, base, offset); + } + + void sqrtss_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src); + } + + void sqrtss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, base, offset); + } + + enum class RoundingType : uint8_t { + ToNearestWithTiesToEven = 0, + TowardNegativeInfiniti = 1, + TowardInfiniti = 2, + TowardZero = 3 + }; + + void roundss_rr(XMMRegisterID src, XMMRegisterID dst, RoundingType rounding) + { + m_formatter.prefix(PRE_SSE_66); + 
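// Editorial note, not from the original patch: ROUNDSS is the SSE4.1 encoding 66 0F 3A 0A /r ib.
// The PRE_SSE_66 prefix has just been emitted, and the threeByteOp call below adds the 0F 3A
// escape bytes and the 0x0A opcode; the trailing immediate's low two bits select the rounding
// mode, matching the RoundingType values above (ROUNDSD is identical except for opcode 0x0B).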
m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSS_VssWssIb, (RegisterID)dst, (RegisterID)src); + m_formatter.immediate8(static_cast<uint8_t>(rounding)); + } + + void roundss_mr(int offset, RegisterID base, XMMRegisterID dst, RoundingType rounding) + { + m_formatter.prefix(PRE_SSE_66); + m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSS_VssWssIb, (RegisterID)dst, base, offset); + m_formatter.immediate8(static_cast<uint8_t>(rounding)); + } + + void roundsd_rr(XMMRegisterID src, XMMRegisterID dst, RoundingType rounding) + { + m_formatter.prefix(PRE_SSE_66); + m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSD_VsdWsdIb, (RegisterID)dst, (RegisterID)src); + m_formatter.immediate8(static_cast<uint8_t>(rounding)); + } + + void roundsd_mr(int offset, RegisterID base, XMMRegisterID dst, RoundingType rounding) + { + m_formatter.prefix(PRE_SSE_66); + m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSD_VsdWsdIb, (RegisterID)dst, base, offset); + m_formatter.immediate8(static_cast<uint8_t>(rounding)); + } + // Misc instructions: void int3() @@ -1757,6 +2511,11 @@ public: { m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN); } + + void mfence() + { + m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_AE, OP3_MFENCE); + } // Assembler admin methods: @@ -1886,9 +2645,9 @@ public: #if CPU(X86_64) static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst) { + const unsigned instructionSize = 10; // REX.W MOV IMM64 const int rexBytes = 1; const int opcodeBytes = 1; - ASSERT(rexBytes + opcodeBytes <= maxJumpReplacementSize()); uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); ptr[0] = PRE_REX | (1 << 3) | (dst >> 3); ptr[1] = OP_MOV_EAXIv | (dst & 7); @@ -1898,11 +2657,33 @@ public: uint8_t asBytes[8]; } u; u.asWord = imm; - for (unsigned i = rexBytes + opcodeBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i) + for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i) + ptr[i] = u.asBytes[i - rexBytes - opcodeBytes]; + } + + static void revertJumpTo_movl_i32r(void* instructionStart, int32_t imm, RegisterID dst) + { + // We only revert jumps on inline caches, and inline caches always use the scratch register (r11). + // FIXME: If the above is ever false then we need to make this smarter with respect to emitting + // the REX byte. 
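// Editorial note, not from the original patch: because r11 is an extended register, a REX prefix
// with REX.B = (dst >> 3) is always required here, so the emitted sequence is REX, (0xB8 | (dst & 7)),
// imm32 - six bytes in total, which is what instructionSize below accounts for. For r11 this is
// 41 BB xx xx xx xx, i.e. "movl $imm32, %r11d".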
+ ASSERT(dst == X86Registers::r11); + const unsigned instructionSize = 6; // REX MOV IMM32 + const int rexBytes = 1; + const int opcodeBytes = 1; + uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); + ptr[0] = PRE_REX | (dst >> 3); + ptr[1] = OP_MOV_EAXIv | (dst & 7); + + union { + uint32_t asWord; + uint8_t asBytes[4]; + } u; + u.asWord = imm; + for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i) ptr[i] = u.asBytes[i - rexBytes - opcodeBytes]; } #endif - + static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst) { const int opcodeBytes = 1; @@ -1991,11 +2772,6 @@ public: return b.m_offset - a.m_offset; } - PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort) - { - return m_formatter.executableCopy(vm, ownerUID, effort); - } - unsigned debugOffset() { return m_formatter.debugOffset(); } void nop() @@ -2003,6 +2779,50 @@ public: m_formatter.oneByteOp(OP_NOP); } + static void fillNops(void* base, size_t size) + { +#if CPU(X86_64) + static const uint8_t nops[10][10] = { + // nop + {0x90}, + // xchg %ax,%ax + {0x66, 0x90}, + // nopl (%[re]ax) + {0x0f, 0x1f, 0x00}, + // nopl 8(%[re]ax) + {0x0f, 0x1f, 0x40, 0x08}, + // nopl 8(%[re]ax,%[re]ax,1) + {0x0f, 0x1f, 0x44, 0x00, 0x08}, + // nopw 8(%[re]ax,%[re]ax,1) + {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x08}, + // nopl 512(%[re]ax) + {0x0f, 0x1f, 0x80, 0x00, 0x02, 0x00, 0x00}, + // nopl 512(%[re]ax,%[re]ax,1) + {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}, + // nopw 512(%[re]ax,%[re]ax,1) + {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}, + // nopw %cs:512(%[re]ax,%[re]ax,1) + {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00} + }; + + uint8_t* where = reinterpret_cast<uint8_t*>(base); + while (size) { + unsigned nopSize = static_cast<unsigned>(std::min<size_t>(size, 15)); + unsigned numPrefixes = nopSize <= 10 ? 
0 : nopSize - 10; + for (unsigned i = 0; i != numPrefixes; ++i) + *where++ = 0x66; + + unsigned nopRest = nopSize - numPrefixes; + for (unsigned i = 0; i != nopRest; ++i) + *where++ = nops[nopRest-1][i]; + + size -= nopSize; + } +#else + memset(base, OP_NOP, size); +#endif + } + // This is a no-op on x86 ALWAYS_INLINE static void cacheFlush(void*, size_t) { } @@ -2173,6 +2993,34 @@ private: } #endif + void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode) + { + m_buffer.ensureSpace(maxInstructionSize); + m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); + m_buffer.putByteUnchecked(twoBytePrefix); + m_buffer.putByteUnchecked(opcode); + } + + void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode, int reg, RegisterID rm) + { + m_buffer.ensureSpace(maxInstructionSize); + emitRexIfNeeded(reg, 0, rm); + m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); + m_buffer.putByteUnchecked(twoBytePrefix); + m_buffer.putByteUnchecked(opcode); + registerModRM(reg, rm); + } + + void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode, int reg, RegisterID base, int displacement) + { + m_buffer.ensureSpace(maxInstructionSize); + emitRexIfNeeded(reg, 0, base); + m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); + m_buffer.putByteUnchecked(twoBytePrefix); + m_buffer.putByteUnchecked(opcode); + memoryModRM(reg, base, displacement); + } + #if CPU(X86_64) // Quad-word-sized operands: // @@ -2242,6 +3090,24 @@ private: m_buffer.putByteUnchecked(opcode); registerModRM(reg, rm); } + + void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset) + { + m_buffer.ensureSpace(maxInstructionSize); + emitRexW(reg, 0, base); + m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); + m_buffer.putByteUnchecked(opcode); + memoryModRM(reg, base, offset); + } + + void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset) + { + m_buffer.ensureSpace(maxInstructionSize); + emitRexW(reg, index, base); + m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); + m_buffer.putByteUnchecked(opcode); + memoryModRM(reg, base, index, scale, offset); + } #endif // Byte-operands: @@ -2285,6 +3151,14 @@ private: registerModRM(reg, rm); } + void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) + { + m_buffer.ensureSpace(maxInstructionSize); + emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(base), reg, 0, base); + m_buffer.putByteUnchecked(opcode); + memoryModRM(reg, base, offset); + } + void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset) { m_buffer.ensureSpace(maxInstructionSize); @@ -2349,11 +3223,6 @@ private: bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); } void* data() const { return m_buffer.data(); } - PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort) - { - return m_buffer.executableCopy(vm, ownerUID, effort); - } - unsigned debugOffset() { return m_buffer.debugOffset(); } private: @@ -2527,6 +3396,7 @@ private: } #endif + public: AssemblerBuffer m_buffer; } m_formatter; int m_indexOfLastWatchpoint; |