Diffstat (limited to 'Source/JavaScriptCore/assembler')
-rw-r--r--  Source/JavaScriptCore/assembler/ARM64Assembler.h | 245
-rw-r--r--  Source/JavaScriptCore/assembler/ARMAssembler.h | 135
-rw-r--r--  Source/JavaScriptCore/assembler/ARMv7Assembler.cpp | 36
-rw-r--r--  Source/JavaScriptCore/assembler/ARMv7Assembler.h | 325
-rw-r--r--  Source/JavaScriptCore/assembler/AbortReason.h | 77
-rw-r--r--  Source/JavaScriptCore/assembler/AbstractMacroAssembler.h | 383
-rw-r--r--  Source/JavaScriptCore/assembler/AssemblerBuffer.h | 116
-rw-r--r--  Source/JavaScriptCore/assembler/LinkBuffer.cpp | 93
-rw-r--r--  Source/JavaScriptCore/assembler/LinkBuffer.h | 51
-rw-r--r--  Source/JavaScriptCore/assembler/MIPSAssembler.h | 20
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssembler.h | 87
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp | 77
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARM.h | 116
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARM64.h | 244
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp | 107
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h | 213
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h | 28
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h | 32
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerSH4.h | 116
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerX86.h | 100
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp | 123
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h | 88
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h | 240
-rw-r--r--  Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h | 88
-rw-r--r--  Source/JavaScriptCore/assembler/RepatchBuffer.h | 10
-rw-r--r--  Source/JavaScriptCore/assembler/X86Assembler.h | 348
26 files changed, 985 insertions, 2513 deletions
diff --git a/Source/JavaScriptCore/assembler/ARM64Assembler.h b/Source/JavaScriptCore/assembler/ARM64Assembler.h
index 2b5fec622..cfbd8cec5 100644
--- a/Source/JavaScriptCore/assembler/ARM64Assembler.h
+++ b/Source/JavaScriptCore/assembler/ARM64Assembler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,7 +29,6 @@
#if ENABLE(ASSEMBLER) && CPU(ARM64)
#include "AssemblerBuffer.h"
-#include <limits.h>
#include <wtf/Assertions.h>
#include <wtf/Vector.h>
#include <stdint.h>
@@ -41,26 +40,14 @@
#define DATASIZE DATASIZE_OF(datasize)
#define MEMOPSIZE MEMOPSIZE_OF(datasize)
#define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128)
-#define MEMPAIROPSIZE_INT(datasize) ((datasize == 64) ? MemPairOp_64 : MemPairOp_32)
-#define MEMPAIROPSIZE_FP(datasize) ((datasize == 128) ? MemPairOp_V128 : (datasize == 64) ? MemPairOp_V64 : MemPairOp_V32)
namespace JSC {
-ALWAYS_INLINE bool isInt7(int32_t value)
-{
- return value == ((value << 25) >> 25);
-}
-
ALWAYS_INLINE bool isInt9(int32_t value)
{
return value == ((value << 23) >> 23);
}
-ALWAYS_INLINE bool isInt11(int32_t value)
-{
- return value == ((value << 21) >> 21);
-}
-
ALWAYS_INLINE bool isUInt5(int32_t value)
{
return !(value & ~0x1f);
@@ -132,34 +119,6 @@ private:
int m_value;
};
-class PairPostIndex {
-public:
- explicit PairPostIndex(int value)
- : m_value(value)
- {
- ASSERT(isInt11(value));
- }
-
- operator int() { return m_value; }
-
-private:
- int m_value;
-};
-
-class PairPreIndex {
-public:
- explicit PairPreIndex(int value)
- : m_value(value)
- {
- ASSERT(isInt11(value));
- }
-
- operator int() { return m_value; }
-
-private:
- int m_value;
-};
-
class LogicalImmediate {
public:
static LogicalImmediate create32(uint32_t value)
@@ -479,7 +438,7 @@ public:
typedef ARM64Registers::FPRegisterID FPRegisterID;
static RegisterID firstRegister() { return ARM64Registers::x0; }
- static RegisterID lastRegister() { return ARM64Registers::sp; }
+ static RegisterID lastRegister() { return ARM64Registers::x28; }
static FPRegisterID firstFPRegister() { return ARM64Registers::q0; }
static FPRegisterID lastFPRegister() { return ARM64Registers::q31; }
@@ -624,9 +583,9 @@ public:
JumpType m_type : 8;
JumpLinkType m_linkType : 8;
Condition m_condition : 4;
- unsigned m_bitNumber : 6;
- RegisterID m_compareRegister : 6;
bool m_is64Bit : 1;
+ unsigned m_bitNumber : 6;
+ RegisterID m_compareRegister : 5;
} realTypes;
struct CopyTypes {
uint64_t content[3];
@@ -864,16 +823,6 @@ private:
MemOp_LOAD_signed32 = 3 // size may be 0 or 1
};
- enum MemPairOpSize {
- MemPairOp_32 = 0,
- MemPairOp_LoadSigned_32 = 1,
- MemPairOp_64 = 2,
-
- MemPairOp_V32 = MemPairOp_32,
- MemPairOp_V64 = 1,
- MemPairOp_V128 = 2
- };
-
enum MoveWideOp {
MoveWideOp_N = 0,
MoveWideOp_Z = 2,
@@ -887,14 +836,6 @@ private:
LdrLiteralOp_128BIT = 2
};
- static unsigned memPairOffsetShift(bool V, MemPairOpSize size)
- {
- // return the log2 of the size in bytes, e.g. 64 bit size returns 3
- if (V)
- return size + 2;
- return (size >> 1) + 2;
- }
-
public:
// Integer Instructions:
@@ -930,9 +871,8 @@ public:
ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
{
CHECK_DATASIZE();
- if (isSp(rd) || isSp(rn)) {
+ if (isSp(rn)) {
ASSERT(shift == LSL);
- ASSERT(!isSp(rm));
add<datasize, setFlags>(rd, rn, rm, UXTX, amount);
} else
insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd));
@@ -947,7 +887,6 @@ public:
{
ASSERT(!(offset & 0xfff));
insn(pcRelative(true, offset >> 12, rd));
- nopCortexA53Fix843419();
}
template<int datasize, SetFlags setFlags = DontSetFlags>
@@ -1277,20 +1216,6 @@ public:
}
template<int datasize>
- ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
- {
- CHECK_DATASIZE();
- insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
- }
-
- template<int datasize>
- ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
- {
- CHECK_DATASIZE();
- insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
- }
-
- template<int datasize>
ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm)
{
ldr<datasize>(rt, rn, rm, UXTX, 0);
@@ -1569,7 +1494,6 @@ public:
ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
CHECK_DATASIZE();
- nopCortexA53Fix835769<datasize>();
insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd));
}
@@ -1622,7 +1546,6 @@ public:
ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
CHECK_DATASIZE();
- nopCortexA53Fix835769<datasize>();
insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd));
}
@@ -1673,14 +1596,6 @@ public:
insn(nopPseudo());
}
- static void fillNops(void* base, size_t size)
- {
- RELEASE_ASSERT(!(size % sizeof(int32_t)));
- size_t n = size / sizeof(int32_t);
- for (int32_t* ptr = static_cast<int32_t*>(base); n--;)
- *ptr++ = nopPseudo();
- }
-
ALWAYS_INLINE void dmbSY()
{
insn(0xd5033fbf);
@@ -1809,7 +1724,6 @@ public:
ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
- nopCortexA53Fix835769<64>();
insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd));
}
@@ -1820,7 +1734,6 @@ public:
ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
- nopCortexA53Fix835769<64>();
insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd));
}
@@ -1835,20 +1748,6 @@ public:
}
template<int datasize>
- ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
- {
- CHECK_DATASIZE();
- insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
- }
-
- template<int datasize>
- ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
- {
- CHECK_DATASIZE();
- insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
- }
-
- template<int datasize>
ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm)
{
str<datasize>(rt, rn, rm, UXTX, 0);
@@ -1963,13 +1862,7 @@ public:
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
{
- ASSERT_WITH_MESSAGE(!isSp(rd) || setFlags == DontSetFlags, "SUBS with shifted register does not support SP for Xd, it uses XZR for the register 31. SUBS with extended register support SP for Xd, but only if SetFlag is not used, otherwise register 31 is Xd.");
- ASSERT_WITH_MESSAGE(!isSp(rm), "No encoding of SUBS supports SP for the third operand.");
-
- if (isSp(rd) || isSp(rn))
- sub<datasize, setFlags>(rd, rn, rm, UXTX, 0);
- else
- sub<datasize, setFlags>(rd, rn, rm, LSL, 0);
+ sub<datasize, setFlags>(rd, rn, rm, LSL, 0);
}
template<int datasize, SetFlags setFlags = DontSetFlags>
@@ -1983,8 +1876,11 @@ public:
ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
{
CHECK_DATASIZE();
- ASSERT(!isSp(rd) && !isSp(rn) && !isSp(rm));
- insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
+ if (isSp(rn)) {
+ ASSERT(shift == LSL);
+ sub<datasize, setFlags>(rd, rn, rm, UXTX, amount);
+ } else
+ insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
}
template<int datasize>
@@ -2064,7 +1960,6 @@ public:
ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
- nopCortexA53Fix835769<64>();
insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd));
}
@@ -2075,7 +1970,6 @@ public:
ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
- nopCortexA53Fix835769<64>();
insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd));
}
@@ -2600,6 +2494,13 @@ public:
return b.m_offset - a.m_offset;
}
+ int executableOffsetFor(int location)
+ {
+ if (!location)
+ return 0;
+ return static_cast<int32_t*>(m_buffer.data())[location / sizeof(int32_t) - 1];
+ }
+
void* unlinkedCode() { return m_buffer.data(); }
size_t codeSize() const { return m_buffer.codeSize(); }
@@ -2851,34 +2752,10 @@ public:
unsigned debugOffset() { return m_buffer.debugOffset(); }
-#if OS(LINUX) && COMPILER(GCC_OR_CLANG)
- static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
- {
- __builtin___clear_cache(reinterpret_cast<void*>(begin), reinterpret_cast<void*>(end));
- }
-#endif
-
static void cacheFlush(void* code, size_t size)
{
#if OS(IOS)
sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
-#elif OS(LINUX)
- size_t page = pageSize();
- uintptr_t current = reinterpret_cast<uintptr_t>(code);
- uintptr_t end = current + size;
- uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
-
- if (end <= firstPageEnd) {
- linuxPageFlush(current, end);
- return;
- }
-
- linuxPageFlush(current, firstPageEnd);
-
- for (current = firstPageEnd; current + page < end; current += page)
- linuxPageFlush(current, current + page);
-
- linuxPageFlush(current, end);
#else
#error "The cacheFlush support is missing on this platform."
#endif
@@ -2886,20 +2763,20 @@ public:
// Assembler admin methods:
- static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+ int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
{
return a.from() < b.from();
}
- static bool canCompact(JumpType jumpType)
+ bool canCompact(JumpType jumpType)
{
// Fixed jumps cannot be compacted
return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit);
}
- static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+ JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
{
switch (jumpType) {
case JumpFixed:
@@ -2951,20 +2828,29 @@ public:
return LinkJumpNoCondition;
}
- static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+ JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
{
JumpLinkType linkType = computeJumpType(record.type(), from, to);
record.setLinkType(linkType);
return linkType;
}
+ void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
+ {
+ int32_t ptr = regionStart / sizeof(int32_t);
+ const int32_t end = regionEnd / sizeof(int32_t);
+ int32_t* offsets = static_cast<int32_t*>(m_buffer.data());
+ while (ptr < end)
+ offsets[ptr++] = offset;
+ }
+
Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
{
std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
return m_jumpsToLink;
}
- static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
+ void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
{
switch (record.linkType()) {
case LinkJumpNoCondition:
@@ -3238,7 +3124,7 @@ private:
int insn = *static_cast<int*>(address);
op = (insn >> 24) & 0x1;
imm14 = (insn << 13) >> 18;
- bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f));
+ bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f));
rt = static_cast<RegisterID>(insn & 0x1f);
return (insn & 0x7e000000) == 0x36000000;
@@ -3475,23 +3361,6 @@ private:
}
// 'V' means vector
- ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
- {
- ASSERT(size < 3);
- ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
- ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
- unsigned immedShiftAmount = memPairOffsetShift(V, size);
- int imm7 = immediate >> immedShiftAmount;
- ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
- return (0x28800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
- }
-
- ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
- {
- return loadStoreRegisterPairPostIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
- }
-
- // 'V' means vector
ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
{
ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
@@ -3506,23 +3375,6 @@ private:
}
// 'V' means vector
- ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
- {
- ASSERT(size < 3);
- ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
- ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
- unsigned immedShiftAmount = memPairOffsetShift(V, size);
- int imm7 = immediate >> immedShiftAmount;
- ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
- return (0x29800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
- }
-
- ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
- {
- return loadStoreRegisterPairPreIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
- }
-
- // 'V' means vector
// 'S' means shift rm
ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt)
{
@@ -3636,37 +3488,6 @@ private:
return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4);
}
- // Workaround for Cortex-A53 erratum (835769). Emit an extra nop if the
- // last instruction in the buffer is a load, store or prefetch. Needed
- // before 64-bit multiply-accumulate instructions.
- template<int datasize>
- ALWAYS_INLINE void nopCortexA53Fix835769()
- {
-#if CPU(ARM64_CORTEXA53)
- CHECK_DATASIZE();
- if (datasize == 64) {
- if (LIKELY(m_buffer.codeSize() >= sizeof(int32_t))) {
- // From ARMv8 Reference Manual, Section C4.1: the encoding of the
- // instructions in the Loads and stores instruction group is:
- // ---- 1-0- ---- ---- ---- ---- ---- ----
- if (UNLIKELY((*reinterpret_cast_ptr<int32_t*>(reinterpret_cast_ptr<char*>(m_buffer.data()) + m_buffer.codeSize() - sizeof(int32_t)) & 0x0a000000) == 0x08000000))
- nop();
- }
- }
-#endif
- }
-
- // Workaround for Cortex-A53 erratum (843419). Emit extra nops to avoid
- // wrong address access after ADRP instruction.
- ALWAYS_INLINE void nopCortexA53Fix843419()
- {
-#if CPU(ARM64_CORTEXA53)
- nop();
- nop();
- nop();
-#endif
- }
-
AssemblerBuffer m_buffer;
Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
int m_indexOfLastWatchpoint;
diff --git a/Source/JavaScriptCore/assembler/ARMAssembler.h b/Source/JavaScriptCore/assembler/ARMAssembler.h
index b314ea690..087d31c14 100644
--- a/Source/JavaScriptCore/assembler/ARMAssembler.h
+++ b/Source/JavaScriptCore/assembler/ARMAssembler.h
@@ -36,6 +36,62 @@ namespace JSC {
typedef uint32_t ARMWord;
+ namespace ARMRegisters {
+ typedef enum {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6, S0 = r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11, fp = r11, // frame pointer
+ r12, ip = r12, S1 = r12,
+ r13, sp = r13,
+ r14, lr = r14,
+ r15, pc = r15
+ } RegisterID;
+
+ typedef enum {
+ d0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7, SD0 = d7, /* Same as thumb assembler. */
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ d16,
+ d17,
+ d18,
+ d19,
+ d20,
+ d21,
+ d22,
+ d23,
+ d24,
+ d25,
+ d26,
+ d27,
+ d28,
+ d29,
+ d30,
+ d31
+ } FPRegisterID;
+
+#if USE(MASM_PROBE)
#define FOR_EACH_CPU_REGISTER(V) \
FOR_EACH_CPU_GPREGISTER(V) \
FOR_EACH_CPU_SPECIAL_REGISTER(V) \
@@ -53,11 +109,11 @@ namespace JSC {
V(void*, r8) \
V(void*, r9) \
V(void*, r10) \
- V(void*, fp) \
+ V(void*, r11) \
V(void*, ip) \
V(void*, sp) \
V(void*, lr) \
- V(void*, pc) \
+ V(void*, pc)
#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
V(void*, apsr) \
@@ -79,49 +135,8 @@ namespace JSC {
V(double, d12) \
V(double, d13) \
V(double, d14) \
- V(double, d15) \
- V(double, d16) \
- V(double, d17) \
- V(double, d18) \
- V(double, d19) \
- V(double, d20) \
- V(double, d21) \
- V(double, d22) \
- V(double, d23) \
- V(double, d24) \
- V(double, d25) \
- V(double, d26) \
- V(double, d27) \
- V(double, d28) \
- V(double, d29) \
- V(double, d30) \
- V(double, d31) \
-
- namespace ARMRegisters {
-
- typedef enum {
- #define DECLARE_REGISTER(_type, _regName) _regName,
- FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
- #undef DECLARE_REGISTER
-
- // Pseudonyms for some of the registers.
- S0 = r6,
- r11 = fp, // frame pointer
- r12 = ip, S1 = ip,
- r13 = sp,
- r14 = lr,
- r15 = pc
- } RegisterID;
-
- typedef enum {
- #define DECLARE_REGISTER(_type, _regName) _regName,
- FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
- #undef DECLARE_REGISTER
-
- // Pseudonyms for some of the registers.
- SD0 = d7, /* Same as thumb assembler. */
- } FPRegisterID;
-
+ V(double, d15)
+#endif // USE(MASM_PROBE)
} // namespace ARMRegisters
class ARMAssembler {
@@ -216,10 +231,6 @@ namespace JSC {
#endif
NOP = 0xe1a00000,
DMB_SY = 0xf57ff05f,
-#if HAVE(ARM_IDIV_INSTRUCTIONS)
- SDIV = 0x0710f010,
- UDIV = 0x0730f010,
-#endif
};
enum {
@@ -481,26 +492,6 @@ namespace JSC {
m_buffer.putInt(toARMWord(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
}
-#if HAVE(ARM_IDIV_INSTRUCTIONS)
- template<int datasize>
- void sdiv(int rd, int rn, int rm, Condition cc = AL)
- {
- static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");
- ASSERT(rd != ARMRegisters::pc);
- ASSERT(rn != ARMRegisters::pc);
- ASSERT(rm != ARMRegisters::pc);
- m_buffer.putInt(toARMWord(cc) | SDIV | RN(rd) | RM(rn) | RS(rm));
- }
-
- void udiv(int rd, int rn, int rm, Condition cc = AL)
- {
- ASSERT(rd != ARMRegisters::pc);
- ASSERT(rn != ARMRegisters::pc);
- ASSERT(rm != ARMRegisters::pc);
- m_buffer.putInt(toARMWord(cc) | UDIV | RN(rd) | RM(rn) | RS(rm));
- }
-#endif
-
void vmov_f64(int dd, int dm, Condition cc = AL)
{
emitDoublePrecisionInstruction(toARMWord(cc) | VMOV_F64, dd, 0, dm);
@@ -1091,7 +1082,7 @@ namespace JSC {
return AL | B | (offset & BranchOffsetMask);
}
-#if OS(LINUX) && COMPILER(GCC_OR_CLANG)
+#if OS(LINUX) && COMPILER(GCC)
static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
{
asm volatile(
@@ -1111,7 +1102,7 @@ namespace JSC {
static void cacheFlush(void* code, size_t size)
{
-#if OS(LINUX) && COMPILER(GCC_OR_CLANG)
+#if OS(LINUX) && COMPILER(GCC)
size_t page = pageSize();
uintptr_t current = reinterpret_cast<uintptr_t>(code);
uintptr_t end = current + size;
@@ -1128,6 +1119,8 @@ namespace JSC {
linuxPageFlush(current, current + page);
linuxPageFlush(current, end);
+#elif OS(WINCE)
+ CacheRangeFlush(code, size, CACHE_SYNC_ALL);
#else
#error "The cacheFlush support is missing on this platform."
#endif
diff --git a/Source/JavaScriptCore/assembler/ARMv7Assembler.cpp b/Source/JavaScriptCore/assembler/ARMv7Assembler.cpp
new file mode 100644
index 000000000..faca66421
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/ARMv7Assembler.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#include "ARMv7Assembler.h"
+
+namespace JSC {
+
+}
+
+#endif
diff --git a/Source/JavaScriptCore/assembler/ARMv7Assembler.h b/Source/JavaScriptCore/assembler/ARMv7Assembler.h
index 1d731f98b..5257f32a8 100644
--- a/Source/JavaScriptCore/assembler/ARMv7Assembler.h
+++ b/Source/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
* Copyright (C) 2010 University of Szeged
*
* Redistribution and use in source and binary forms, with or without
@@ -38,83 +38,23 @@
namespace JSC {
namespace ARMRegisters {
-
- #define FOR_EACH_CPU_REGISTER(V) \
- FOR_EACH_CPU_GPREGISTER(V) \
- FOR_EACH_CPU_SPECIAL_REGISTER(V) \
- FOR_EACH_CPU_FPREGISTER(V)
-
- // The following are defined as pairs of the following value:
- // 1. type of the storage needed to save the register value by the JIT probe.
- // 2. name of the register.
- #define FOR_EACH_CPU_GPREGISTER(V) \
- V(void*, r0) \
- V(void*, r1) \
- V(void*, r2) \
- V(void*, r3) \
- V(void*, r4) \
- V(void*, r5) \
- V(void*, r6) \
- V(void*, r7) \
- V(void*, r8) \
- V(void*, r9) \
- V(void*, r10) \
- V(void*, r11) \
- V(void*, ip) \
- V(void*, sp) \
- V(void*, lr) \
- V(void*, pc)
-
- #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
- V(void*, apsr) \
- V(void*, fpscr) \
-
- #define FOR_EACH_CPU_FPREGISTER(V) \
- V(double, d0) \
- V(double, d1) \
- V(double, d2) \
- V(double, d3) \
- V(double, d4) \
- V(double, d5) \
- V(double, d6) \
- V(double, d7) \
- V(double, d8) \
- V(double, d9) \
- V(double, d10) \
- V(double, d11) \
- V(double, d12) \
- V(double, d13) \
- V(double, d14) \
- V(double, d15) \
- V(double, d16) \
- V(double, d17) \
- V(double, d18) \
- V(double, d19) \
- V(double, d20) \
- V(double, d21) \
- V(double, d22) \
- V(double, d23) \
- V(double, d24) \
- V(double, d25) \
- V(double, d26) \
- V(double, d27) \
- V(double, d28) \
- V(double, d29) \
- V(double, d30) \
- V(double, d31)
-
typedef enum {
- #define DECLARE_REGISTER(_type, _regName) _regName,
- FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
- #undef DECLARE_REGISTER
-
- fp = r7, // frame pointer
- sb = r9, // static base
- sl = r10, // stack limit
- r12 = ip,
- r13 = sp,
- r14 = lr,
- r15 = pc
+ r0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7, fp = r7, // frame pointer
+ r8,
+ r9, sb = r9, // static base
+ r10, sl = r10, // stack limit
+ r11,
+ r12, ip = r12,
+ r13, sp = r13,
+ r14, lr = r14,
+ r15, pc = r15,
} RegisterID;
typedef enum {
@@ -153,9 +93,38 @@ namespace ARMRegisters {
} FPSingleRegisterID;
typedef enum {
- #define DECLARE_REGISTER(_type, _regName) _regName,
- FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
- #undef DECLARE_REGISTER
+ d0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7,
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ d16,
+ d17,
+ d18,
+ d19,
+ d20,
+ d21,
+ d22,
+ d23,
+ d24,
+ d25,
+ d26,
+ d27,
+ d28,
+ d29,
+ d30,
+ d31,
} FPDoubleRegisterID;
typedef enum {
@@ -205,7 +174,77 @@ namespace ARMRegisters {
return (FPDoubleRegisterID)(reg >> 1);
}
-} // namespace ARMRegisters
+#if USE(MASM_PROBE)
+ #define FOR_EACH_CPU_REGISTER(V) \
+ FOR_EACH_CPU_GPREGISTER(V) \
+ FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ FOR_EACH_CPU_FPREGISTER(V)
+
+ #define FOR_EACH_CPU_GPREGISTER(V) \
+ V(void*, r0) \
+ V(void*, r1) \
+ V(void*, r2) \
+ V(void*, r3) \
+ V(void*, r4) \
+ V(void*, r5) \
+ V(void*, r6) \
+ V(void*, r7) \
+ V(void*, r8) \
+ V(void*, r9) \
+ V(void*, r10) \
+ V(void*, r11) \
+ V(void*, ip) \
+ V(void*, sp) \
+ V(void*, lr) \
+ V(void*, pc)
+
+ #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ V(void*, apsr) \
+ V(void*, fpscr) \
+
+ #define FOR_EACH_CPU_FPREGISTER(V) \
+ V(double, d0) \
+ V(double, d1) \
+ V(double, d2) \
+ V(double, d3) \
+ V(double, d4) \
+ V(double, d5) \
+ V(double, d6) \
+ V(double, d7) \
+ V(double, d8) \
+ V(double, d9) \
+ V(double, d10) \
+ V(double, d11) \
+ V(double, d12) \
+ V(double, d13) \
+ V(double, d14) \
+ V(double, d15) \
+ FOR_EACH_CPU_FPREGISTER_EXTENSION(V)
+
+#if CPU(APPLE_ARMV7S)
+ #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) \
+ V(double, d16) \
+ V(double, d17) \
+ V(double, d18) \
+ V(double, d19) \
+ V(double, d20) \
+ V(double, d21) \
+ V(double, d22) \
+ V(double, d23) \
+ V(double, d24) \
+ V(double, d25) \
+ V(double, d26) \
+ V(double, d27) \
+ V(double, d28) \
+ V(double, d29) \
+ V(double, d30) \
+ V(double, d31)
+#else
+ #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) // Nothing to add.
+#endif // CPU(APPLE_ARMV7S)
+
+#endif // USE(MASM_PROBE)
+}
class ARMv7Assembler;
class ARMThumbImmediate {
@@ -544,8 +583,6 @@ public:
{
}
- AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
-
private:
// ARMv7, Appx-A.6.3
@@ -609,8 +646,6 @@ private:
OP_ADD_SP_imm_T1 = 0xA800,
OP_ADD_SP_imm_T2 = 0xB000,
OP_SUB_SP_imm_T1 = 0xB080,
- OP_PUSH_T1 = 0xB400,
- OP_POP_T1 = 0xBC00,
OP_BKPT = 0xBE00,
OP_IT = 0xBF00,
OP_NOP_T1 = 0xBF00,
@@ -619,8 +654,6 @@ private:
typedef enum {
OP_B_T1 = 0xD000,
OP_B_T2 = 0xE000,
- OP_POP_T2 = 0xE8BD,
- OP_PUSH_T2 = 0xE92D,
OP_AND_reg_T2 = 0xEA00,
OP_TST_reg_T2 = 0xEA10,
OP_ORR_reg_T2 = 0xEA40,
@@ -708,7 +741,7 @@ private:
OP_ROR_reg_T2 = 0xFA60,
OP_CLZ = 0xFAB0,
OP_SMULL_T1 = 0xFB80,
-#if HAVE(ARM_IDIV_INSTRUCTIONS)
+#if CPU(APPLE_ARMV7S)
OP_SDIV_T1 = 0xFB90,
OP_UDIV_T1 = 0xFBB0,
#endif
@@ -766,11 +799,11 @@ private:
class ARMInstructionFormatter;
// false means else!
- static bool ifThenElseConditionBit(Condition condition, bool isIf)
+ bool ifThenElseConditionBit(Condition condition, bool isIf)
{
return isIf ? (condition & 1) : !(condition & 1);
}
- static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
{
int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
| (ifThenElseConditionBit(condition, inst3if) << 2)
@@ -779,7 +812,7 @@ private:
ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
return (condition << 4) | mask;
}
- static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
{
int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
| (ifThenElseConditionBit(condition, inst3if) << 2)
@@ -787,7 +820,7 @@ private:
ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
return (condition << 4) | mask;
}
- static uint8_t ifThenElse(Condition condition, bool inst2if)
+ uint8_t ifThenElse(Condition condition, bool inst2if)
{
int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
| 4;
@@ -795,7 +828,7 @@ private:
return (condition << 4) | mask;
}
- static uint8_t ifThenElse(Condition condition)
+ uint8_t ifThenElse(Condition condition)
{
int mask = 8;
return (condition << 4) | mask;
@@ -822,7 +855,7 @@ public:
ASSERT(rn != ARMRegisters::pc);
ASSERT(imm.isValid());
- if (rn == ARMRegisters::sp && imm.isUInt16()) {
+ if (rn == ARMRegisters::sp) {
ASSERT(!(imm.getUInt16() & 3));
if (!(rd & 8) && imm.isUInt10()) {
m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
@@ -861,11 +894,6 @@ public:
// NOTE: In an IT block, add doesn't modify the flags register.
ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
{
- if (rd == ARMRegisters::sp) {
- mov(rd, rn);
- rn = rd;
- }
-
if (rd == rn)
m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
else if (rd == rm)
@@ -1155,10 +1183,9 @@ public:
{
ASSERT(rn != ARMRegisters::pc); // LDR (literal)
ASSERT(imm.isUInt12());
- ASSERT(!(imm.getUInt12() & 1));
if (!((rt | rn) & 8) && imm.isUInt6())
- m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
else
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
}
@@ -1461,49 +1488,9 @@ public:
m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
}
- ALWAYS_INLINE void pop(RegisterID dest)
- {
- if (dest < ARMRegisters::r8)
- m_formatter.oneWordOp7Imm9(OP_POP_T1, 1 << dest);
- else {
- // Load postindexed with writeback.
- ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
- }
- }
-
- ALWAYS_INLINE void pop(uint32_t registerList)
- {
- ASSERT(WTF::bitCount(registerList) > 1);
- ASSERT(!((1 << ARMRegisters::pc) & registerList) || !((1 << ARMRegisters::lr) & registerList));
- ASSERT(!((1 << ARMRegisters::sp) & registerList));
- m_formatter.twoWordOp16Imm16(OP_POP_T2, registerList);
- }
-
- ALWAYS_INLINE void push(RegisterID src)
- {
- if (src < ARMRegisters::r8)
- m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 1 << src);
- else if (src == ARMRegisters::lr)
- m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 0x100);
- else {
- // Store preindexed with writeback.
- str(src, ARMRegisters::sp, -sizeof(void*), true, true);
- }
- }
-
- ALWAYS_INLINE void push(uint32_t registerList)
- {
- ASSERT(WTF::bitCount(registerList) > 1);
- ASSERT(!((1 << ARMRegisters::pc) & registerList));
- ASSERT(!((1 << ARMRegisters::sp) & registerList));
- m_formatter.twoWordOp16Imm16(OP_PUSH_T2, registerList);
- }
-
-#if HAVE(ARM_IDIV_INSTRUCTIONS)
- template<int datasize>
+#if CPU(APPLE_ARMV7S)
ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
{
- static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rn));
ASSERT(!BadReg(rm));
@@ -1648,8 +1635,8 @@ public:
ASSERT(rn != ARMRegisters::pc);
ASSERT(imm.isUInt12());
- if (!((rt | rn) & 8) && imm.isUInt6())
- m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt);
else
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
}
@@ -1847,7 +1834,7 @@ public:
m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
}
-#if HAVE(ARM_IDIV_INSTRUCTIONS)
+#if CPU(APPLE_ARMV7S)
ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
{
ASSERT(!BadReg(rd));
@@ -2049,7 +2036,14 @@ public:
return b.m_offset - a.m_offset;
}
- static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+ int executableOffsetFor(int location)
+ {
+ if (!location)
+ return 0;
+ return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
+ }
+
+ int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
// Assembler admin methods:
@@ -2058,7 +2052,7 @@ public:
return a.from() < b.from();
}
- static bool canCompact(JumpType jumpType)
+ bool canCompact(JumpType jumpType)
{
// The following cannot be compacted:
// JumpFixed: represents custom jump sequence
@@ -2067,7 +2061,7 @@ public:
return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
}
- static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+ JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
{
if (jumpType == JumpFixed)
return LinkInvalid;
@@ -2111,20 +2105,29 @@ public:
return LinkConditionalBX;
}
- static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+ JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
{
JumpLinkType linkType = computeJumpType(record.type(), from, to);
record.setLinkType(linkType);
return linkType;
}
+ void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
+ {
+ int32_t ptr = regionStart / sizeof(int32_t);
+ const int32_t end = regionEnd / sizeof(int32_t);
+ int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
+ while (ptr < end)
+ offsets[ptr++] = offset;
+ }
+
Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
{
std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
return m_jumpsToLink;
}
- static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
+ void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
{
switch (record.linkType()) {
case LinkJumpT1:
@@ -2372,6 +2375,8 @@ public:
linuxPageFlush(current, current + page);
linuxPageFlush(current, end);
+#elif OS(WINCE)
+ CacheRangeFlush(code, size, CACHE_SYNC_ALL);
#else
#error "The cacheFlush support is missing on this platform."
#endif
@@ -2573,7 +2578,7 @@ private:
return ((relative << 7) >> 7) == relative;
}
- static void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
+ void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
{
// FIMXE: this should be up in the MacroAssembler layer. :-(
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2609,7 +2614,7 @@ private:
instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
}
- static void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
+ void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
{
// FIMXE: this should be up in the MacroAssembler layer. :-(
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2642,7 +2647,7 @@ private:
instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
}
- static void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
+ void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
{
// FIMXE: this should be up in the MacroAssembler layer. :-(
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2668,7 +2673,7 @@ private:
instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
}
- static void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
+ void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
{
// FIMXE: this should be up in the MacroAssembler layer. :-(
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2748,11 +2753,6 @@ private:
m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
}
- ALWAYS_INLINE void oneWordOp7Imm9(OpcodeID op, uint16_t imm)
- {
- m_buffer.putShort(op | imm);
- }
-
ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
{
m_buffer.putShort(op | imm);
@@ -2791,12 +2791,6 @@ private:
m_buffer.putShort(op2);
}
- ALWAYS_INLINE void twoWordOp16Imm16(OpcodeID1 op1, uint16_t imm)
- {
- m_buffer.putShort(op1);
- m_buffer.putShort(imm);
- }
-
ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
{
ARMThumbImmediate newImm = imm;
@@ -2857,6 +2851,7 @@ private:
unsigned debugOffset() { return m_buffer.debugOffset(); }
+ private:
AssemblerBuffer m_buffer;
} m_formatter;
diff --git a/Source/JavaScriptCore/assembler/AbortReason.h b/Source/JavaScriptCore/assembler/AbortReason.h
deleted file mode 100644
index 1a5f068c7..000000000
--- a/Source/JavaScriptCore/assembler/AbortReason.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef AbortReason_h
-#define AbortReason_h
-
-namespace JSC {
-
-// It's important to not change the values of existing abort reasons unless we really
-// have to. For this reason there is a BASIC-style numbering that should allow us to
-// sneak new reasons in without changing the numbering of existing reasons - at least
-// for a while.
-enum AbortReason {
- AHCallFrameMisaligned = 10,
- AHIndexingTypeIsValid = 20,
- AHInsaneArgumentCount = 30,
- AHIsNotCell = 40,
- AHIsNotInt32 = 50,
- AHIsNotJSDouble = 60,
- AHIsNotJSInt32 = 70,
- AHIsNotJSNumber = 80,
- AHIsNotNull = 90,
- AHStackPointerMisaligned = 100,
- AHStructureIDIsValid = 110,
- AHTagMaskNotInPlace = 120,
- AHTagTypeNumberNotInPlace = 130,
- AHTypeInfoInlineTypeFlagsAreValid = 140,
- AHTypeInfoIsValid = 150,
- DFGBailedAtTopOfBlock = 161,
- DFGBailedAtEndOfNode = 162,
- DFGBasicStorageAllocatorZeroSize = 170,
- DFGIsNotCell = 180,
- DFGIneffectiveWatchpoint = 190,
- DFGNegativeStringLength = 200,
- DFGSlowPathGeneratorFellThrough = 210,
- DFGUnreachableBasicBlock = 220,
- DFGUnreasonableOSREntryJumpDestination = 230,
- DFGVarargsThrowingPathDidNotThrow = 235,
- JITDivOperandsAreNotNumbers = 240,
- JITGetByValResultIsNotEmpty = 250,
- JITNotSupported = 260,
- JITOffsetIsNotOutOfLine = 270,
- JITUnreasonableLoopHintJumpTarget = 280,
- RPWUnreasonableJumpTarget = 290,
- RepatchIneffectiveWatchpoint = 300,
- RepatchInsaneArgumentCount = 310,
- TGInvalidPointer = 320,
- TGNotSupported = 330,
- YARRNoInputConsumed = 340,
-};
-
-} // namespace JSC
-
-#endif // AbortReason_h
-
diff --git a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
index 6e82dcc5e..28537201b 100644
--- a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
+++ b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012, 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,7 +26,6 @@
#ifndef AbstractMacroAssembler_h
#define AbstractMacroAssembler_h
-#include "AbortReason.h"
#include "AssemblerBuffer.h"
#include "CodeLocation.h"
#include "MacroAssemblerCodeRef.h"
@@ -39,9 +38,9 @@
namespace JSC {
-inline bool isARMv7IDIVSupported()
+inline bool isARMv7s()
{
-#if HAVE(ARM_IDIV_INSTRUCTIONS)
+#if CPU(APPLE_ARMV7S)
return true;
#else
return false;
@@ -66,9 +65,9 @@ inline bool isX86()
#endif
}
-inline bool optimizeForARMv7IDIVSupported()
+inline bool optimizeForARMv7s()
{
- return isARMv7IDIVSupported() && Options::enableArchitectureSpecificOptimizations();
+ return isARMv7s() && Options::enableArchitectureSpecificOptimizations();
}
inline bool optimizeForARM64()
@@ -88,11 +87,10 @@ namespace DFG {
struct OSRExit;
}
-template <class AssemblerType, class MacroAssemblerType>
+template <class AssemblerType>
class AbstractMacroAssembler {
public:
friend class JITWriteBarrierBase;
- typedef AbstractMacroAssembler<AssemblerType, MacroAssemblerType> AbstractMacroAssemblerType;
typedef AssemblerType AssemblerType_T;
typedef MacroAssemblerCodePtr CodePtr;
@@ -142,7 +140,7 @@ public:
{
return Address(base, offset + additionalOffset);
}
-
+
RegisterID base;
int32_t offset;
};
@@ -205,11 +203,6 @@ public:
RegisterID index;
Scale scale;
int32_t offset;
-
- BaseIndex withOffset(int32_t additionalOffset)
- {
- return BaseIndex(base, index, scale, offset + additionalOffset);
- }
};
// AbsoluteAddress:
@@ -361,7 +354,7 @@ public:
// A Label records a point in the generated instruction stream, typically such that
// it may be used as a destination for a jump.
class Label {
- template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend struct DFG::OSRExit;
friend class Jump;
@@ -374,7 +367,7 @@ public:
{
}
- Label(AbstractMacroAssemblerType* masm)
+ Label(AbstractMacroAssembler<AssemblerType>* masm)
: m_label(masm->m_assembler.label())
{
masm->invalidateAllTempRegisters();
@@ -396,7 +389,7 @@ public:
//
// addPtr(TrustedImmPtr(i), a, b)
class ConvertibleLoadLabel {
- template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class LinkBuffer;
@@ -405,7 +398,7 @@ public:
{
}
- ConvertibleLoadLabel(AbstractMacroAssemblerType* masm)
+ ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm)
: m_label(masm->m_assembler.labelIgnoringWatchpoints())
{
}
@@ -420,7 +413,7 @@ public:
// A DataLabelPtr is used to refer to a location in the code containing a pointer to be
// patched after the code has been generated.
class DataLabelPtr {
- template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class LinkBuffer;
public:
@@ -428,7 +421,7 @@ public:
{
}
- DataLabelPtr(AbstractMacroAssemblerType* masm)
+ DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
: m_label(masm->m_assembler.label())
{
}
@@ -441,10 +434,10 @@ public:
// DataLabel32:
//
- // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be
+ // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
// patched after the code has been generated.
class DataLabel32 {
- template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class LinkBuffer;
public:
@@ -452,7 +445,7 @@ public:
{
}
- DataLabel32(AbstractMacroAssemblerType* masm)
+ DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
: m_label(masm->m_assembler.label())
{
}
@@ -468,7 +461,7 @@ public:
// A DataLabelCompact is used to refer to a location in the code containing a
// compact immediate to be patched after the code has been generated.
class DataLabelCompact {
- template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class LinkBuffer;
public:
@@ -476,7 +469,7 @@ public:
{
}
- DataLabelCompact(AbstractMacroAssemblerType* masm)
+ DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm)
: m_label(masm->m_assembler.label())
{
}
@@ -499,7 +492,7 @@ public:
// relative offset such that when executed it will call to the desired
// destination.
class Call {
- template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
public:
@@ -543,7 +536,7 @@ public:
// relative offset such that when executed it will jump to the desired
// destination.
class Jump {
- template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+ template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class Call;
friend struct DFG::OSRExit;
@@ -608,7 +601,7 @@ public:
return result;
}
- void link(AbstractMacroAssemblerType* masm) const
+ void link(AbstractMacroAssembler<AssemblerType>* masm) const
{
masm->invalidateAllTempRegisters();
@@ -632,7 +625,7 @@ public:
#endif
}
- void linkTo(Label label, AbstractMacroAssemblerType* masm) const
+ void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
{
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
@@ -700,11 +693,10 @@ public:
JumpList(Jump jump)
{
- if (jump.isSet())
- append(jump);
+ append(jump);
}
- void link(AbstractMacroAssemblerType* masm)
+ void link(AbstractMacroAssembler<AssemblerType>* masm)
{
size_t size = m_jumps.size();
for (size_t i = 0; i < size; ++i)
@@ -712,7 +704,7 @@ public:
m_jumps.clear();
}
- void linkTo(Label label, AbstractMacroAssemblerType* masm)
+ void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
{
size_t size = m_jumps.size();
for (size_t i = 0; i < size; ++i)
@@ -842,195 +834,12 @@ public:
AssemblerType::cacheFlush(code, size);
}
-#if ENABLE(MASM_PROBE)
-
- struct CPUState {
- #define DECLARE_REGISTER(_type, _regName) \
- _type _regName;
- FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
- #undef DECLARE_REGISTER
-
- static const char* registerName(RegisterID regID)
- {
- switch (regID) {
- #define DECLARE_REGISTER(_type, _regName) \
- case RegisterID::_regName: \
- return #_regName;
- FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
- #undef DECLARE_REGISTER
- }
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- static const char* registerName(FPRegisterID regID)
- {
- switch (regID) {
- #define DECLARE_REGISTER(_type, _regName) \
- case FPRegisterID::_regName: \
- return #_regName;
- FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
- #undef DECLARE_REGISTER
- }
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- void* registerValue(RegisterID regID)
- {
- switch (regID) {
- #define DECLARE_REGISTER(_type, _regName) \
- case RegisterID::_regName: \
- return _regName;
- FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
- #undef DECLARE_REGISTER
- }
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- double registerValue(FPRegisterID regID)
- {
- switch (regID) {
- #define DECLARE_REGISTER(_type, _regName) \
- case FPRegisterID::_regName: \
- return _regName;
- FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
- #undef DECLARE_REGISTER
- }
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- };
-
- struct ProbeContext;
- typedef void (*ProbeFunction)(struct ProbeContext*);
-
- struct ProbeContext {
- ProbeFunction probeFunction;
- void* arg1;
- void* arg2;
- CPUState cpu;
-
- void print(int indentation = 0)
- {
- #define INDENT MacroAssemblerType::printIndent(indentation)
-
- INDENT, dataLogF("ProbeContext %p {\n", this);
- indentation++;
- {
- INDENT, dataLogF("probeFunction: %p\n", probeFunction);
- INDENT, dataLogF("arg1: %p %llu\n", arg1, reinterpret_cast<int64_t>(arg1));
- INDENT, dataLogF("arg2: %p %llu\n", arg2, reinterpret_cast<int64_t>(arg2));
- MacroAssemblerType::printCPU(cpu, indentation);
- }
- indentation--;
- INDENT, dataLog("}\n");
-
- #undef INDENT
- }
- };
-
- static void printIndent(int indentation)
- {
- for (; indentation > 0; indentation--)
- dataLog(" ");
- }
-
- static void printCPU(CPUState& cpu, int indentation = 0)
- {
- #define INDENT printIndent(indentation)
-
- INDENT, dataLog("cpu: {\n");
- MacroAssemblerType::printCPURegisters(cpu, indentation + 1);
- INDENT, dataLog("}\n");
-
- #undef INDENT
- }
-
- // This is a marker type only used with print(). See print() below for details.
- struct AllRegisters { };
-
- // Emits code which will print debugging info at runtime. The type of values that
- // can be printed is encapsulated in the PrintArg struct below. Here are some
- // examples:
- //
- // print("Hello world\n"); // Emits code to print the string.
- //
- // CodeBlock* cb = ...;
- // print(cb); // Emits code to print the pointer value.
- //
- // RegisterID regID = ...;
- // print(regID); // Emits code to print the register value (not the id).
- //
- // // Emits code to print all registers. Unlike other items, this prints
- // // multiple lines as follows:
- // // cpu {
- // // eax: 0x123456789
- // // ebx: 0x000000abc
- // // ...
- // // }
- // print(AllRegisters());
- //
- // // Print multiple things at once. This incurs the probe overhead only once
- // // to print all the items.
- // print("cb:", cb, " regID:", regID, " cpu:\n", AllRegisters());
-
- template<typename... Arguments>
- void print(Arguments... args)
- {
- printInternal(static_cast<MacroAssemblerType*>(this), args...);
- }
-
- // This function will be called by printCPU() to print the contents of the
- // target specific registers which are saved away in the CPUState struct.
- // printCPURegisters() should make use of printIndentation() to print the
- // registers with the appropriate amount of indentation.
- //
- // Note: printCPURegisters() should be implemented by the target specific
- // MacroAssembler. This prototype is only provided here to document the
- // interface.
-
- static void printCPURegisters(CPUState&, int indentation = 0);
-
- // This function will be called by print() to print the contents of a
- // specific register (from the CPUState) in line with other items in the
- // print stream. Hence, no indentation is needed.
- //
- // Note: printRegister() should be implemented by the target specific
- // MacroAssembler. These prototypes are only provided here to document their
- // interface.
-
- static void printRegister(CPUState&, RegisterID);
- static void printRegister(CPUState&, FPRegisterID);
-
- // This function emits code to preserve the CPUState (e.g. registers),
- // call a user supplied probe function, and restore the CPUState before
- // continuing with other JIT generated code.
- //
- // The user supplied probe function will be called with a single pointer to
- // a ProbeContext struct (defined above) which contains, among other things,
- // the preserved CPUState. This allows the user probe function to inspect
- // the CPUState at that point in the JIT generated code.
- //
- // If the user probe function alters the register values in the ProbeContext,
- // the altered values will be loaded into the CPU registers when the probe
- // returns.
- //
- // The ProbeContext is stack allocated and is only valid for the duration
- // of the call to the user probe function.
- //
- // Note: probe() should be implemented by the target specific MacroAssembler.
- // This prototype is only provided here to document the interface.
-
- void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
-
-#endif // ENABLE(MASM_PROBE)
-
AssemblerType m_assembler;
protected:
AbstractMacroAssembler()
: m_randomSource(cryptographicallyRandomNumber())
{
- invalidateAllTempRegisters();
}
uint32_t random()
@@ -1065,7 +874,7 @@ protected:
friend class Label;
public:
- CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID)
+ CachedTempRegister(AbstractMacroAssembler<AssemblerType>* masm, RegisterID registerID)
: m_masm(masm)
, m_registerID(registerID)
, m_value(0)
@@ -1093,7 +902,7 @@ protected:
ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }
private:
- AbstractMacroAssemblerType* m_masm;
+ AbstractMacroAssembler<AssemblerType>* m_masm;
RegisterID m_registerID;
intptr_t m_value;
unsigned m_validBit;
@@ -1183,143 +992,7 @@ protected:
{
AssemblerType::replaceWithAddressComputation(label.dataLocation());
}
-
-private:
-
-#if ENABLE(MASM_PROBE)
-
- struct PrintArg {
-
- enum class Type {
- AllRegisters,
- RegisterID,
- FPRegisterID,
- ConstCharPtr,
- ConstVoidPtr,
- IntptrValue,
- UintptrValue,
- };
-
- PrintArg(AllRegisters&)
- : type(Type::AllRegisters)
- {
- }
-
- PrintArg(RegisterID regID)
- : type(Type::RegisterID)
- {
- u.gpRegisterID = regID;
- }
-
- PrintArg(FPRegisterID regID)
- : type(Type::FPRegisterID)
- {
- u.fpRegisterID = regID;
- }
-
- PrintArg(const char* ptr)
- : type(Type::ConstCharPtr)
- {
- u.constCharPtr = ptr;
- }
-
- PrintArg(const void* ptr)
- : type(Type::ConstVoidPtr)
- {
- u.constVoidPtr = ptr;
- }
-
- PrintArg(int value)
- : type(Type::IntptrValue)
- {
- u.intptrValue = value;
- }
-
- PrintArg(unsigned value)
- : type(Type::UintptrValue)
- {
- u.intptrValue = value;
- }
-
- PrintArg(intptr_t value)
- : type(Type::IntptrValue)
- {
- u.intptrValue = value;
- }
-
- PrintArg(uintptr_t value)
- : type(Type::UintptrValue)
- {
- u.uintptrValue = value;
- }
-
- Type type;
- union {
- RegisterID gpRegisterID;
- FPRegisterID fpRegisterID;
- const char* constCharPtr;
- const void* constVoidPtr;
- intptr_t intptrValue;
- uintptr_t uintptrValue;
- } u;
- };
-
- typedef Vector<PrintArg> PrintArgsList;
-
- template<typename FirstArg, typename... Arguments>
- static void appendPrintArg(PrintArgsList* argsList, FirstArg& firstArg, Arguments... otherArgs)
- {
- argsList->append(PrintArg(firstArg));
- appendPrintArg(argsList, otherArgs...);
- }
-
- static void appendPrintArg(PrintArgsList*) { }
-
-
- template<typename... Arguments>
- static void printInternal(MacroAssemblerType* masm, Arguments... args)
- {
- auto argsList = std::make_unique<PrintArgsList>();
- appendPrintArg(argsList.get(), args...);
- masm->probe(printCallback, argsList.release());
- }
-
- static void printCallback(ProbeContext* context)
- {
- typedef PrintArg Arg;
- PrintArgsList& argsList =
- *reinterpret_cast<PrintArgsList*>(context->arg1);
- for (size_t i = 0; i < argsList.size(); i++) {
- auto& arg = argsList[i];
- switch (arg.type) {
- case Arg::Type::AllRegisters:
- MacroAssemblerType::printCPU(context->cpu);
- break;
- case Arg::Type::RegisterID:
- MacroAssemblerType::printRegister(context->cpu, arg.u.gpRegisterID);
- break;
- case Arg::Type::FPRegisterID:
- MacroAssemblerType::printRegister(context->cpu, arg.u.fpRegisterID);
- break;
- case Arg::Type::ConstCharPtr:
- dataLog(arg.u.constCharPtr);
- break;
- case Arg::Type::ConstVoidPtr:
- dataLogF("%p", arg.u.constVoidPtr);
- break;
- case Arg::Type::IntptrValue:
- dataLog(arg.u.intptrValue);
- break;
- case Arg::Type::UintptrValue:
- dataLog(arg.u.uintptrValue);
- break;
- }
- }
- }
-
-#endif // ENABLE(MASM_PROBE)
-
-}; // class AbstractMacroAssembler
+};
} // namespace JSC
diff --git a/Source/JavaScriptCore/assembler/AssemblerBuffer.h b/Source/JavaScriptCore/assembler/AssemblerBuffer.h
index 3632a5b6e..120868d63 100644
--- a/Source/JavaScriptCore/assembler/AssemblerBuffer.h
+++ b/Source/JavaScriptCore/assembler/AssemblerBuffer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -59,69 +59,24 @@ namespace JSC {
uint32_t m_offset;
};
- class AssemblerData {
- public:
- AssemblerData()
- : m_buffer(nullptr)
- , m_capacity(0)
- {
- }
-
- AssemblerData(unsigned initialCapacity)
- {
- m_capacity = initialCapacity;
- m_buffer = static_cast<char*>(fastMalloc(m_capacity));
- }
-
- AssemblerData(AssemblerData&& other)
- {
- m_buffer = other.m_buffer;
- other.m_buffer = nullptr;
- m_capacity = other.m_capacity;
- other.m_capacity = 0;
- }
-
- AssemblerData& operator=(AssemblerData&& other)
- {
- m_buffer = other.m_buffer;
- other.m_buffer = nullptr;
- m_capacity = other.m_capacity;
- other.m_capacity = 0;
- return *this;
- }
-
- ~AssemblerData()
- {
- fastFree(m_buffer);
- }
-
- char* buffer() const { return m_buffer; }
-
- unsigned capacity() const { return m_capacity; }
-
- void grow(unsigned extraCapacity = 0)
- {
- m_capacity = m_capacity + m_capacity / 2 + extraCapacity;
- m_buffer = static_cast<char*>(fastRealloc(m_buffer, m_capacity));
- }
-
- private:
- char* m_buffer;
- unsigned m_capacity;
- };
-
class AssemblerBuffer {
- static const int initialCapacity = 128;
+ static const int inlineCapacity = 128;
public:
AssemblerBuffer()
- : m_storage(initialCapacity)
+ : m_storage(inlineCapacity)
+ , m_buffer(m_storage.begin())
+ , m_capacity(inlineCapacity)
, m_index(0)
{
}
+ ~AssemblerBuffer()
+ {
+ }
+
bool isAvailable(int space)
{
- return m_index + space <= m_storage.capacity();
+ return m_index + space <= m_capacity;
}
void ensureSpace(int space)
@@ -135,6 +90,21 @@ namespace JSC {
return !(m_index & (alignment - 1));
}
+ template<typename IntegralType>
+ void putIntegral(IntegralType value)
+ {
+ ensureSpace(sizeof(IntegralType));
+ putIntegralUnchecked(value);
+ }
+
+ template<typename IntegralType>
+ void putIntegralUnchecked(IntegralType value)
+ {
+ ASSERT(isAvailable(sizeof(IntegralType)));
+ *reinterpret_cast_ptr<IntegralType*>(m_buffer + m_index) = value;
+ m_index += sizeof(IntegralType);
+ }
+
void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); }
void putByte(int8_t value) { putIntegral(value); }
void putShortUnchecked(int16_t value) { putIntegralUnchecked(value); }
@@ -146,7 +116,7 @@ namespace JSC {
void* data() const
{
- return m_storage.buffer();
+ return m_buffer;
}
size_t codeSize() const
@@ -161,45 +131,29 @@ namespace JSC {
unsigned debugOffset() { return m_index; }
- AssemblerData releaseAssemblerData() { return WTF::move(m_storage); }
-
protected:
- template<typename IntegralType>
- void putIntegral(IntegralType value)
- {
- unsigned nextIndex = m_index + sizeof(IntegralType);
- if (UNLIKELY(nextIndex > m_storage.capacity()))
- grow();
- ASSERT(isAvailable(sizeof(IntegralType)));
- *reinterpret_cast_ptr<IntegralType*>(m_storage.buffer() + m_index) = value;
- m_index = nextIndex;
- }
-
- template<typename IntegralType>
- void putIntegralUnchecked(IntegralType value)
- {
- ASSERT(isAvailable(sizeof(IntegralType)));
- *reinterpret_cast_ptr<IntegralType*>(m_storage.buffer() + m_index) = value;
- m_index += sizeof(IntegralType);
- }
-
void append(const char* data, int size)
{
if (!isAvailable(size))
grow(size);
- memcpy(m_storage.buffer() + m_index, data, size);
+ memcpy(m_buffer + m_index, data, size);
m_index += size;
}
void grow(int extraCapacity = 0)
{
- m_storage.grow(extraCapacity);
+ m_capacity += m_capacity / 2 + extraCapacity;
+
+ m_storage.grow(m_capacity);
+ m_buffer = m_storage.begin();
}
private:
- AssemblerData m_storage;
- unsigned m_index;
+ Vector<char, inlineCapacity, UnsafeVectorOverflow> m_storage;
+ char* m_buffer;
+ int m_capacity;
+ int m_index;
};
} // namespace JSC
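The rewrite above swaps the standalone AssemblerData heap block for a Vector with 128 bytes of inline capacity, keeping a raw m_buffer pointer that must be refreshed after every grow. The growth policy — new capacity = old + old/2 + extra — is easy to see in isolation; here is a minimal sketch with a hypothetical class name, not the JSC type:

    #include <cassert>
    #include <cstdlib>
    #include <cstring>

    class GrowableBuffer {
        static const int initialCapacity = 128;
    public:
        GrowableBuffer()
            : m_buffer(static_cast<char*>(malloc(initialCapacity)))
            , m_capacity(initialCapacity)
            , m_index(0)
        {
        }
        ~GrowableBuffer() { free(m_buffer); }

        void append(const char* data, int size)
        {
            if (m_index + size > m_capacity)
                grow(size);
            memcpy(m_buffer + m_index, data, size);
            m_index += size;
        }

        // Same policy as the patch: capacity += capacity / 2 + extraCapacity.
        void grow(int extraCapacity = 0)
        {
            m_capacity += m_capacity / 2 + extraCapacity;
            m_buffer = static_cast<char*>(realloc(m_buffer, m_capacity));
            assert(m_buffer); // the real code uses fastRealloc, which aborts on failure
        }

    private:
        char* m_buffer;
        int m_capacity;
        int m_index;
    };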
diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.cpp b/Source/JavaScriptCore/assembler/LinkBuffer.cpp
index d53ef451b..a7f469da8 100644
--- a/Source/JavaScriptCore/assembler/LinkBuffer.cpp
+++ b/Source/JavaScriptCore/assembler/LinkBuffer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,22 +28,12 @@
#if ENABLE(ASSEMBLER)
-#include "CodeBlock.h"
-#include "JITCode.h"
-#include "JSCInlines.h"
#include "Options.h"
#include "VM.h"
#include <wtf/CompilationThread.h>
namespace JSC {
-bool shouldShowDisassemblyFor(CodeBlock* codeBlock)
-{
- if (JITCode::isOptimizingJIT(codeBlock->jitType()) && Options::showDFGDisassembly())
- return true;
- return Options::showDisassembly();
-}
-
LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly()
{
performFinalization();
@@ -57,57 +47,38 @@ LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly()
LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...)
{
+ ASSERT(Options::showDisassembly() || Options::showDFGDisassembly());
+
CodeRef result = finalizeCodeWithoutDisassembly();
- if (m_alreadyDisassembled)
- return result;
-
- StringPrintStream out;
- out.printf("Generated JIT code for ");
+#if ENABLE(DISASSEMBLER)
+ dataLogF("Generated JIT code for ");
va_list argList;
va_start(argList, format);
- out.vprintf(format, argList);
+ WTF::dataLogFV(format, argList);
va_end(argList);
- out.printf(":\n");
-
- out.printf(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size());
-
- CString header = out.toCString();
-
- if (Options::asyncDisassembly()) {
- disassembleAsynchronously(header, result, m_size, " ");
- return result;
- }
+ dataLogF(":\n");
- dataLog(header);
+ dataLogF(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size());
disassemble(result.code(), m_size, " ", WTF::dataFile());
+#else
+ UNUSED_PARAM(format);
+#endif // ENABLE(DISASSEMBLER)
return result;
}
#if ENABLE(BRANCH_COMPACTION)
-static ALWAYS_INLINE void recordLinkOffsets(AssemblerData& assemblerData, int32_t regionStart, int32_t regionEnd, int32_t offset)
-{
- int32_t ptr = regionStart / sizeof(int32_t);
- const int32_t end = regionEnd / sizeof(int32_t);
- int32_t* offsets = reinterpret_cast<int32_t*>(assemblerData.buffer());
- while (ptr < end)
- offsets[ptr++] = offset;
-}
-
template <typename InstructionType>
-void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
+void LinkBuffer::copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort effort)
{
- m_initialSize = macroAssembler.m_assembler.codeSize();
+ m_initialSize = m_assembler->m_assembler.codeSize();
allocate(m_initialSize, ownerUID, effort);
- if (didFailToAllocate())
- return;
- Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = macroAssembler.jumpsToLink();
- m_assemblerStorage = macroAssembler.m_assembler.buffer().releaseAssemblerData();
- uint8_t* inData = reinterpret_cast<uint8_t*>(m_assemblerStorage.buffer());
+ uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
int readPtr = 0;
int writePtr = 0;
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = m_assembler->jumpsToLink();
unsigned jumpCount = jumpsToLink.size();
for (unsigned i = 0; i < jumpCount; ++i) {
int offset = readPtr - writePtr;
@@ -123,7 +94,7 @@ void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, void* ow
ASSERT(!(writePtr % 2));
while (copySource != copyEnd)
*copyDst++ = *copySource++;
- recordLinkOffsets(m_assemblerStorage, readPtr, jumpsToLink[i].from(), offset);
+ m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
readPtr += regionSize;
writePtr += regionSize;
@@ -133,28 +104,28 @@ void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, void* ow
if (jumpsToLink[i].to() >= jumpsToLink[i].from())
target = outData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
else
- target = outData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());
+ target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
- JumpLinkType jumpLinkType = MacroAssembler::computeJumpType(jumpsToLink[i], outData + writePtr, target);
+ JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], outData + writePtr, target);
// Compact branch if we can...
- if (MacroAssembler::canCompact(jumpsToLink[i].type())) {
+ if (m_assembler->canCompact(jumpsToLink[i].type())) {
// Step back in the write stream
- int32_t delta = MacroAssembler::jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
+ int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
if (delta) {
writePtr -= delta;
- recordLinkOffsets(m_assemblerStorage, jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+ m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
}
}
jumpsToLink[i].setFrom(writePtr);
}
// Copy everything after the last jump
memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr);
- recordLinkOffsets(m_assemblerStorage, readPtr, m_initialSize, readPtr - writePtr);
+ m_assembler->recordLinkOffsets(readPtr, m_initialSize, readPtr - writePtr);
for (unsigned i = 0; i < jumpCount; ++i) {
uint8_t* location = outData + jumpsToLink[i].from();
- uint8_t* target = outData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());
- MacroAssembler::link(jumpsToLink[i], location, target);
+ uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+ m_assembler->link(jumpsToLink[i], location, target);
}
jumpsToLink.clear();
@@ -170,28 +141,28 @@ void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, void* ow
#endif
-void LinkBuffer::linkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
+void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort)
{
#if !ENABLE(BRANCH_COMPACTION)
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
- macroAssembler.m_assembler.buffer().flushConstantPool(false);
+ m_assembler->m_assembler.buffer().flushConstantPool(false);
#endif
- AssemblerBuffer& buffer = macroAssembler.m_assembler.buffer();
+ AssemblerBuffer& buffer = m_assembler->m_assembler.buffer();
allocate(buffer.codeSize(), ownerUID, effort);
if (!m_didAllocate)
return;
ASSERT(m_code);
#if CPU(ARM_TRADITIONAL)
- macroAssembler.m_assembler.prepareExecutableCopy(m_code);
+ m_assembler->m_assembler.prepareExecutableCopy(m_code);
#endif
memcpy(m_code, buffer.data(), buffer.codeSize());
#if CPU(MIPS)
- macroAssembler.m_assembler.relocateJumps(buffer.data(), m_code);
+ m_assembler->m_assembler.relocateJumps(buffer.data(), m_code);
#endif
#elif CPU(ARM_THUMB2)
- copyCompactAndLinkCode<uint16_t>(macroAssembler, ownerUID, effort);
+ copyCompactAndLinkCode<uint16_t>(ownerUID, effort);
#elif CPU(ARM64)
- copyCompactAndLinkCode<uint32_t>(macroAssembler, ownerUID, effort);
+ copyCompactAndLinkCode<uint32_t>(ownerUID, effort);
#endif
}
@@ -217,8 +188,6 @@ void LinkBuffer::allocate(size_t initialSize, void* ownerUID, JITCompilationEffo
void LinkBuffer::shrink(size_t newSize)
{
- if (!m_executableMemory)
- return;
m_size = newSize;
m_executableMemory->shrink(m_size);
}
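The compaction loop in copyCompactAndLinkCode above is a two-pointer copy: readPtr walks the unlinked code, writePtr walks the output, and every shrunken branch widens the gap between them, with recordLinkOffsets remembering the shift per region. A toy illustration of that invariant — plain byte arrays and fixed 4-byte-to-2-byte "jumps", not real instructions:

    #include <cstring>

    // Compact `in` into `out`, shrinking each 4-byte jump at jumpOffsets[i]
    // to 2 bytes; writePtr lags readPtr by the total bytes saved so far.
    static int compact(const unsigned char* in, int size, unsigned char* out,
                       const int* jumpOffsets, int jumpCount)
    {
        int readPtr = 0;
        int writePtr = 0;
        for (int i = 0; i < jumpCount; ++i) {
            int regionSize = jumpOffsets[i] - readPtr; // bytes before this jump
            memcpy(out + writePtr, in + readPtr, regionSize);
            readPtr += regionSize;
            writePtr += regionSize;
            memcpy(out + writePtr, in + readPtr, 2);   // emit the short form
            readPtr += 4;                              // skip the long form
            writePtr += 2;
        }
        // Copy everything after the last jump, as the real loop does.
        memcpy(out + writePtr, in + readPtr, size - readPtr);
        return writePtr + (size - readPtr);            // compacted size
    }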
diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.h b/Source/JavaScriptCore/assembler/LinkBuffer.h
index 9b0d4c437..8d4ce521f 100644
--- a/Source/JavaScriptCore/assembler/LinkBuffer.h
+++ b/Source/JavaScriptCore/assembler/LinkBuffer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2010, 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -43,7 +43,6 @@
namespace JSC {
-class CodeBlock;
class VM;
// LinkBuffer:
@@ -80,34 +79,36 @@ class LinkBuffer {
#endif
public:
- LinkBuffer(VM& vm, MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
+ LinkBuffer(VM& vm, MacroAssembler* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
: m_size(0)
#if ENABLE(BRANCH_COMPACTION)
, m_initialSize(0)
#endif
, m_didAllocate(false)
, m_code(0)
+ , m_assembler(masm)
, m_vm(&vm)
#ifndef NDEBUG
, m_completed(false)
#endif
{
- linkCode(macroAssembler, ownerUID, effort);
+ linkCode(ownerUID, effort);
}
- LinkBuffer(VM& vm, MacroAssembler& macroAssembler, void* code, size_t size)
+ LinkBuffer(VM& vm, MacroAssembler* masm, void* code, size_t size)
: m_size(size)
#if ENABLE(BRANCH_COMPACTION)
, m_initialSize(0)
#endif
, m_didAllocate(false)
, m_code(code)
+ , m_assembler(masm)
, m_vm(&vm)
#ifndef NDEBUG
, m_completed(false)
#endif
{
- linkCode(macroAssembler, 0, JITCompilationCanFail);
+ linkCode(0, JITCompilationCanFail);
}
~LinkBuffer()
@@ -163,11 +164,6 @@ public:
}
// These methods are used to obtain handles to allow the code to be relinked / repatched later.
-
- CodeLocationLabel entrypoint()
- {
- return CodeLocationLabel(code());
- }
CodeLocationCall locationOf(Call call)
{
@@ -248,30 +244,17 @@ public:
{
return m_code;
}
-
- // FIXME: this does not account for the AssemblerData size!
+
size_t size()
{
return m_size;
}
-
- bool wasAlreadyDisassembled() const { return m_alreadyDisassembled; }
- void didAlreadyDisassemble() { m_alreadyDisassembled = true; }
private:
-#if ENABLE(BRANCH_COMPACTION)
- int executableOffsetFor(int location)
- {
- if (!location)
- return 0;
- return bitwise_cast<int32_t*>(m_assemblerStorage.buffer())[location / sizeof(int32_t) - 1];
- }
-#endif
-
template <typename T> T applyOffset(T src)
{
#if ENABLE(BRANCH_COMPACTION)
- src.m_offset -= executableOffsetFor(src.m_offset);
+ src.m_offset -= m_assembler->executableOffsetFor(src.m_offset);
#endif
return src;
}
@@ -285,10 +268,10 @@ private:
void allocate(size_t initialSize, void* ownerUID, JITCompilationEffort);
void shrink(size_t newSize);
- JS_EXPORT_PRIVATE void linkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort);
+ JS_EXPORT_PRIVATE void linkCode(void* ownerUID, JITCompilationEffort);
#if ENABLE(BRANCH_COMPACTION)
template <typename InstructionType>
- void copyCompactAndLinkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort);
+ void copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort);
#endif
void performFinalization();
@@ -305,15 +288,14 @@ private:
size_t m_size;
#if ENABLE(BRANCH_COMPACTION)
size_t m_initialSize;
- AssemblerData m_assemblerStorage;
#endif
bool m_didAllocate;
void* m_code;
+ MacroAssembler* m_assembler;
VM* m_vm;
#ifndef NDEBUG
bool m_completed;
#endif
- bool m_alreadyDisassembled { false };
};
#define FINALIZE_CODE_IF(condition, linkBufferReference, dataLogFArgumentsForHeading) \
@@ -321,11 +303,6 @@ private:
? ((linkBufferReference).finalizeCodeWithDisassembly dataLogFArgumentsForHeading) \
: (linkBufferReference).finalizeCodeWithoutDisassembly())
-bool shouldShowDisassemblyFor(CodeBlock*);
-
-#define FINALIZE_CODE_FOR(codeBlock, linkBufferReference, dataLogFArgumentsForHeading) \
- FINALIZE_CODE_IF(shouldShowDisassemblyFor(codeBlock) || Options::asyncDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
-
// Use this to finalize code, like so:
//
// CodeRef code = FINALIZE_CODE(linkBuffer, ("my super thingy number %d", number));
@@ -343,10 +320,10 @@ bool shouldShowDisassemblyFor(CodeBlock*);
// is true, so you can hide expensive disassembly-only computations inside there.
#define FINALIZE_CODE(linkBufferReference, dataLogFArgumentsForHeading) \
- FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::showDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
+ FINALIZE_CODE_IF(JSC::Options::showDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
#define FINALIZE_DFG_CODE(linkBufferReference, dataLogFArgumentsForHeading) \
- FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::showDisassembly() || Options::showDFGDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
+ FINALIZE_CODE_IF((JSC::Options::showDisassembly() || Options::showDFGDisassembly()), linkBufferReference, dataLogFArgumentsForHeading)
} // namespace JSC
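With the constructor now taking the assembler by pointer, a finalization call site looks like the following hedged sketch; `vm`, `number`, and the emitted code are assumed, and the heading arguments follow the usage comment above:

    // Hypothetical call site; LinkBuffer now takes the MacroAssembler by pointer.
    MacroAssembler masm;
    // ... emit code into masm ...
    LinkBuffer linkBuffer(vm, &masm, /* ownerUID */ 0);
    LinkBuffer::CodeRef code =
        FINALIZE_CODE(linkBuffer, ("my super thingy number %d", number));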
diff --git a/Source/JavaScriptCore/assembler/MIPSAssembler.h b/Source/JavaScriptCore/assembler/MIPSAssembler.h
index caad1524d..b75b4d0af 100644
--- a/Source/JavaScriptCore/assembler/MIPSAssembler.h
+++ b/Source/JavaScriptCore/assembler/MIPSAssembler.h
@@ -870,8 +870,28 @@ public:
static void cacheFlush(void* code, size_t size)
{
+#if GCC_VERSION_AT_LEAST(4, 3, 0)
+#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4, 4, 3)
+ int lineSize;
+ asm("rdhwr %0, $1" : "=r" (lineSize));
+ //
+ // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
+ // mips_expand_synci_loop that may execute synci one more time.
+ // "start" points to the fisrt byte of the cache line.
+ // "end" points to the last byte of the line before the last cache line.
+ // Because size is always a multiple of 4, this is safe to set
+ // "end" to the last byte.
+ //
+ intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
+ intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
+ __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
+#else
intptr_t end = reinterpret_cast<intptr_t>(code) + size;
__builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
+#endif
+#else
+ _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
+#endif
}
static ptrdiff_t maxJumpReplacementSize()
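The start/end adjustment above is plain power-of-two alignment: `addr & -lineSize` clears the low bits, rounding down to the containing cache line. A one-function sketch, assuming lineSize is a power of two (which the rdhwr $1 line-size register guarantees in practice):

    #include <cassert>
    #include <cstdint>

    // Round addr down to the start of its cache line.
    static inline intptr_t lineAlignDown(intptr_t addr, int lineSize)
    {
        assert((lineSize & (lineSize - 1)) == 0); // power of two
        return addr & -static_cast<intptr_t>(lineSize);
    }
    // Example: lineAlignDown(0x1234, 32) == 0x1220.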
diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.h b/Source/JavaScriptCore/assembler/MacroAssembler.h
index fd4c5bbf5..4a43eb625 100644
--- a/Source/JavaScriptCore/assembler/MacroAssembler.h
+++ b/Source/JavaScriptCore/assembler/MacroAssembler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef MacroAssembler_h
#define MacroAssembler_h
+#include <wtf/Platform.h>
+
#if ENABLE(ASSEMBLER)
#if CPU(ARM_THUMB2)
@@ -69,9 +71,30 @@ namespace JSC {
class MacroAssembler : public MacroAssemblerBase {
public:
+ static bool isStackRelated(RegisterID reg)
+ {
+ return reg == stackPointerRegister || reg == framePointerRegister;
+ }
+
+ static RegisterID firstRealRegister()
+ {
+ RegisterID firstRegister = MacroAssembler::firstRegister();
+ while (MacroAssembler::isStackRelated(firstRegister))
+ firstRegister = static_cast<RegisterID>(firstRegister + 1);
+ return firstRegister;
+ }
+
static RegisterID nextRegister(RegisterID reg)
{
- return static_cast<RegisterID>(reg + 1);
+ RegisterID result = static_cast<RegisterID>(reg + 1);
+ while (MacroAssembler::isStackRelated(result))
+ result = static_cast<RegisterID>(result + 1);
+ return result;
+ }
+
+ static RegisterID secondRealRegister()
+ {
+ return nextRegister(firstRealRegister());
}
static FPRegisterID nextFPRegister(FPRegisterID reg)
@@ -117,9 +140,9 @@ public:
using MacroAssemblerBase::and32;
using MacroAssemblerBase::branchAdd32;
using MacroAssemblerBase::branchMul32;
-#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64)
+#if CPU(X86_64)
using MacroAssemblerBase::branchPtr;
-#endif
+#endif // CPU(X86_64)
using MacroAssemblerBase::branchSub32;
using MacroAssemblerBase::lshift32;
using MacroAssemblerBase::or32;
@@ -235,10 +258,6 @@ public:
{
push(src);
}
- void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
- {
- push(imm);
- }
void popToRestore(RegisterID dest)
{
pop(dest);
@@ -253,8 +272,6 @@ public:
loadDouble(stackPointerRegister, dest);
addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
}
-
- static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
#endif // !CPU(ARM64)
#if CPU(X86_64) || CPU(ARM64)
@@ -345,11 +362,6 @@ public:
return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
}
- PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
- {
- return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
- }
-
#if !CPU(ARM_TRADITIONAL)
PatchableJump patchableJump()
{
@@ -365,11 +377,6 @@ public:
{
return PatchableJump(branch32(cond, reg, imm));
}
-
- PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
- {
- return PatchableJump(branch32(cond, address, imm));
- }
#endif
#endif
@@ -467,21 +474,6 @@ public:
and32(TrustedImm32(imm), srcDest);
}
- void lshiftPtr(Imm32 imm, RegisterID srcDest)
- {
- lshift32(trustedImm32ForShift(imm), srcDest);
- }
-
- void rshiftPtr(Imm32 imm, RegisterID srcDest)
- {
- rshift32(trustedImm32ForShift(imm), srcDest);
- }
-
- void urshiftPtr(Imm32 imm, RegisterID srcDest)
- {
- urshift32(trustedImm32ForShift(imm), srcDest);
- }
-
void negPtr(RegisterID dest)
{
neg32(dest);
@@ -603,11 +595,6 @@ public:
store32(TrustedImm32(imm), address);
}
- void storePtr(TrustedImm32 imm, ImplicitAddress address)
- {
- store32(imm, address);
- }
-
void storePtr(TrustedImmPtr imm, BaseIndex address)
{
store32(TrustedImm32(imm), address);
@@ -760,16 +747,6 @@ public:
lshift64(trustedImm32ForShift(imm), srcDest);
}
- void rshiftPtr(Imm32 imm, RegisterID srcDest)
- {
- rshift64(trustedImm32ForShift(imm), srcDest);
- }
-
- void urshiftPtr(Imm32 imm, RegisterID srcDest)
- {
- urshift64(trustedImm32ForShift(imm), srcDest);
- }
-
void negPtr(RegisterID dest)
{
neg64(dest);
@@ -1009,7 +986,7 @@ public:
if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
return shouldConsiderBlinding();
- value = fabs(value);
+ value = abs(value);
// Only allow a limited set of fractional components
double scaledValue = value * 8;
if (scaledValue / 8 != value)
@@ -1160,7 +1137,7 @@ public:
void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
{
- if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
+ if (shouldBlind(imm)) {
RegisterID scratchRegister = scratchRegisterForBlinding();
loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
convertInt32ToDouble(scratchRegister, dest);
@@ -1196,7 +1173,7 @@ public:
Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
{
- if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
+ if (shouldBlind(right)) {
RegisterID scratchRegister = scratchRegisterForBlinding();
loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
return branchPtr(cond, left, scratchRegister);
@@ -1206,7 +1183,7 @@ public:
void storePtr(ImmPtr imm, Address dest)
{
- if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
+ if (shouldBlind(imm)) {
RegisterID scratchRegister = scratchRegisterForBlinding();
loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
storePtr(scratchRegister, dest);
@@ -1216,7 +1193,7 @@ public:
void store64(Imm64 imm, Address dest)
{
- if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
+ if (shouldBlind(imm)) {
RegisterID scratchRegister = scratchRegisterForBlinding();
loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
store64(scratchRegister, dest);
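The blinding paths above (loadXorBlindedConstant, loadRotationBlindedConstant) exist so attacker-chosen constants never appear verbatim as immediates in JIT output. A minimal sketch of the XOR variant — hypothetical names, not the JSC implementation, with a stand-in random source:

    #include <cstdint>
    #include <cstdlib>

    struct XorBlindedConstant {
        uint32_t blinded; // value ^ key: the immediate the code would contain
        uint32_t key;     // random mask chosen at emit time
    };

    static XorBlindedConstant xorBlind(uint32_t value)
    {
        uint32_t key = static_cast<uint32_t>(rand()); // stand-in RNG
        XorBlindedConstant result = { value ^ key, key };
        return result;
    }

    // Emitted-code equivalent: materialize `blinded`, then XOR in `key`,
    // recovering the value without it ever appearing as a single immediate.
    static uint32_t unblind(const XorBlindedConstant& c)
    {
        return c.blinded ^ c.key;
    }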
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp
index b0a9bf074..a6f3e65c0 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc.
+ * Copyright (C) 2013 Apple Inc.
* Copyright (C) 2009 University of Szeged
* All rights reserved.
*
@@ -31,6 +31,10 @@
#include "MacroAssemblerARM.h"
+#if USE(MASM_PROBE)
+#include <wtf/StdLibExtras.h>
+#endif
+
#if OS(LINUX)
#include <sys/types.h>
#include <sys/stat.h>
@@ -46,7 +50,7 @@ static bool isVFPPresent()
{
#if OS(LINUX)
int fd = open("/proc/self/auxv", O_RDONLY);
- if (fd != -1) {
+ if (fd > 0) {
Elf32_auxv_t aux;
while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
if (aux.a_type == AT_HWCAP) {
@@ -58,7 +62,7 @@ static bool isVFPPresent()
}
#endif // OS(LINUX)
-#if (COMPILER(GCC_OR_CLANG) && defined(__VFP_FP__))
+#if (COMPILER(GCC) && defined(__VFP_FP__))
return true;
#else
return false;
@@ -95,58 +99,51 @@ void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, Register
}
#endif // CPU(ARMV5_OR_LOWER)
-#if ENABLE(MASM_PROBE)
-
-#define INDENT printIndent(indentation)
+#if USE(MASM_PROBE)
-void MacroAssemblerARM::printCPURegisters(CPUState& cpu, int indentation)
+void MacroAssemblerARM::ProbeContext::dumpCPURegisters(const char* indentation)
{
- #define PRINT_GPREGISTER(_type, _regName) { \
+ #define DUMP_GPREGISTER(_type, _regName) { \
int32_t value = reinterpret_cast<int32_t>(cpu._regName); \
- INDENT, dataLogF("%5s: 0x%08x %d\n", #_regName, value, value) ; \
+ dataLogF("%s %5s: 0x%08x %d\n", indentation, #_regName, value, value) ; \
}
- FOR_EACH_CPU_GPREGISTER(PRINT_GPREGISTER)
- FOR_EACH_CPU_SPECIAL_REGISTER(PRINT_GPREGISTER)
- #undef PRINT_GPREGISTER
+ FOR_EACH_CPU_GPREGISTER(DUMP_GPREGISTER)
+ FOR_EACH_CPU_SPECIAL_REGISTER(DUMP_GPREGISTER)
+ #undef DUMP_GPREGISTER
- #define PRINT_FPREGISTER(_type, _regName) { \
- uint64_t* u = reinterpret_cast<uint64_t*>(&cpu._regName); \
+ #define DUMP_FPREGISTER(_type, _regName) { \
+ uint32_t* u = reinterpret_cast<uint32_t*>(&cpu._regName); \
double* d = reinterpret_cast<double*>(&cpu._regName); \
- INDENT, dataLogF("%5s: 0x%016llx %.13g\n", #_regName, *u, *d); \
+ dataLogF("%s %5s: 0x %08x %08x %12g\n", \
+ indentation, #_regName, u[1], u[0], d[0]); \
}
- FOR_EACH_CPU_FPREGISTER(PRINT_FPREGISTER)
- #undef PRINT_FPREGISTER
+ FOR_EACH_CPU_FPREGISTER(DUMP_FPREGISTER)
+ #undef DUMP_FPREGISTER
}
-#undef INDENT
-
-void MacroAssemblerARM::printRegister(MacroAssemblerARM::CPUState& cpu, RegisterID regID)
+void MacroAssemblerARM::ProbeContext::dump(const char* indentation)
{
- const char* name = CPUState::registerName(regID);
- union {
- void* voidPtr;
- intptr_t intptrValue;
- } u;
- u.voidPtr = cpu.registerValue(regID);
- dataLogF("%s:<%p %ld>", name, u.voidPtr, u.intptrValue);
-}
+ if (!indentation)
+ indentation = "";
-void MacroAssemblerARM::printRegister(MacroAssemblerARM::CPUState& cpu, FPRegisterID regID)
-{
- const char* name = CPUState::registerName(regID);
- union {
- double doubleValue;
- uint64_t uint64Value;
- } u;
- u.doubleValue = cpu.registerValue(regID);
- dataLogF("%s:<0x%016llx %.13g>", name, u.uint64Value, u.doubleValue);
+ dataLogF("%sProbeContext %p {\n", indentation, this);
+ dataLogF("%s probeFunction: %p\n", indentation, probeFunction);
+ dataLogF("%s arg1: %p %llu\n", indentation, arg1, reinterpret_cast<int64_t>(arg1));
+ dataLogF("%s arg2: %p %llu\n", indentation, arg2, reinterpret_cast<int64_t>(arg2));
+ dataLogF("%s cpu: {\n", indentation);
+
+ dumpCPURegisters(indentation);
+
+ dataLogF("%s }\n", indentation);
+ dataLogF("%s}\n", indentation);
}
+
extern "C" void ctiMasmProbeTrampoline();
// For details on "What code is emitted for the probe?" and "What values are in
-// the saved registers?", see comment for MacroAssemblerX86Common::probe() in
-// MacroAssemblerX86Common.cpp.
+// the saved registers?", see comment for MacroAssemblerX86::probe() in
+// MacroAssemblerX86_64.h.
void MacroAssemblerARM::probe(MacroAssemblerARM::ProbeFunction function, void* arg1, void* arg2)
{
@@ -163,7 +160,7 @@ void MacroAssemblerARM::probe(MacroAssemblerARM::ProbeFunction function, void* a
m_assembler.blx(RegisterID::S0);
}
-#endif // ENABLE(MASM_PROBE)
+#endif // USE(MASM_PROBE)
} // namespace JSC
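A probe, as wired up above, calls back into C++ with a ProbeContext holding the saved CPU state. A hedged usage sketch against the interface shown in this patch (myProbe and the tag values are hypothetical):

    // Hypothetical probe callback: log both arguments, then dump registers.
    static void myProbe(MacroAssemblerARM::ProbeContext* context)
    {
        dataLogF("probe hit: arg1=%p arg2=%p\n", context->arg1, context->arg2);
        context->dump("  "); // prints probeFunction, args, and all registers
    }

    // At codegen time, between two emitted instructions:
    //     masm.probe(myProbe, tagPointer, dataPointer);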
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
index 6cda896a3..7eae2ee01 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2013, 2014 Apple Inc.
+ * Copyright (C) 2008, 2013 Apple Inc.
* Copyright (C) 2009, 2010 University of Szeged
* All rights reserved.
*
@@ -35,7 +35,7 @@
namespace JSC {
-class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler, MacroAssemblerARM> {
+class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
static const int DoubleConditionMask = 0x0f;
static const int DoubleConditionBitSpecial = 0x10;
COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
@@ -370,7 +370,7 @@ public:
m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, ARMRegisters::S0, 0);
}
- void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+ void load8Signed(BaseIndex address, RegisterID dest)
{
m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
@@ -385,7 +385,7 @@ public:
m_assembler.baseIndexTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
- void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+ void load16Signed(BaseIndex address, RegisterID dest)
{
m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
@@ -414,18 +414,6 @@ public:
load16(address, dest);
}
- void abortWithReason(AbortReason reason)
- {
- move(TrustedImm32(reason), ARMRegisters::S0);
- breakpoint();
- }
-
- void abortWithReason(AbortReason reason, intptr_t misc)
- {
- move(TrustedImm32(misc), ARMRegisters::S1);
- abortWithReason(reason);
- }
-
ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
{
ConvertibleLoadLabel result(this);
@@ -471,23 +459,12 @@ public:
m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
- void store8(RegisterID src, ImplicitAddress address)
- {
- m_assembler.dtrUp(ARMAssembler::StoreUint8, src, address.base, address.offset);
- }
-
void store8(RegisterID src, const void* address)
{
move(TrustedImmPtr(address), ARMRegisters::S0);
m_assembler.dtrUp(ARMAssembler::StoreUint8, src, ARMRegisters::S0, 0);
}
- void store8(TrustedImm32 imm, ImplicitAddress address)
- {
- move(imm, ARMRegisters::S1);
- store8(ARMRegisters::S1, address);
- }
-
void store8(TrustedImm32 imm, const void* address)
{
move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
@@ -540,12 +517,6 @@ public:
m_assembler.pop(dest);
}
- void popPair(RegisterID dest1, RegisterID dest2)
- {
- m_assembler.pop(dest1);
- m_assembler.pop(dest2);
- }
-
void push(RegisterID src)
{
m_assembler.push(src);
@@ -563,12 +534,6 @@ public:
push(ARMRegisters::S0);
}
- void pushPair(RegisterID src1, RegisterID src2)
- {
- m_assembler.push(src2);
- m_assembler.push(src1);
- }
-
void move(TrustedImm32 imm, RegisterID dest)
{
m_assembler.moveImm(imm.m_value, dest);
@@ -942,7 +907,7 @@ public:
void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
{
if (mask.m_value == -1)
- m_assembler.tst(reg, reg);
+ m_assembler.cmp(0, reg);
else
m_assembler.tst(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
@@ -1056,13 +1021,6 @@ public:
return dataLabel;
}
- DataLabel32 moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
- {
- DataLabel32 dataLabel(this);
- m_assembler.ldrUniqueImmediate(dest, static_cast<ARMWord>(initialValue.m_value));
- return dataLabel;
- }
-
Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
@@ -1080,15 +1038,6 @@ public:
return jump;
}
- Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
- {
- load32(left, ARMRegisters::S1);
- ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
- dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
- Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
- return jump;
- }
-
DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
{
DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
@@ -1133,9 +1082,9 @@ public:
m_assembler.baseIndexTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
- void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ void loadDouble(const void* address, FPRegisterID dest)
{
- move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
+ move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
m_assembler.doubleDtrUp(ARMAssembler::LoadDouble, dest, ARMRegisters::S0, 0);
}
@@ -1154,9 +1103,9 @@ public:
m_assembler.baseIndexTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
- void storeDouble(FPRegisterID src, TrustedImmPtr address)
+ void storeDouble(FPRegisterID src, const void* address)
{
- move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
+ move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, ARMRegisters::S0, 0);
}
@@ -1184,7 +1133,7 @@ public:
void addDouble(AbsoluteAddress address, FPRegisterID dest)
{
- loadDouble(TrustedImmPtr(address.m_ptr), ARMRegisters::SD0);
+ loadDouble(address.m_ptr, ARMRegisters::SD0);
addDouble(ARMRegisters::SD0, dest);
}
@@ -1398,13 +1347,6 @@ public:
}
static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
- static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
-
- static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
- {
- UNREACHABLE_FOR_PLATFORM();
- return CodeLocationLabel();
- }
static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
{
@@ -1422,24 +1364,36 @@ public:
ARMAssembler::revertBranchPtrWithPatch(instructionStart.dataLocation(), reg, reinterpret_cast<uintptr_t>(initialValue) & 0xffff);
}
- static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
- {
- UNREACHABLE_FOR_PLATFORM();
- }
-
static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
{
UNREACHABLE_FOR_PLATFORM();
}
-#if ENABLE(MASM_PROBE)
- // Methods required by the MASM_PROBE mechanism as defined in
- // AbstractMacroAssembler.h.
- static void printCPURegisters(CPUState&, int indentation = 0);
- static void printRegister(CPUState&, RegisterID);
- static void printRegister(CPUState&, FPRegisterID);
+#if USE(MASM_PROBE)
+ struct CPUState {
+ #define DECLARE_REGISTER(_type, _regName) \
+ _type _regName;
+ FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+ };
+
+ struct ProbeContext;
+ typedef void (*ProbeFunction)(struct ProbeContext*);
+
+ struct ProbeContext {
+ ProbeFunction probeFunction;
+ void* arg1;
+ void* arg2;
+ CPUState cpu;
+
+ void dump(const char* indentation = 0);
+ private:
+ void dumpCPURegisters(const char* indentation);
+ };
+
+ // For details about probe(), see comment in MacroAssemblerX86_64.h.
void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
-#endif // ENABLE(MASM_PROBE)
+#endif // USE(MASM_PROBE)
protected:
ARMAssembler::Condition ARMCondition(RelationalCondition cond)
@@ -1496,7 +1450,7 @@ private:
ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
}
-#if ENABLE(MASM_PROBE)
+#if USE(MASM_PROBE)
inline TrustedImm32 trustedImm32FromPtr(void* ptr)
{
return TrustedImm32(TrustedImmPtr(ptr));
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
index c82585952..a128923fc 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,7 +34,7 @@
namespace JSC {
-class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler, MacroAssemblerARM64> {
+class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler> {
static const RegisterID dataTempRegister = ARM64Registers::ip0;
static const RegisterID memoryTempRegister = ARM64Registers::ip1;
static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
@@ -64,11 +64,13 @@ public:
Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
void* unlinkedCode() { return m_assembler.unlinkedCode(); }
- static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); }
- static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); }
- static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); }
- static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
- static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARM64Assembler::link(record, from, to); }
+ bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
+ JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
+ JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
+ void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) { return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
+ int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
+ void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
+ int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); }
static const Scale ScalePtr = TimesEight;
@@ -128,6 +130,7 @@ public:
// FIXME: Get reasonable implementations for these
static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
+ static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
// Integer operations:
@@ -198,10 +201,7 @@ public:
void add64(RegisterID src, RegisterID dest)
{
- if (src == ARM64Registers::sp)
- m_assembler.add<64>(dest, src, dest);
- else
- m_assembler.add<64>(dest, dest, src);
+ m_assembler.add<64>(dest, dest, src);
}
void add64(TrustedImm32 imm, RegisterID dest)
@@ -288,11 +288,6 @@ public:
store64(dataTempRegister, address.m_ptr);
}
- void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
- {
- add64(imm, srcDest);
- }
-
void add64(Address src, RegisterID dest)
{
load64(src, getCachedDataTempRegisterIDAndInvalidate());
@@ -476,13 +471,6 @@ public:
store32(dataTempRegister, address.m_ptr);
}
- void or32(TrustedImm32 imm, Address address)
- {
- load32(address, getCachedDataTempRegisterIDAndInvalidate());
- or32(imm, dataTempRegister, dataTempRegister);
- store32(dataTempRegister, address);
- }
-
void or64(RegisterID src, RegisterID dest)
{
or64(dest, src, dest);
@@ -503,7 +491,7 @@ public:
LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
if (logicalImm.isValid()) {
- m_assembler.orr<64>(dest, src, logicalImm);
+ m_assembler.orr<64>(dest, dest, logicalImm);
return;
}
@@ -551,12 +539,12 @@ public:
void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
{
- m_assembler.asr<64>(dest, src, shiftAmount);
+ m_assembler.lsr<64>(dest, src, shiftAmount);
}
void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
{
- m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
+ m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
}
void rshift64(RegisterID shiftAmount, RegisterID dest)
@@ -689,26 +677,6 @@ public:
urshift32(dest, imm, dest);
}
- void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
- {
- m_assembler.lsr<64>(dest, src, shiftAmount);
- }
-
- void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
- {
- m_assembler.lsr<64>(dest, src, imm.m_value & 0x1f);
- }
-
- void urshift64(RegisterID shiftAmount, RegisterID dest)
- {
- urshift64(dest, shiftAmount, dest);
- }
-
- void urshift64(TrustedImm32 imm, RegisterID dest)
- {
- urshift64(dest, imm, dest);
- }
-
void xor32(RegisterID src, RegisterID dest)
{
xor32(dest, src, dest);
@@ -732,7 +700,7 @@ public:
LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
if (logicalImm.isValid()) {
- m_assembler.eor<32>(dest, src, logicalImm);
+ m_assembler.eor<32>(dest, dest, logicalImm);
return;
}
@@ -771,7 +739,7 @@ public:
LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
if (logicalImm.isValid()) {
- m_assembler.eor<64>(dest, src, logicalImm);
+ m_assembler.eor<64>(dest, dest, logicalImm);
return;
}
@@ -825,18 +793,6 @@ public:
return label;
}
- void abortWithReason(AbortReason reason)
- {
- move(TrustedImm32(reason), dataTempRegister);
- breakpoint();
- }
-
- void abortWithReason(AbortReason reason, intptr_t misc)
- {
- move(TrustedImm64(misc), memoryTempRegister);
- abortWithReason(reason);
- }
-
ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
{
ConvertibleLoadLabel result(this);
@@ -918,16 +874,16 @@ public:
load16(address, dest);
}
- void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+ void load16Signed(BaseIndex address, RegisterID dest)
{
if (!address.offset && (!address.scale || address.scale == 1)) {
- m_assembler.ldrsh<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.ldrsh<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
return;
}
signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
- m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
+ m_assembler.ldrsh<64>(dest, address.base, memoryTempRegister);
}
void load8(ImplicitAddress address, RegisterID dest)
@@ -959,16 +915,16 @@ public:
m_cachedMemoryTempRegister.invalidate();
}
- void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+ void load8Signed(BaseIndex address, RegisterID dest)
{
if (!address.offset && !address.scale) {
- m_assembler.ldrsb<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.ldrsb<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
return;
}
signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
- m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
+ m_assembler.ldrsb<64>(dest, address.base, memoryTempRegister);
}
void store64(RegisterID src, ImplicitAddress address)
@@ -1124,15 +1080,6 @@ public:
m_assembler.strb(src, memoryTempRegister, 0);
}
- void store8(RegisterID src, ImplicitAddress address)
- {
- if (tryStoreWithOffset<8>(src, address.base, address.offset))
- return;
-
- signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
- m_assembler.str<8>(src, address.base, memoryTempRegister);
- }
-
void store8(TrustedImm32 imm, void* address)
{
if (!imm.m_value) {
@@ -1144,16 +1091,6 @@ public:
store8(dataTempRegister, address);
}
- void store8(TrustedImm32 imm, ImplicitAddress address)
- {
- if (!imm.m_value) {
- store8(ARM64Registers::zr, address);
- return;
- }
-
- move(imm, getCachedDataTempRegisterIDAndInvalidate());
- store8(dataTempRegister, address);
- }
// Floating-point operations:
@@ -1187,7 +1124,7 @@ public:
void addDouble(AbsoluteAddress address, FPRegisterID dest)
{
- loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
+ loadDouble(address.m_ptr, fpTempRegister);
addDouble(fpTempRegister, dest);
}
@@ -1213,14 +1150,9 @@ public:
m_assembler.scvtf<64, 32>(fpTempRegister, dest);
failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
- // Test for negative zero.
- if (negZeroCheck) {
- Jump valueIsNonZero = branchTest32(NonZero, dest);
- RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate();
- m_assembler.fmov<64>(scratch, src);
- failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero));
- valueIsNonZero.link(this);
- }
+ // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+ if (negZeroCheck)
+ failureCases.append(branchTest32(Zero, dest));
}
Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
@@ -1346,9 +1278,9 @@ public:
m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
}
- void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ void loadDouble(const void* address, FPRegisterID dest)
{
- moveToCachedReg(address, m_cachedMemoryTempRegister);
+ moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
}
@@ -1414,9 +1346,9 @@ public:
m_assembler.str<64>(src, address.base, memoryTempRegister);
}
- void storeDouble(FPRegisterID src, TrustedImmPtr address)
+ void storeDouble(FPRegisterID src, const void* address)
{
- moveToCachedReg(address, m_cachedMemoryTempRegister);
+ moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
}
@@ -1505,16 +1437,6 @@ public:
CRASH();
}
- void popPair(RegisterID dest1, RegisterID dest2)
- {
- m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16));
- }
-
- void pushPair(RegisterID src1, RegisterID src2)
- {
- m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16));
- }
-
void popToRestore(RegisterID dest)
{
m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
@@ -1524,15 +1446,6 @@ public:
{
m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
}
-
- void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
- {
- RegisterID reg = dataTempRegister;
- pushPair(reg, reg);
- move(imm, reg);
- store64(reg, stackPointerRegister);
- load64(Address(stackPointerRegister, 8), reg);
- }
void pushToSave(Address address)
{
@@ -1558,7 +1471,6 @@ public:
storeDouble(src, stackPointerRegister);
}
- static ptrdiff_t pushToSaveByteOffset() { return 16; }
// Register move operations:
@@ -1676,16 +1588,6 @@ public:
Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
{
- if (right == ARM64Registers::sp) {
- if (cond == Equal && left != ARM64Registers::sp) {
- // CMP can only use SP for the left argument, since we are testing for equality, the order
- // does not matter here.
- std::swap(left, right);
- } else {
- move(right, getCachedDataTempRegisterIDAndInvalidate());
- right = dataTempRegister;
- }
- }
m_assembler.cmp<64>(left, right);
return Jump(makeBranch(cond));
}
@@ -1728,12 +1630,6 @@ public:
return branch64(cond, memoryTempRegister, right);
}
- Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
- {
- load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
- return branch64(cond, memoryTempRegister, right);
- }
-
Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
{
ASSERT(!(0xffffff00 & right.m_value));
@@ -1761,32 +1657,6 @@ public:
return Jump(makeBranch(cond));
}
- void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
- {
- if (mask.m_value == -1)
- m_assembler.tst<32>(reg, reg);
- else {
- bool testedWithImmediate = false;
- if ((cond == Zero) || (cond == NonZero)) {
- LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
-
- if (logicalImm.isValid()) {
- m_assembler.tst<32>(reg, logicalImm);
- testedWithImmediate = true;
- }
- }
- if (!testedWithImmediate) {
- move(mask, getCachedDataTempRegisterIDAndInvalidate());
- m_assembler.tst<32>(reg, dataTempRegister);
- }
- }
- }
-
- Jump branch(ResultCondition cond)
- {
- return Jump(makeBranch(cond));
- }
-
Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
{
if (mask.m_value == -1) {
@@ -2334,17 +2204,10 @@ public:
return branch64(cond, left, dataTempRegister);
}
- ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
- {
- dataLabel = DataLabel32(this);
- moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
- return branch32(cond, left, dataTempRegister);
- }
-
- PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+ PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
{
m_makeJumpPatchable = true;
- Jump result = branch64(cond, left, TrustedImm64(right));
+ Jump result = branch32(cond, left, TrustedImm32(right));
m_makeJumpPatchable = false;
return PatchableJump(result);
}
@@ -2365,22 +2228,6 @@ public:
return PatchableJump(result);
}
- PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
- {
- m_makeJumpPatchable = true;
- Jump result = branch64(cond, reg, imm);
- m_makeJumpPatchable = false;
- return PatchableJump(result);
- }
-
- PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
- {
- m_makeJumpPatchable = true;
- Jump result = branch64(cond, left, right);
- m_makeJumpPatchable = false;
- return PatchableJump(result);
- }
-
PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
m_makeJumpPatchable = true;
@@ -2389,14 +2236,6 @@ public:
return PatchableJump(result);
}
- PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
- {
- m_makeJumpPatchable = true;
- Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
- m_makeJumpPatchable = false;
- return PatchableJump(result);
- }
-
PatchableJump patchableJump()
{
m_makeJumpPatchable = true;
@@ -2464,15 +2303,9 @@ public:
return ARM64Assembler::maxJumpReplacementSize();
}
- RegisterID scratchRegisterForBlinding()
- {
- // We *do not* have a scratch register for blinding.
- RELEASE_ASSERT_NOT_REACHED();
- return getCachedDataTempRegisterIDAndInvalidate();
- }
+ RegisterID scratchRegisterForBlinding() { return getCachedDataTempRegisterIDAndInvalidate(); }
static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
- static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
{
@@ -2485,12 +2318,6 @@ public:
return CodeLocationLabel();
}
- static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
- {
- UNREACHABLE_FOR_PLATFORM();
- return CodeLocationLabel();
- }
-
static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
{
reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
@@ -2501,11 +2328,6 @@ public:
UNREACHABLE_FOR_PLATFORM();
}
- static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
- {
- UNREACHABLE_FOR_PLATFORM();
- }
-
protected:
ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
{
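One subtlety in the truncation change above: truncating -0.0 yields integer 0, and 0 converts back to +0.0, which IEEE 754 compares equal to -0.0, so the round-trip compare alone can never flag it — the simpler code therefore treats every zero result as a potential failure. The sign-bit test the longer removed sequence performed looks like this in plain C++ (a sketch, not the emitted code):

    #include <cstdint>
    #include <cstring>

    // True iff d is negative zero: sign bit set, all other bits clear.
    static bool isNegativeZero(double d)
    {
        uint64_t bits;
        memcpy(&bits, &d, sizeof bits);
        return bits == 0x8000000000000000ull;
    }
    // Note -0.0 == 0.0 is true, so only the bit pattern distinguishes them.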
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp
deleted file mode 100644
index 6651fff06..000000000
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
-#include "MacroAssemblerARMv7.h"
-
-namespace JSC {
-
-#if ENABLE(MASM_PROBE)
-
-#define INDENT printIndent(indentation)
-
-void MacroAssemblerARMv7::printCPURegisters(CPUState& cpu, int indentation)
-{
- #define PRINT_GPREGISTER(_type, _regName) { \
- int32_t value = reinterpret_cast<int32_t>(cpu._regName); \
- INDENT, dataLogF("%5s: 0x%08x %d\n", #_regName, value, value) ; \
- }
- FOR_EACH_CPU_GPREGISTER(PRINT_GPREGISTER)
- FOR_EACH_CPU_SPECIAL_REGISTER(PRINT_GPREGISTER)
- #undef PRINT_GPREGISTER
-
- #define PRINT_FPREGISTER(_type, _regName) { \
- uint64_t* u = reinterpret_cast<uint64_t*>(&cpu._regName); \
- double* d = reinterpret_cast<double*>(&cpu._regName); \
- INDENT, dataLogF("%5s: 0x%016llx %.13g\n", #_regName, *u, *d); \
- }
- FOR_EACH_CPU_FPREGISTER(PRINT_FPREGISTER)
- #undef PRINT_FPREGISTER
-}
-
-#undef INDENT
-
-void MacroAssemblerARMv7::printRegister(MacroAssemblerARMv7::CPUState& cpu, RegisterID regID)
-{
- const char* name = CPUState::registerName(regID);
- union {
- void* voidPtr;
- intptr_t intptrValue;
- } u;
- u.voidPtr = cpu.registerValue(regID);
- dataLogF("%s:<%p %ld>", name, u.voidPtr, u.intptrValue);
-}
-
-void MacroAssemblerARMv7::printRegister(MacroAssemblerARMv7::CPUState& cpu, FPRegisterID regID)
-{
- const char* name = CPUState::registerName(regID);
- union {
- double doubleValue;
- uint64_t uint64Value;
- } u;
- u.doubleValue = cpu.registerValue(regID);
- dataLogF("%s:<0x%016llx %.13g>", name, u.uint64Value, u.doubleValue);
-}
-
-extern "C" void ctiMasmProbeTrampoline();
-
-// For details on "What code is emitted for the probe?" and "What values are in
-// the saved registers?", see comment for MacroAssemblerX86Common::probe() in
-// MacroAssemblerX86Common.cpp.
-
-void MacroAssemblerARMv7::probe(MacroAssemblerARMv7::ProbeFunction function, void* arg1, void* arg2)
-{
- push(RegisterID::lr);
- push(RegisterID::lr);
- add32(TrustedImm32(8), RegisterID::sp, RegisterID::lr);
- store32(RegisterID::lr, ArmAddress(RegisterID::sp, 4));
- push(RegisterID::ip);
- push(RegisterID::r0);
- // The following uses RegisterID::ip. So, they must come after we push ip above.
- push(trustedImm32FromPtr(arg2));
- push(trustedImm32FromPtr(arg1));
- push(trustedImm32FromPtr(function));
-
- move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::ip);
- m_assembler.blx(RegisterID::ip);
-}
-#endif // ENABLE(MASM_PROBE)
-
-} // namespace JSC
-
-#endif // ENABLE(ASSEMBLER)
-
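For context on the probe mechanism whose ARMv7 implementation is deleted above: the trampoline saves the CPU state into a ProbeContext, calls a user-supplied function with a pointer to it, then restores the state before resuming JIT-generated code. A minimal sketch of a user probe function (illustrative only, not part of the diff; logProbeHit and the tag argument are hypothetical names):

    // Assumes the ProbeContext layout declared in MacroAssemblerARMv7.h
    // (probeFunction, arg1, arg2, cpu).
    static void logProbeHit(MacroAssemblerARMv7::ProbeContext* context)
    {
        // arg1/arg2 are the two opaque words passed to probe() below.
        dataLogF("probe hit: arg1=%p arg2=%p\n", context->arg1, context->arg2);
    }

    // While generating code:
    //     masm.probe(logProbeHit, tag, 0);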
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
index 2e71e61d8..68a04fd22 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2010, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
* Copyright (C) 2010 University of Szeged
*
* Redistribution and use in source and binary forms, with or without
@@ -34,7 +34,7 @@
namespace JSC {
-class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler, MacroAssemblerARMv7> {
+class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
static const RegisterID dataTempRegister = ARMRegisters::ip;
static const RegisterID addressTempRegister = ARMRegisters::r6;
@@ -62,11 +62,12 @@ public:
Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
void* unlinkedCode() { return m_assembler.unlinkedCode(); }
- static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); }
- static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); }
- static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); }
- static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
- static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARMv7Assembler::link(record, from, to); }
+ bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
+ JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
+ JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
+ void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) { return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
+ int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
+ void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
struct ArmAddress {
enum AddressType {
@@ -169,14 +170,6 @@ public:
void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
-
- // For adds with stack pointer destination, moving the src first to sp is
- // needed to avoid unpredictable instruction
- if (dest == ARMRegisters::sp && src != dest) {
- move(src, ARMRegisters::sp);
- src = ARMRegisters::sp;
- }
-
if (armImm.isValid())
m_assembler.add(dest, src, armImm);
else {
@@ -225,11 +218,6 @@ public:
store32(dataTempRegister, address.m_ptr);
}
- void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
- {
- add32(imm, srcDest);
- }
-
void add64(TrustedImm32 imm, AbsoluteAddress address)
{
move(TrustedImmPtr(address.m_ptr), addressTempRegister);
@@ -341,13 +329,6 @@ public:
store32(dataTempRegister, addressTempRegister);
}
- void or32(TrustedImm32 imm, Address address)
- {
- load32(address, dataTempRegister);
- or32(imm, dataTempRegister, dataTempRegister);
- store32(dataTempRegister, address);
- }
-
void or32(TrustedImm32 imm, RegisterID dest)
{
or32(imm, dest, dest);
@@ -546,7 +527,7 @@ private:
}
}
- void load16SignedExtendTo32(ArmAddress address, RegisterID dest)
+ void load16Signed(ArmAddress address, RegisterID dest)
{
ASSERT(address.type == ArmAddress::HasIndex);
m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
@@ -566,7 +547,7 @@ private:
}
}
- void load8SignedExtendTo32(ArmAddress address, RegisterID dest)
+ void load8Signed(ArmAddress address, RegisterID dest)
{
ASSERT(address.type == ArmAddress::HasIndex);
m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
@@ -643,18 +624,6 @@ public:
m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
}
- void abortWithReason(AbortReason reason)
- {
- move(TrustedImm32(reason), dataTempRegister);
- breakpoint();
- }
-
- void abortWithReason(AbortReason reason, intptr_t misc)
- {
- move(TrustedImm32(misc), addressTempRegister);
- abortWithReason(reason);
- }
-
ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
{
ConvertibleLoadLabel result(this);
@@ -668,7 +637,7 @@ public:
load8(setupArmAddress(address), dest);
}
- void load8SignedExtendTo32(ImplicitAddress, RegisterID)
+ void load8Signed(ImplicitAddress, RegisterID)
{
UNREACHABLE_FOR_PLATFORM();
}
@@ -678,9 +647,9 @@ public:
load8(setupArmAddress(address), dest);
}
- void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+ void load8Signed(BaseIndex address, RegisterID dest)
{
- load8SignedExtendTo32(setupArmAddress(address), dest);
+ load8Signed(setupArmAddress(address), dest);
}
void load8(const void* address, RegisterID dest)
@@ -714,9 +683,9 @@ public:
m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
}
- void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+ void load16Signed(BaseIndex address, RegisterID dest)
{
- load16SignedExtendTo32(setupArmAddress(address), dest);
+ load16Signed(setupArmAddress(address), dest);
}
void load16(ImplicitAddress address, RegisterID dest)
@@ -730,7 +699,7 @@ public:
}
}
- void load16SignedExtendTo32(ImplicitAddress, RegisterID)
+ void load16Signed(ImplicitAddress, RegisterID)
{
UNREACHABLE_FOR_PLATFORM();
}
@@ -776,11 +745,6 @@ public:
store32(dataTempRegister, address);
}
- void store8(RegisterID src, Address address)
- {
- store8(src, setupArmAddress(address));
- }
-
void store8(RegisterID src, BaseIndex address)
{
store8(src, setupArmAddress(address));
@@ -798,12 +762,6 @@ public:
store8(dataTempRegister, address);
}
- void store8(TrustedImm32 imm, Address address)
- {
- move(imm, dataTempRegister);
- store8(dataTempRegister, address);
- }
-
void store16(RegisterID src, BaseIndex address)
{
store16(src, setupArmAddress(address));
@@ -898,9 +856,9 @@ public:
m_assembler.vmov(dest, src);
}
- void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ void loadDouble(const void* address, FPRegisterID dest)
{
- move(address, addressTempRegister);
+ move(TrustedImmPtr(address), addressTempRegister);
m_assembler.vldr(dest, addressTempRegister, 0);
}
@@ -934,9 +892,9 @@ public:
m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
}
- void storeDouble(FPRegisterID src, TrustedImmPtr address)
+ void storeDouble(FPRegisterID src, const void* address)
{
- move(address, addressTempRegister);
+ move(TrustedImmPtr(address), addressTempRegister);
storeDouble(src, addressTempRegister);
}
@@ -974,7 +932,7 @@ public:
void addDouble(AbsoluteAddress address, FPRegisterID dest)
{
- loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
+ loadDouble(address.m_ptr, fpTempRegister);
m_assembler.vadd(dest, dest, fpTempRegister);
}
@@ -1180,12 +1138,14 @@ public:
void pop(RegisterID dest)
{
- m_assembler.pop(dest);
+ // load postindexed with writeback
+ m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
}
void push(RegisterID src)
{
- m_assembler.push(src);
+ // store preindexed with writeback
+ m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
}
void push(Address address)
@@ -1200,16 +1160,6 @@ public:
push(dataTempRegister);
}
- void popPair(RegisterID dest1, RegisterID dest2)
- {
- m_assembler.pop(1 << dest1 | 1 << dest2);
- }
-
- void pushPair(RegisterID src1, RegisterID src2)
- {
- m_assembler.push(1 << src1 | 1 << src2);
- }
-
// Register move operations:
//
// Move values in registers.
@@ -1308,14 +1258,18 @@ private:
void compare32(RegisterID left, TrustedImm32 right)
{
int32_t imm = right.m_value;
- ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
- if (armImm.isValid())
- m_assembler.cmp(left, armImm);
- else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
- m_assembler.cmn(left, armImm);
+ if (!imm)
+ m_assembler.tst(left, left);
else {
- move(TrustedImm32(imm), dataTempRegister);
- m_assembler.cmp(left, dataTempRegister);
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+ if (armImm.isValid())
+ m_assembler.cmp(left, armImm);
+ else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
+ m_assembler.cmn(left, armImm);
+ else {
+ move(TrustedImm32(imm), dataTempRegister);
+ m_assembler.cmp(left, dataTempRegister);
+ }
}
}
@@ -1327,34 +1281,16 @@ private:
m_assembler.tst(reg, reg);
else {
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
- if (armImm.isValid()) {
- if (reg == ARMRegisters::sp) {
- move(reg, addressTempRegister);
- m_assembler.tst(addressTempRegister, armImm);
- } else
- m_assembler.tst(reg, armImm);
- } else {
+ if (armImm.isValid())
+ m_assembler.tst(reg, armImm);
+ else {
move(mask, dataTempRegister);
- if (reg == ARMRegisters::sp) {
- move(reg, addressTempRegister);
- m_assembler.tst(addressTempRegister, dataTempRegister);
- } else
- m_assembler.tst(reg, dataTempRegister);
+ m_assembler.tst(reg, dataTempRegister);
}
}
}
public:
- void test32(ResultCondition, RegisterID reg, TrustedImm32 mask)
- {
- test32(reg, mask);
- }
-
- Jump branch(ResultCondition cond)
- {
- return Jump(makeBranch(cond));
- }
-
Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
{
m_assembler.cmp(left, right);
@@ -1413,12 +1349,6 @@ public:
return branch32(cond, addressTempRegister, right);
}
- Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
- {
- load32(left, dataTempRegister);
- return branch32(cond, dataTempRegister, right);
- }
-
Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
{
compare32(left, right);
@@ -1776,13 +1706,6 @@ public:
return branch32(cond, addressTempRegister, dataTempRegister);
}
- ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
- {
- load32(left, addressTempRegister);
- dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
- return branch32(cond, addressTempRegister, dataTempRegister);
- }
-
PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
{
m_makeJumpPatchable = true;
@@ -1815,14 +1738,6 @@ public:
return PatchableJump(result);
}
- PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
- {
- m_makeJumpPatchable = true;
- Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
- m_makeJumpPatchable = false;
- return PatchableJump(result);
- }
-
PatchableJump patchableJump()
{
padBeforePatch();
@@ -1855,13 +1770,17 @@ public:
}
+ int executableOffsetFor(int location)
+ {
+ return m_assembler.executableOffsetFor(location);
+ }
+
static FunctionPtr readCallTarget(CodeLocationCall call)
{
return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
}
static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
- static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
{
@@ -1885,30 +1804,36 @@ public:
return CodeLocationLabel();
}
- static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
- {
- UNREACHABLE_FOR_PLATFORM();
- return CodeLocationLabel();
- }
-
static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
{
UNREACHABLE_FOR_PLATFORM();
}
- static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
- {
- UNREACHABLE_FOR_PLATFORM();
- }
+#if USE(MASM_PROBE)
+ struct CPUState {
+ #define DECLARE_REGISTER(_type, _regName) \
+ _type _regName;
+ FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+ };
+
+ struct ProbeContext;
+ typedef void (*ProbeFunction)(struct ProbeContext*);
+
+ struct ProbeContext {
+ ProbeFunction probeFunction;
+ void* arg1;
+ void* arg2;
+ CPUState cpu;
+
+ void dump(const char* indentation = 0);
+ private:
+ void dumpCPURegisters(const char* indentation);
+ };
-#if ENABLE(MASM_PROBE)
- // Methods required by the MASM_PROBE mechanism as defined in
- // AbstractMacroAssembler.h.
- static void printCPURegisters(CPUState&, int indentation = 0);
- static void printRegister(CPUState&, RegisterID);
- static void printRegister(CPUState&, FPRegisterID);
+ // For details about probe(), see comment in MacroAssemblerX86_64.h.
void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
-#endif // ENABLE(MASM_PROBE)
+#endif // USE(MASM_PROBE)
protected:
ALWAYS_INLINE Jump jump()
@@ -2020,7 +1945,7 @@ private:
ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}
-#if ENABLE(MASM_PROBE)
+#if USE(MASM_PROBE)
inline TrustedImm32 trustedImm32FromPtr(void* ptr)
{
return TrustedImm32(TrustedImmPtr(ptr));
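The push/pop hunk above replaces the dedicated push/pop encodings with plain str/ldr using writeback addressing. A sketch of the equivalent semantics in portable terms (illustrative only, not part of the diff):

    #include <cstdint>

    // "Pre-indexed with writeback" adjusts the base register before the
    // access; "post-indexed with writeback" adjusts it after, which is
    // exactly the push/pop pattern:
    void push(uint32_t*& sp, uint32_t value)
    {
        *--sp = value; // str value, [sp, #-4]!  (pre-index, writeback)
    }

    uint32_t pop(uint32_t*& sp)
    {
        return *sp++;  // ldr value, [sp], #4    (post-index, writeback)
    }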
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index b4d6c0bfb..5f8ba8a92 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -36,7 +36,7 @@
// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
// instruction address on the platform (for example, check any alignment requirements).
-#if CPU(ARM_THUMB2) && ENABLE(JIT)
+#if CPU(ARM_THUMB2) && !ENABLE(LLINT_C_LOOP)
// ARM instructions must be 16-bit aligned. Thumb2 code pointers to be loaded
// into the processor are decorated with the bottom bit set, while traditional ARM has
// the lower bit clear. Since we don't know what kind of pointer, we check for both
@@ -132,12 +132,6 @@ public:
ASSERT_VALID_CODE_POINTER(m_value);
}
- template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5, typename argType6>
- FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5, argType6))
- : m_value((void*)value)
- {
- ASSERT_VALID_CODE_POINTER(m_value);
- }
// MSVC doesn't seem to treat functions with different calling conventions as
// different types; these methods already defined for fastcall, below.
#if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS)
@@ -260,11 +254,6 @@ public:
}
void* value() const { return m_value; }
-
- void dump(PrintStream& out) const
- {
- out.print(RawPointer(m_value));
- }
private:
void* m_value;
@@ -299,10 +288,12 @@ public:
return result;
}
- static MacroAssemblerCodePtr createLLIntCodePtr(OpcodeID codeId)
+#if ENABLE(LLINT)
+ static MacroAssemblerCodePtr createLLIntCodePtr(LLIntCode codeId)
{
return createFromExecutableAddress(LLInt::getCodePtr(codeId));
}
+#endif
explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
: m_value(ra.value())
@@ -318,7 +309,10 @@ public:
void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
#endif
- explicit operator bool() const { return m_value; }
+ bool operator!() const
+ {
+ return !m_value;
+ }
bool operator==(const MacroAssemblerCodePtr& other) const
{
@@ -410,11 +404,13 @@ public:
return MacroAssemblerCodeRef(codePtr);
}
+#if ENABLE(LLINT)
// Helper for creating self-managed code refs from LLInt.
- static MacroAssemblerCodeRef createLLIntCodeRef(OpcodeID codeId)
+ static MacroAssemblerCodeRef createLLIntCodeRef(LLIntCode codeId)
{
return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(codeId)));
}
+#endif
ExecutableMemoryHandle* executableMemory() const
{
@@ -438,7 +434,7 @@ public:
return JSC::tryToDisassemble(m_codePtr, size(), prefix, WTF::dataFile());
}
- explicit operator bool() const { return !!m_codePtr; }
+ bool operator!() const { return !m_codePtr; }
void dump(PrintStream& out) const
{
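The hunks above swap `explicit operator bool()` for `bool operator!()` on the code pointer/ref types, which changes how call sites may test validity. A toy sketch of the difference (illustrative only, not part of the diff):

    // With only operator!, emptiness is tested by negation; a plain
    // "if (ptr)" no longer compiles because there is no conversion to bool.
    struct WithOperatorNot {
        void* m_value;
        bool operator!() const { return !m_value; }
    };

    struct WithExplicitBool {
        void* m_value;
        explicit operator bool() const { return !!m_value; }
    };

    void test(const WithOperatorNot& a, const WithExplicitBool& b)
    {
        if (!a) { /* empty; "if (a)" would not compile */ }
        if (b) { /* explicit operator bool still works in boolean contexts */ }
    }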
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
index 1a9312829..a30247d33 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
* Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,7 +34,7 @@
namespace JSC {
-class MacroAssemblerMIPS : public AbstractMacroAssembler<MIPSAssembler, MacroAssemblerMIPS> {
+class MacroAssemblerMIPS : public AbstractMacroAssembler<MIPSAssembler> {
public:
typedef MIPSRegisters::FPRegisterID FPRegisterID;
@@ -707,7 +707,7 @@ public:
m_assembler.lbu(dest, addrTempRegister, 0);
}
- void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+ void load8Signed(BaseIndex address, RegisterID dest)
{
if (address.offset >= -32768 && address.offset <= 32767
&& !m_fixedWidth) {
@@ -919,7 +919,7 @@ public:
}
}
- void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+ void load16Signed(BaseIndex address, RegisterID dest)
{
if (address.offset >= -32768 && address.offset <= 32767
&& !m_fixedWidth) {
@@ -2120,16 +2120,6 @@ public:
return temp;
}
- Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
- {
- m_fixedWidth = true;
- load32(left, dataTempRegister);
- dataLabel = moveWithPatch(initialRightValue, immTempRegister);
- Jump temp = branch32(cond, dataTempRegister, immTempRegister);
- m_fixedWidth = false;
- return temp;
- }
-
DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
{
m_fixedWidth = true;
@@ -2278,7 +2268,7 @@ public:
#endif
}
- void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ void loadDouble(const void* address, FPRegisterID dest)
{
#if WTF_MIPS_ISA(1)
/*
@@ -2286,7 +2276,7 @@ public:
lwc1 dest, 0(addrTemp)
lwc1 dest+1, 4(addrTemp)
*/
- move(address, addrTempRegister);
+ move(TrustedImmPtr(address), addrTempRegister);
m_assembler.lwc1(dest, addrTempRegister, 0);
m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4);
#else
@@ -2294,7 +2284,7 @@ public:
li addrTemp, address
ldc1 dest, 0(addrTemp)
*/
- move(address, addrTempRegister);
+ move(TrustedImmPtr(address), addrTempRegister);
m_assembler.ldc1(dest, addrTempRegister, 0);
#endif
}
@@ -2416,14 +2406,14 @@ public:
#endif
}
- void storeDouble(FPRegisterID src, TrustedImmPtr address)
+ void storeDouble(FPRegisterID src, const void* address)
{
#if WTF_MIPS_ISA(1)
- move(address, addrTempRegister);
+ move(TrustedImmPtr(address), addrTempRegister);
m_assembler.swc1(src, addrTempRegister, 0);
m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4);
#else
- move(address, addrTempRegister);
+ move(TrustedImmPtr(address), addrTempRegister);
m_assembler.sdc1(src, addrTempRegister, 0);
#endif
}
@@ -2459,7 +2449,7 @@ public:
void addDouble(AbsoluteAddress address, FPRegisterID dest)
{
- loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
+ loadDouble(address.m_ptr, fpTempRegister);
m_assembler.addd(dest, dest, fpTempRegister);
}
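The WTF_MIPS_ISA(1) paths above split 64-bit FP loads and stores into two 32-bit word accesses because MIPS I has no 64-bit FP load/store; the double occupies an even/odd FPR pair. A portable sketch of what the two lwc1 instructions accomplish (illustrative only, not part of the diff):

    #include <cstdint>

    // Loads a 64-bit double as two 32-bit words into a register pair,
    // mirroring the lwc1 dest / lwc1 dest+1 sequence shown in the
    // comments above.
    void loadDoubleAsWordPair(const void* address, uint32_t pair[2])
    {
        const uint32_t* words = static_cast<const uint32_t*>(address);
        pair[0] = words[0]; // lwc1 dest,   0(addrTemp)
        pair[1] = words[1]; // lwc1 dest+1, 4(addrTemp)
    }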
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h b/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h
index 6857c60a4..32ea2b1b5 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
* Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
- * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,7 +36,7 @@
namespace JSC {
-class MacroAssemblerSH4 : public AbstractMacroAssembler<SH4Assembler, MacroAssemblerSH4> {
+class MacroAssemblerSH4 : public AbstractMacroAssembler<SH4Assembler> {
public:
typedef SH4Assembler::FPRegisterID FPRegisterID;
@@ -718,13 +718,13 @@ public:
m_assembler.extub(dest, dest);
}
- void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+ void load8Signed(BaseIndex address, RegisterID dest)
{
RegisterID scr = claimScratch();
move(address.index, scr);
lshift32(TrustedImm32(address.scale), scr);
add32(address.base, scr);
- load8SignedExtendTo32(scr, address.offset, dest);
+ load8Signed(scr, address.offset, dest);
releaseScratch(scr);
}
@@ -770,7 +770,7 @@ public:
releaseScratch(scr);
}
- void load8SignedExtendTo32(RegisterID base, int offset, RegisterID dest)
+ void load8Signed(RegisterID base, int offset, RegisterID dest)
{
if (!offset) {
m_assembler.movbMemReg(base, dest);
@@ -798,7 +798,7 @@ public:
void load8(RegisterID base, int offset, RegisterID dest)
{
- load8SignedExtendTo32(base, offset, dest);
+ load8Signed(base, offset, dest);
m_assembler.extub(dest, dest);
}
@@ -858,14 +858,14 @@ public:
m_assembler.extuw(dest, dest);
}
- void load16SignedExtendTo32(RegisterID src, RegisterID dest)
+ void load16Signed(RegisterID src, RegisterID dest)
{
m_assembler.movwMemReg(src, dest);
}
void load16(BaseIndex address, RegisterID dest)
{
- load16SignedExtendTo32(address, dest);
+ load16Signed(address, dest);
m_assembler.extuw(dest, dest);
}
@@ -875,7 +875,7 @@ public:
m_assembler.extuw(dest, dest);
}
- void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+ void load16Signed(BaseIndex address, RegisterID dest)
{
RegisterID scr = claimScratch();
@@ -887,7 +887,7 @@ public:
m_assembler.movwR0mr(scr, dest);
else {
add32(address.base, scr);
- load16SignedExtendTo32(scr, dest);
+ load16Signed(scr, dest);
}
releaseScratch(scr);
@@ -931,19 +931,6 @@ public:
releaseScratch(srcval);
}
- void store8(TrustedImm32 imm, Address address)
- {
- ASSERT((imm.m_value >= -128) && (imm.m_value <= 127));
- RegisterID dstptr = claimScratch();
- move(address.base, dstptr);
- add32(TrustedImm32(address.offset), dstptr);
- RegisterID srcval = claimScratch();
- move(imm, srcval);
- m_assembler.movbRegMem(srcval, dstptr);
- releaseScratch(dstptr);
- releaseScratch(srcval);
- }
-
void store16(RegisterID src, BaseIndex address)
{
RegisterID scr = claimScratch();
@@ -1168,10 +1155,10 @@ public:
releaseScratch(scr);
}
- void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ void loadDouble(const void* address, FPRegisterID dest)
{
RegisterID scr = claimScratch();
- move(address, scr);
+ move(TrustedImmPtr(address), scr);
m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
m_assembler.fmovsReadrm(scr, dest);
releaseScratch(scr);
@@ -1217,10 +1204,10 @@ public:
}
}
- void storeDouble(FPRegisterID src, TrustedImmPtr address)
+ void storeDouble(FPRegisterID src, const void* address)
{
RegisterID scr = claimScratch();
- m_assembler.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address.m_value)) + 8, scr);
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address)) + 8, scr);
m_assembler.fmovsWriterndec(src, scr);
m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr);
releaseScratch(scr);
@@ -1233,7 +1220,7 @@ public:
void addDouble(AbsoluteAddress address, FPRegisterID dest)
{
- loadDouble(TrustedImmPtr(address.m_ptr), fscratch);
+ loadDouble(address.m_ptr, fscratch);
addDouble(fscratch, dest);
}
@@ -1746,14 +1733,6 @@ public:
return dataLabel;
}
- DataLabel32 moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
- {
- m_assembler.ensureSpace(m_assembler.maxInstructionSize, sizeof(uint32_t));
- DataLabel32 dataLabel(this);
- m_assembler.loadConstantUnReusable(static_cast<uint32_t>(initialValue.m_value), dest);
- return dataLabel;
- }
-
void move(RegisterID src, RegisterID dest)
{
if (src != dest)
@@ -2161,29 +2140,6 @@ public:
return result ? branchTrue() : branchFalse();
}
- Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
- {
- ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
-
- if (cond == Overflow) {
- RegisterID srcVal = claimScratch();
- load32(src, srcVal);
- m_assembler.addvlRegReg(srcVal, dest);
- releaseScratch(srcVal);
- return branchTrue();
- }
-
- add32(src, dest);
-
- if ((cond == Signed) || (cond == PositiveOrZero)) {
- m_assembler.cmppz(dest);
- return (cond == Signed) ? branchFalse() : branchTrue();
- }
-
- compare32(0, dest, Equal);
- return (cond == NonZero) ? branchFalse() : branchTrue();
- }
-
Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
@@ -2463,23 +2419,6 @@ public:
return branchTrue();
}
- Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
- {
- RegisterID scr = claimScratch();
-
- m_assembler.loadConstant(left.offset, scr);
- m_assembler.addlRegReg(left.base, scr);
- m_assembler.movlMemReg(scr, scr);
- RegisterID scr1 = claimScratch();
- m_assembler.ensureSpace(m_assembler.maxInstructionSize + 10, 2 * sizeof(uint32_t));
- dataLabel = moveWithPatch(initialRightValue, scr1);
- m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
- releaseScratch(scr);
- releaseScratch(scr1);
-
- return (cond == NotEqual) ? branchFalse() : branchTrue();
- }
-
void ret()
{
m_assembler.ret();
@@ -2529,18 +2468,6 @@ public:
m_assembler.synco();
}
- void abortWithReason(AbortReason reason)
- {
- move(TrustedImm32(reason), SH4Registers::r0);
- breakpoint();
- }
-
- void abortWithReason(AbortReason reason, intptr_t misc)
- {
- move(TrustedImm32(misc), SH4Registers::r1);
- abortWithReason(reason);
- }
-
static FunctionPtr readCallTarget(CodeLocationCall call)
{
return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call.dataLocation())));
@@ -2558,8 +2485,6 @@ public:
static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
- static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
-
static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
{
return label.labelAtOffset(0);
@@ -2576,22 +2501,11 @@ public:
return CodeLocationLabel();
}
- static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
- {
- UNREACHABLE_FOR_PLATFORM();
- return CodeLocationLabel();
- }
-
static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
{
UNREACHABLE_FOR_PLATFORM();
}
- static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
- {
- UNREACHABLE_FOR_PLATFORM();
- }
-
protected:
SH4Assembler::Condition SH4Condition(RelationalCondition cond)
{
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
index bdd9e57ba..547158fa7 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,6 +30,10 @@
#include "MacroAssemblerX86Common.h"
+#if USE(MASM_PROBE)
+#include <wtf/StdLibExtras.h>
+#endif
+
namespace JSC {
class MacroAssemblerX86 : public MacroAssemblerX86Common {
@@ -107,18 +111,6 @@ public:
m_assembler.movzbl_mr(address, dest);
}
- void abortWithReason(AbortReason reason)
- {
- move(TrustedImm32(reason), X86Registers::eax);
- breakpoint();
- }
-
- void abortWithReason(AbortReason reason, intptr_t misc)
- {
- move(TrustedImm32(misc), X86Registers::edx);
- abortWithReason(reason);
- }
-
ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
{
ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
@@ -131,11 +123,11 @@ public:
m_assembler.addsd_mr(address.m_ptr, dest);
}
- void storeDouble(FPRegisterID src, TrustedImmPtr address)
+ void storeDouble(FPRegisterID src, const void* address)
{
ASSERT(isSSE2Present());
- ASSERT(address.m_value);
- m_assembler.movsd_rm(src, address.m_value);
+ ASSERT(address);
+ m_assembler.movsd_rm(src, address);
}
void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
@@ -164,14 +156,12 @@ public:
m_assembler.movb_i8m(imm.m_value, address);
}
+ // Possibly clobbers src.
void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
{
- ASSERT(isSSE2Present());
- m_assembler.pextrw_irr(3, src, dest1);
- m_assembler.pextrw_irr(2, src, dest2);
- lshift32(TrustedImm32(16), dest1);
- or32(dest1, dest2);
movePackedToInt32(src, dest1);
+ rshiftPacked(TrustedImm32(32), src);
+ movePackedToInt32(src, dest2);
}
void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
@@ -267,14 +257,6 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
- {
- padBeforePatch();
- m_assembler.cmpl_im_force32(initialRightValue.m_value, left.offset, left.base);
- dataLabel = DataLabel32(this);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
-
DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
{
padBeforePatch();
@@ -283,6 +265,7 @@ public:
}
static bool supportsFloatingPoint() { return isSSE2Present(); }
+ // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
static bool supportsFloatingPointTruncate() { return isSSE2Present(); }
static bool supportsFloatingPointSqrt() { return isSSE2Present(); }
static bool supportsFloatingPointAbs() { return isSSE2Present(); }
@@ -294,7 +277,6 @@ public:
}
static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
- static bool canJumpReplacePatchableBranch32WithPatch() { return true; }
static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
{
@@ -317,17 +299,6 @@ public:
return label.labelAtOffset(-totalBytes);
}
- static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
- {
- const int opcodeBytes = 1;
- const int modRMBytes = 1;
- const int offsetBytes = 0;
- const int immediateBytes = 4;
- const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes;
- ASSERT(totalBytes >= maxJumpReplacementSize());
- return label.labelAtOffset(-totalBytes);
- }
-
static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
{
X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), reg);
@@ -339,11 +310,10 @@ public:
X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), 0, address.base);
}
- static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address address, int32_t initialValue)
- {
- ASSERT(!address.offset);
- X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), initialValue, 0, address.base);
- }
+#if USE(MASM_PROBE)
+ // For details about probe(), see comment in MacroAssemblerX86_64.h.
+ void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
+#endif // USE(MASM_PROBE)
private:
friend class LinkBuffer;
@@ -363,8 +333,46 @@ private:
{
X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}
+
+#if USE(MASM_PROBE)
+ inline TrustedImm32 trustedImm32FromPtr(void* ptr)
+ {
+ return TrustedImm32(TrustedImmPtr(ptr));
+ }
+
+ inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function)
+ {
+ return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+ }
+
+ inline TrustedImm32 trustedImm32FromPtr(void (*function)())
+ {
+ return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+ }
+#endif
};
+#if USE(MASM_PROBE)
+
+extern "C" void ctiMasmProbeTrampoline();
+
+// For details on "What code is emitted for the probe?" and "What values are in
+// the saved registers?", see comment for MacroAssemblerX86::probe() in
+// MacroAssemblerX86_64.h.
+
+inline void MacroAssemblerX86::probe(MacroAssemblerX86::ProbeFunction function, void* arg1, void* arg2)
+{
+ push(RegisterID::esp);
+ push(RegisterID::eax);
+ push(trustedImm32FromPtr(arg2));
+ push(trustedImm32FromPtr(arg1));
+ push(trustedImm32FromPtr(function));
+
+ move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::eax);
+ call(RegisterID::eax);
+}
+#endif // USE(MASM_PROBE)
+
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
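The moveDoubleToInts hunk above transfers the two 32-bit halves of a double's bit pattern into GPRs; the replacement sequence moves the low half into dest1, shifts the packed value right by 32 bits (clobbering src, per the new comment), and moves the high half into dest2. What it computes, expressed portably (illustrative only, not part of the diff):

    #include <cstdint>
    #include <cstring>

    void moveDoubleToInts(double src, uint32_t& dest1, uint32_t& dest2)
    {
        uint64_t bits;
        std::memcpy(&bits, &src, sizeof bits); // IEEE-754 bit pattern of src
        dest1 = static_cast<uint32_t>(bits);       // low 32 bits
        dest2 = static_cast<uint32_t>(bits >> 32); // high 32 bits
    }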
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp
index 0108ef4c0..0fab05fb5 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,120 +30,53 @@
namespace JSC {
-#if ENABLE(MASM_PROBE)
+#if USE(MASM_PROBE)
-#define INDENT printIndent(indentation)
-
-void MacroAssemblerX86Common::printCPURegisters(MacroAssemblerX86Common::CPUState& cpu, int indentation)
+void MacroAssemblerX86Common::ProbeContext::dumpCPURegisters(const char* indentation)
{
#if CPU(X86)
- #define PRINT_GPREGISTER(_type, _regName) { \
+ #define DUMP_GPREGISTER(_type, _regName) { \
int32_t value = reinterpret_cast<int32_t>(cpu._regName); \
- INDENT, dataLogF("%6s: 0x%08x %d\n", #_regName, value, value) ; \
+ dataLogF("%s %6s: 0x%08x %d\n", indentation, #_regName, value, value) ; \
}
#elif CPU(X86_64)
- #define PRINT_GPREGISTER(_type, _regName) { \
+ #define DUMP_GPREGISTER(_type, _regName) { \
int64_t value = reinterpret_cast<int64_t>(cpu._regName); \
- INDENT, dataLogF("%6s: 0x%016llx %lld\n", #_regName, value, value) ; \
+ dataLogF("%s %6s: 0x%016llx %lld\n", indentation, #_regName, value, value) ; \
}
#endif
- FOR_EACH_CPU_GPREGISTER(PRINT_GPREGISTER)
- FOR_EACH_CPU_SPECIAL_REGISTER(PRINT_GPREGISTER)
- #undef PRINT_GPREGISTER
+ FOR_EACH_CPU_GPREGISTER(DUMP_GPREGISTER)
+ FOR_EACH_CPU_SPECIAL_REGISTER(DUMP_GPREGISTER)
+ #undef DUMP_GPREGISTER
- #define PRINT_FPREGISTER(_type, _regName) { \
- uint64_t* u = reinterpret_cast<uint64_t*>(&cpu._regName); \
+ #define DUMP_FPREGISTER(_type, _regName) { \
+ uint32_t* u = reinterpret_cast<uint32_t*>(&cpu._regName); \
double* d = reinterpret_cast<double*>(&cpu._regName); \
- INDENT, dataLogF("%6s: 0x%016llx %.13g\n", #_regName, *u, *d); \
+ dataLogF("%s %6s: 0x%08x%08x 0x%08x%08x %12g %12g\n", \
+ indentation, #_regName, u[3], u[2], u[1], u[0], d[1], d[0]); \
}
- FOR_EACH_CPU_FPREGISTER(PRINT_FPREGISTER)
- #undef PRINT_FPREGISTER
+ FOR_EACH_CPU_FPREGISTER(DUMP_FPREGISTER)
+ #undef DUMP_FPREGISTER
}
-#undef INDENT
-
-void MacroAssemblerX86Common::printRegister(MacroAssemblerX86Common::CPUState& cpu, RegisterID regID)
+void MacroAssemblerX86Common::ProbeContext::dump(const char* indentation)
{
- const char* name = CPUState::registerName(regID);
- union {
- void* voidPtr;
- intptr_t intptrValue;
- } u;
- u.voidPtr = cpu.registerValue(regID);
- dataLogF("%s:<%p %ld>", name, u.voidPtr, u.intptrValue);
-}
+ if (!indentation)
+ indentation = "";
-void MacroAssemblerX86Common::printRegister(MacroAssemblerX86Common::CPUState& cpu, FPRegisterID regID)
-{
- const char* name = CPUState::registerName(regID);
- union {
- double doubleValue;
- uint64_t uint64Value;
- } u;
- u.doubleValue = cpu.registerValue(regID);
- dataLogF("%s:<0x%016llx %.13g>", name, u.uint64Value, u.doubleValue);
-}
+ dataLogF("%sProbeContext %p {\n", indentation, this);
+ dataLogF("%s probeFunction: %p\n", indentation, probeFunction);
+ dataLogF("%s arg1: %p %llu\n", indentation, arg1, reinterpret_cast<int64_t>(arg1));
+ dataLogF("%s arg2: %p %llu\n", indentation, arg2, reinterpret_cast<int64_t>(arg2));
+ dataLogF("%s cpu: {\n", indentation);
-extern "C" void ctiMasmProbeTrampoline();
+ dumpCPURegisters(indentation);
-// What code is emitted for the probe?
-// ==================================
-// We want to keep the size of the emitted probe invocation code as compact as
-// possible to minimize the perturbation to the JIT generated code. However,
-// we also need to preserve the CPU registers and set up the ProbeContext to be
-// passed to the user probe function.
-//
-// Hence, we do only the minimum here to preserve a scratch register (i.e. rax
-// in this case) and the stack pointer (i.e. rsp), and pass the probe arguments.
-// We'll let the ctiMasmProbeTrampoline handle the rest of the probe invocation
-// work i.e. saving the CPUState (and setting up the ProbeContext), calling the
-// user probe function, and restoring the CPUState before returning to JIT
-// generated code.
-//
-// What registers need to be saved?
-// ===============================
-// The registers are saved for 2 reasons:
-// 1. To preserve their state in the JITted code. This means that all registers
-// that are not callee saved needs to be saved. We also need to save the
-// condition code registers because the probe can be inserted between a test
-// and a branch.
-// 2. To allow the probe to inspect the values of the registers for debugging
-// purposes. This means all registers need to be saved.
-//
-// In summary, save everything. But for reasons stated above, we should do the
-// minimum here and let ctiMasmProbeTrampoline do the heavy lifting to save the
-// full set.
-//
-// What values are in the saved registers?
-// ======================================
-// Conceptually, the saved registers should contain values as if the probe
-// is not present in the JIT generated code. Hence, they should contain values
-// that are expected at the start of the instruction immediately following the
-// probe.
-//
-// Specifically, the saved stack pointer register will point to the stack
-// position before we push the ProbeContext frame. The saved rip will point to
-// the address of the instruction immediately following the probe.
-
-void MacroAssemblerX86Common::probe(MacroAssemblerX86Common::ProbeFunction function, void* arg1, void* arg2)
-{
- push(RegisterID::esp);
- push(RegisterID::eax);
- move(TrustedImmPtr(arg2), RegisterID::eax);
- push(RegisterID::eax);
- move(TrustedImmPtr(arg1), RegisterID::eax);
- push(RegisterID::eax);
- move(TrustedImmPtr(reinterpret_cast<void*>(function)), RegisterID::eax);
- push(RegisterID::eax);
- move(TrustedImmPtr(reinterpret_cast<void*>(ctiMasmProbeTrampoline)), RegisterID::eax);
- call(RegisterID::eax);
+ dataLogF("%s }\n", indentation);
+ dataLogF("%s}\n", indentation);
}
-#endif // ENABLE(MASM_PROBE)
-
-#if CPU(X86) && !OS(MAC_OS_X)
-MacroAssemblerX86Common::SSE2CheckState MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
-#endif
+#endif // USE(MASM_PROBE)
} // namespace JSC
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
index b6ae6fc6f..ac09eaca4 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,7 +33,7 @@
namespace JSC {
-class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler, MacroAssemblerX86Common> {
+class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
public:
#if CPU(X86_64)
static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
@@ -183,18 +183,6 @@ public:
and32(imm, dest);
}
- void countLeadingZeros32(RegisterID src, RegisterID dst)
- {
- m_assembler.bsr_rr(src, dst);
- Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
- move(TrustedImm32(32), dst);
-
- Jump skipNonZeroCase = jump();
- srcIsNonZero.link(this);
- xor32(TrustedImm32(0x1f), dst);
- skipNonZeroCase.link(this);
- }
-
void lshift32(RegisterID shift_amount, RegisterID dest)
{
ASSERT(shift_amount != dest);
@@ -460,7 +448,7 @@ public:
{
ASSERT(src != dst);
static const double negativeZeroConstant = -0.0;
- loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
+ loadDouble(&negativeZeroConstant, dst);
m_assembler.andnpd_rr(src, dst);
}
@@ -468,7 +456,7 @@ public:
{
ASSERT(src != dst);
static const double negativeZeroConstant = -0.0;
- loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
+ loadDouble(&negativeZeroConstant, dst);
m_assembler.xorpd_rr(src, dst);
}
@@ -537,12 +525,12 @@ public:
m_assembler.movzbl_mr(address.offset, address.base, dest);
}
- void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+ void load8Signed(BaseIndex address, RegisterID dest)
{
m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
}
- void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
+ void load8Signed(ImplicitAddress address, RegisterID dest)
{
m_assembler.movsbl_mr(address.offset, address.base, dest);
}
@@ -557,12 +545,12 @@ public:
m_assembler.movzwl_mr(address.offset, address.base, dest);
}
- void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+ void load16Signed(BaseIndex address, RegisterID dest)
{
m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
}
- void load16SignedExtendTo32(Address address, RegisterID dest)
+ void load16Signed(Address address, RegisterID dest)
{
m_assembler.movswl_mr(address.offset, address.base, dest);
}
@@ -696,13 +684,13 @@ public:
m_assembler.movsd_rr(src, dest);
}
- void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ void loadDouble(const void* address, FPRegisterID dest)
{
#if CPU(X86)
ASSERT(isSSE2Present());
- m_assembler.movsd_mr(address.m_value, dest);
+ m_assembler.movsd_mr(address, dest);
#else
- move(address, scratchRegister);
+ move(TrustedImmPtr(address), scratchRegister);
loadDouble(scratchRegister, dest);
#endif
}
@@ -921,17 +909,8 @@ public:
m_assembler.cvttsd2si_rr(src, dest);
// If the result is zero, it might have been -0.0, and the double comparison won't catch this!
-#if CPU(X86_64)
- if (negZeroCheck) {
- Jump valueIsNonZero = branchTest32(NonZero, dest);
- m_assembler.movmskpd_rr(src, scratchRegister);
- failureCases.append(branchTest32(NonZero, scratchRegister, TrustedImm32(1)));
- valueIsNonZero.link(this);
- }
-#else
if (negZeroCheck)
failureCases.append(branchTest32(Zero, dest));
-#endif
// Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
convertInt32ToDouble(dest, fpTemp);
@@ -1166,16 +1145,13 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- void test32(ResultCondition, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
{
if (mask.m_value == -1)
m_assembler.testl_rr(reg, reg);
- else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) { // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah.
- if (mask.m_value == 0xff)
- m_assembler.testb_rr(reg, reg);
- else
- m_assembler.testb_i8r(mask.m_value, reg);
- } else
+ else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah.
+ m_assembler.testb_i8r(mask.m_value, reg);
+ else
m_assembler.testl_i32r(mask.m_value, reg);
}
@@ -1186,7 +1162,7 @@ public:
Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
{
- test32(cond, reg, mask);
+ test32(reg, mask);
return branch(cond);
}
@@ -1490,14 +1466,28 @@ public:
return X86Assembler::maxJumpReplacementSize();
}
-#if ENABLE(MASM_PROBE)
- // Methods required by the MASM_PROBE mechanism as defined in
- // AbstractMacroAssembler.h.
- static void printCPURegisters(CPUState&, int indentation = 0);
- static void printRegister(CPUState&, RegisterID);
- static void printRegister(CPUState&, FPRegisterID);
- void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
-#endif // ENABLE(MASM_PROBE)
+#if USE(MASM_PROBE)
+ struct CPUState {
+ #define DECLARE_REGISTER(_type, _regName) \
+ _type _regName;
+ FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+ };
+
+ struct ProbeContext;
+ typedef void (*ProbeFunction)(struct ProbeContext*);
+
+ struct ProbeContext {
+ ProbeFunction probeFunction;
+ void* arg1;
+ void* arg2;
+ CPUState cpu;
+
+ void dump(const char* indentation = 0);
+ private:
+ void dumpCPURegisters(const char* indentation);
+ };
+#endif // USE(MASM_PROBE)
protected:
X86Assembler::Condition x86Condition(RelationalCondition cond)
@@ -1577,7 +1567,7 @@ private:
cpuid;
mov flags, edx;
}
-#elif COMPILER(GCC_OR_CLANG)
+#elif COMPILER(GCC)
asm (
"movl $0x1, %%eax;"
"pushl %%ebx;"
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index cee55f92e..4fbc5a3dd 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012, 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,9 +30,11 @@
#include "MacroAssemblerX86Common.h"
-#define REPTACH_OFFSET_CALL_R11 3
+#if USE(MASM_PROBE)
+#include <wtf/StdLibExtras.h>
+#endif
-inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; }
+#define REPTACH_OFFSET_CALL_R11 3
namespace JSC {
@@ -124,16 +126,6 @@ public:
move(TrustedImmPtr(address), scratchRegister);
store32(imm, scratchRegister);
}
-
- void store32(RegisterID source, void* address)
- {
- if (source == X86Registers::eax)
- m_assembler.movl_EAXm(address);
- else {
- move(TrustedImmPtr(address), scratchRegister);
- store32(source, scratchRegister);
- }
- }
void store8(TrustedImm32 imm, void* address)
{
@@ -147,74 +139,10 @@ public:
store8(reg, Address(scratchRegister));
}
-#if OS(WINDOWS)
- Call callWithSlowPathReturnType()
- {
- // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
- // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right,
- // rdx should contain the first argument, r8 should contain the second argument, and r9 should contain the third argument.
- // On return, rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
- // We then need to copy the 16 byte return value into rax and rdx, since JIT expects the return value to be split between the two.
- // It is assumed that the parameters are already shifted to the right, when entering this method.
- // Note: this implementation supports up to 3 parameters.
-
- // JIT relies on the CallerFrame (frame pointer) being put on the stack,
- // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
- // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer.
- store64(X86Registers::ebp, Address(X86Registers::esp, -16));
-
- // We also need to allocate the shadow space on the stack for the 4 parameter registers.
- // In addition, we need to allocate 16 bytes for the return value.
- // Also, we should allocate 16 bytes for the frame pointer, and return address (not populated).
- sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
-
- // The first parameter register should contain a pointer to the stack allocated space for the return value.
- move(X86Registers::esp, X86Registers::ecx);
- add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx);
-
- DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
- Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
-
- add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
-
- // Copy the return value into rax and rdx.
- load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx);
- load64(Address(X86Registers::eax), X86Registers::eax);
-
- ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
- return result;
- }
-#endif
-
Call call()
{
-#if OS(WINDOWS)
- // JIT relies on the CallerFrame (frame pointer) being put on the stack,
- // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
- // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer.
- store64(X86Registers::ebp, Address(X86Registers::esp, -16));
-
- // On Windows we need to copy the arguments that don't fit in registers to the stack location where the callee expects to find them.
- // We don't know the number of arguments at this point, so the arguments (5, 6, ...) should always be copied.
-
- // Copy argument 5
- load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister);
- store64(scratchRegister, Address(X86Registers::esp, -4 * static_cast<int32_t>(sizeof(int64_t))));
-
- // Copy argument 6
- load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister);
- store64(scratchRegister, Address(X86Registers::esp, -3 * static_cast<int32_t>(sizeof(int64_t))));
-
- // We also need to allocate the shadow space on the stack for the 4 parameter registers.
- // Also, we should allocate 16 bytes for the frame pointer, and return address (not populated).
- // In addition, we need to allocate 16 bytes for two more parameters, since the call can have up to 6 parameters.
- sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
-#endif
DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
-#if OS(WINDOWS)
- add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
-#endif
ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
return result;
}
@@ -291,10 +219,7 @@ public:
void add64(TrustedImm32 imm, Address address)
{
- if (imm.m_value == 1)
- m_assembler.incq_m(address.offset, address.base);
- else
- m_assembler.addq_im(imm.m_value, address.offset, address.base);
+ m_assembler.addq_im(imm.m_value, address.offset, address.base);
}
void add64(TrustedImm32 imm, AbsoluteAddress address)
@@ -334,11 +259,6 @@ public:
m_assembler.sarq_i8r(imm.m_value, dest);
}
- void urshift64(TrustedImm32 imm, RegisterID dest)
- {
- m_assembler.shrq_i8r(imm.m_value, dest);
- }
-
void mul64(RegisterID src, RegisterID dest)
{
m_assembler.imulq_rr(src, dest);
@@ -482,12 +402,8 @@ public:
void store64(TrustedImm64 imm, ImplicitAddress address)
{
- if (CAN_SIGN_EXTEND_32_64(imm.m_value))
- m_assembler.movq_i32m(static_cast<int>(imm.m_value), address.offset, address.base);
- else {
- move(imm, scratchRegister);
- store64(scratchRegister, address);
- }
+ move(imm, scratchRegister);
+ store64(scratchRegister, address);
}
void store64(TrustedImm64 imm, BaseIndex address)
@@ -697,18 +613,6 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- void abortWithReason(AbortReason reason)
- {
- move(TrustedImm32(reason), X86Registers::r11);
- breakpoint();
- }
-
- void abortWithReason(AbortReason reason, intptr_t misc)
- {
- move(TrustedImm64(misc), X86Registers::r10);
- abortWithReason(reason);
- }
-
ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
{
ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
@@ -723,13 +627,6 @@ public:
return DataLabelPtr(this);
}
- DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
- {
- padBeforePatch();
- m_assembler.movq_i64r(initialValue.m_value, dest);
- return DataLabelPtr(this);
- }
-
Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
dataLabel = moveWithPatch(initialRightValue, scratchRegister);
@@ -742,30 +639,12 @@ public:
return branch64(cond, left, scratchRegister);
}
- Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
- {
- padBeforePatch();
- m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister);
- dataLabel = DataLabel32(this);
- return branch32(cond, left, scratchRegister);
- }
-
DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
{
DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
store64(scratchRegister, address);
return label;
}
-
- PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
- {
- return PatchableJump(branch64(cond, reg, imm));
- }
-
- PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
- {
- return PatchableJump(branch64(cond, left, right));
- }
using MacroAssemblerX86Common::branch8;
Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
@@ -794,6 +673,7 @@ public:
}
static bool supportsFloatingPoint() { return true; }
+ // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
static bool supportsFloatingPointTruncate() { return true; }
static bool supportsFloatingPointSqrt() { return true; }
static bool supportsFloatingPointAbs() { return true; }
@@ -807,7 +687,6 @@ public:
static RegisterID scratchRegisterForBlinding() { return scratchRegister; }
static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
- static bool canJumpReplacePatchableBranch32WithPatch() { return true; }
static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
{
@@ -819,41 +698,41 @@ public:
return label.labelAtOffset(-totalBytes);
}
- static CodeLocationLabel startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32 label)
- {
- const int rexBytes = 1;
- const int opcodeBytes = 1;
- const int immediateBytes = 4;
- const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
- ASSERT(totalBytes >= maxJumpReplacementSize());
- return label.labelAtOffset(-totalBytes);
- }
-
static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
{
return startOfBranchPtrWithPatchOnRegister(label);
}
-
- static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
- {
- return startOfBranch32WithPatchOnRegister(label);
- }
static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
{
X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
}
- static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address, int32_t initialValue)
- {
- X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, scratchRegister);
- }
-
static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
{
X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
}
+#if USE(MASM_PROBE)
+ // This function emits code to preserve the CPUState (e.g. registers),
+ // call a user supplied probe function, and restore the CPUState before
+ // continuing with other JIT generated code.
+ //
+ // The user supplied probe function will be called with a single pointer to
+ // a ProbeContext struct (defined above) which contains, among other things,
+ // the preserved CPUState. This allows the user probe function to inspect
+ // the CPUState at that point in the JIT generated code.
+ //
+ // If the user probe function alters the register values in the ProbeContext,
+ // the altered values will be loaded into the CPU registers when the probe
+ // returns.
+ //
+ // The ProbeContext is stack allocated and is only valid for the duration
+ // of the call to the user probe function.
+
+ void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
+#endif // USE(MASM_PROBE)
+
private:
friend class LinkBuffer;
friend class RepatchBuffer;
@@ -875,8 +754,69 @@ private:
{
X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
}
+
+#if USE(MASM_PROBE)
+ inline TrustedImm64 trustedImm64FromPtr(void* ptr)
+ {
+ return TrustedImm64(TrustedImmPtr(ptr));
+ }
+
+ inline TrustedImm64 trustedImm64FromPtr(ProbeFunction function)
+ {
+ return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function)));
+ }
+
+ inline TrustedImm64 trustedImm64FromPtr(void (*function)())
+ {
+ return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function)));
+ }
+#endif
};
+#if USE(MASM_PROBE)
+
+extern "C" void ctiMasmProbeTrampoline();
+
+// What code is emitted for the probe?
+// ==================================
+// We want to keep the size of the emitted probe invocation code as compact as
+// possible to minimize the perturbation to the JIT generated code. However,
+// we also need to preserve the CPU registers and set up the ProbeContext to be
+// passed to the user probe function.
+//
+// Hence, we do only the minimum here to preserve a scratch register (i.e. rax
+// in this case) and the stack pointer (i.e. rsp), and pass the probe arguments.
+// We'll let the ctiMasmProbeTrampoline handle the rest of the probe invocation
+// work, i.e. saving the CPUState (and setting up the ProbeContext), calling the
+// user probe function, and restoring the CPUState before returning to JIT
+// generated code.
+//
+// What values are in the saved registers?
+// ======================================
+// Conceptually, the saved registers should contain values as if the probe
+// is not present in the JIT generated code. Hence, they should contain values
+// that are expected at the start of the instruction immediately following the
+// probe.
+//
+// Specifically, the saved stack pointer register will point to the stack
+// position before we push the ProbeContext frame. The saved rip will point to
+// the address of the instruction immediately following the probe.
+
+inline void MacroAssemblerX86_64::probe(MacroAssemblerX86_64::ProbeFunction function, void* arg1, void* arg2)
+{
+ push(RegisterID::esp);
+ push(RegisterID::eax);
+ move(trustedImm64FromPtr(arg2), RegisterID::eax);
+ push(RegisterID::eax);
+ move(trustedImm64FromPtr(arg1), RegisterID::eax);
+ push(RegisterID::eax);
+ move(trustedImm64FromPtr(function), RegisterID::eax);
+ push(RegisterID::eax);
+ move(trustedImm64FromPtr(ctiMasmProbeTrampoline), RegisterID::eax);
+ call(RegisterID::eax);
+}
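+
+// For orientation (a sketch, not a contract): assuming the 8-byte pushes
+// emitted above, the stack at entry to ctiMasmProbeTrampoline looks like:
+//
+//     [rsp + 0x00]  return address (pushed by the call)
+//     [rsp + 0x08]  user probe function
+//     [rsp + 0x10]  arg1
+//     [rsp + 0x18]  arg2
+//     [rsp + 0x20]  saved rax (clobbered as scratch by the moves above)
+//     [rsp + 0x28]  saved rsp (its value before the first push)
+//
+// The trampoline's exact expectations live in its own implementation, which
+// is not part of this diff.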
+#endif // USE(MASM_PROBE)
+
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
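For illustration, a user probe might look like the sketch below. ProbeContext
and ProbeFunction are declared earlier in this header, outside the hunks shown
above, so the member names used here (cpu.eax, arg1) are assumptions inferred
from the FOR_EACH_CPU_GPREGISTER list in X86Assembler.h:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical probe: dump the saved rax, then bump it by one. Because
    // the probe may mutate the ProbeContext, the altered value is reloaded
    // into rax when JIT generated code resumes.
    static void countingProbe(MacroAssemblerX86_64::ProbeContext* context)
    {
        std::printf("rax = %p, arg1 = %p\n", context->cpu.eax, context->arg1);
        context->cpu.eax = reinterpret_cast<void*>(
            reinterpret_cast<uintptr_t>(context->cpu.eax) + 1);
    }

    // While generating code (jit being a MacroAssemblerX86_64):
    //     jit.probe(countingProbe, someTag /* arg1 */, nullptr /* arg2 */);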
diff --git a/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h b/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h
deleted file mode 100644
index 39ed6fac5..000000000
--- a/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef MaxFrameExtentForSlowPathCall_h
-#define MaxFrameExtentForSlowPathCall_h
-
-#include "JSStack.h"
-#include "Register.h"
-#include "StackAlignment.h"
-#include <wtf/Assertions.h>
-
-namespace JSC {
-
-// The maxFrameExtentForSlowPathCall is the max amount of stack space (in bytes)
-// that can be used for outgoing args when calling a slow path C function
-// from JS code.
-
-#if !ENABLE(JIT)
-static const size_t maxFrameExtentForSlowPathCall = 0;
-
-#elif CPU(X86_64) && OS(WINDOWS)
-// 4 args in registers, but stack space needs to be allocated for all args.
-static const size_t maxFrameExtentForSlowPathCall = 64;
-
-#elif CPU(X86_64)
-// All args in registers.
-static const size_t maxFrameExtentForSlowPathCall = 0;
-
-#elif CPU(X86)
-// 7 args on stack (28 bytes).
-static const size_t maxFrameExtentForSlowPathCall = 40;
-
-#elif CPU(ARM64)
-// All args in registers.
-static const size_t maxFrameExtentForSlowPathCall = 0;
-
-#elif CPU(ARM)
-// First four args in registers, remaining 4 args on stack.
-static const size_t maxFrameExtentForSlowPathCall = 24;
-
-#elif CPU(SH4)
-// First four args in registers, remaining 4 args on stack.
-static const size_t maxFrameExtentForSlowPathCall = 24;
-
-#elif CPU(MIPS)
-// Though args are in registers, there needs to be space on the stack for all args.
-static const size_t maxFrameExtentForSlowPathCall = 40;
-
-#else
-#error "Unsupported CPU: need value for maxFrameExtentForSlowPathCall"
-
-#endif
-
-COMPILE_ASSERT(!(maxFrameExtentForSlowPathCall % sizeof(Register)), extent_must_be_in_multiples_of_registers);
-
-#if ENABLE(JIT)
-// Make sure that cfr - maxFrameExtentForSlowPathCall bytes will make the stack pointer aligned
-COMPILE_ASSERT((maxFrameExtentForSlowPathCall % 16) == 16 - sizeof(CallerFrameAndPC), extent_must_align_stack_from_callframe_pointer);
-#endif
-
-static const size_t maxFrameExtentForSlowPathCallInRegisters = maxFrameExtentForSlowPathCall / sizeof(Register);
-
-} // namespace JSC
-
-#endif // MaxFrameExtentForSlowPathCall_h
-
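As a sanity check of the two COMPILE_ASSERTs in this deleted header, the
arithmetic can be replayed standalone. The sizes below are assumptions:
sizeof(Register) is taken as 8 on every listed target, and CallerFrameAndPC
as two machine words (16 bytes on 64-bit targets, 8 on 32-bit ones):

    #include <cstddef>

    // extent must be a multiple of sizeof(Register), and
    // extent % 16 must equal 16 - sizeof(CallerFrameAndPC).
    constexpr bool extentOK(std::size_t extent, std::size_t registerSize,
                            std::size_t callerFrameAndPCSize)
    {
        return extent % registerSize == 0
            && extent % 16 == 16 - callerFrameAndPCSize;
    }

    static_assert(extentOK(64, 8, 16), "X86_64/Windows: 64 % 16 == 0 == 16 - 16");
    static_assert(extentOK(40, 8, 8),  "X86: 40 % 16 == 8 == 16 - 8");
    static_assert(extentOK(24, 8, 8),  "ARM/SH4: 24 % 16 == 8 == 16 - 8");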
diff --git a/Source/JavaScriptCore/assembler/RepatchBuffer.h b/Source/JavaScriptCore/assembler/RepatchBuffer.h
index 241ce14c7..41e950ad8 100644
--- a/Source/JavaScriptCore/assembler/RepatchBuffer.h
+++ b/Source/JavaScriptCore/assembler/RepatchBuffer.h
@@ -157,11 +157,6 @@ public:
{
return MacroAssembler::startOfPatchableBranchPtrWithPatchOnAddress(label);
}
-
- static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
- {
- return MacroAssembler::startOfPatchableBranch32WithPatchOnAddress(label);
- }
void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
{
@@ -181,11 +176,6 @@ public:
MacroAssembler::revertJumpReplacementToPatchableBranchPtrWithPatch(instructionStart, address, value);
}
- void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, MacroAssembler::Address address, int32_t value)
- {
- MacroAssembler::revertJumpReplacementToPatchableBranch32WithPatch(instructionStart, address, value);
- }
-
private:
CodeBlock* m_codeBlock;
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
diff --git a/Source/JavaScriptCore/assembler/X86Assembler.h b/Source/JavaScriptCore/assembler/X86Assembler.h
index da3181e58..1a43e206c 100644
--- a/Source/JavaScriptCore/assembler/X86Assembler.h
+++ b/Source/JavaScriptCore/assembler/X86Assembler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,89 +35,105 @@
#include <wtf/Assertions.h>
#include <wtf/Vector.h>
+#if USE(MASM_PROBE)
+#include <xmmintrin.h>
+#endif
+
namespace JSC {
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
namespace X86Registers {
+ typedef enum {
+ eax,
+ ecx,
+ edx,
+ ebx,
+ esp,
+ ebp,
+ esi,
+ edi,
-#define FOR_EACH_CPU_REGISTER(V) \
- FOR_EACH_CPU_GPREGISTER(V) \
- FOR_EACH_CPU_SPECIAL_REGISTER(V) \
- FOR_EACH_CPU_FPREGISTER(V)
-
-// The following are defined as pairs of the following values:
-// 1. type of the storage the JIT probe needs in order to save the register value.
-// 2. name of the register.
-#define FOR_EACH_CPU_GPREGISTER(V) \
- V(void*, eax) \
- V(void*, ecx) \
- V(void*, edx) \
- V(void*, ebx) \
- V(void*, esp) \
- V(void*, ebp) \
- V(void*, esi) \
- V(void*, edi) \
- FOR_EACH_X86_64_CPU_GPREGISTER(V)
-
-#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
- V(void*, eip) \
- V(void*, eflags) \
-
-// Note: the JITs only store double values in the FP registers.
-#define FOR_EACH_CPU_FPREGISTER(V) \
- V(double, xmm0) \
- V(double, xmm1) \
- V(double, xmm2) \
- V(double, xmm3) \
- V(double, xmm4) \
- V(double, xmm5) \
- V(double, xmm6) \
- V(double, xmm7) \
- FOR_EACH_X86_64_CPU_FPREGISTER(V)
+#if CPU(X86_64)
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+#endif
+ } RegisterID;
-#if CPU(X86)
+ typedef enum {
+ xmm0,
+ xmm1,
+ xmm2,
+ xmm3,
+ xmm4,
+ xmm5,
+ xmm6,
+ xmm7,
-#define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add.
-#define FOR_EACH_X86_64_CPU_FPREGISTER(V) // Nothing to add.
+#if CPU(X86_64)
+ xmm8,
+ xmm9,
+ xmm10,
+ xmm11,
+ xmm12,
+ xmm13,
+ xmm14,
+ xmm15,
+#endif
+ } XMMRegisterID;
+
+#if USE(MASM_PROBE)
+ #define FOR_EACH_CPU_REGISTER(V) \
+ FOR_EACH_CPU_GPREGISTER(V) \
+ FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ FOR_EACH_CPU_FPREGISTER(V)
+
+ #define FOR_EACH_CPU_GPREGISTER(V) \
+ V(void*, eax) \
+ V(void*, ebx) \
+ V(void*, ecx) \
+ V(void*, edx) \
+ V(void*, esi) \
+ V(void*, edi) \
+ V(void*, ebp) \
+ V(void*, esp) \
+ FOR_EACH_X86_64_CPU_GPREGISTER(V)
+
+ #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ V(void*, eip) \
+ V(void*, eflags) \
+
+ #define FOR_EACH_CPU_FPREGISTER(V) \
+ V(__m128, xmm0) \
+ V(__m128, xmm1) \
+ V(__m128, xmm2) \
+ V(__m128, xmm3) \
+ V(__m128, xmm4) \
+ V(__m128, xmm5) \
+ V(__m128, xmm6) \
+ V(__m128, xmm7)
+#if CPU(X86)
+ #define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add.
#elif CPU(X86_64)
-
-#define FOR_EACH_X86_64_CPU_GPREGISTER(V) \
- V(void*, r8) \
- V(void*, r9) \
- V(void*, r10) \
- V(void*, r11) \
- V(void*, r12) \
- V(void*, r13) \
- V(void*, r14) \
- V(void*, r15)
-
-#define FOR_EACH_X86_64_CPU_FPREGISTER(V) \
- V(double, xmm8) \
- V(double, xmm9) \
- V(double, xmm10) \
- V(double, xmm11) \
- V(double, xmm12) \
- V(double, xmm13) \
- V(double, xmm14) \
- V(double, xmm15)
-
+ #define FOR_EACH_X86_64_CPU_GPREGISTER(V) \
+ V(void*, r8) \
+ V(void*, r9) \
+ V(void*, r10) \
+ V(void*, r11) \
+ V(void*, r12) \
+ V(void*, r13) \
+ V(void*, r14) \
+ V(void*, r15)
#endif // CPU(X86_64)
-
-typedef enum {
- #define DECLARE_REGISTER(_type, _regName) _regName,
- FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
- #undef DECLARE_REGISTER
-} RegisterID;
-
-typedef enum {
- #define DECLARE_REGISTER(_type, _regName) _regName,
- FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
- #undef DECLARE_REGISTER
-} XMMRegisterID;
-
-} // namespace X86Register
+#endif // USE(MASM_PROBE)
+}
class X86Assembler {
public:
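A note on the X-macro scheme removed above: a single register list drives
every construct that must enumerate the registers, so the enums here, the
probe's CPUState fields, and any name tables stay in sync by construction.
A minimal self-contained sketch of the pattern (names are illustrative only):

    #define FOR_EACH_DEMO_GPREGISTER(V) \
        V(void*, eax) \
        V(void*, ecx) \
        V(void*, edx)

    // One expansion yields the enum...
    typedef enum {
    #define DECLARE_REGISTER(_type, _regName) _regName,
        FOR_EACH_DEMO_GPREGISTER(DECLARE_REGISTER)
    #undef DECLARE_REGISTER
    } DemoRegisterID;

    // ...another yields a parallel name table, kept in sync for free.
    static const char* const demoRegisterNames[] = {
    #define REGISTER_NAME(_type, _regName) #_regName,
        FOR_EACH_DEMO_GPREGISTER(REGISTER_NAME)
    #undef REGISTER_NAME
    };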
@@ -172,23 +188,18 @@ private:
typedef enum {
OP_ADD_EvGv = 0x01,
OP_ADD_GvEv = 0x03,
- OP_ADD_EAXIv = 0x05,
OP_OR_EvGv = 0x09,
OP_OR_GvEv = 0x0B,
- OP_OR_EAXIv = 0x0D,
OP_2BYTE_ESCAPE = 0x0F,
OP_AND_EvGv = 0x21,
OP_AND_GvEv = 0x23,
OP_SUB_EvGv = 0x29,
OP_SUB_GvEv = 0x2B,
- OP_SUB_EAXIv = 0x2D,
PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
OP_XOR_EvGv = 0x31,
OP_XOR_GvEv = 0x33,
- OP_XOR_EAXIv = 0x35,
OP_CMP_EvGv = 0x39,
OP_CMP_GvEv = 0x3B,
- OP_CMP_EAXIv = 0x3D,
#if CPU(X86_64)
PRE_REX = 0x40,
#endif
@@ -213,12 +224,9 @@ private:
OP_LEA = 0x8D,
OP_GROUP1A_Ev = 0x8F,
OP_NOP = 0x90,
- OP_XCHG_EAX = 0x90,
OP_CDQ = 0x99,
OP_MOV_EAXOv = 0xA1,
OP_MOV_OvEAX = 0xA3,
- OP_TEST_ALIb = 0xA8,
- OP_TEST_EAXIv = 0xA9,
OP_MOV_EAXIv = 0xB8,
OP_GROUP2_EvIb = 0xC1,
OP_RET = 0xC3,
@@ -253,7 +261,6 @@ private:
OP2_CVTSS2SD_VsdWsd = 0x5A,
OP2_SUBSD_VsdWsd = 0x5C,
OP2_DIVSD_VsdWsd = 0x5E,
- OP2_MOVMSKPD_VdEd = 0x50,
OP2_SQRTSD_VsdWsd = 0x51,
OP2_ANDNPD_VpdWpd = 0x55,
OP2_XORPD_VpdWpd = 0x57,
@@ -264,7 +271,6 @@ private:
OP2_3BYTE_ESCAPE = 0xAE,
OP2_IMUL_GvEv = 0xAF,
OP2_MOVZX_GvEb = 0xB6,
- OP2_BSR = 0xBD,
OP2_MOVSX_GvEb = 0xBE,
OP2_MOVZX_GvEw = 0xB7,
OP2_MOVSX_GvEw = 0xBF,
@@ -407,10 +413,7 @@ public:
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
m_formatter.immediate8(imm);
} else {
- if (dst == X86Registers::eax)
- m_formatter.oneByteOp(OP_ADD_EAXIv);
- else
- m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
m_formatter.immediate32(imm);
}
}
@@ -443,10 +446,7 @@ public:
m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
m_formatter.immediate8(imm);
} else {
- if (dst == X86Registers::eax)
- m_formatter.oneByteOp64(OP_ADD_EAXIv);
- else
- m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
m_formatter.immediate32(imm);
}
}
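The branches removed in this hunk (and in the matching OR/SUB/XOR/CMP/TEST
hunks below) selected x86's accumulator short forms, which save one byte when
the destination is eax/rax. A sketch of the two encodings of add eax, 0x1234:

    // 05 34 12 00 00       add eax, 0x1234   (OP_ADD_EAXIv + imm32: 5 bytes)
    // 81 c0 34 12 00 00    add eax, 0x1234   (OP_GROUP1_EvIz, /0 = ADD, + imm32: 6 bytes)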
@@ -562,11 +562,6 @@ public:
{
m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, dst);
}
-
- void incq_m(int offset, RegisterID base)
- {
- m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, base, offset);
- }
#endif // CPU(X86_64)
void negl_r(RegisterID dst)
@@ -617,10 +612,7 @@ public:
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
m_formatter.immediate8(imm);
} else {
- if (dst == X86Registers::eax)
- m_formatter.oneByteOp(OP_OR_EAXIv);
- else
- m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
m_formatter.immediate32(imm);
}
}
@@ -648,10 +640,7 @@ public:
m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
m_formatter.immediate8(imm);
} else {
- if (dst == X86Registers::eax)
- m_formatter.oneByteOp64(OP_OR_EAXIv);
- else
- m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
m_formatter.immediate32(imm);
}
}
@@ -694,10 +683,7 @@ public:
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
m_formatter.immediate8(imm);
} else {
- if (dst == X86Registers::eax)
- m_formatter.oneByteOp(OP_SUB_EAXIv);
- else
- m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
m_formatter.immediate32(imm);
}
}
@@ -725,10 +711,7 @@ public:
m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
m_formatter.immediate8(imm);
} else {
- if (dst == X86Registers::eax)
- m_formatter.oneByteOp64(OP_SUB_EAXIv);
- else
- m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
m_formatter.immediate32(imm);
}
}
@@ -777,10 +760,7 @@ public:
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
m_formatter.immediate8(imm);
} else {
- if (dst == X86Registers::eax)
- m_formatter.oneByteOp(OP_XOR_EAXIv);
- else
- m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
m_formatter.immediate32(imm);
}
}
@@ -797,10 +777,7 @@ public:
m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
m_formatter.immediate8(imm);
} else {
- if (dst == X86Registers::eax)
- m_formatter.oneByteOp64(OP_XOR_EAXIv);
- else
- m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
m_formatter.immediate32(imm);
}
}
@@ -822,11 +799,6 @@ public:
#endif
- void bsr_rr(RegisterID src, RegisterID dst)
- {
- m_formatter.twoByteOp(OP2_BSR, dst, src);
- }
-
void sarl_i8r(int imm, RegisterID dst)
{
if (imm == 1)
@@ -888,16 +860,6 @@ public:
}
}
- void shrq_i8r(int imm, RegisterID dst)
- {
- if (imm == 1)
- m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
- else {
- m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
- m_formatter.immediate8(imm);
- }
- }
-
void shlq_i8r(int imm, RegisterID dst)
{
if (imm == 1)
@@ -960,10 +922,7 @@ public:
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
m_formatter.immediate8(imm);
} else {
- if (dst == X86Registers::eax)
- m_formatter.oneByteOp(OP_CMP_EAXIv);
- else
- m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
m_formatter.immediate32(imm);
}
}
@@ -1049,10 +1008,7 @@ public:
m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
m_formatter.immediate8(imm);
} else {
- if (dst == X86Registers::eax)
- m_formatter.oneByteOp64(OP_CMP_EAXIv);
- else
- m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
m_formatter.immediate32(imm);
}
}
@@ -1135,10 +1091,7 @@ public:
void testl_i32r(int imm, RegisterID dst)
{
- if (dst == X86Registers::eax)
- m_formatter.oneByteOp(OP_TEST_EAXIv);
- else
- m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
m_formatter.immediate32(imm);
}
@@ -1192,10 +1145,7 @@ public:
void testq_i32r(int imm, RegisterID dst)
{
- if (dst == X86Registers::eax)
- m_formatter.oneByteOp64(OP_TEST_EAXIv);
- else
- m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
m_formatter.immediate32(imm);
}
@@ -1220,10 +1170,7 @@ public:
void testb_i8r(int imm, RegisterID dst)
{
- if (dst == X86Registers::eax)
- m_formatter.oneByteOp(OP_TEST_ALIb);
- else
- m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
+ m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
m_formatter.immediate8(imm);
}
@@ -1266,23 +1213,13 @@ public:
void xchgl_rr(RegisterID src, RegisterID dst)
{
- if (src == X86Registers::eax)
- m_formatter.oneByteOp(OP_XCHG_EAX, dst);
- else if (dst == X86Registers::eax)
- m_formatter.oneByteOp(OP_XCHG_EAX, src);
- else
- m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
+ m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
}
#if CPU(X86_64)
void xchgq_rr(RegisterID src, RegisterID dst)
{
- if (src == X86Registers::eax)
- m_formatter.oneByteOp64(OP_XCHG_EAX, dst);
- else if (dst == X86Registers::eax)
- m_formatter.oneByteOp64(OP_XCHG_EAX, src);
- else
- m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
+ m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
}
#endif
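The removed xchg fast path has a wrinkle worth noting: the one-byte
accumulator form is encoded as 0x90 | reg, which is why the removed
OP_XCHG_EAX shared its 0x90 value with OP_NOP in the opcode enum above (nop
is literally xchg eax, eax). A sketch of the two encodings for swapping eax
and ecx:

    // 91       xchg eax, ecx   (OP_XCHG_EAX | ecx: 1 byte)
    // 87 c8    xchg eax, ecx   (OP_XCHG_EvGv + ModRM: 2 bytes)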
@@ -1393,7 +1330,7 @@ public:
{
m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
}
-
+
void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
{
m_formatter.prefix(PRE_OPERAND_SIZE);
@@ -1806,12 +1743,6 @@ public:
}
#if CPU(X86_64)
- void movmskpd_rr(XMMRegisterID src, RegisterID dst)
- {
- m_formatter.prefix(PRE_SSE_66);
- m_formatter.twoByteOp64(OP2_MOVMSKPD_VdEd, dst, (RegisterID)src);
- }
-
void movq_rr(XMMRegisterID src, RegisterID dst)
{
m_formatter.prefix(PRE_SSE_66);
@@ -2123,9 +2054,9 @@ public:
#if CPU(X86_64)
static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst)
{
- const unsigned instructionSize = 10; // REX.W MOV IMM64
const int rexBytes = 1;
const int opcodeBytes = 1;
+ ASSERT(rexBytes + opcodeBytes <= maxJumpReplacementSize());
uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
ptr[0] = PRE_REX | (1 << 3) | (dst >> 3);
ptr[1] = OP_MOV_EAXIv | (dst & 7);
@@ -2135,33 +2066,11 @@ public:
uint8_t asBytes[8];
} u;
u.asWord = imm;
- for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
- ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
- }
-
- static void revertJumpTo_movl_i32r(void* instructionStart, int32_t imm, RegisterID dst)
- {
- // We only revert jumps on inline caches, and inline caches always use the scratch register (r11).
- // FIXME: If the above is ever false then we need to make this smarter with respect to emitting
- // the REX byte.
- ASSERT(dst == X86Registers::r11);
- const unsigned instructionSize = 6; // REX MOV IMM32
- const int rexBytes = 1;
- const int opcodeBytes = 1;
- uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
- ptr[0] = PRE_REX | (dst >> 3);
- ptr[1] = OP_MOV_EAXIv | (dst & 7);
-
- union {
- uint32_t asWord;
- uint8_t asBytes[4];
- } u;
- u.asWord = imm;
- for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
+ for (unsigned i = rexBytes + opcodeBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
}
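+
+    // For the inline-cache scratch register r11 (register number 11), the
+    // two bytes written above are PRE_REX | (1 << 3) | (11 >> 3) == 0x49 and
+    // OP_MOV_EAXIv | (11 & 7) == 0xbb, i.e. the start of a 10-byte movabs:
+    //
+    //     49 bb xx xx xx xx xx xx xx xx    movabs $imm64, %r11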
#endif
-
+
static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst)
{
const int opcodeBytes = 1;
@@ -2256,49 +2165,10 @@ public:
{
m_formatter.oneByteOp(OP_NOP);
}
-
+
static void fillNops(void* base, size_t size)
{
-#if CPU(X86_64)
- static const uint8_t nops[10][10] = {
- // nop
- {0x90},
- // xchg %ax,%ax
- {0x66, 0x90},
- // nopl (%[re]ax)
- {0x0f, 0x1f, 0x00},
- // nopl 8(%[re]ax)
- {0x0f, 0x1f, 0x40, 0x08},
- // nopl 8(%[re]ax,%[re]ax,1)
- {0x0f, 0x1f, 0x44, 0x00, 0x08},
- // nopw 8(%[re]ax,%[re]ax,1)
- {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x08},
- // nopl 512(%[re]ax)
- {0x0f, 0x1f, 0x80, 0x00, 0x02, 0x00, 0x00},
- // nopl 512(%[re]ax,%[re]ax,1)
- {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
- // nopw 512(%[re]ax,%[re]ax,1)
- {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
- // nopw %cs:512(%[re]ax,%[re]ax,1)
- {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}
- };
-
- uint8_t* where = reinterpret_cast<uint8_t*>(base);
- while (size) {
- unsigned nopSize = static_cast<unsigned>(std::min<size_t>(size, 15));
- unsigned numPrefixes = nopSize <= 10 ? 0 : nopSize - 10;
- for (unsigned i = 0; i != numPrefixes; ++i)
- *where++ = 0x66;
-
- unsigned nopRest = nopSize - numPrefixes;
- for (unsigned i = 0; i != nopRest; ++i)
- *where++ = nops[nopRest-1][i];
-
- size -= nopSize;
- }
-#else
memset(base, OP_NOP, size);
-#endif
}
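+
+    // The deleted table above implemented the conventional multi-byte NOP
+    // fill: pad a gap with as few instructions as possible so the padding
+    // decodes cheaply. For a 7-byte gap, for instance:
+    //
+    //     0f 1f 80 00 02 00 00    nopl 512(%rax)   (one instruction)
+    //
+    // whereas the memset above fills the same gap with seven one-byte 0x90
+    // nops.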
// This is a no-op on x86