author      Lorry Tar Creator <lorry-tar-importer@lorry>    2015-05-20 09:56:07 +0000
committer   Lorry Tar Creator <lorry-tar-importer@lorry>    2015-05-20 09:56:07 +0000
commit      41386e9cb918eed93b3f13648cbef387e371e451 (patch)
tree        a97f9d7bd1d9d091833286085f72da9d83fd0606 /Source/JavaScriptCore/assembler/X86Assembler.h
parent      e15dd966d523731101f70ccf768bba12435a0208 (diff)
download    WebKitGtk-tarball-41386e9cb918eed93b3f13648cbef387e371e451.tar.gz
webkitgtk-2.4.9
Diffstat (limited to 'Source/JavaScriptCore/assembler/X86Assembler.h')
-rw-r--r--  Source/JavaScriptCore/assembler/X86Assembler.h  348
1 file changed, 109 insertions, 239 deletions
diff --git a/Source/JavaScriptCore/assembler/X86Assembler.h b/Source/JavaScriptCore/assembler/X86Assembler.h
index da3181e58..1a43e206c 100644
--- a/Source/JavaScriptCore/assembler/X86Assembler.h
+++ b/Source/JavaScriptCore/assembler/X86Assembler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -35,89 +35,105 @@
 #include <wtf/Assertions.h>
 #include <wtf/Vector.h>
 
+#if USE(MASM_PROBE)
+#include <xmmintrin.h>
+#endif
+
 namespace JSC {
 
 inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
 
 namespace X86Registers {
+    typedef enum {
+        eax,
+        ecx,
+        edx,
+        ebx,
+        esp,
+        ebp,
+        esi,
+        edi,
-#define FOR_EACH_CPU_REGISTER(V) \
-    FOR_EACH_CPU_GPREGISTER(V) \
-    FOR_EACH_CPU_SPECIAL_REGISTER(V) \
-    FOR_EACH_CPU_FPREGISTER(V)
-
-// The following are defined as pairs of the following value:
-// 1. type of the storage needed to save the register value by the JIT probe.
-// 2. name of the register.
-#define FOR_EACH_CPU_GPREGISTER(V) \
-    V(void*, eax) \
-    V(void*, ecx) \
-    V(void*, edx) \
-    V(void*, ebx) \
-    V(void*, esp) \
-    V(void*, ebp) \
-    V(void*, esi) \
-    V(void*, edi) \
-    FOR_EACH_X86_64_CPU_GPREGISTER(V)
-
-#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
-    V(void*, eip) \
-    V(void*, eflags) \
-
-// Note: the JITs only stores double values in the FP registers.
-#define FOR_EACH_CPU_FPREGISTER(V) \
-    V(double, xmm0) \
-    V(double, xmm1) \
-    V(double, xmm2) \
-    V(double, xmm3) \
-    V(double, xmm4) \
-    V(double, xmm5) \
-    V(double, xmm6) \
-    V(double, xmm7) \
-    FOR_EACH_X86_64_CPU_FPREGISTER(V)
+#if CPU(X86_64)
+        r8,
+        r9,
+        r10,
+        r11,
+        r12,
+        r13,
+        r14,
+        r15,
+#endif
+    } RegisterID;
-#if CPU(X86)
+    typedef enum {
+        xmm0,
+        xmm1,
+        xmm2,
+        xmm3,
+        xmm4,
+        xmm5,
+        xmm6,
+        xmm7,
-#define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add.
-#define FOR_EACH_X86_64_CPU_FPREGISTER(V) // Nothing to add.
+#if CPU(X86_64)
+        xmm8,
+        xmm9,
+        xmm10,
+        xmm11,
+        xmm12,
+        xmm13,
+        xmm14,
+        xmm15,
+#endif
+    } XMMRegisterID;
+
+#if USE(MASM_PROBE)
+    #define FOR_EACH_CPU_REGISTER(V) \
+        FOR_EACH_CPU_GPREGISTER(V) \
+        FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+        FOR_EACH_CPU_FPREGISTER(V)
+
+    #define FOR_EACH_CPU_GPREGISTER(V) \
+        V(void*, eax) \
+        V(void*, ebx) \
+        V(void*, ecx) \
+        V(void*, edx) \
+        V(void*, esi) \
+        V(void*, edi) \
+        V(void*, ebp) \
+        V(void*, esp) \
+        FOR_EACH_X86_64_CPU_GPREGISTER(V)
+
+    #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+        V(void*, eip) \
+        V(void*, eflags) \
+
+    #define FOR_EACH_CPU_FPREGISTER(V) \
+        V(__m128, xmm0) \
+        V(__m128, xmm1) \
+        V(__m128, xmm2) \
+        V(__m128, xmm3) \
+        V(__m128, xmm4) \
+        V(__m128, xmm5) \
+        V(__m128, xmm6) \
+        V(__m128, xmm7)
+#if CPU(X86)
+    #define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add.
 #elif CPU(X86_64)
-
-#define FOR_EACH_X86_64_CPU_GPREGISTER(V) \
-    V(void*, r8) \
-    V(void*, r9) \
-    V(void*, r10) \
-    V(void*, r11) \
-    V(void*, r12) \
-    V(void*, r13) \
-    V(void*, r14) \
-    V(void*, r15)
-
-#define FOR_EACH_X86_64_CPU_FPREGISTER(V) \
-    V(double, xmm8) \
-    V(double, xmm9) \
-    V(double, xmm10) \
-    V(double, xmm11) \
-    V(double, xmm12) \
-    V(double, xmm13) \
-    V(double, xmm14) \
-    V(double, xmm15)
-
+    #define FOR_EACH_X86_64_CPU_GPREGISTER(V) \
+        V(void*, r8) \
+        V(void*, r9) \
+        V(void*, r10) \
+        V(void*, r11) \
+        V(void*, r12) \
+        V(void*, r13) \
+        V(void*, r14) \
+        V(void*, r15)
 #endif // CPU(X86_64)
-
-typedef enum {
-    #define DECLARE_REGISTER(_type, _regName) _regName,
-    FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
-    #undef DECLARE_REGISTER
-} RegisterID;
-
-typedef enum {
-    #define DECLARE_REGISTER(_type, _regName) _regName,
-    FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
-    #undef DECLARE_REGISTER
-} XMMRegisterID;
-
-} // namespace X86Register
+#endif // USE(MASM_PROBE)
+}
 
 class X86Assembler {
 public:
@@ -172,23 +188,18 @@ private:
     typedef enum {
         OP_ADD_EvGv = 0x01,
         OP_ADD_GvEv = 0x03,
-        OP_ADD_EAXIv = 0x05,
         OP_OR_EvGv = 0x09,
         OP_OR_GvEv = 0x0B,
-        OP_OR_EAXIv = 0x0D,
         OP_2BYTE_ESCAPE = 0x0F,
         OP_AND_EvGv = 0x21,
         OP_AND_GvEv = 0x23,
         OP_SUB_EvGv = 0x29,
         OP_SUB_GvEv = 0x2B,
-        OP_SUB_EAXIv = 0x2D,
         PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
         OP_XOR_EvGv = 0x31,
         OP_XOR_GvEv = 0x33,
-        OP_XOR_EAXIv = 0x35,
         OP_CMP_EvGv = 0x39,
         OP_CMP_GvEv = 0x3B,
-        OP_CMP_EAXIv = 0x3D,
 #if CPU(X86_64)
         PRE_REX = 0x40,
 #endif
@@ -213,12 +224,9 @@ private:
         OP_LEA = 0x8D,
         OP_GROUP1A_Ev = 0x8F,
         OP_NOP = 0x90,
-        OP_XCHG_EAX = 0x90,
         OP_CDQ = 0x99,
         OP_MOV_EAXOv = 0xA1,
         OP_MOV_OvEAX = 0xA3,
-        OP_TEST_ALIb = 0xA8,
-        OP_TEST_EAXIv = 0xA9,
         OP_MOV_EAXIv = 0xB8,
         OP_GROUP2_EvIb = 0xC1,
         OP_RET = 0xC3,
@@ -253,7 +261,6 @@ private:
         OP2_CVTSS2SD_VsdWsd = 0x5A,
         OP2_SUBSD_VsdWsd = 0x5C,
         OP2_DIVSD_VsdWsd = 0x5E,
-        OP2_MOVMSKPD_VdEd = 0x50,
         OP2_SQRTSD_VsdWsd = 0x51,
         OP2_ANDNPD_VpdWpd = 0x55,
         OP2_XORPD_VpdWpd = 0x57,
@@ -264,7 +271,6 @@ private:
         OP2_3BYTE_ESCAPE = 0xAE,
         OP2_IMUL_GvEv = 0xAF,
         OP2_MOVZX_GvEb = 0xB6,
-        OP2_BSR = 0xBD,
         OP2_MOVSX_GvEb = 0xBE,
         OP2_MOVZX_GvEw = 0xB7,
         OP2_MOVSX_GvEw = 0xBF,
@@ -407,10 +413,7 @@ public:
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
             m_formatter.immediate8(imm);
         } else {
-            if (dst == X86Registers::eax)
-                m_formatter.oneByteOp(OP_ADD_EAXIv);
-            else
-                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -443,10 +446,7 @@ public:
             m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
             m_formatter.immediate8(imm);
         } else {
-            if (dst == X86Registers::eax)
-                m_formatter.oneByteOp64(OP_ADD_EAXIv);
-            else
-                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -562,11 +562,6 @@ public:
     {
         m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, dst);
     }
-
-    void incq_m(int offset, RegisterID base)
-    {
-        m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, base, offset);
-    }
 #endif // CPU(X86_64)
 
     void negl_r(RegisterID dst)
@@ -617,10 +612,7 @@ public:
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
             m_formatter.immediate8(imm);
         } else {
-            if (dst == X86Registers::eax)
-                m_formatter.oneByteOp(OP_OR_EAXIv);
-            else
-                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -648,10 +640,7 @@ public:
             m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
             m_formatter.immediate8(imm);
         } else {
-            if (dst == X86Registers::eax)
-                m_formatter.oneByteOp64(OP_OR_EAXIv);
-            else
-                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -694,10 +683,7 @@ public:
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
             m_formatter.immediate8(imm);
         } else {
-            if (dst == X86Registers::eax)
-                m_formatter.oneByteOp(OP_SUB_EAXIv);
-            else
-                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -725,10 +711,7 @@ public:
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
             m_formatter.immediate8(imm);
         } else {
-            if (dst == X86Registers::eax)
-                m_formatter.oneByteOp64(OP_SUB_EAXIv);
-            else
-                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -777,10 +760,7 @@ public:
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
             m_formatter.immediate8(imm);
         } else {
-            if (dst == X86Registers::eax)
-                m_formatter.oneByteOp(OP_XOR_EAXIv);
-            else
-                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -797,10 +777,7 @@ public:
             m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
             m_formatter.immediate8(imm);
         } else {
-            if (dst == X86Registers::eax)
-                m_formatter.oneByteOp64(OP_XOR_EAXIv);
-            else
-                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -822,11 +799,6 @@ public:
 #endif
 
-    void bsr_rr(RegisterID src, RegisterID dst)
-    {
-        m_formatter.twoByteOp(OP2_BSR, dst, src);
-    }
-
     void sarl_i8r(int imm, RegisterID dst)
     {
         if (imm == 1)
@@ -888,16 +860,6 @@ public:
         }
     }
 
-    void shrq_i8r(int imm, RegisterID dst)
-    {
-        if (imm == 1)
-            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
-        else {
-            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
-            m_formatter.immediate8(imm);
-        }
-    }
-
     void shlq_i8r(int imm, RegisterID dst)
     {
         if (imm == 1)
@@ -960,10 +922,7 @@ public:
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
             m_formatter.immediate8(imm);
         } else {
-            if (dst == X86Registers::eax)
-                m_formatter.oneByteOp(OP_CMP_EAXIv);
-            else
-                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -1049,10 +1008,7 @@ public:
             m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
             m_formatter.immediate8(imm);
         } else {
-            if (dst == X86Registers::eax)
-                m_formatter.oneByteOp64(OP_CMP_EAXIv);
-            else
-                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -1135,10 +1091,7 @@ public:
     void testl_i32r(int imm, RegisterID dst)
     {
-        if (dst == X86Registers::eax)
-            m_formatter.oneByteOp(OP_TEST_EAXIv);
-        else
-            m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
         m_formatter.immediate32(imm);
     }
@@ -1192,10 +1145,7 @@ public:
     void testq_i32r(int imm, RegisterID dst)
     {
-        if (dst == X86Registers::eax)
-            m_formatter.oneByteOp64(OP_TEST_EAXIv);
-        else
-            m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
         m_formatter.immediate32(imm);
     }
@@ -1220,10 +1170,7 @@ public:
     void testb_i8r(int imm, RegisterID dst)
     {
-        if (dst == X86Registers::eax)
-            m_formatter.oneByteOp(OP_TEST_ALIb);
-        else
-            m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
+        m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
         m_formatter.immediate8(imm);
     }
@@ -1266,23 +1213,13 @@ public:
     void xchgl_rr(RegisterID src, RegisterID dst)
     {
-        if (src == X86Registers::eax)
-            m_formatter.oneByteOp(OP_XCHG_EAX, dst);
-        else if (dst == X86Registers::eax)
-            m_formatter.oneByteOp(OP_XCHG_EAX, src);
-        else
-            m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
+        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
     }
 
 #if CPU(X86_64)
     void xchgq_rr(RegisterID src, RegisterID dst)
     {
-        if (src == X86Registers::eax)
-            m_formatter.oneByteOp64(OP_XCHG_EAX, dst);
-        else if (dst == X86Registers::eax)
-            m_formatter.oneByteOp64(OP_XCHG_EAX, src);
-        else
-            m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
+        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
     }
 #endif
@@ -1393,7 +1330,7 @@ public:
     {
         m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
     }
-    
+
     void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
     {
         m_formatter.prefix(PRE_OPERAND_SIZE);
@@ -1806,12 +1743,6 @@ public:
     }
 
 #if CPU(X86_64)
-    void movmskpd_rr(XMMRegisterID src, RegisterID dst)
-    {
-        m_formatter.prefix(PRE_SSE_66);
-        m_formatter.twoByteOp64(OP2_MOVMSKPD_VdEd, dst, (RegisterID)src);
-    }
-
     void movq_rr(XMMRegisterID src, RegisterID dst)
     {
         m_formatter.prefix(PRE_SSE_66);
@@ -2123,9 +2054,9 @@ public:
 #if CPU(X86_64)
     static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst)
     {
-        const unsigned instructionSize = 10; // REX.W MOV IMM64
         const int rexBytes = 1;
         const int opcodeBytes = 1;
+        ASSERT(rexBytes + opcodeBytes <= maxJumpReplacementSize());
         uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
         ptr[0] = PRE_REX | (1 << 3) | (dst >> 3);
         ptr[1] = OP_MOV_EAXIv | (dst & 7);
@@ -2135,33 +2066,11 @@ public:
             uint8_t asBytes[8];
         } u;
         u.asWord = imm;
-        for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
-            ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
-    }
-
-    static void revertJumpTo_movl_i32r(void* instructionStart, int32_t imm, RegisterID dst)
-    {
-        // We only revert jumps on inline caches, and inline caches always use the scratch register (r11).
-        // FIXME: If the above is ever false then we need to make this smarter with respect to emitting
-        // the REX byte.
-        ASSERT(dst == X86Registers::r11);
-        const unsigned instructionSize = 6; // REX MOV IMM32
-        const int rexBytes = 1;
-        const int opcodeBytes = 1;
-        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
-        ptr[0] = PRE_REX | (dst >> 3);
-        ptr[1] = OP_MOV_EAXIv | (dst & 7);
-
-        union {
-            uint32_t asWord;
-            uint8_t asBytes[4];
-        } u;
-        u.asWord = imm;
-        for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
+        for (unsigned i = rexBytes + opcodeBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
             ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
     }
 #endif
-    
+
     static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst)
     {
         const int opcodeBytes = 1;
@@ -2256,49 +2165,10 @@ public:
     {
         m_formatter.oneByteOp(OP_NOP);
     }
-    
+
     static void fillNops(void* base, size_t size)
     {
-#if CPU(X86_64)
-        static const uint8_t nops[10][10] = {
-            // nop
-            {0x90},
-            // xchg %ax,%ax
-            {0x66, 0x90},
-            // nopl (%[re]ax)
-            {0x0f, 0x1f, 0x00},
-            // nopl 8(%[re]ax)
-            {0x0f, 0x1f, 0x40, 0x08},
-            // nopl 8(%[re]ax,%[re]ax,1)
-            {0x0f, 0x1f, 0x44, 0x00, 0x08},
-            // nopw 8(%[re]ax,%[re]ax,1)
-            {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x08},
-            // nopl 512(%[re]ax)
-            {0x0f, 0x1f, 0x80, 0x00, 0x02, 0x00, 0x00},
-            // nopl 512(%[re]ax,%[re]ax,1)
-            {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
-            // nopw 512(%[re]ax,%[re]ax,1)
-            {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
-            // nopw %cs:512(%[re]ax,%[re]ax,1)
-            {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}
-        };
-
-        uint8_t* where = reinterpret_cast<uint8_t*>(base);
-        while (size) {
-            unsigned nopSize = static_cast<unsigned>(std::min<size_t>(size, 15));
-            unsigned numPrefixes = nopSize <= 10 ? 0 : nopSize - 10;
-            for (unsigned i = 0; i != numPrefixes; ++i)
-                *where++ = 0x66;
-
-            unsigned nopRest = nopSize - numPrefixes;
-            for (unsigned i = 0; i != nopRest; ++i)
-                *where++ = nops[nopRest-1][i];
-
-            size -= nopSize;
-        }
-#else
         memset(base, OP_NOP, size);
-#endif
     }
 
     // This is a no-op on x86
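
Note: most of the hunks above revert the same size micro-optimization. When the destination was eax, the newer code picked the accumulator-specific short opcode (OP_ADD_EAXIv = 0x05, OP_OR_EAXIv = 0x0D, OP_SUB_EAXIv = 0x2D, OP_XOR_EAXIv = 0x35, OP_CMP_EAXIv = 0x3D, OP_TEST_EAXIv = 0xA9, OP_XCHG_EAX = 0x90), which drops the ModRM byte; the 2.4.9 code always emits the generic group form. The following is a minimal standalone sketch of the two ADD encodings, not JSC's X86Assembler API; the function name and register numbering are illustrative only.

#include <cstdint>
#include <vector>

// Hypothetical helper: emit "add <reg32>, imm32" both ways. reg 0 is eax.
static void emitAddImm32(std::vector<uint8_t>& out, int reg, int32_t imm)
{
    if (reg == 0) {
        // Accumulator short form: 05 id ("add eax, imm32") -- 5 bytes total.
        out.push_back(0x05);
    } else {
        // Generic group-1 form: 81 /0 id ("add r/m32, imm32") -- 6 bytes total.
        out.push_back(0x81);
        out.push_back(uint8_t(0xC0 | (0 << 3) | (reg & 7))); // ModRM: mod=11, reg field /0 = ADD, rm = reg
    }
    for (int i = 0; i < 4; ++i)
        out.push_back(uint8_t((uint32_t(imm) >> (8 * i)) & 0xFF)); // little-endian imm32
}

The generic 81 /0 form is also valid when the destination is eax, which is why always taking the else branch (as the 2.4.9 code does) stays correct and only costs one byte per instruction.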
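Note: the left-hand side of the first hunk defines the register set with an X-macro: every register appears once as V(type, name), and the RegisterID/XMMRegisterID enums (plus, in WebKit, the MASM probe's register-save layout) are generated by re-expanding the list with different definitions of V. A reduced sketch of the same pattern, with a shortened register list and demo names rather than WebKit's full FOR_EACH_CPU_GPREGISTER list:

// List each register exactly once...
#define FOR_EACH_DEMO_GPREGISTER(V) \
    V(void*, eax) \
    V(void*, ecx) \
    V(void*, edx)

// ...then expand the list to build the enum.
typedef enum {
    #define DECLARE_REGISTER(_type, _regName) _regName,
    FOR_EACH_DEMO_GPREGISTER(DECLARE_REGISTER)
    #undef DECLARE_REGISTER
} DemoRegisterID; // expands to { eax, ecx, edx, }

The same list can be expanded a second time with a V that emits "_type _regName;" members, which keeps the enum and any register-save struct from drifting apart.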
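Note: the last two revertJumpTo hunks patch an existing 10-byte "movabs reg, imm64" in place: a REX.W prefix (with REX.B carrying bit 3 of the register), the short-form opcode B8+rd, then the 64-bit immediate. The sketch below shows that byte layout in isolation; it uses a plain integer register number instead of JSC's RegisterID and assumes a little-endian host (which x86 is), so it is illustrative rather than a drop-in replacement.

#include <cstdint>
#include <cstring>

// Write a 10-byte "mov r64, imm64" at 'where'. 'reg' is the 4-bit register
// number (e.g. r11 == 11).
static void writeMovImm64(uint8_t* where, int reg, int64_t imm)
{
    where[0] = uint8_t(0x48 | ((reg >> 3) & 1)); // REX.W, with REX.B for r8-r15
    where[1] = uint8_t(0xB8 | (reg & 7));        // MOV r64, imm64 (B8+rd short form)
    std::memcpy(where + 2, &imm, sizeof(imm));   // little-endian imm64
}

This matches the bytes the diff writes via PRE_REX | (1 << 3) | (dst >> 3) and OP_MOV_EAXIv | (dst & 7); the b-side change only swaps the hard-coded length for maxJumpReplacementSize() when copying the immediate.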