| author | Simon Hausmann <simon.hausmann@digia.com> | 2012-10-17 16:21:14 +0200 |
|---|---|---|
| committer | Simon Hausmann <simon.hausmann@digia.com> | 2012-10-17 16:21:14 +0200 |
| commit | 8995b83bcbfbb68245f779b64e5517627c6cc6ea | |
| tree | 17985605dab9263cc2444bd4d45f189e142cca7c | /Source/JavaScriptCore/assembler/MacroAssembler.h |
| parent | b9c9652036d5e9f1e29c574f40bc73a35c81ace6 | |
| download | qtwebkit-8995b83bcbfbb68245f779b64e5517627c6cc6ea.tar.gz | |
Imported WebKit commit cf4f8fc6f19b0629f51860cb2d4b25e139d07e00 (http://svn.webkit.org/repository/webkit/trunk@131592)
New snapshot that includes the build fixes for Mac OS X 10.6 and earlier, as well as the previously cherry-picked changes.
Diffstat (limited to 'Source/JavaScriptCore/assembler/MacroAssembler.h')
| -rw-r--r-- | Source/JavaScriptCore/assembler/MacroAssembler.h | 431 |

1 file changed, 417 insertions, 14 deletions
```diff
diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.h b/Source/JavaScriptCore/assembler/MacroAssembler.h
index 1a9af2989..4d4a960d3 100644
--- a/Source/JavaScriptCore/assembler/MacroAssembler.h
+++ b/Source/JavaScriptCore/assembler/MacroAssembler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
 #ifndef MacroAssembler_h
 #define MacroAssembler_h
 
+#include <wtf/Platform.h>
+
 #if ENABLE(ASSEMBLER)
 
 #if CPU(ARM_THUMB2)
@@ -68,10 +70,6 @@ public:
     using MacroAssemblerBase::pop;
     using MacroAssemblerBase::jump;
     using MacroAssemblerBase::branch32;
-#if CPU(X86_64)
-    using MacroAssemblerBase::branchPtr;
-    using MacroAssemblerBase::branchTestPtr;
-#endif
     using MacroAssemblerBase::move;
 
 #if ENABLE(JIT_CONSTANT_BLINDING)
@@ -89,6 +87,8 @@ public:
     using MacroAssemblerBase::xor32;
 #endif
 
+    static const double twoToThe32; // This is super useful for some double code.
+
     // Utilities used by the DFG JIT.
 #if ENABLE(DFG_JIT)
     using MacroAssemblerBase::invert;
@@ -183,6 +183,23 @@ public:
         storePtr(imm, addressForPoke(index));
     }
 
+#if CPU(X86_64)
+    void peek64(RegisterID dest, int index = 0)
+    {
+        load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+    }
+
+    void poke(TrustedImm64 value, int index = 0)
+    {
+        store64(value, addressForPoke(index));
+    }
+
+    void poke64(RegisterID src, int index = 0)
+    {
+        store64(src, addressForPoke(index));
+    }
+#endif
+
     // Backwards banches, these are currently all implemented using existing forwards branch mechanisms.
     void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
@@ -230,6 +247,11 @@ public:
     }
 
 #if !CPU(ARM_THUMB2)
+    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+    {
+        return PatchableJump(branchPtr(cond, left, right));
+    }
+
     PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
     {
         return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
@@ -239,6 +261,16 @@ public:
     {
         return PatchableJump(jump());
     }
+
+    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return PatchableJump(branchTest32(cond, reg, mask));
+    }
+
+    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+    {
+        return PatchableJump(branch32(cond, reg, imm));
+    }
 #endif
 
     void jump(Label target)
@@ -486,6 +518,11 @@ public:
         return branch32(cond, left, TrustedImm32(right));
     }
 
+    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchSub32(cond, src, dest);
+    }
+
     Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
         return branchTest32(cond, reg, mask);
@@ -521,15 +558,295 @@ public:
         return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
     }
 #else
+    void addPtr(RegisterID src, RegisterID dest)
+    {
+        add64(src, dest);
+    }
+
+    void addPtr(Address src, RegisterID dest)
+    {
+        add64(src, dest);
+    }
+
+    void addPtr(TrustedImm32 imm, RegisterID srcDest)
+    {
+        add64(imm, srcDest);
+    }
+
+    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        add64(imm, src, dest);
+    }
```
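The hunks above establish the pattern used throughout the rest of the patch: each pointer-width `*Ptr` operation is a thin wrapper that forwards to the existing `*32` variant on 32-bit targets, or, in the new `#else` branch for X86_64, to an explicit `*64` variant. Below is a minimal, self-contained sketch of that dispatch; `SketchAssembler`, its `add32`/`add64` methods, and the `UINTPTR_MAX` test are illustrative stand-ins, not WebKit API (the real header selects the branch with WebKit's `CPU(X86_64)` macro):

```cpp
#include <cstdint>
#include <iostream>

// Sketch of the MacroAssembler width-dispatch pattern. The choice is made
// at compile time, so JIT client code never hard-codes the word width.
struct SketchAssembler {
    using RegisterID = int;

    void add32(RegisterID src, RegisterID dest) { std::cout << "add32 r" << src << ", r" << dest << "\n"; }
    void add64(RegisterID src, RegisterID dest) { std::cout << "add64 r" << src << ", r" << dest << "\n"; }

#if UINTPTR_MAX == UINT64_MAX
    // 64-bit target: pointer-width ops forward to the 64-bit variants.
    void addPtr(RegisterID src, RegisterID dest) { add64(src, dest); }
#else
    // 32-bit target: the same calls forward to the 32-bit variants.
    void addPtr(RegisterID src, RegisterID dest) { add32(src, dest); }
#endif
};

int main() {
    SketchAssembler masm;
    masm.addPtr(1, 2); // emits add64 on a 64-bit host, add32 on a 32-bit host
}
```

The X86_64 wrapper section of the patch continues: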
```diff
+    void addPtr(TrustedImm32 imm, Address address)
+    {
+        add64(imm, address);
+    }
+
+    void addPtr(AbsoluteAddress src, RegisterID dest)
+    {
+        add64(src, dest);
+    }
+
+    void addPtr(TrustedImmPtr imm, RegisterID dest)
+    {
+        add64(TrustedImm64(imm), dest);
+    }
+
+    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        add64(imm, address);
+    }
+
+    void andPtr(RegisterID src, RegisterID dest)
+    {
+        and64(src, dest);
+    }
+
+    void andPtr(TrustedImm32 imm, RegisterID srcDest)
+    {
+        and64(imm, srcDest);
+    }
+
+    void negPtr(RegisterID dest)
+    {
+        neg64(dest);
+    }
+
+    void orPtr(RegisterID src, RegisterID dest)
+    {
+        or64(src, dest);
+    }
+
+    void orPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        or64(imm, dest);
+    }
+
+    void orPtr(TrustedImmPtr imm, RegisterID dest)
+    {
+        or64(TrustedImm64(imm), dest);
+    }
+
+    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        or64(op1, op2, dest);
+    }
+
+    void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        or64(imm, src, dest);
+    }
+
+    void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
+    {
+        rotateRight64(imm, srcDst);
+    }
+
+    void subPtr(RegisterID src, RegisterID dest)
+    {
+        sub64(src, dest);
+    }
+
+    void subPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        sub64(imm, dest);
+    }
+
+    void subPtr(TrustedImmPtr imm, RegisterID dest)
+    {
+        sub64(TrustedImm64(imm), dest);
+    }
+
+    void xorPtr(RegisterID src, RegisterID dest)
+    {
+        xor64(src, dest);
+    }
+
+    void xorPtr(RegisterID src, Address dest)
+    {
+        xor64(src, dest);
+    }
+
+    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+    {
+        xor64(imm, srcDest);
+    }
+
+    void loadPtr(ImplicitAddress address, RegisterID dest)
+    {
+        load64(address, dest);
+    }
+
+    void loadPtr(BaseIndex address, RegisterID dest)
+    {
+        load64(address, dest);
+    }
+
+    void loadPtr(const void* address, RegisterID dest)
+    {
+        load64(address, dest);
+    }
+
+    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        return load64WithAddressOffsetPatch(address, dest);
+    }
+
+    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        return load64WithCompactAddressOffsetPatch(address, dest);
+    }
+
+    void storePtr(RegisterID src, ImplicitAddress address)
+    {
+        store64(src, address);
+    }
+
+    void storePtr(RegisterID src, BaseIndex address)
+    {
+        store64(src, address);
+    }
+
+    void storePtr(RegisterID src, void* address)
+    {
+        store64(src, address);
+    }
+
+    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+    {
+        store64(TrustedImm64(imm), address);
+    }
+
+    void storePtr(TrustedImmPtr imm, BaseIndex address)
+    {
+        store64(TrustedImm64(imm), address);
+    }
+
+    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        return store64WithAddressOffsetPatch(src, address);
+    }
+
+    void movePtrToDouble(RegisterID src, FPRegisterID dest)
+    {
+        move64ToDouble(src, dest);
+    }
+
+    void moveDoubleToPtr(FPRegisterID src, RegisterID dest)
+    {
+        moveDoubleTo64(src, dest);
+    }
+
+    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        compare64(cond, left, right, dest);
+    }
+
+    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        compare64(cond, left, right, dest);
+    }
+
+    void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+    {
+        test64(cond, reg, mask, dest);
+    }
+
+    void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+    {
+        test64(cond, reg, mask, dest);
+    }
```
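One detail worth noting in these wrappers: pointer-sized immediates are widened explicitly, so `storePtr(TrustedImmPtr, …)` becomes `store64(TrustedImm64(imm), …)` rather than relying on an implicit conversion. A hedged sketch of that widening step, with simplified stand-in types rather than WebKit's real `TrustedImmPtr`/`TrustedImm64` classes:

```cpp
#include <cstdint>
#include <iostream>

// Simplified stand-in for a pointer-sized trusted immediate.
struct TrustedImmPtr {
    const void* m_value;
};

// Simplified stand-in for a 64-bit trusted immediate; the constructor
// mirrors the TrustedImm64(TrustedImmPtr) conversion used in the patch.
struct TrustedImm64 {
    int64_t m_value;
    explicit TrustedImm64(TrustedImmPtr ptr)
        : m_value(static_cast<int64_t>(reinterpret_cast<intptr_t>(ptr.m_value))) {}
};

// Stand-in for the 64-bit store; a real assembler would emit code here.
static void store64(TrustedImm64 imm) {
    std::cout << "store64 imm=0x" << std::hex << imm.m_value << "\n";
}

// storePtr forwards through the explicit widening conversion.
static void storePtr(TrustedImmPtr imm) {
    store64(TrustedImm64(imm));
}

int main() {
    int x = 0;
    storePtr(TrustedImmPtr{&x});
}
```

The remaining hunks cover the 64-bit branch wrappers and the constant-blinding changes: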
```diff
+    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+    {
+        return branch64(cond, left, TrustedImm64(right));
+    }
+
+    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+    {
+        return branch64(cond, left, TrustedImm64(right));
+    }
+
+    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        return branchTest64(cond, reg, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest64(cond, reg, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest64(cond, address, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
+    {
+        return branchTest64(cond, address, reg);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest64(cond, address, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest64(cond, address, mask);
+    }
+
+    Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchAdd64(cond, imm, dest);
+    }
+
+    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchAdd64(cond, src, dest);
+    }
+
+    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchSub64(cond, imm, dest);
+    }
+
+    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchSub64(cond, src, dest);
+    }
+
+    Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+    {
+        return branchSub64(cond, src1, src2, dest);
+    }
+
 #if ENABLE(JIT_CONSTANT_BLINDING)
-    using MacroAssemblerBase::addPtr;
-    using MacroAssemblerBase::andPtr;
-    using MacroAssemblerBase::branchSubPtr;
+    using MacroAssemblerBase::and64;
     using MacroAssemblerBase::convertInt32ToDouble;
-    using MacroAssemblerBase::storePtr;
-    using MacroAssemblerBase::subPtr;
-    using MacroAssemblerBase::xorPtr;
+    using MacroAssemblerBase::store64;
 
     bool shouldBlindDouble(double value)
     {
@@ -539,7 +856,7 @@ public:
 
         // Try to force normalisation, and check that there's no change
         // in the bit pattern
-        if (bitwise_cast<uintptr_t>(value * 1.0) != bitwise_cast<uintptr_t>(value))
+        if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
             return true;
 
         value = abs(value);
@@ -578,7 +895,6 @@ public:
         default: {
             if (value <= 0xff)
                 return false;
-#if CPU(X86_64)
             JSValue jsValue = JSValue::decode(reinterpret_cast<void*>(value));
             if (jsValue.isInt32())
                 return shouldBlind(Imm32(jsValue.asInt32()));
@@ -587,7 +903,6 @@ public:
 
             if (!shouldBlindDouble(bitwise_cast<double>(value)))
                 return false;
-#endif
         }
         }
         return shouldBlindForSpecificArch(value);
     }
```
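The `shouldBlindDouble` change above replaces `uintptr_t` with `uint64_t` so the comparison always covers all 64 bits of the double's representation, independent of the host pointer size. A standalone sketch of the underlying check, using `memcpy` where WebKit uses `bitwise_cast` (the helper names are illustrative):

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>

// Type-punning helper in the spirit of WTF's bitwise_cast; memcpy keeps
// the conversion well-defined in standard C++.
static uint64_t doubleBits(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));
    return bits;
}

// Sketch of the check in shouldBlindDouble: multiplying by 1.0 forces a
// round-trip through the FPU; if the full 64-bit pattern changes, the
// value was not in normalised form and is treated as worth blinding.
static bool bitPatternChangesUnderNormalisation(double value) {
    return doubleBits(value * 1.0) != doubleBits(value);
}

int main() {
    std::cout << bitPatternChangesUnderNormalisation(1.5) << "\n"; // 0: already normal
}
```

The final hunks add the 64-bit blinding support itself: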
```diff
@@ -617,6 +932,59 @@ public:
         rotateRightPtr(constant.rotation, dest);
     }
 
+    bool shouldBlind(Imm64 imm)
+    {
+#if !defined(NDEBUG)
+        UNUSED_PARAM(imm);
+        // Debug always blind all constants, if only so we know
+        // if we've broken blinding during patch development.
+        return true;
+#endif
+
+        // First off we'll special case common, "safe" values to avoid hurting
+        // performance too much
+        uint64_t value = imm.asTrustedImm64().m_value;
+        switch (value) {
+        case 0xffff:
+        case 0xffffff:
+        case 0xffffffffL:
+        case 0xffffffffffL:
+        case 0xffffffffffffL:
+        case 0xffffffffffffffL:
+        case 0xffffffffffffffffL:
+            return false;
+        default: {
+            if (value <= 0xff)
+                return false;
+        }
+        }
+        return shouldBlindForSpecificArch(value);
+    }
+
+    struct RotatedImm64 {
+        RotatedImm64(uint64_t v1, uint8_t v2)
+            : value(v1)
+            , rotation(v2)
+        {
+        }
+        TrustedImm64 value;
+        TrustedImm32 rotation;
+    };
+
+    RotatedImm64 rotationBlindConstant(Imm64 imm)
+    {
+        uint8_t rotation = random() % (sizeof(int64_t) * 8);
+        uint64_t value = imm.asTrustedImm64().m_value;
+        value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
+        return RotatedImm64(value, rotation);
+    }
+
+    void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
+    {
+        move(constant.value, dest);
+        rotateRight64(constant.rotation, dest);
+    }
+
     void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
     {
         if (shouldBlind(imm)) {
@@ -635,6 +1003,24 @@ public:
         move(imm.asTrustedImmPtr(), dest);
     }
 
+    void move(Imm64 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm))
+            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+        else
+            move(imm.asTrustedImm64(), dest);
+    }
+
+    void and64(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = andBlindedConstant(imm);
+            and64(key.value1, dest);
+            and64(key.value2, dest);
+        } else
+            and64(imm.asTrustedImm32(), dest);
+    }
+
     Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
     {
         if (shouldBlind(right)) {
@@ -655,6 +1041,16 @@ public:
         storePtr(imm.asTrustedImmPtr(), dest);
     }
 
+    void store64(Imm64 imm, Address dest)
+    {
+        if (shouldBlind(imm)) {
+            RegisterID scratchRegister = scratchRegisterForBlinding();
+            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+            store64(scratchRegister, dest);
+        } else
+            store64(imm.asTrustedImm64(), dest);
+    }
+
 #endif
 
 #endif // !CPU(X86_64)
@@ -848,6 +1244,13 @@ public:
         storePtr(value, addressForPoke(index));
     }
 
+#if CPU(X86_64)
+    void poke(Imm64 value, int index = 0)
+    {
+        store64(value, addressForPoke(index));
+    }
+#endif
+
     void store32(Imm32 imm, Address dest)
     {
         if (shouldBlind(imm)) {
```
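The new `Imm64` blinding mirrors the existing 32-bit rotation scheme: `rotationBlindConstant` stores the constant pre-rotated by a random amount, and `loadRotationBlindedConstant` emits a `move` plus `rotateRight64` to recover it at runtime, so the raw immediate never appears verbatim in executable memory. A self-contained sketch of that round-trip (helper names are illustrative; note it special-cases a rotation of zero, since shifting a 64-bit value by 64, as the patch's expression would for `rotation == 0`, is undefined in C++):

```cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>

// Rotate helpers; the zero-count special case avoids the undefined
// behaviour of shifting a 64-bit value by 64 bits.
static uint64_t rotl64(uint64_t v, unsigned r) {
    return r ? (v << r) | (v >> (64 - r)) : v;
}
static uint64_t rotr64(uint64_t v, unsigned r) {
    return r ? (v >> r) | (v << (64 - r)) : v;
}

int main() {
    // Sketch of rotationBlindConstant + loadRotationBlindedConstant: the
    // JIT would emit "move blinded, reg; rotateRight64 rotation, reg".
    uint64_t secret = 0x0123456789abcdefULL;    // illustrative constant
    unsigned rotation = std::rand() % 64;       // fresh randomness per emission
    uint64_t blinded = rotl64(secret, rotation); // what lands in the code stream

    // At runtime, the rotate-right restores the original value.
    assert(rotr64(blinded, rotation) == secret);
}
```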