path: root/Source/JavaScriptCore/assembler/MacroAssembler.h
author    Simon Hausmann <simon.hausmann@nokia.com>    2012-01-06 14:44:00 +0100
committer Simon Hausmann <simon.hausmann@nokia.com>    2012-01-06 14:44:00 +0100
commit    40736c5763bf61337c8c14e16d8587db021a87d4 (patch)
tree      b17a9c00042ad89cb1308e2484491799aa14e9f8 /Source/JavaScriptCore/assembler/MacroAssembler.h
download  qtwebkit-40736c5763bf61337c8c14e16d8587db021a87d4.tar.gz
Imported WebKit commit 2ea9d364d0f6efa8fa64acf19f451504c59be0e4 (http://svn.webkit.org/repository/webkit/trunk@104285)
Diffstat (limited to 'Source/JavaScriptCore/assembler/MacroAssembler.h')
-rw-r--r--    Source/JavaScriptCore/assembler/MacroAssembler.h    411
1 file changed, 411 insertions, 0 deletions
diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.h b/Source/JavaScriptCore/assembler/MacroAssembler.h
new file mode 100644
index 000000000..cc11b5925
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssembler.h
@@ -0,0 +1,411 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssembler_h
+#define MacroAssembler_h
+
+#if ENABLE(ASSEMBLER)
+
+#if CPU(ARM_THUMB2)
+#include "MacroAssemblerARMv7.h"
+namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
+
+#elif CPU(ARM_TRADITIONAL)
+#include "MacroAssemblerARM.h"
+namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
+
+#elif CPU(MIPS)
+#include "MacroAssemblerMIPS.h"
+namespace JSC { typedef MacroAssemblerMIPS MacroAssemblerBase; };
+
+#elif CPU(X86)
+#include "MacroAssemblerX86.h"
+namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
+
+#elif CPU(X86_64)
+#include "MacroAssemblerX86_64.h"
+namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
+
+#elif CPU(SH4)
+#include "MacroAssemblerSH4.h"
+namespace JSC { typedef MacroAssemblerSH4 MacroAssemblerBase; };
+
+#else
+#error "The MacroAssembler is not supported on this platform."
+#endif
+
+
+namespace JSC {
+
+class MacroAssembler : public MacroAssemblerBase {
+public:
+
+ using MacroAssemblerBase::pop;
+ using MacroAssemblerBase::jump;
+ using MacroAssemblerBase::branch32;
+#if CPU(X86_64)
+ using MacroAssemblerBase::branchPtr;
+ using MacroAssemblerBase::branchTestPtr;
+#endif
+
+
+ // Platform-agnostic convenience functions,
+ // described in terms of other macro assembly methods.
+ void pop()
+ {
+ addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
+ }
+
+ void peek(RegisterID dest, int index = 0)
+ {
+ loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+ }
+
+ void poke(RegisterID src, int index = 0)
+ {
+ storePtr(src, Address(stackPointerRegister, (index * sizeof(void*))));
+ }
+
+ void poke(TrustedImm32 value, int index = 0)
+ {
+ store32(value, Address(stackPointerRegister, (index * sizeof(void*))));
+ }
+
+ void poke(TrustedImmPtr imm, int index = 0)
+ {
+ storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*))));
+ }
+
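+ // Usage sketch (illustrative only; it assumes a MacroAssembler instance "m"
+ // and scratch registers "regT0"/"regT1", which are not defined here):
+ //     m.poke(regT0, 2);   // store regT0 to [sp + 2 * sizeof(void*)]
+ //     m.peek(regT1, 2);   // reload the same stack slot into regT1
+ //     m.pop();            // discard one machine word from the stack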
+
+ // Backward branches; these are currently all implemented using existing forward branch mechanisms.
+ void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
+ {
+ branchPtr(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
+ {
+ branch32(cond, op1, op2).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
+ {
+ branch32(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
+ {
+ branch32(cond, left, right).linkTo(target, this);
+ }
+
+ Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
+ {
+ return branch32(commute(cond), right, left);
+ }
+
+ void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
+ {
+ branchTestPtr(cond, reg).linkTo(target, this);
+ }
+
+ void jump(Label target)
+ {
+ jump().linkTo(target, this);
+ }
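+
+ // Usage sketch (illustrative only; "m" and "counterReg" are assumed names, not
+ // part of this header): a backward branch can close a simple counted loop.
+ //     Label loopTop = m.label();
+ //     m.sub32(TrustedImm32(1), counterReg);
+ //     m.branch32(NotEqual, counterReg, TrustedImm32(0), loopTop);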
+
+ // Commutes a relational condition: returns a new condition that will produce
+ // the same result when the positions of the two operands are exchanged.
+ static RelationalCondition commute(RelationalCondition condition)
+ {
+ switch (condition) {
+ case Above:
+ return Below;
+ case AboveOrEqual:
+ return BelowOrEqual;
+ case Below:
+ return Above;
+ case BelowOrEqual:
+ return AboveOrEqual;
+ case GreaterThan:
+ return LessThan;
+ case GreaterThanOrEqual:
+ return LessThanOrEqual;
+ case LessThan:
+ return GreaterThan;
+ case LessThanOrEqual:
+ return GreaterThanOrEqual;
+ default:
+ break;
+ }
+
+ ASSERT(condition == Equal || condition == NotEqual);
+ return condition;
+ }
+
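+ // For example, the branch32() overload above that takes a TrustedImm32 as its
+ // left operand relies on commute(): a request such as
+ //     branch32(LessThan, TrustedImm32(10), reg)
+ // is emitted as
+ //     branch32(GreaterThan, reg, TrustedImm32(10))
+ // which tests the same relation with the operands swapped.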
+
+ // Ptr methods
+ // On 32-bit platforms (e.g. x86), these methods directly map onto their 32-bit equivalents.
+ // FIXME: should this use a test for 32-bitness instead of this specific exception?
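+ // For example, on a 32-bit target the two calls below assemble to the same
+ // instruction sequence ("m" and "regT0" are illustrative names):
+ //     m.addPtr(TrustedImm32(8), regT0);
+ //     m.add32(TrustedImm32(8), regT0);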
+#if !CPU(X86_64)
+ void addPtr(RegisterID src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add32(imm, srcDest);
+ }
+
+ void addPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ add32(TrustedImm32(imm), dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add32(imm, src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ add32(imm, address);
+ }
+
+ void andPtr(RegisterID src, RegisterID dest)
+ {
+ and32(src, dest);
+ }
+
+ void andPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ and32(imm, srcDest);
+ }
+
+ void orPtr(RegisterID src, RegisterID dest)
+ {
+ or32(src, dest);
+ }
+
+ void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ or32(op1, op2, dest);
+ }
+
+ void orPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ or32(TrustedImm32(imm), dest);
+ }
+
+ void orPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ or32(imm, dest);
+ }
+
+ void subPtr(RegisterID src, RegisterID dest)
+ {
+ sub32(src, dest);
+ }
+
+ void subPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ }
+
+ void subPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ sub32(TrustedImm32(imm), dest);
+ }
+
+ void xorPtr(RegisterID src, RegisterID dest)
+ {
+ xor32(src, dest);
+ }
+
+ void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ xor32(imm, srcDest);
+ }
+
+
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void loadPtr(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void loadPtr(const void* address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load32WithAddressOffsetPatch(address, dest);
+ }
+
+ DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load32WithCompactAddressOffsetPatch(address, dest);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ compare32(cond, left, right, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(RegisterID src, BaseIndex address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(RegisterID src, void* address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+ {
+ store32(TrustedImm32(imm), address);
+ }
+
+ void storePtr(TrustedImmPtr imm, void* address)
+ {
+ store32(TrustedImm32(imm), address);
+ }
+
+ DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ return store32WithAddressOffsetPatch(src, address);
+ }
+
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ return branchTest32(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, address, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, address, mask);
+ }
+
+
+ Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd32(cond, src, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub32(cond, imm, dest);
+ }
+ using MacroAssemblerBase::branchTest8;
+ Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
+ }
+#endif // !CPU(X86_64)
+
+};
+
+} // namespace JSC
+
+#else // ENABLE(ASSEMBLER)
+
+// If there is no assembler for this platform, at least allow code to make references to
+// some of the things it would otherwise define, albeit without giving that code any way
+// of doing anything useful.
+class MacroAssembler {
+private:
+ MacroAssembler() { }
+
+public:
+
+ enum RegisterID { NoRegister };
+ enum FPRegisterID { NoFPRegister };
+};
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssembler_h