summaryrefslogtreecommitdiff
path: root/Source/JavaScriptCore/offlineasm
diff options
context:
space:
mode:
Diffstat (limited to 'Source/JavaScriptCore/offlineasm')
-rw-r--r--Source/JavaScriptCore/offlineasm/asm.rb20
-rw-r--r--Source/JavaScriptCore/offlineasm/backends.rb13
-rw-r--r--Source/JavaScriptCore/offlineasm/cloop.rb988
-rw-r--r--Source/JavaScriptCore/offlineasm/config.rb2
-rw-r--r--Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb4
-rw-r--r--Source/JavaScriptCore/offlineasm/instructions.rb10
-rw-r--r--Source/JavaScriptCore/offlineasm/offsets.rb9
7 files changed, 1035 insertions, 11 deletions
diff --git a/Source/JavaScriptCore/offlineasm/asm.rb b/Source/JavaScriptCore/offlineasm/asm.rb
index 1603f4af4..14d616442 100644
--- a/Source/JavaScriptCore/offlineasm/asm.rb
+++ b/Source/JavaScriptCore/offlineasm/asm.rb
@@ -96,6 +96,12 @@ class Assembler
result
end
    # Emits one raw C/C++ statement line into the output stream.
    # `line` fragments are concatenated verbatim; any pending inline comment
    # is appended at the preferred comment column via formatDump.
    # Only legal while the assembler is in the :asm state.
    def putc(*line)
        raise unless @state == :asm
        @outp.puts(formatDump(" " + line.join(''), lastComment))
    end
+
def formatDump(dumpStr, comment, commentColumns=$preferredCommentStartColumn)
if comment.length > 0
"%-#{commentColumns}s %s" % [dumpStr, comment]
@@ -151,7 +157,11 @@ class Assembler
@numGlobalLabels += 1
putsNewlineSpacerIfAppropriate(:global)
@internalComment = $enableLabelCountComments ? "Global Label #{@numGlobalLabels}" : nil
- @outp.puts(formatDump("OFFLINE_ASM_GLOBAL_LABEL(#{labelName})", lastComment))
+ if /\Allint_op_/.match(labelName)
+ @outp.puts(formatDump("OFFLINE_ASM_OPCODE_LABEL(op_#{$~.post_match})", lastComment))
+ else
+ @outp.puts(formatDump("OFFLINE_ASM_GLUE_LABEL(#{labelName})", lastComment))
+ end
@newlineSpacerState = :none # After a global label, we can use another spacer.
end
@@ -171,6 +181,14 @@ class Assembler
"\" LOCAL_LABEL_STRING(#{labelName}) \""
end
    # Returns the label name unchanged: in the C_LOOP backend labels are
    # plain C++ goto labels, so no assembler quoting/mangling is needed.
    def self.cLabelReference(labelName)
        "#{labelName}"
    end

    # Same as cLabelReference, for file-local (non-exported) labels.
    def self.cLocalLabelReference(labelName)
        "#{labelName}"
    end
+
def codeOrigin(text)
case @commentState
when :none
diff --git a/Source/JavaScriptCore/offlineasm/backends.rb b/Source/JavaScriptCore/offlineasm/backends.rb
index e33a2a083..0633f07f8 100644
--- a/Source/JavaScriptCore/offlineasm/backends.rb
+++ b/Source/JavaScriptCore/offlineasm/backends.rb
@@ -25,12 +25,14 @@ require "config"
require "armv7"
require "ast"
require "x86"
+require "cloop"
BACKENDS =
[
"X86",
"X86_64",
- "ARMv7"
+ "ARMv7",
+ "C_LOOP"
]
# Keep the set of working backends separate from the set of backends that might be
@@ -42,7 +44,8 @@ WORKING_BACKENDS =
[
"X86",
"X86_64",
- "ARMv7"
+ "ARMv7",
+ "C_LOOP"
]
BACKEND_PATTERN = Regexp.new('\\A(' + BACKENDS.join(')|(') + ')\\Z')
@@ -76,12 +79,18 @@ class LabelReference
def asmLabel
Assembler.labelReference(name[1..-1])
end
    # The C++ goto-label spelling of this global label for the C_LOOP backend
    # (leading sigil of the offlineasm name is stripped).
    def cLabel
        Assembler.cLabelReference(name[1..-1])
    end
end
class LocalLabelReference
def asmLabel
Assembler.localLabelReference("_offlineasm_"+name[1..-1])
end
    # The C++ goto-label spelling of this local label for the C_LOOP backend,
    # prefixed to avoid collisions with compiler-visible names.
    def cLabel
        Assembler.cLocalLabelReference("_offlineasm_"+name[1..-1])
    end
end
class Skip
diff --git a/Source/JavaScriptCore/offlineasm/cloop.rb b/Source/JavaScriptCore/offlineasm/cloop.rb
new file mode 100644
index 000000000..8469ed441
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/cloop.rb
@@ -0,0 +1,988 @@
+# Copyright (C) 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+require "ast"
+require "opt"
+
+# The CLoop llint backend is initially based on the ARMv7 backend, and
+# then further enhanced with a few instructions from the x86 backend to
+# support building for X64 targets. Hence, the shape of the generated
+# code and the usage convention of registers will look a lot like the
+# ARMv7 backend's.
+
# Maps an offlineasm operand type symbol to the member-access suffix used
# on the C loop's register union (e.g. :int32 -> ".i32", so "t0" becomes
# "t0.i32" in the emitted C++). Raises for any type the C loop backend
# does not model.
def cloopMapType(type)
    suffix =
        case type
        when :int           then ".i"
        when :uint          then ".u"
        when :int32         then ".i32"
        when :uint32        then ".u32"
        when :int8          then ".i8"
        when :uint8         then ".u8"
        when :int8Ptr       then ".i8p"
        when :voidPtr       then ".vp"
        when :nativeFunc    then ".nativeFunc"
        when :double        then ".d"
        when :castToDouble  then ".castToDouble"
        when :castToVoidPtr then ".castToVoidPtr"
        when :opcode        then ".opcode"
        end
    raise "Unsupported type" unless suffix
    suffix
end
+
+
# A register that exists only in the C loop backend (not part of the
# abstract offlineasm register set), e.g. the dedicated scratch FPR.
class SpecialRegister < NoChildren
    # Bare C++ variable name.
    def dump
        @name
    end

    # Name plus the union-member suffix that reads the value as `type`.
    def clValue(type = :int)
        "#{@name}#{cloopMapType(type)}"
    end
end

# Scratch double register reserved for the C loop's internal use.
C_LOOP_SCRATCH_FPR = SpecialRegister.new("d8")
+
# Lowers abstract offlineasm registers onto the CLoop interpreter's
# named C++ variables.
class RegisterID
    # Table of offlineasm register name -> C loop C++ variable name.
    CLOOP_REGISTER_NAMES = {
        "t0"   => "t0",
        "t1"   => "t1",
        "t2"   => "t2",
        "t3"   => "t3",
        "t4"   => "rPC",
        "t6"   => "rBasePC",
        "csr1" => "tagTypeNumber",
        "csr2" => "tagMask",
        "cfr"  => "cfr",
        "lr"   => "rRetVPC",
        "sp"   => "sp",
    }

    # C++ variable backing this register; raises for registers the C loop
    # backend does not provide.
    def dump
        cName = CLOOP_REGISTER_NAMES[name]
        raise "Bad register #{name} for C_LOOP at #{codeOriginString}" unless cName
        cName
    end

    # C++ expression reading this register as `type`.
    def clValue(type = :int)
        dump + cloopMapType(type)
    end
end
+
# Lowers abstract offlineasm floating-point registers onto the CLoop
# interpreter's named C++ double variables.
class FPRegisterID
    # Table of offlineasm FP register name -> C loop C++ variable name.
    # "fr" (the FP return register) aliases "ft0".
    CLOOP_FP_REGISTER_NAMES = {
        "ft0" => "d0",
        "fr"  => "d0",
        "ft1" => "d1",
        "ft2" => "d2",
        "ft3" => "d3",
        "ft4" => "d4",
        "ft5" => "d5",
    }

    # C++ variable backing this FP register; raises when unmapped.
    def dump
        cName = CLOOP_FP_REGISTER_NAMES[name]
        raise "Bad register #{name} for C_LOOP at #{codeOriginString}" unless cName
        cName
    end

    # C++ expression reading this register as `type`.
    def clValue(type = :int)
        dump + cloopMapType(type)
    end
end
+
# Lowers immediate operands to explicitly-typed C++ integer casts.
class Immediate
    def dump
        value.to_s
    end

    # C++ cast name for each supported immediate type.
    IMMEDIATE_CASTS = {
        :int8   => "int8_t",
        :int32  => "int32_t",
        :int    => "intptr_t",
        :uint8  => "uint8_t",
        :uint32 => "uint32_t",
        :uint   => "uintptr_t",
    }

    def clValue(type = :int)
        # Very large unsigned constants (e.g. 0x8000000000000000) upset the
        # C/C++ compiler when written as positive decimal literals, so
        # non-negative values are rendered in hex. Negative values stay
        # decimal because Integer#to_s(16) would keep the "-" sign and
        # produce the meaningless "0x-..." spelling.
        valueStr = value < 0 ? value.to_s : "0x#{value.to_s(16)}"

        castName = IMMEDIATE_CASTS[type]
        raise "Not implemented immediate of type: #{type}" unless castName
        "#{castName}(#{valueStr})"
    end
end
+
# A base+offset memory operand, lowered to C++ pointer arithmetic plus a
# cast/dereference of the requested width. Each xxxMemRef helper yields an
# lvalue expression like "*CAST<int32_t*>(base.i8p + 8)".
class Address
    def dump
        "[#{base.dump}, #{offset.value}]"
    end
    # Dispatches to the memory-reference helper matching `type`.
    def clValue(type=:int)
        case type
        when :int8; int8MemRef
        when :int32; int32MemRef
        when :int; intMemRef
        when :uint8; uint8MemRef
        when :uint32; uint32MemRef
        when :uint; uintMemRef
        when :opcode; opcodeMemRef
        when :nativeFunc; nativeFuncMemRef
        else
            raise "Unexpected Address type: #{type}"
        end
    end
    # C++ expression for the address (as a byte pointer) this operand names.
    def pointerExpr
        # sp-relative accesses are only ever used to reach
        # JITStackFrame::globalData; the emitted ASSERT pins that invariant
        # at C++ compile/run time before taking the member's address directly.
        if base.is_a? RegisterID and base.name == "sp"
            offsetValue = "#{offset.value}"
            "(ASSERT(#{offsetValue} == offsetof(JITStackFrame, globalData)), &sp->globalData)"
        elsif offset.value == 0
            "#{base.clValue(:int8Ptr)}"
        elsif offset.value > 0
            "#{base.clValue(:int8Ptr)} + #{offset.value}"
        else
            # Negative offsets are emitted as subtraction to avoid "+ -N".
            "#{base.clValue(:int8Ptr)} - #{-offset.value}"
        end
    end
    def int8MemRef
        "*CAST<int8_t*>(#{pointerExpr})"
    end
    def int16MemRef
        "*CAST<int16_t*>(#{pointerExpr})"
    end
    def int32MemRef
        "*CAST<int32_t*>(#{pointerExpr})"
    end
    def intMemRef
        "*CAST<intptr_t*>(#{pointerExpr})"
    end
    def uint8MemRef
        "*CAST<uint8_t*>(#{pointerExpr})"
    end
    def uint16MemRef
        "*CAST<uint16_t*>(#{pointerExpr})"
    end
    def uint32MemRef
        "*CAST<uint32_t*>(#{pointerExpr})"
    end
    def uintMemRef
        "*CAST<uintptr_t*>(#{pointerExpr})"
    end
    def nativeFuncMemRef
        "*CAST<NativeFunction*>(#{pointerExpr})"
    end
    def opcodeMemRef
        "*CAST<Opcode*>(#{pointerExpr})"
    end
    def dblMemRef
        "*CAST<double*>(#{pointerExpr})"
    end
end
+
# A base + (index << scaleShift) + offset memory operand, lowered to C++
# pointer arithmetic plus a cast/dereference of the requested width.
class BaseIndex
    def dump
        "[#{base.dump}, #{offset.dump}, #{index.dump} << #{scaleShift}]"
    end
    # Dispatches to the memory-reference helper matching `type`.
    def clValue(type=:int)
        case type
        when :int8; int8MemRef
        when :int32; int32MemRef
        when :int; intMemRef
        when :uint8; uint8MemRef
        when :uint32; uint32MemRef
        when :uint; uintMemRef
        when :opcode; opcodeMemRef
        else
            raise "Unexpected BaseIndex type: #{type}"
        end
    end
    # C++ expression for the address (as a byte pointer) this operand names.
    def pointerExpr
        if base.is_a? RegisterID and base.name == "sp"
            # sp-relative accesses are only used to reach
            # JITStackFrame::globalData (asserted in the emitted C++).
            # BUGFIX: the interpolated expression previously read
            # "(index << shift) + offset)" with unbalanced parentheses,
            # producing uncompilable C++; it is now fully parenthesized.
            offsetValue = "((#{index.clValue(:int32)} << #{scaleShift}) + #{offset.clValue})"
            "(ASSERT(#{offsetValue} == offsetof(JITStackFrame, globalData)), &sp->globalData)"
        else
            "#{base.clValue(:int8Ptr)} + (#{index.clValue(:int32)} << #{scaleShift}) + #{offset.clValue}"
        end
    end
    def int8MemRef
        "*CAST<int8_t*>(#{pointerExpr})"
    end
    def int16MemRef
        "*CAST<int16_t*>(#{pointerExpr})"
    end
    def int32MemRef
        "*CAST<int32_t*>(#{pointerExpr})"
    end
    def intMemRef
        "*CAST<intptr_t*>(#{pointerExpr})"
    end
    def uint8MemRef
        "*CAST<uint8_t*>(#{pointerExpr})"
    end
    def uint16MemRef
        "*CAST<uint16_t*>(#{pointerExpr})"
    end
    def uint32MemRef
        "*CAST<uint32_t*>(#{pointerExpr})"
    end
    def uintMemRef
        "*CAST<uintptr_t*>(#{pointerExpr})"
    end
    def opcodeMemRef
        "*CAST<Opcode*>(#{pointerExpr})"
    end
    def dblMemRef
        "*CAST<double*>(#{pointerExpr})"
    end
end
+
# An absolute-address operand. The C loop backend has no meaningful
# lowering for it; dump/clValue just surface the code origin string.
class AbsoluteAddress
    def dump
        codeOriginString.to_s
    end

    def clValue
        dump
    end
end
+
+
+#
+# Lea support.
+#
+
class Address
    # Emits C++ for "lea" of a base+offset address: destination = base + offset,
    # collapsed to "+=" when the destination register aliases the base.
    def cloopEmitLea(destination, type)
        if destination == base
            $asm.putc "#{destination.clValue(:int8Ptr)} += #{offset.clValue(type)};"
            return
        end
        $asm.putc "#{destination.clValue(:int8Ptr)} = #{base.clValue(:int8Ptr)} + #{offset.clValue(type)};"
    end
end
+
class BaseIndex
    # Emits C++ for "lea" of a base+index<<scale address. The offset must be
    # zero for this form; a non-zero offset is a malformed operand.
    def cloopEmitLea(destination, type)
        unless offset.value == 0
            raise "Malformed BaseIndex, offset should be zero at #{codeOriginString}"
        end
        $asm.putc "#{destination.clValue(:int8Ptr)} = #{base.clValue(:int8Ptr)} + (#{index.clValue} << #{scaleShift});"
    end
end
+
+#
+# Actual lowering code follows.
+#
+
class Sequence
    # The C loop backend needs no lowering transformations on the sequence;
    # this pass only validates that every node is of a kind the backend can
    # emit, then returns the list unchanged.
    def getModifiedListC_LOOP
        @list.each do |node|
            next if node.is_a?(Instruction) || node.is_a?(Label) ||
                    node.is_a?(LocalLabel) || node.is_a?(Skip)
            raise "Unexpected #{node.inspect} at #{node.codeOrigin}"
        end
        @list
    end
end
+
# Renders an operand list as a comma-separated string of their dumps,
# for diagnostics/comments.
def clOperands(operands)
    operands.map(&:dump).join(", ")
end
+
+
# Emits a C++ statement for a binary operation. Three operands:
# operands[2] = operands[1] <operator> operands[0]. Two operands:
# operands[1] is updated in place (and must not be an immediate).
def cloopEmitOperation(operands, type, operator)
    case operands.size
    when 3
        dst, lhs = operands[2], operands[1]
    when 2
        raise if operands[1].is_a? Immediate
        dst = lhs = operands[1]
    else
        raise
    end
    $asm.putc "#{dst.clValue(type)} = #{lhs.clValue(type)} #{operator} #{operands[0].clValue(type)};"
end
+
# Emits a C++ shift statement. The shift amount is masked to [0, 31]
# ("& 0x1f") to match the modulo-32 behavior hardware backends get for
# free. Three operands write operands[2]; two operands update operands[1]
# in place (which must not be an immediate).
def cloopEmitShiftOperation(operands, type, operator)
    case operands.size
    when 3
        dst, src = operands[2], operands[1]
    when 2
        raise if operands[1].is_a? Immediate
        dst = src = operands[1]
    else
        raise
    end
    $asm.putc "#{dst.clValue(type)} = #{src.clValue(type)} #{operator} (#{operands[0].clValue(:int)} & 0x1f);"
end
+
# Emits an "unordered" double compare-and-branch: the branch is taken when
# either operand is NaN or when the comparison itself holds.
def cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, condition)
    a = operands[0].clValue(:double)
    b = operands[1].clValue(:double)
    $asm.putc "if (isnan(#{a}) || isnan(#{b})"
    $asm.putc " || (#{a} #{condition} #{b}))"
    $asm.putc " goto #{operands[2].cLabel};"
end
+
+
# Emits: operands[2] = (operands[0] <comparator> operands[1]);
# The result is a boolean, so its storage type does not depend on the type
# of the values being compared (clValue() with no argument defaults to :int).
# Fixed: the original embedded a dead assignment ("op2 = ...") inside the
# string interpolation; the local was never used.
def cloopEmitCompareAndSet(operands, type, comparator)
    $asm.putc "#{operands[2].clValue} = (#{operands[0].clValue(type)} #{comparator} #{operands[1].clValue(type)});"
end
+
+
# Emits a typed compare of operands[0] vs operands[1] and a goto to
# operands[2]'s label when the comparison holds.
def cloopEmitCompareAndBranch(operands, type, comparator)
    lhs = operands[0].clValue(type)
    rhs = operands[1].clValue(type)
    $asm.putc "if (#{lhs} #{comparator} #{rhs})"
    $asm.putc " goto #{operands[2].cLabel};"
end
+
+
+# conditionTest should contain a string that provides a comparator and a RHS
+# value e.g. "< 0".
# Builds a C++ boolean expression string from the operands and a
# conditionTest that supplies the comparator and RHS, e.g. "< 0".
# With 2 operands, operands[0] is tested directly; with 3, operands[0] is
# first masked with operands[1]. (The final operand is the branch target
# and is not consumed here.)
# Fixed: the error path interpolated `codeOriginString`, which is not
# defined at top-level function scope and would have raised NameError
# instead of the intended diagnostic.
def cloopGenerateConditionExpression(operands, type, conditionTest)
    op1 = operands[0].clValue(type)

    case operands.size
    when 2 # Just test op1 against the conditionTest.
        lhs = op1
    when 3 # Mask op1 with op2 before testing against the conditionTest.
        lhs = "(#{op1} & #{operands[1].clValue(type)})"
    else
        raise "Expected 2 or 3 operands but got #{operands.size}"
    end

    "#{lhs} #{conditionTest}"
end
+
+# conditionTest should contain a string that provides a comparator and a RHS
+# value e.g. "< 0".
# Emits a goto to branchTarget when the condition expression built from
# the operands holds. conditionTest supplies the comparator and RHS,
# e.g. "< 0".
def cloopEmitTestAndBranchIf(operands, type, conditionTest, branchTarget)
    expr = cloopGenerateConditionExpression(operands, type, conditionTest)
    $asm.putc "if (#{expr})"
    $asm.putc " goto #{branchTarget};"
end
+
# Emits: lastOperand = (condition); The result is a boolean, so the
# destination is always written via the default (:int) accessor — the
# passed-in type only applies to the values being tested.
def cloopEmitTestSet(operands, type, conditionTest)
    expr = cloopGenerateConditionExpression(operands, type, conditionTest)
    $asm.putc "#{operands[-1].clValue} = (#{expr});"
end
+
# Emits: temp = op2 <operator> op1; branch to operands[2]'s label when
# temp satisfies conditionTest; otherwise commit temp back into op2.
# Computing into a temporary keeps op2 unmodified on the branch-taken path.
def cloopEmitOpAndBranch(operands, operator, type, conditionTest)
    tempType =
        case type
        when :int   then "intptr_t"
        when :int32 then "int32_t"
        else raise "Unimplemented type"
        end

    rhs = operands[0].clValue(type)
    lhs = operands[1].clValue(type)

    $asm.putc "{"
    $asm.putc " #{tempType} temp = #{lhs} #{operator} #{rhs};"
    $asm.putc " if (temp #{conditionTest})"
    $asm.putc " goto #{operands[2].cLabel};"
    $asm.putc " #{lhs} = temp;"
    $asm.putc "}"
end
+
# Emits the local declarations for a checked add (a, b) and returns the
# C++ boolean expression that is true when b + a overflows. Signed add
# overflows only when both operands share a sign and the sum's sign flips.
def cloopAddOverflowTest(operands, type)
    raise "Unimplemented type" unless type == :int32
    tempType = "int32_t"
    signBit = "SIGN_BIT32"

    $asm.putc " #{tempType} a = #{operands[0].clValue(type)};"
    $asm.putc " #{tempType} b = #{operands[1].clValue(type)};"
    $asm.putc " // sign(b) sign(a) | Overflows if:"
    $asm.putc " // 0 0 | sign(b+a) = 1 (pos + pos != neg)"
    $asm.putc " // 0 1 | never"
    $asm.putc " // 1 0 | never"
    $asm.putc " // 1 1 | sign(b+a) = 0 (neg + neg != pos)"
    "((#{signBit}(b) == #{signBit}(a)) && (#{signBit}(b+a) != #{signBit}(a)))"
end
+
# Emits the local declarations for a checked subtract (a, b) and returns
# the C++ boolean expression that is true when b - a overflows. Signed
# subtract overflows only when the operands differ in sign and the
# difference takes a's sign.
def cloopSubOverflowTest(operands, type)
    raise "Unimplemented type" unless type == :int32
    tempType = "int32_t"
    signBit = "SIGN_BIT32"

    $asm.putc " #{tempType} a = #{operands[0].clValue(type)};"
    $asm.putc " #{tempType} b = #{operands[1].clValue(type)};"
    $asm.putc " // sign(b) sign(a) | Overflows if:"
    $asm.putc " // 0 0 | never"
    $asm.putc " // 0 1 | sign(b-a) = 1 (pos - neg != pos)"
    $asm.putc " // 1 0 | sign(b-a) = 0 (neg - pos != pos)"
    $asm.putc " // 1 1 | never"
    "((#{signBit}(b) != #{signBit}(a)) && (#{signBit}(b-a) == #{signBit}(a)))"
end
+
# Emits the local declarations for a checked multiply (a, b) and returns a
# conservative C++ overflow test: any bits at or above bit 15 in either
# operand trigger the slow path (may report overflow for products that
# would actually fit, but never misses a real overflow).
def cloopMulOverflowTest(operands, type)
    raise "Unimplemented type" unless type == :int32
    tempType = "uint32_t"

    $asm.putc " #{tempType} a = #{operands[0].clValue(type)};"
    $asm.putc " #{tempType} b = #{operands[1].clValue(type)};"
    "((b | a) >> 15)"
end
+
# Emits: operands[1] = operands[1] <operator> operands[0], branching to
# operands[2]'s label first if the operation would overflow. The overflow
# test helpers also emit the local declarations for `a` and `b` used by
# the returned expression.
# Fixed: typo in the raise message ("opeartor" -> "operator").
def cloopEmitOpAndBranchIfOverflow(operands, operator, type)
    $asm.putc "{"

    # Emit the overflow test based on the operands and the type:
    case operator
    when "+"; overflowTest = cloopAddOverflowTest(operands, type)
    when "-"; overflowTest = cloopSubOverflowTest(operands, type)
    when "*"; overflowTest = cloopMulOverflowTest(operands, type)
    else
        raise "Unimplemented operator"
    end

    $asm.putc " if #{overflowTest} {"
    $asm.putc " goto #{operands[2].cLabel};"
    $asm.putc " }"
    $asm.putc " #{operands[1].clValue(type)} = #{operands[1].clValue(type)} #{operator} #{operands[0].clValue(type)};"
    $asm.putc "}"
end
+
# Emits the fixed-prototype slow path call.
# operands: callTarget, currentFrame, currentPC
# The slow path receives (ExecState*, Instruction*) and its packed return
# value is decoded into t0 (instruction) and t1 (exec state).
def cloopEmitCallSlowPath(operands)
    target = operands[0].cLabel
    frame = operands[1].clValue(:voidPtr)
    pc = operands[2].clValue(:voidPtr)
    $asm.putc "{"
    $asm.putc " ExecState* exec = CAST<ExecState*>(#{frame});"
    $asm.putc " Instruction* pc = CAST<Instruction*>(#{pc});"
    $asm.putc " SlowPathReturnType result = #{target}(exec, pc);"
    $asm.putc " LLInt::decodeResult(result, t0.instruction, t1.execState);"
    $asm.putc "}"
end
+
class Instruction
    # Lowers one offlineasm instruction to C++ source for the CLoop
    # interpreter, writing statements through $asm.putc. Unknown opcodes
    # fall through to lowerDefault.
    # Fixes relative to the original:
    #  - "storeb" emitted a C statement with no trailing semicolon.
    #  - "storeh" double-dereferenced (a leading "*" on uint16MemRef, which
    #    already dereferences) and used :int16, which cloopMapType rejects;
    #    the value is now read as :int32 and truncated by the uint16_t store.
    #  - "fd2ii" comment said "f2dii"; emitted "// USE_JSVALUE64)" comments
    #    had a stray parenthesis.
    def lowerC_LOOP
        $asm.codeOrigin codeOriginString if $enableCodeOriginComments
        $asm.annotation annotation if $enableInstrAnnotations

        case opcode
        when "addi"
            cloopEmitOperation(operands, :int32, "+")
        when "addp"
            cloopEmitOperation(operands, :int, "+")

        when "andi"
            cloopEmitOperation(operands, :int32, "&")
        when "andp"
            cloopEmitOperation(operands, :int, "&")

        when "ori"
            cloopEmitOperation(operands, :int32, "|")
        when "orp"
            cloopEmitOperation(operands, :int, "|")

        when "xori"
            cloopEmitOperation(operands, :int32, "^")
        when "xorp"
            cloopEmitOperation(operands, :int, "^")

        when "lshifti"
            cloopEmitShiftOperation(operands, :int32, "<<")
        when "lshiftp"
            cloopEmitShiftOperation(operands, :int, "<<")

        when "rshifti"
            cloopEmitShiftOperation(operands, :int32, ">>")
        when "rshiftp"
            cloopEmitShiftOperation(operands, :int, ">>")

        # Unsigned operands give C++'s ">>" logical-shift semantics.
        when "urshifti"
            cloopEmitShiftOperation(operands, :uint32, ">>")
        when "urshiftp"
            cloopEmitShiftOperation(operands, :uint, ">>")

        when "muli"
            cloopEmitOperation(operands, :int32, "*")
        when "mulp"
            cloopEmitOperation(operands, :int, "*")

        when "subi"
            cloopEmitOperation(operands, :int32, "-")
        when "subp"
            cloopEmitOperation(operands, :int, "-")

        when "negi"
            $asm.putc "#{operands[0].clValue(:int32)} = -#{operands[0].clValue(:int32)};"
        when "negp"
            $asm.putc "#{operands[0].clValue(:int)} = -#{operands[0].clValue(:int)};"

        when "noti"
            $asm.putc "#{operands[0].clValue(:int32)} = !#{operands[0].clValue(:int32)};"

        # "loadi" zero-extends (uint32 source read into an intptr register);
        # "loadis" sign-extends (int32 source).
        when "loadi"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].uint32MemRef};"
        when "loadis"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].int32MemRef};"
        when "loadp"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].intMemRef};"
        when "storei"
            $asm.putc "#{operands[1].int32MemRef} = #{operands[0].clValue(:int32)};"
        when "storep"
            $asm.putc "#{operands[1].intMemRef} = #{operands[0].clValue(:int)};"
        when "loadb"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].uint8MemRef};"
        when "loadbs"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].int8MemRef};"
        when "storeb"
            # BUGFIX: the emitted statement previously lacked the semicolon.
            $asm.putc "#{operands[1].uint8MemRef} = #{operands[0].clValue(:int8)};"
        when "loadh"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].uint16MemRef};"
        when "loadhs"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].int16MemRef};"
        when "storeh"
            # BUGFIX: uint16MemRef already dereferences, so the extra "*"
            # emitted a double dereference; and :int16 is not a type
            # cloopMapType supports. Read as int32; the uint16_t store
            # truncates to the low 16 bits.
            $asm.putc "#{operands[1].uint16MemRef} = #{operands[0].clValue(:int32)};"
        when "loadd"
            $asm.putc "#{operands[1].clValue(:double)} = #{operands[0].dblMemRef};"
        when "stored"
            $asm.putc "#{operands[1].dblMemRef} = #{operands[0].clValue(:double)};"

        when "addd"
            cloopEmitOperation(operands, :double, "+")
        when "divd"
            cloopEmitOperation(operands, :double, "/")
        when "subd"
            cloopEmitOperation(operands, :double, "-")
        when "muld"
            cloopEmitOperation(operands, :double, "*")

        # Convert an int value to its double equivalent, and store it in a double register.
        when "ci2d"
            $asm.putc "#{operands[1].clValue(:double)} = #{operands[0].clValue(:int32)};"

        when "bdeq"
            cloopEmitCompareAndBranch(operands, :double, "==")
        when "bdneq"
            cloopEmitCompareAndBranch(operands, :double, "!=")
        when "bdgt"
            cloopEmitCompareAndBranch(operands, :double, ">")
        when "bdgteq"
            cloopEmitCompareAndBranch(operands, :double, ">=")
        when "bdlt"
            cloopEmitCompareAndBranch(operands, :double, "<")
        when "bdlteq"
            cloopEmitCompareAndBranch(operands, :double, "<=")

        # "Unordered" double branches also take the branch on NaN operands.
        when "bdequn"
            cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, "==")
        when "bdnequn"
            cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, "!=")
        when "bdgtun"
            cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, ">")
        when "bdgtequn"
            cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, ">=")
        when "bdltun"
            cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, "<")
        when "bdltequn"
            cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, "<=")

        # Truncating double-to-int conversion (C++ cast semantics).
        when "td2i"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].clValue(:double)};"

        when "bcd2i" # operands: srcDbl dstInt slowPath
            $asm.putc "{"
            $asm.putc " double d = #{operands[0].clValue(:double)};"
            $asm.putc " const int32_t asInt32 = int32_t(d);"
            $asm.putc " if (asInt32 != d || (!asInt32 && signbit(d))) // true for -0.0"
            $asm.putc " goto #{operands[2].cLabel};"
            $asm.putc " #{operands[1].clValue} = asInt32;"
            $asm.putc "}"

        when "move"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].clValue(:int)};"
        when "sxi2p" # sign-extend int32 to pointer width
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].clValue(:int32)};"
        when "zxi2p" # zero-extend uint32 to pointer width
            $asm.putc "#{operands[1].clValue(:uint)} = #{operands[0].clValue(:uint32)};"
        when "nop"
            $asm.putc "// nop"
        when "bbeq"
            cloopEmitCompareAndBranch(operands, :int8, "==")
        when "bieq"
            cloopEmitCompareAndBranch(operands, :int32, "==")
        when "bpeq"
            cloopEmitCompareAndBranch(operands, :int, "==")

        when "bbneq"
            cloopEmitCompareAndBranch(operands, :int8, "!=")
        when "bineq"
            cloopEmitCompareAndBranch(operands, :int32, "!=")
        when "bpneq"
            cloopEmitCompareAndBranch(operands, :int, "!=")

        # "a"/"b" suffixed branches are unsigned above/below comparisons.
        when "bba"
            cloopEmitCompareAndBranch(operands, :uint8, ">")
        when "bia"
            cloopEmitCompareAndBranch(operands, :uint32, ">")
        when "bpa"
            cloopEmitCompareAndBranch(operands, :uint, ">")

        when "bbaeq"
            cloopEmitCompareAndBranch(operands, :uint8, ">=")
        when "biaeq"
            cloopEmitCompareAndBranch(operands, :uint32, ">=")
        when "bpaeq"
            cloopEmitCompareAndBranch(operands, :uint, ">=")

        when "bbb"
            cloopEmitCompareAndBranch(operands, :uint8, "<")
        when "bib"
            cloopEmitCompareAndBranch(operands, :uint32, "<")
        when "bpb"
            cloopEmitCompareAndBranch(operands, :uint, "<")

        when "bbbeq"
            cloopEmitCompareAndBranch(operands, :uint8, "<=")
        when "bibeq"
            cloopEmitCompareAndBranch(operands, :uint32, "<=")
        when "bpbeq"
            cloopEmitCompareAndBranch(operands, :uint, "<=")

        when "bbgt"
            cloopEmitCompareAndBranch(operands, :int8, ">")
        when "bigt"
            cloopEmitCompareAndBranch(operands, :int32, ">")
        when "bpgt"
            cloopEmitCompareAndBranch(operands, :int, ">")

        when "bbgteq"
            cloopEmitCompareAndBranch(operands, :int8, ">=")
        when "bigteq"
            cloopEmitCompareAndBranch(operands, :int32, ">=")
        when "bpgteq"
            cloopEmitCompareAndBranch(operands, :int, ">=")

        when "bblt"
            cloopEmitCompareAndBranch(operands, :int8, "<")
        when "bilt"
            cloopEmitCompareAndBranch(operands, :int32, "<")
        when "bplt"
            cloopEmitCompareAndBranch(operands, :int, "<")

        when "bblteq"
            cloopEmitCompareAndBranch(operands, :int8, "<=")
        when "bilteq"
            cloopEmitCompareAndBranch(operands, :int32, "<=")
        when "bplteq"
            cloopEmitCompareAndBranch(operands, :int, "<=")

        when "btbz"
            cloopEmitTestAndBranchIf(operands, :int8, "== 0", operands[-1].cLabel)
        when "btiz"
            cloopEmitTestAndBranchIf(operands, :int32, "== 0", operands[-1].cLabel)
        when "btpz"
            cloopEmitTestAndBranchIf(operands, :int, "== 0", operands[-1].cLabel)

        when "btbnz"
            cloopEmitTestAndBranchIf(operands, :int8, "!= 0", operands[-1].cLabel)
        when "btinz"
            cloopEmitTestAndBranchIf(operands, :int32, "!= 0", operands[-1].cLabel)
        when "btpnz"
            cloopEmitTestAndBranchIf(operands, :int, "!= 0", operands[-1].cLabel)

        when "btbs"
            cloopEmitTestAndBranchIf(operands, :int8, "< 0", operands[-1].cLabel)
        when "btis"
            cloopEmitTestAndBranchIf(operands, :int32, "< 0", operands[-1].cLabel)
        when "btps"
            cloopEmitTestAndBranchIf(operands, :int, "< 0", operands[-1].cLabel)

        # For jmp, we do not want to assume that we have COMPUTED_GOTO support.
        # Fortunately, the only times we should ever encounter indirect jmps is
        # when the jmp target is a CLoop opcode (by design).
        #
        # Hence, we check if the jmp target is a known label reference. If so,
        # we can emit a goto directly. If it is not a known target, then we set
        # the target in the opcode, and dispatch to it via whatever dispatch
        # mechanism is in used.
        when "jmp"
            if operands[0].is_a? LocalLabelReference or operands[0].is_a? LabelReference
                # Handles jumps to local or global labels.
                $asm.putc "goto #{operands[0].cLabel};"
            else
                # Handles jumps to some computed target.
                # NOTE: must be an opcode handler or a llint glue helper.
                $asm.putc "opcode = #{operands[0].clValue(:opcode)};"
                $asm.putc "DISPATCH_OPCODE();"
            end

        when "call"
            $asm.putc "CRASH(); // generic call instruction not supported by design!"
        when "break"
            $asm.putc "CRASH(); // break instruction not implemented."
        when "ret"
            $asm.putc "goto doReturnHelper;"

        when "cbeq"
            cloopEmitCompareAndSet(operands, :uint8, "==")
        when "cieq"
            cloopEmitCompareAndSet(operands, :uint32, "==")
        when "cpeq"
            cloopEmitCompareAndSet(operands, :uint, "==")

        when "cbneq"
            cloopEmitCompareAndSet(operands, :uint8, "!=")
        when "cineq"
            cloopEmitCompareAndSet(operands, :uint32, "!=")
        when "cpneq"
            cloopEmitCompareAndSet(operands, :uint, "!=")

        when "cba"
            cloopEmitCompareAndSet(operands, :uint8, ">")
        when "cia"
            cloopEmitCompareAndSet(operands, :uint32, ">")
        when "cpa"
            cloopEmitCompareAndSet(operands, :uint, ">")

        when "cbaeq"
            cloopEmitCompareAndSet(operands, :uint8, ">=")
        when "ciaeq"
            cloopEmitCompareAndSet(operands, :uint32, ">=")
        when "cpaeq"
            cloopEmitCompareAndSet(operands, :uint, ">=")

        when "cbb"
            cloopEmitCompareAndSet(operands, :uint8, "<")
        when "cib"
            cloopEmitCompareAndSet(operands, :uint32, "<")
        when "cpb"
            cloopEmitCompareAndSet(operands, :uint, "<")

        when "cbbeq"
            cloopEmitCompareAndSet(operands, :uint8, "<=")
        when "cibeq"
            cloopEmitCompareAndSet(operands, :uint32, "<=")
        when "cpbeq"
            cloopEmitCompareAndSet(operands, :uint, "<=")

        when "cbgt"
            cloopEmitCompareAndSet(operands, :int8, ">")
        when "cigt"
            cloopEmitCompareAndSet(operands, :int32, ">")
        when "cpgt"
            cloopEmitCompareAndSet(operands, :int, ">")

        when "cbgteq"
            cloopEmitCompareAndSet(operands, :int8, ">=")
        when "cigteq"
            cloopEmitCompareAndSet(operands, :int32, ">=")
        when "cpgteq"
            cloopEmitCompareAndSet(operands, :int, ">=")

        when "cblt"
            cloopEmitCompareAndSet(operands, :int8, "<")
        when "cilt"
            cloopEmitCompareAndSet(operands, :int32, "<")
        when "cplt"
            cloopEmitCompareAndSet(operands, :int, "<")

        when "cblteq"
            cloopEmitCompareAndSet(operands, :int8, "<=")
        when "cilteq"
            cloopEmitCompareAndSet(operands, :int32, "<=")
        when "cplteq"
            cloopEmitCompareAndSet(operands, :int, "<=")

        when "tbs"
            cloopEmitTestSet(operands, :int8, "< 0")
        when "tis"
            cloopEmitTestSet(operands, :int32, "< 0")
        when "tps"
            cloopEmitTestSet(operands, :int, "< 0")

        when "tbz"
            cloopEmitTestSet(operands, :int8, "== 0")
        when "tiz"
            cloopEmitTestSet(operands, :int32, "== 0")
        when "tpz"
            cloopEmitTestSet(operands, :int, "== 0")

        when "tbnz"
            cloopEmitTestSet(operands, :int8, "!= 0")
        when "tinz"
            cloopEmitTestSet(operands, :int32, "!= 0")
        when "tpnz"
            cloopEmitTestSet(operands, :int, "!= 0")

        # 64-bit instruction: cdqi (based on X64)
        # Sign extends the lower 32 bits of t0, but put the sign extension into
        # the lower 32 bits of t1. Leave the upper 32 bits of t0 and t1 unchanged.
        when "cdqi"
            $asm.putc "{"
            $asm.putc " int64_t temp = t0.i32; // sign extend the low 32bit"
            $asm.putc " t0.i32 = temp; // low word"
            $asm.putc " t1.i32 = uint64_t(temp) >> 32; // high word"
            $asm.putc "}"

        # 64-bit instruction: idivi op1 (based on X64)
        # Divide a 64-bit integer numerator by the specified denominator.
        # The numerator is specified in t0 and t1 as follows:
        #     1. low 32 bits of the numerator is in the low 32 bits of t0.
        #     2. high 32 bits of the numerator is in the low 32 bits of t1.
        #
        # The resultant quotient is a signed 32-bit int, and is to be stored
        # in the lower 32 bits of t0.
        # The resultant remainder is a signed 32-bit int, and is to be stored
        # in the lower 32 bits of t1.
        when "idivi"
            # Divide t1,t0 (EDX,EAX) by the specified arg, and store the remainder in t1,
            # and quotient in t0:
            $asm.putc "{"
            $asm.putc " int64_t dividend = (int64_t(t1.u32) << 32) | t0.u32;"
            $asm.putc " int64_t divisor = #{operands[0].clValue(:int)};"
            $asm.putc " t1.i32 = dividend % divisor; // remainder"
            $asm.putc " t0.i32 = dividend / divisor; // quotient"
            $asm.putc "}"

        # 32-bit instruction: fii2d int32LoOp int32HiOp dblOp (based on ARMv7)
        # Decode 2 32-bit ints (low and high) into a 64-bit double.
        when "fii2d"
            $asm.putc "#{operands[2].clValue(:double)} = Ints2Double(#{operands[0].clValue(:uint32)}, #{operands[1].clValue(:uint32)});"

        # 32-bit instruction: fd2ii dblOp int32LoOp int32HiOp (based on ARMv7)
        # Encode a 64-bit double into 2 32-bit ints (low and high).
        when "fd2ii"
            $asm.putc "Double2Ints(#{operands[0].clValue(:double)}, #{operands[1].clValue}, #{operands[2].clValue});"

        # 64-bit instruction: fp2d int64Op dblOp (based on X64)
        # Copy a bit-encoded double in a 64-bit int register to a double register.
        when "fp2d"
            $asm.putc "#{operands[1].clValue(:double)} = #{operands[0].clValue(:castToDouble)};"

        # 64-bit instruction: fd2p dblOp int64Op (based on X64 instruction set)
        # Copy a double as a bit-encoded double into a 64-bit int register.
        when "fd2p"
            $asm.putc "#{operands[1].clValue(:voidPtr)} = #{operands[0].clValue(:castToVoidPtr)};"

        when "leai"
            operands[0].cloopEmitLea(operands[1], :int32)
        when "leap"
            operands[0].cloopEmitLea(operands[1], :int)

        when "baddio"
            cloopEmitOpAndBranchIfOverflow(operands, "+", :int32)
        when "bsubio"
            cloopEmitOpAndBranchIfOverflow(operands, "-", :int32)
        when "bmulio"
            cloopEmitOpAndBranchIfOverflow(operands, "*", :int32)

        when "baddis"
            cloopEmitOpAndBranch(operands, "+", :int32, "< 0")
        when "baddiz"
            cloopEmitOpAndBranch(operands, "+", :int32, "== 0")
        when "baddinz"
            cloopEmitOpAndBranch(operands, "+", :int32, "!= 0")

        when "baddps"
            cloopEmitOpAndBranch(operands, "+", :int, "< 0")
        when "baddpz"
            cloopEmitOpAndBranch(operands, "+", :int, "== 0")
        when "baddpnz"
            cloopEmitOpAndBranch(operands, "+", :int, "!= 0")

        when "bsubis"
            cloopEmitOpAndBranch(operands, "-", :int32, "< 0")
        when "bsubiz"
            cloopEmitOpAndBranch(operands, "-", :int32, "== 0")
        when "bsubinz"
            cloopEmitOpAndBranch(operands, "-", :int32, "!= 0")

        when "borris"
            cloopEmitOpAndBranch(operands, "|", :int32, "< 0")
        when "borriz"
            cloopEmitOpAndBranch(operands, "|", :int32, "== 0")
        when "borrinz"
            cloopEmitOpAndBranch(operands, "|", :int32, "!= 0")

        # A convenience and compact call to crash because we don't want to use
        # the generic llint crash mechanism which relies on the availability
        # of the call instruction (which cannot be implemented in a generic
        # way, and can be abused if we made it just work for this special case).
        # Using a special cloopCrash instruction is cleaner.
        when "cloopCrash"
            $asm.putc "CRASH();"

        # We can't rely on the llint JS call mechanism which actually makes
        # use of the call instruction. Instead, we just implement JS calls
        # as an opcode dispatch.
        when "cloopCallJSFunction"
            $asm.putc "opcode = #{operands[0].clValue(:opcode)};"
            $asm.putc "DISPATCH_OPCODE();"

        # We can't do generic function calls with an arbitrary set of args, but
        # fortunately we don't have to here. All native function calls always
        # have a fixed prototype of 1 args: the passed ExecState.
        when "cloopCallNative"
            $asm.putc "nativeFunc = #{operands[0].clValue(:nativeFunc)};"
            $asm.putc "functionReturnValue = JSValue::decode(nativeFunc(t0.execState));"
            $asm.putc "#if USE(JSVALUE32_64)"
            $asm.putc " t1.i = functionReturnValue.tag();"
            $asm.putc " t0.i = functionReturnValue.payload();"
            $asm.putc "#else // USE(JSVALUE64)"
            $asm.putc " t0.encodedJSValue = JSValue::encode(functionReturnValue);"
            $asm.putc "#endif // USE(JSVALUE64)"

        # We can't do generic function calls with an arbitrary set of args, but
        # fortunately we don't have to here. All slow path function calls always
        # have a fixed prototype too. See cloopEmitCallSlowPath() for details.
        when "cloopCallSlowPath"
            cloopEmitCallSlowPath(operands)

        else
            lowerDefault
        end
    end
end
diff --git a/Source/JavaScriptCore/offlineasm/config.rb b/Source/JavaScriptCore/offlineasm/config.rb
index e6287f367..4c86eeceb 100644
--- a/Source/JavaScriptCore/offlineasm/config.rb
+++ b/Source/JavaScriptCore/offlineasm/config.rb
@@ -21,7 +21,7 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
-$preferredCommentStartColumn = 70
+$preferredCommentStartColumn = 60
# Turns on dumping of the count of labels.
diff --git a/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb
index fefbb1290..81c28632c 100644
--- a/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb
+++ b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb
@@ -42,7 +42,7 @@ $stderr.puts "offlineasm: Parsing #{inputFlnm} and creating offset extractor #{o
def emitMagicNumber
OFFSET_MAGIC_NUMBERS.each {
| number |
- $output.puts "#{number},"
+ $output.puts "unsigned(#{number}),"
}
end
@@ -120,7 +120,7 @@ File.open(outputFlnm, "w") {
| settings, ast, backend, index |
OFFSET_HEADER_MAGIC_NUMBERS.each {
| number |
- $output.puts "#{number},"
+ $output.puts "unsigned(#{number}),"
}
offsetsList = ast.filter(StructOffset).uniq.sort
diff --git a/Source/JavaScriptCore/offlineasm/instructions.rb b/Source/JavaScriptCore/offlineasm/instructions.rb
index aa1a32393..211c10933 100644
--- a/Source/JavaScriptCore/offlineasm/instructions.rb
+++ b/Source/JavaScriptCore/offlineasm/instructions.rb
@@ -227,7 +227,15 @@ ARMv7_INSTRUCTIONS =
"oris"
]
-INSTRUCTIONS = MACRO_INSTRUCTIONS + X86_INSTRUCTIONS + ARMv7_INSTRUCTIONS
+CXX_INSTRUCTIONS =
+ [
+ "cloopCrash", # no operands
+ "cloopCallJSFunction", # operands: callee
+ "cloopCallNative", # operands: callee
+ "cloopCallSlowPath", # operands: callTarget, currentFrame, currentPC
+ ]
+
+INSTRUCTIONS = MACRO_INSTRUCTIONS + X86_INSTRUCTIONS + ARMv7_INSTRUCTIONS + CXX_INSTRUCTIONS
INSTRUCTION_PATTERN = Regexp.new('\\A((' + INSTRUCTIONS.join(')|(') + '))\\Z')
diff --git a/Source/JavaScriptCore/offlineasm/offsets.rb b/Source/JavaScriptCore/offlineasm/offsets.rb
index 8a064a216..627183dc8 100644
--- a/Source/JavaScriptCore/offlineasm/offsets.rb
+++ b/Source/JavaScriptCore/offlineasm/offsets.rb
@@ -61,7 +61,7 @@ end
#
# offsetsAndConfigurationIndex(ast, file) ->
-# [[offsets, index], ...]
+# {[offsets, index], ...}
#
# Parses the offsets from a file and returns a list of offsets and the
# index of the configuration that is valid in this build target.
@@ -69,7 +69,7 @@ end
def offsetsAndConfigurationIndex(file)
endiannessMarkerBytes = nil
- result = []
+ result = {}
def readInt(endianness, bytes)
if endianness == :little
@@ -155,13 +155,14 @@ def offsetsAndConfigurationIndex(file)
| data |
offsets << readInt(endianness, data)
}
- result << [offsets, index]
+ if not result.has_key?(offsets)
+ result[offsets] = index
+ end
}
end
}
raise MissingMagicValuesException unless result.length >= 1
- raise if result.map{|v| v[1]}.uniq.size < result.map{|v| v[1]}.size
result
end