author | Oswald Buddenhagen <oswald.buddenhagen@qt.io> | 2017-05-30 12:48:17 +0200
---|---|---
committer | Oswald Buddenhagen <oswald.buddenhagen@qt.io> | 2017-05-30 12:48:17 +0200
commit | 881da28418d380042aa95a97f0cbd42560a64f7c (patch)
tree | a794dff3274695e99c651902dde93d934ea7a5af /Source/JavaScriptCore/offlineasm
parent | 7e104c57a70fdf551bb3d22a5d637cdcbc69dbea (diff)
parent | 0fcedcd17cc00d3dd44c718b3cb36c1033319671 (diff)
download | qtwebkit-881da28418d380042aa95a97f0cbd42560a64f7c.tar.gz
Merge 'wip/next' into dev
Change-Id: Iff9ee5e23bb326c4371ec8ed81d56f2f05d680e9
Diffstat (limited to 'Source/JavaScriptCore/offlineasm')
18 files changed, 2700 insertions, 512 deletions
diff --git a/Source/JavaScriptCore/offlineasm/arm.rb b/Source/JavaScriptCore/offlineasm/arm.rb index 498333ba2..c8064a591 100644 --- a/Source/JavaScriptCore/offlineasm/arm.rb +++ b/Source/JavaScriptCore/offlineasm/arm.rb @@ -1,4 +1,4 @@ -# Copyright (C) 2011, 2012 Apple Inc. All rights reserved. +# Copyright (C) 2011, 2012, 2015-2016 Apple Inc. All rights reserved. # Copyright (C) 2013 University of Szeged. All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -27,6 +27,34 @@ require "ast" require "opt" require "risc" +# GPR conventions, to match the baseline JIT +# +# x0 => t0, a0, r0 +# x1 => t1, a1, r1 +# x2 => t2, a2, r2 +# x3 => t3, a3, r3 +# x6 => (callee-save scratch) +# x7 => cfr (ARMv7 only) +# x8 => t4 (callee-save) +# x9 => t5 (callee-save) +# x10 => (callee-save scratch) +# x11 => cfr (ARM and ARMv7 traditional) +# x12 => (callee-save scratch) +# lr => lr +# sp => sp +# pc => pc +# +# FPR conventions, to match the baseline JIT +# +# d0 => ft0, fa0, fr +# d1 => ft1, fa1 +# d2 => ft2 +# d3 => ft3 +# d4 => ft4 +# d5 => ft5 +# d6 => (scratch) +# d7 => (scratch) + def isARMv7 case $activeBackend when "ARMv7" @@ -63,13 +91,15 @@ class SpecialRegister end end -ARM_EXTRA_GPRS = [SpecialRegister.new("r9"), SpecialRegister.new("r8"), SpecialRegister.new("r3")] +ARM_EXTRA_GPRS = [SpecialRegister.new("r6"), SpecialRegister.new("r10"), SpecialRegister.new("r12")] ARM_EXTRA_FPRS = [SpecialRegister.new("d7")] ARM_SCRATCH_FPR = SpecialRegister.new("d6") def armMoveImmediate(value, register) # Currently we only handle the simple cases, and fall back to mov/movt for the complex ones. - if value >= 0 && value < 256 + if value.is_a? String + $asm.puts "mov #{register.armOperand}, (#{value})" + elsif value >= 0 && value < 256 $asm.puts "mov #{register.armOperand}, \##{value}" elsif (~value) >= 0 && (~value) < 256 $asm.puts "mvn #{register.armOperand}, \##{~value}" @@ -97,13 +127,17 @@ class RegisterID when "t3" "r4" when "t4" - "r10" + "r8" + when "t5" + "r9" when "cfr" - "r5" + isARMv7 ? "r7" : "r11" when "lr" "lr" when "sp" "sp" + when "pc" + "pc" else raise "Bad register #{name} for ARM at #{codeOriginString}" end @@ -113,9 +147,9 @@ end class FPRegisterID def armOperand case name - when "ft0", "fr" + when "ft0", "fr", "fa0" "d0" - when "ft1" + when "ft1", "fa1" "d1" when "ft2" "d2" @@ -217,7 +251,7 @@ class Sequence end } result = riscLowerMalformedAddressesDouble(result) - result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep"]) + result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep", "storeq"]) result = riscLowerMalformedImmediates(result, 0..0xff) result = riscLowerMisplacedAddresses(result) result = riscLowerRegisterReuse(result) @@ -315,6 +349,7 @@ class Instruction def lowerARMCommon $asm.codeOrigin codeOriginString if $enableCodeOriginComments $asm.annotation annotation if $enableInstrAnnotations + $asm.debugAnnotation codeOrigin.debugDirective if $enableDebugAnnotations case opcode when "addi", "addp", "addis", "addps" @@ -333,7 +368,7 @@ class Instruction else $asm.puts "adds #{operands[2].armOperand}, #{operands[1].armOperand}, #{operands[0].armOperand}" end - elsif operands.size == 3 and operands[0].immediate? + elsif operands.size == 3 and operands[0].register? raise unless operands[1].register? raise unless operands[2].register? $asm.puts "adds #{armFlippedOperands(operands)}" @@ -451,15 +486,24 @@ class Instruction # FIXME: either support this or remove it. 
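The arm.rb hunk above extends `armMoveImmediate` to accept symbolic (String) values while keeping the 8-bit `mov`/`mvn` fast paths. A minimal standalone sketch of that selection logic follows; the `movw`/`movt` branch here is an assumption based on the hunk's "fall back to mov/movt" comment, since that part of the function is outside the excerpt.

```ruby
def arm_move_immediate_sketch(value, register)
  if value.is_a?(String)
    # Symbolic expression (e.g. "SomeConstant + 8"); the assembler resolves it.
    ["mov #{register}, (#{value})"]
  elsif (0..255).include?(value)
    ["mov #{register}, ##{value}"]        # fits an 8-bit immediate
  elsif (0..255).include?(~value)
    ["mvn #{register}, ##{~value}"]       # bitwise complement fits 8 bits
  else
    # Simplified 32-bit fallback; the real code path is not shown in the excerpt.
    ["movw #{register}, ##{value & 0xffff}",
     "movt #{register}, ##{(value >> 16) & 0xffff}"]
  end
end

puts arm_move_immediate_sketch(0x12345678, "r0")
```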
raise "ARM does not support this opcode yet, #{codeOrigin}" when "pop" - $asm.puts "pop #{operands[0].armOperand}" + operands.each { + | op | + $asm.puts "pop { #{op.armOperand} }" + } when "push" - $asm.puts "push #{operands[0].armOperand}" + operands.each { + | op | + $asm.puts "push { #{op.armOperand} }" + } when "move" if operands[0].immediate? armMoveImmediate(operands[0].value, operands[1]) else $asm.puts "mov #{armFlippedOperands(operands)}" end + when "mvlbl" + $asm.puts "movw #{operands[1].armOperand}, \#:lower16:#{operands[0].value}" + $asm.puts "movt #{operands[1].armOperand}, \#:upper16:#{operands[0].value}" when "nop" $asm.puts "nop" when "bieq", "bpeq", "bbeq" @@ -579,6 +623,10 @@ class Instruction when "smulli" raise "Wrong number of arguments to smull in #{self.inspect} at #{codeOriginString}" unless operands.length == 4 $asm.puts "smull #{operands[2].armOperand}, #{operands[3].armOperand}, #{operands[0].armOperand}, #{operands[1].armOperand}" + when "memfence" + $asm.puts "dmb sy" + when "clrbp" + $asm.puts "bic #{operands[2].armOperand}, #{operands[0].armOperand}, #{operands[1].armOperand}" else lowerDefault end diff --git a/Source/JavaScriptCore/offlineasm/arm64.rb b/Source/JavaScriptCore/offlineasm/arm64.rb new file mode 100644 index 000000000..ead489133 --- /dev/null +++ b/Source/JavaScriptCore/offlineasm/arm64.rb @@ -0,0 +1,912 @@ +# Copyright (C) 2011, 2012, 2014-2016 Apple Inc. All rights reserved. +# Copyright (C) 2014 University of Szeged. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS +# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. + +require "ast" +require "opt" +require "risc" + +# Naming conventions: +# +# x<number> => GPR. This is both the generic name of the register, and the name used +# to indicate that the register is used in 64-bit mode. +# w<number> => GPR in 32-bit mode. This is the low 32-bits of the GPR. If it is +# mutated then the high 32-bit part of the register is zero filled. +# q<number> => FPR. This is the generic name of the register. +# d<number> => FPR used as an IEEE 64-bit binary floating point number (i.e. double). 
+# +# GPR conventions, to match the baseline JIT: +# +# x0 => t0, a0, r0 +# x1 => t1, a1, r1 +# x2 => t2, a2 +# x3 => t3, a3 +# x4 => t4 +# x5 => t5 +# x13 => (scratch) +# x16 => (scratch) +# x17 => (scratch) +# x26 => csr0 (PB) +# x27 => csr1 (tagTypeNumber) +# x28 => csr2 (tagMask) +# x29 => cfr +# sp => sp +# lr => lr +# +# FPR conventions, to match the baseline JIT: +# +# q0 => ft0, fa0, fr +# q1 => ft1, fa1 +# q2 => ft2, fa2 +# q3 => ft3, fa3 +# q4 => ft4 (unused in baseline) +# q5 => ft5 (unused in baseline) +# q8 => csfr0 (Only the lower 64 bits) +# q9 => csfr1 (Only the lower 64 bits) +# q10 => csfr2 (Only the lower 64 bits) +# q11 => csfr3 (Only the lower 64 bits) +# q12 => csfr4 (Only the lower 64 bits) +# q13 => csfr5 (Only the lower 64 bits) +# q14 => csfr6 (Only the lower 64 bits) +# q15 => csfr7 (Only the lower 64 bits) +# q31 => scratch + +def arm64GPRName(name, kind) + raise "bad GPR name #{name}" unless name =~ /^x/ + number = name[1..-1] + case kind + when :int + "w" + number + when :ptr + "x" + number + else + raise "Wrong kind: #{kind}" + end +end + +def arm64FPRName(name, kind) + raise "bad FPR kind #{kind}" unless kind == :double + raise "bad FPR name #{name}" unless name =~ /^q/ + "d" + name[1..-1] +end + +class SpecialRegister + def arm64Operand(kind) + case @name + when /^x/ + arm64GPRName(@name, kind) + when /^q/ + arm64FPRName(@name, kind) + else + raise "Bad name: #{@name}" + end + end +end + +ARM64_EXTRA_GPRS = [SpecialRegister.new("x16"), SpecialRegister.new("x17"), SpecialRegister.new("x13")] +ARM64_EXTRA_FPRS = [SpecialRegister.new("q31")] + +class RegisterID + def arm64Operand(kind) + case @name + when 't0', 'a0', 'r0' + arm64GPRName('x0', kind) + when 't1', 'a1', 'r1' + arm64GPRName('x1', kind) + when 't2', 'a2' + arm64GPRName('x2', kind) + when 't3', 'a3' + arm64GPRName('x3', kind) + when 't4' + arm64GPRName('x4', kind) + when 't5' + arm64GPRName('x5', kind) + when 'cfr' + arm64GPRName('x29', kind) + when 'csr0' + arm64GPRName('x19', kind) + when 'csr1' + arm64GPRName('x20', kind) + when 'csr2' + arm64GPRName('x21', kind) + when 'csr3' + arm64GPRName('x22', kind) + when 'csr4' + arm64GPRName('x23', kind) + when 'csr5' + arm64GPRName('x24', kind) + when 'csr6' + arm64GPRName('x25', kind) + when 'csr7' + arm64GPRName('x26', kind) + when 'csr8' + arm64GPRName('x27', kind) + when 'csr9' + arm64GPRName('x28', kind) + when 'sp' + 'sp' + when 'lr' + 'x30' + else + raise "Bad register name #{@name} at #{codeOriginString}" + end + end +end + +class FPRegisterID + def arm64Operand(kind) + case @name + when 'ft0', 'fr', 'fa0' + arm64FPRName('q0', kind) + when 'ft1', 'fa1' + arm64FPRName('q1', kind) + when 'ft2', 'fa2' + arm64FPRName('q2', kind) + when 'ft3', 'fa3' + arm64FPRName('q3', kind) + when 'ft4' + arm64FPRName('q4', kind) + when 'ft5' + arm64FPRName('q5', kind) + when 'csfr0' + arm64FPRName('q8', kind) + when 'csfr1' + arm64FPRName('q9', kind) + when 'csfr2' + arm64FPRName('q10', kind) + when 'csfr3' + arm64FPRName('q11', kind) + when 'csfr4' + arm64FPRName('q12', kind) + when 'csfr5' + arm64FPRName('q13', kind) + when 'csfr6' + arm64FPRName('q14', kind) + when 'csfr7' + arm64FPRName('q15', kind) + else "Bad register name #{@name} at #{codeOriginString}" + end + end +end + +class Immediate + def arm64Operand(kind) + raise "Invalid immediate #{value} at #{codeOriginString}" if value < 0 or value > 4095 + "\##{value}" + end +end + +class Address + def arm64Operand(kind) + raise "Invalid offset #{offset.value} at #{codeOriginString}" if offset.value < -255 or 
offset.value > 4095 + "[#{base.arm64Operand(:ptr)}, \##{offset.value}]" + end + + def arm64EmitLea(destination, kind) + $asm.puts "add #{destination.arm64Operand(kind)}, #{base.arm64Operand(kind)}, \##{offset.value}" + end +end + +class BaseIndex + def arm64Operand(kind) + raise "Invalid offset #{offset.value} at #{codeOriginString}" if offset.value != 0 + "[#{base.arm64Operand(:ptr)}, #{index.arm64Operand(:ptr)}, lsl \##{scaleShift}]" + end + + def arm64EmitLea(destination, kind) + $asm.puts "add #{destination.arm64Operand(kind)}, #{base.arm64Operand(kind)}, #{index.arm64Operand(kind)}, lsl \##{scaleShift}" + end +end + +class AbsoluteAddress + def arm64Operand(kind) + raise "Unconverted absolute address #{address.value} at #{codeOriginString}" + end +end + +# FIXME: We could support AbsoluteAddress for lea, but we don't. + +# +# Actual lowering code follows. +# + +def arm64LowerMalformedLoadStoreAddresses(list) + newList = [] + + def isAddressMalformed(operand) + operand.is_a? Address and not (-255..4095).include? operand.offset.value + end + + list.each { + | node | + if node.is_a? Instruction + if node.opcode =~ /^store/ and isAddressMalformed(node.operands[1]) + address = node.operands[1] + tmp = Tmp.new(codeOrigin, :gpr) + newList << Instruction.new(node.codeOrigin, "move", [address.offset, tmp]) + newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], BaseIndex.new(node.codeOrigin, address.base, tmp, 1, Immediate.new(codeOrigin, 0))], node.annotation) + elsif node.opcode =~ /^load/ and isAddressMalformed(node.operands[0]) + address = node.operands[0] + tmp = Tmp.new(codeOrigin, :gpr) + newList << Instruction.new(node.codeOrigin, "move", [address.offset, tmp]) + newList << Instruction.new(node.codeOrigin, node.opcode, [BaseIndex.new(node.codeOrigin, address.base, tmp, 1, Immediate.new(codeOrigin, 0)), node.operands[1]], node.annotation) + else + newList << node + end + else + newList << node + end + } + newList +end + +# Workaround for Cortex-A53 erratum (835769) +def arm64CortexA53Fix835769(list) + newList = [] + lastOpcodeUnsafe = false + + list.each { + | node | + if node.is_a? Instruction + case node.opcode + when /^store/, /^load/ + # List all macro instructions that can be lowered to a load, store or prefetch ARM64 assembly instruction + lastOpcodeUnsafe = true + when "muli", "mulp", "mulq", "smulli" + # List all macro instructions that can be lowered to a 64-bit multiply-accumulate ARM64 assembly instruction + # (defined as one of MADD, MSUB, SMADDL, SMSUBL, UMADDL or UMSUBL). 
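The `arm64CortexA53Fix835769` pass above is a peephole over the lowered instruction list. The following self-contained sketch, with an assumed minimal instruction type, shows the shape of the transformation: a conditional `nop` is inserted whenever a 64-bit multiply-accumulate would directly follow a load or store.

```ruby
Insn = Struct.new(:opcode)

def a53_fix_sketch(insns)
  out = []
  last_unsafe = false
  insns.each do |insn|
    case insn.opcode
    when /^(load|store)/
      last_unsafe = true                              # memory access arms the hazard
    when "muli", "mulp", "mulq", "smulli"
      out << Insn.new("nopCortexA53Fix835769") if last_unsafe
      last_unsafe = false
    else
      last_unsafe = false
    end
    out << insn
  end
  out
end

prog = [Insn.new("loadq"), Insn.new("mulq"), Insn.new("addq")]
p a53_fix_sketch(prog).map(&:opcode)
# => ["loadq", "nopCortexA53Fix835769", "mulq", "addq"]
```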
+ if lastOpcodeUnsafe + newList << Instruction.new(node.codeOrigin, "nopCortexA53Fix835769", []) + end + lastOpcodeUnsafe = false + else + lastOpcodeUnsafe = false + end + end + newList << node + } + newList +end + +class Sequence + def getModifiedListARM64 + result = @list + result = riscLowerNot(result) + result = riscLowerSimpleBranchOps(result) + result = riscLowerHardBranchOps64(result) + result = riscLowerShiftOps(result) + result = arm64LowerMalformedLoadStoreAddresses(result) + result = riscLowerMalformedAddresses(result) { + | node, address | + case node.opcode + when "loadb", "loadbs", "storeb", /^bb/, /^btb/, /^cb/, /^tb/ + size = 1 + when "loadh", "loadhs" + size = 2 + when "loadi", "loadis", "storei", "addi", "andi", "lshifti", "muli", "negi", + "noti", "ori", "rshifti", "urshifti", "subi", "xori", /^bi/, /^bti/, + /^ci/, /^ti/, "addis", "subis", "mulis", "smulli", "leai" + size = 4 + when "loadp", "storep", "loadq", "storeq", "loadd", "stored", "lshiftp", "lshiftq", "negp", "negq", "rshiftp", "rshiftq", + "urshiftp", "urshiftq", "addp", "addq", "mulp", "mulq", "andp", "andq", "orp", "orq", "subp", "subq", "xorp", "xorq", "addd", + "divd", "subd", "muld", "sqrtd", /^bp/, /^bq/, /^btp/, /^btq/, /^cp/, /^cq/, /^tp/, /^tq/, /^bd/, + "jmp", "call", "leap", "leaq" + size = 8 + else + raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}" + end + + if address.is_a? BaseIndex + address.offset.value == 0 and + (node.opcode =~ /^lea/ or address.scale == 1 or address.scale == size) + elsif address.is_a? Address + (-255..4095).include? address.offset.value + else + false + end + } + result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep", "storeq"]) + result = riscLowerMalformedImmediates(result, 0..4095) + result = riscLowerMisplacedAddresses(result) + result = riscLowerMalformedAddresses(result) { + | node, address | + case node.opcode + when /^load/ + true + when /^store/ + not (address.is_a? Address and address.offset.value < 0) + when /^lea/ + true + else + raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}" + end + } + result = riscLowerTest(result) + result = assignRegistersToTemporaries(result, :gpr, ARM64_EXTRA_GPRS) + result = assignRegistersToTemporaries(result, :fpr, ARM64_EXTRA_FPRS) + result = arm64CortexA53Fix835769(result) + return result + end +end + +def arm64Operands(operands, kinds) + if kinds.is_a? Array + raise "Mismatched operand lists: #{operands.inspect} and #{kinds.inspect}" if operands.size != kinds.size + else + kinds = operands.map{ kinds } + end + (0...operands.size).map { + | index | + operands[index].arm64Operand(kinds[index]) + }.join(', ') +end + +def arm64FlippedOperands(operands, kinds) + if kinds.is_a? Array + kinds = [kinds[-1]] + kinds[0..-2] + end + arm64Operands([operands[-1]] + operands[0..-2], kinds) +end + +# TAC = three address code. +def arm64TACOperands(operands, kind) + if operands.size == 3 + return arm64FlippedOperands(operands, kind) + end + + raise unless operands.size == 2 + + return operands[1].arm64Operand(kind) + ", " + arm64FlippedOperands(operands, kind) +end + +def emitARM64Add(opcode, operands, kind) + if operands.size == 3 + raise unless operands[1].register? + raise unless operands[2].register? + + if operands[0].immediate? 
+ if operands[0].value == 0 and flag !~ /s$/ + unless operands[1] == operands[2] + $asm.puts "mov #{arm64FlippedOperands(operands[1..2], kind)}" + end + else + $asm.puts "#{opcode} #{arm64Operands(operands.reverse, kind)}" + end + return + end + + raise unless operands[0].register? + $asm.puts "#{opcode} #{arm64FlippedOperands(operands, kind)}" + return + end + + raise unless operands.size == 2 + + if operands[0].immediate? and operands[0].value == 0 and opcode !~ /s$/ + return + end + + $asm.puts "#{opcode} #{arm64TACOperands(operands, kind)}" +end + +def emitARM64Unflipped(opcode, operands, kind) + $asm.puts "#{opcode} #{arm64Operands(operands, kind)}" +end + +def emitARM64TAC(opcode, operands, kind) + $asm.puts "#{opcode} #{arm64TACOperands(operands, kind)}" +end + +def emitARM64(opcode, operands, kind) + $asm.puts "#{opcode} #{arm64FlippedOperands(operands, kind)}" +end + +def emitARM64Access(opcode, opcodeNegativeOffset, register, memory, kind) + if memory.is_a? Address and memory.offset.value < 0 + $asm.puts "#{opcodeNegativeOffset} #{register.arm64Operand(kind)}, #{memory.arm64Operand(kind)}" + return + end + + $asm.puts "#{opcode} #{register.arm64Operand(kind)}, #{memory.arm64Operand(kind)}" +end + +def emitARM64Shift(opcodeRegs, opcodeImmediate, operands, kind) + if operands.size == 3 and operands[1].immediate? + magicNumbers = yield operands[1].value + $asm.puts "#{opcodeImmediate} #{operands[2].arm64Operand(kind)}, #{operands[0].arm64Operand(kind)}, \##{magicNumbers[0]}, \##{magicNumbers[1]}" + return + end + + if operands.size == 2 and operands[0].immediate? + magicNumbers = yield operands[0].value + $asm.puts "#{opcodeImmediate} #{operands[1].arm64Operand(kind)}, #{operands[1].arm64Operand(kind)}, \##{magicNumbers[0]}, \##{magicNumbers[1]}" + return + end + + emitARM64TAC(opcodeRegs, operands, kind) +end + +def emitARM64Branch(opcode, operands, kind, branchOpcode) + emitARM64Unflipped(opcode, operands[0..-2], kind) + $asm.puts "#{branchOpcode} #{operands[-1].asmLabel}" +end + +def emitARM64Compare(operands, kind, compareCode) + emitARM64Unflipped("subs #{arm64GPRName('xzr', kind)}, ", operands[0..-2], kind) + $asm.puts "csinc #{operands[-1].arm64Operand(:int)}, wzr, wzr, #{compareCode}" +end + +def emitARM64MoveImmediate(value, target) + first = true + isNegative = value < 0 + [48, 32, 16, 0].each { + | shift | + currentValue = (value >> shift) & 0xffff + next if currentValue == (isNegative ? 
0xffff : 0) and (shift != 0 or !first) + if first + if isNegative + $asm.puts "movn #{target.arm64Operand(:ptr)}, \##{(~currentValue) & 0xffff}, lsl \##{shift}" + else + $asm.puts "movz #{target.arm64Operand(:ptr)}, \##{currentValue}, lsl \##{shift}" + end + first = false + else + $asm.puts "movk #{target.arm64Operand(:ptr)}, \##{currentValue}, lsl \##{shift}" + end + } +end + +class Instruction + def lowerARM64 + $asm.comment codeOriginString + $asm.annotation annotation if $enableInstrAnnotations + $asm.debugAnnotation codeOrigin.debugDirective if $enableDebugAnnotations + + case opcode + when 'addi' + emitARM64Add("add", operands, :int) + when 'addis' + emitARM64Add("adds", operands, :int) + when 'addp' + emitARM64Add("add", operands, :ptr) + when 'addps' + emitARM64Add("adds", operands, :ptr) + when 'addq' + emitARM64Add("add", operands, :ptr) + when "andi" + emitARM64TAC("and", operands, :int) + when "andp" + emitARM64TAC("and", operands, :ptr) + when "andq" + emitARM64TAC("and", operands, :ptr) + when "ori" + emitARM64TAC("orr", operands, :int) + when "orp" + emitARM64TAC("orr", operands, :ptr) + when "orq" + emitARM64TAC("orr", operands, :ptr) + when "xori" + emitARM64TAC("eor", operands, :int) + when "xorp" + emitARM64TAC("eor", operands, :ptr) + when "xorq" + emitARM64TAC("eor", operands, :ptr) + when "lshifti" + emitARM64Shift("lslv", "ubfm", operands, :int) { + | value | + [32 - value, 31 - value] + } + when "lshiftp" + emitARM64Shift("lslv", "ubfm", operands, :ptr) { + | value | + [64 - value, 63 - value] + } + when "lshiftq" + emitARM64Shift("lslv", "ubfm", operands, :ptr) { + | value | + [64 - value, 63 - value] + } + when "rshifti" + emitARM64Shift("asrv", "sbfm", operands, :int) { + | value | + [value, 31] + } + when "rshiftp" + emitARM64Shift("asrv", "sbfm", operands, :ptr) { + | value | + [value, 63] + } + when "rshiftq" + emitARM64Shift("asrv", "sbfm", operands, :ptr) { + | value | + [value, 63] + } + when "urshifti" + emitARM64Shift("lsrv", "ubfm", operands, :int) { + | value | + [value, 31] + } + when "urshiftp" + emitARM64Shift("lsrv", "ubfm", operands, :ptr) { + | value | + [value, 63] + } + when "urshiftq" + emitARM64Shift("lsrv", "ubfm", operands, :ptr) { + | value | + [value, 63] + } + when "muli" + $asm.puts "madd #{arm64TACOperands(operands, :int)}, wzr" + when "mulp" + $asm.puts "madd #{arm64TACOperands(operands, :ptr)}, xzr" + when "mulq" + $asm.puts "madd #{arm64TACOperands(operands, :ptr)}, xzr" + when "subi" + emitARM64TAC("sub", operands, :int) + when "subp" + emitARM64TAC("sub", operands, :ptr) + when "subq" + emitARM64TAC("sub", operands, :ptr) + when "subis" + emitARM64TAC("subs", operands, :int) + when "negi" + $asm.puts "sub #{operands[0].arm64Operand(:int)}, wzr, #{operands[0].arm64Operand(:int)}" + when "negp" + $asm.puts "sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}" + when "negq" + $asm.puts "sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}" + when "loadi" + emitARM64Access("ldr", "ldur", operands[1], operands[0], :int) + when "loadis" + emitARM64Access("ldrsw", "ldursw", operands[1], operands[0], :ptr) + when "loadp" + emitARM64Access("ldr", "ldur", operands[1], operands[0], :ptr) + when "loadq" + emitARM64Access("ldr", "ldur", operands[1], operands[0], :ptr) + when "storei" + emitARM64Unflipped("str", operands, :int) + when "storep" + emitARM64Unflipped("str", operands, :ptr) + when "storeq" + emitARM64Unflipped("str", operands, :ptr) + when "loadb" + emitARM64Access("ldrb", "ldurb", 
operands[1], operands[0], :int) + when "loadbs" + emitARM64Access("ldrsb", "ldursb", operands[1], operands[0], :int) + when "storeb" + emitARM64Unflipped("strb", operands, :int) + when "loadh" + emitARM64Access("ldrh", "ldurh", operands[1], operands[0], :int) + when "loadhs" + emitARM64Access("ldrsh", "ldursh", operands[1], operands[0], :int) + when "storeh" + emitARM64Unflipped("strh", operands, :int) + when "loadd" + emitARM64Access("ldr", "ldur", operands[1], operands[0], :double) + when "stored" + emitARM64Unflipped("str", operands, :double) + when "addd" + emitARM64TAC("fadd", operands, :double) + when "divd" + emitARM64TAC("fdiv", operands, :double) + when "subd" + emitARM64TAC("fsub", operands, :double) + when "muld" + emitARM64TAC("fmul", operands, :double) + when "sqrtd" + emitARM64("fsqrt", operands, :double) + when "ci2d" + emitARM64("scvtf", operands, [:int, :double]) + when "bdeq" + emitARM64Branch("fcmp", operands, :double, "b.eq") + when "bdneq" + emitARM64Unflipped("fcmp", operands[0..1], :double) + isUnordered = LocalLabel.unique("bdneq") + $asm.puts "b.vs #{LocalLabelReference.new(codeOrigin, isUnordered).asmLabel}" + $asm.puts "b.ne #{operands[2].asmLabel}" + isUnordered.lower("ARM64") + when "bdgt" + emitARM64Branch("fcmp", operands, :double, "b.gt") + when "bdgteq" + emitARM64Branch("fcmp", operands, :double, "b.ge") + when "bdlt" + emitARM64Branch("fcmp", operands, :double, "b.mi") + when "bdlteq" + emitARM64Branch("fcmp", operands, :double, "b.ls") + when "bdequn" + emitARM64Unflipped("fcmp", operands[0..1], :double) + $asm.puts "b.vs #{operands[2].asmLabel}" + $asm.puts "b.eq #{operands[2].asmLabel}" + when "bdnequn" + emitARM64Branch("fcmp", operands, :double, "b.ne") + when "bdgtun" + emitARM64Branch("fcmp", operands, :double, "b.hi") + when "bdgtequn" + emitARM64Branch("fcmp", operands, :double, "b.pl") + when "bdltun" + emitARM64Branch("fcmp", operands, :double, "b.lt") + when "bdltequn" + emitARM64Branch("fcmp", operands, :double, "b.le") + when "btd2i" + # FIXME: May be a good idea to just get rid of this instruction, since the interpreter + # currently does not use it. + raise "ARM64 does not support this opcode yet, #{codeOriginString}" + when "td2i" + emitARM64("fcvtzs", operands, [:double, :int]) + when "bcd2i" + # FIXME: Remove this instruction, or use it and implement it. Currently it's not + # used. + raise "ARM64 does not support this opcode yet, #{codeOriginString}" + when "movdz" + # FIXME: Remove it or support it. + raise "ARM64 does not support this opcode yet, #{codeOriginString}" + when "pop" + operands.each_slice(2) { + | ops | + # Note that the operands are in the reverse order of the case for push. + # This is due to the fact that order matters for pushing and popping, and + # on platforms that only push/pop one slot at a time they pop their + # arguments in the reverse order that they were pushed. In order to remain + # compatible with those platforms we assume here that that's what has been done. + + # So for example, if we did push(A, B, C, D), we would then pop(D, C, B, A). + # But since the ordering of arguments doesn't change on arm64 between the stp and ldp + # instructions we need to flip flop the argument positions that were passed to us. + $asm.puts "ldp #{ops[1].arm64Operand(:ptr)}, #{ops[0].arm64Operand(:ptr)}, [sp], #16" + } + when "push" + operands.each_slice(2) { + | ops | + $asm.puts "stp #{ops[0].arm64Operand(:ptr)}, #{ops[1].arm64Operand(:ptr)}, [sp, #-16]!" + } + when "move" + if operands[0].immediate? 
+ emitARM64MoveImmediate(operands[0].value, operands[1]) + else + emitARM64("mov", operands, :ptr) + end + when "sxi2p" + emitARM64("sxtw", operands, [:int, :ptr]) + when "sxi2q" + emitARM64("sxtw", operands, [:int, :ptr]) + when "zxi2p" + emitARM64("uxtw", operands, [:int, :ptr]) + when "zxi2q" + emitARM64("uxtw", operands, [:int, :ptr]) + when "nop" + $asm.puts "nop" + when "bieq", "bbeq" + if operands[0].immediate? and operands[0].value == 0 + $asm.puts "cbz #{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}" + elsif operands[1].immediate? and operands[1].value == 0 + $asm.puts "cbz #{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}" + else + emitARM64Branch("subs wzr, ", operands, :int, "b.eq") + end + when "bpeq" + if operands[0].immediate? and operands[0].value == 0 + $asm.puts "cbz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}" + elsif operands[1].immediate? and operands[1].value == 0 + $asm.puts "cbz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}" + else + emitARM64Branch("subs xzr, ", operands, :ptr, "b.eq") + end + when "bqeq" + if operands[0].immediate? and operands[0].value == 0 + $asm.puts "cbz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}" + elsif operands[1].immediate? and operands[1].value == 0 + $asm.puts "cbz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}" + else + emitARM64Branch("subs xzr, ", operands, :ptr, "b.eq") + end + when "bineq", "bbneq" + if operands[0].immediate? and operands[0].value == 0 + $asm.puts "cbnz #{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}" + elsif operands[1].immediate? and operands[1].value == 0 + $asm.puts "cbnz #{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}" + else + emitARM64Branch("subs wzr, ", operands, :int, "b.ne") + end + when "bpneq" + if operands[0].immediate? and operands[0].value == 0 + $asm.puts "cbnz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}" + elsif operands[1].immediate? and operands[1].value == 0 + $asm.puts "cbnz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}" + else + emitARM64Branch("subs xzr, ", operands, :ptr, "b.ne") + end + when "bqneq" + if operands[0].immediate? and operands[0].value == 0 + $asm.puts "cbnz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}" + elsif operands[1].immediate? 
and operands[1].value == 0 + $asm.puts "cbnz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}" + else + emitARM64Branch("subs xzr, ", operands, :ptr, "b.ne") + end + when "bia", "bba" + emitARM64Branch("subs wzr, ", operands, :int, "b.hi") + when "bpa" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.hi") + when "bqa" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.hi") + when "biaeq", "bbaeq" + emitARM64Branch("subs wzr, ", operands, :int, "b.hs") + when "bpaeq" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.hs") + when "bqaeq" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.hs") + when "bib", "bbb" + emitARM64Branch("subs wzr, ", operands, :int, "b.lo") + when "bpb" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.lo") + when "bqb" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.lo") + when "bibeq", "bbbeq" + emitARM64Branch("subs wzr, ", operands, :int, "b.ls") + when "bpbeq" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.ls") + when "bqbeq" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.ls") + when "bigt", "bbgt" + emitARM64Branch("subs wzr, ", operands, :int, "b.gt") + when "bpgt" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.gt") + when "bqgt" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.gt") + when "bigteq", "bbgteq" + emitARM64Branch("subs wzr, ", operands, :int, "b.ge") + when "bpgteq" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.ge") + when "bqgteq" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.ge") + when "bilt", "bblt" + emitARM64Branch("subs wzr, ", operands, :int, "b.lt") + when "bplt" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.lt") + when "bqlt" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.lt") + when "bilteq", "bblteq" + emitARM64Branch("subs wzr, ", operands, :int, "b.le") + when "bplteq" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.le") + when "bqlteq" + emitARM64Branch("subs xzr, ", operands, :ptr, "b.le") + when "jmp" + if operands[0].label? + $asm.puts "b #{operands[0].asmLabel}" + else + emitARM64Unflipped("br", operands, :ptr) + end + when "call" + if operands[0].label? 
+ $asm.puts "bl #{operands[0].asmLabel}" + else + emitARM64Unflipped("blr", operands, :ptr) + end + when "break" + $asm.puts "brk \#0" + when "ret" + $asm.puts "ret" + when "cieq", "cbeq" + emitARM64Compare(operands, :int, "ne") + when "cpeq" + emitARM64Compare(operands, :ptr, "ne") + when "cqeq" + emitARM64Compare(operands, :ptr, "ne") + when "cineq", "cbneq" + emitARM64Compare(operands, :int, "eq") + when "cpneq" + emitARM64Compare(operands, :ptr, "eq") + when "cqneq" + emitARM64Compare(operands, :ptr, "eq") + when "cia", "cba" + emitARM64Compare(operands, :int, "ls") + when "cpa" + emitARM64Compare(operands, :ptr, "ls") + when "cqa" + emitARM64Compare(operands, :ptr, "ls") + when "ciaeq", "cbaeq" + emitARM64Compare(operands, :int, "lo") + when "cpaeq" + emitARM64Compare(operands, :ptr, "lo") + when "cqaeq" + emitARM64Compare(operands, :ptr, "lo") + when "cib", "cbb" + emitARM64Compare(operands, :int, "hs") + when "cpb" + emitARM64Compare(operands, :ptr, "hs") + when "cqb" + emitARM64Compare(operands, :ptr, "hs") + when "cibeq", "cbbeq" + emitARM64Compare(operands, :int, "hi") + when "cpbeq" + emitARM64Compare(operands, :ptr, "hi") + when "cqbeq" + emitARM64Compare(operands, :ptr, "hi") + when "cilt", "cblt" + emitARM64Compare(operands, :int, "ge") + when "cplt" + emitARM64Compare(operands, :ptr, "ge") + when "cqlt" + emitARM64Compare(operands, :ptr, "ge") + when "cilteq", "cblteq" + emitARM64Compare(operands, :int, "gt") + when "cplteq" + emitARM64Compare(operands, :ptr, "gt") + when "cqlteq" + emitARM64Compare(operands, :ptr, "gt") + when "cigt", "cbgt" + emitARM64Compare(operands, :int, "le") + when "cpgt" + emitARM64Compare(operands, :ptr, "le") + when "cqgt" + emitARM64Compare(operands, :ptr, "le") + when "cigteq", "cbgteq" + emitARM64Compare(operands, :int, "lt") + when "cpgteq" + emitARM64Compare(operands, :ptr, "lt") + when "cqgteq" + emitARM64Compare(operands, :ptr, "lt") + when "peek" + $asm.puts "ldr #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]" + when "poke" + $asm.puts "str #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]" + when "fp2d" + emitARM64("fmov", operands, [:ptr, :double]) + when "fq2d" + emitARM64("fmov", operands, [:ptr, :double]) + when "fd2p" + emitARM64("fmov", operands, [:double, :ptr]) + when "fd2q" + emitARM64("fmov", operands, [:double, :ptr]) + when "bo" + $asm.puts "b.vs #{operands[0].asmLabel}" + when "bs" + $asm.puts "b.mi #{operands[0].asmLabel}" + when "bz" + $asm.puts "b.eq #{operands[0].asmLabel}" + when "bnz" + $asm.puts "b.ne #{operands[0].asmLabel}" + when "leai" + operands[0].arm64EmitLea(operands[1], :int) + when "leap" + operands[0].arm64EmitLea(operands[1], :ptr) + when "leaq" + operands[0].arm64EmitLea(operands[1], :ptr) + when "smulli" + $asm.puts "smaddl #{operands[2].arm64Operand(:ptr)}, #{operands[0].arm64Operand(:int)}, #{operands[1].arm64Operand(:int)}, xzr" + when "memfence" + $asm.puts "dmb sy" + when "pcrtoaddr" + $asm.puts "adr #{operands[1].arm64Operand(:ptr)}, #{operands[0].value}" + when "nopCortexA53Fix835769" + $asm.putStr("#if CPU(ARM64_CORTEXA53)") + $asm.puts "nop" + $asm.putStr("#endif") + else + lowerDefault + end + end +end + diff --git a/Source/JavaScriptCore/offlineasm/asm.rb b/Source/JavaScriptCore/offlineasm/asm.rb index ed0f15340..36482660e 100644 --- a/Source/JavaScriptCore/offlineasm/asm.rb +++ b/Source/JavaScriptCore/offlineasm/asm.rb @@ -1,6 +1,6 @@ #!/usr/bin/env ruby -# Copyright (C) 2011 Apple Inc. All rights reserved. +# Copyright (C) 2011, 2016 Apple Inc. 
All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions @@ -28,8 +28,8 @@ $: << File.dirname(__FILE__) require "config" require "backends" require "digest/sha1" -require "fileutils" require "offsets" +require 'optparse' require "parser" require "self_hash" require "settings" @@ -48,16 +48,32 @@ class Assembler @numGlobalLabels = 0 @newlineSpacerState = :none + @lastlabel = "" end - + def enterAsm - @outp.puts "OFFLINE_ASM_BEGIN" + @outp.puts "OFFLINE_ASM_BEGIN" if !$emitWinAsm + + if !$emitWinAsm + @outp.puts "OFFLINE_ASM_GLOBAL_LABEL(llintPCRangeStart)" + else + putsProc("llintPCRangeStart", "") + putsProcEndIfNeeded + end @state = :asm + SourceFile.outputDotFileList(@outp) if $enableDebugAnnotations end def leaveAsm + putsProcEndIfNeeded if $emitWinAsm + if !$emitWinAsm + @outp.puts "OFFLINE_ASM_GLOBAL_LABEL(llintPCRangeEnd)" + else + putsProc("llintPCRangeEnd", "") + putsProcEndIfNeeded + end putsLastComment - @outp.puts "OFFLINE_ASM_END" + @outp.puts "OFFLINE_ASM_END" if !$emitWinAsm @state = :cpp end @@ -85,7 +101,7 @@ class Assembler result += "#{@codeOrigin}" end if result != "" - result = "// " + result + result = $commentPrefix + " " + result end # Reset all the components that we've just sent to be dumped. @@ -138,7 +154,11 @@ class Assembler def puts(*line) raise unless @state == :asm - @outp.puts(formatDump(" \"\\t" + line.join('') + "\\n\"", lastComment)) + if !$emitWinAsm + @outp.puts(formatDump(" \"\\t" + line.join('') + "\\n\"", lastComment)) + else + @outp.puts(formatDump(" " + line.join(''), lastComment)) + end end def print(line) @@ -153,15 +173,45 @@ class Assembler end end - def putsLabel(labelName) + def putsProc(label, comment) + raise unless $emitWinAsm + @outp.puts(formatDump("#{label} PROC PUBLIC", comment)) + @lastlabel = label + end + + def putsProcEndIfNeeded + raise unless $emitWinAsm + if @lastlabel != "" + @outp.puts("#{@lastlabel} ENDP") + end + @lastlabel = "" + end + + def putsLabel(labelName, isGlobal) raise unless @state == :asm @numGlobalLabels += 1 + putsProcEndIfNeeded if $emitWinAsm and isGlobal putsNewlineSpacerIfAppropriate(:global) @internalComment = $enableLabelCountComments ? "Global Label #{@numGlobalLabels}" : nil - if /\Allint_op_/.match(labelName) - @outp.puts(formatDump("OFFLINE_ASM_OPCODE_LABEL(op_#{$~.post_match})", lastComment)) + if isGlobal + if !$emitWinAsm + @outp.puts(formatDump("OFFLINE_ASM_GLOBAL_LABEL(#{labelName})", lastComment)) + else + putsProc(labelName, lastComment) + end + elsif /\Allint_op_/.match(labelName) + if !$emitWinAsm + @outp.puts(formatDump("OFFLINE_ASM_OPCODE_LABEL(op_#{$~.post_match})", lastComment)) + else + label = "llint_" + "op_#{$~.post_match}" + @outp.puts(formatDump(" _#{label}:", lastComment)) + end else - @outp.puts(formatDump("OFFLINE_ASM_GLUE_LABEL(#{labelName})", lastComment)) + if !$emitWinAsm + @outp.puts(formatDump("OFFLINE_ASM_GLUE_LABEL(#{labelName})", lastComment)) + else + @outp.puts(formatDump(" _#{labelName}:", lastComment)) + end end @newlineSpacerState = :none # After a global label, we can use another spacer. end @@ -171,15 +221,35 @@ class Assembler @numLocalLabels += 1 @outp.puts("\n") @internalComment = $enableLabelCountComments ? 
"Local Label #{@numLocalLabels}" : nil - @outp.puts(formatDump(" OFFLINE_ASM_LOCAL_LABEL(#{labelName})", lastComment)) + if !$emitWinAsm + @outp.puts(formatDump(" OFFLINE_ASM_LOCAL_LABEL(#{labelName})", lastComment)) + else + @outp.puts(formatDump(" #{labelName}:", lastComment)) + end end - + + def self.externLabelReference(labelName) + if !$emitWinAsm + "\" LOCAL_REFERENCE(#{labelName}) \"" + else + "#{labelName}" + end + end + def self.labelReference(labelName) - "\" LOCAL_REFERENCE(#{labelName}) \"" + if !$emitWinAsm + "\" LOCAL_LABEL_STRING(#{labelName}) \"" + else + "_#{labelName}" + end end def self.localLabelReference(labelName) - "\" LOCAL_LABEL_STRING(#{labelName}) \"" + if !$emitWinAsm + "\" LOCAL_LABEL_STRING(#{labelName}) \"" + else + "#{labelName}" + end end def self.cLabelReference(labelName) @@ -201,13 +271,13 @@ class Assembler @commentState = :one when :one if $enableCodeOriginComments - @outp.puts " // #{@codeOrigin}" - @outp.puts " // #{text}" + @outp.puts " " + $commentPrefix + " #{@codeOrigin}" + @outp.puts " " + $commentPrefix + " #{text}" end @codeOrigin = nil @commentState = :many when :many - @outp.puts "// #{text}" if $enableCodeOriginComments + @outp.puts $commentPrefix + " #{text}" if $enableCodeOriginComments else raise end @@ -216,16 +286,30 @@ class Assembler def comment(text) @comment = text end + def annotation(text) @annotation = text end + + def debugAnnotation(text) + @outp.puts text + end end +IncludeFile.processIncludeOptions() + asmFile = ARGV.shift offsetsFile = ARGV.shift outputFlnm = ARGV.shift -$stderr.puts "offlineasm: Parsing #{asmFile} and #{offsetsFile} and creating assembly file #{outputFlnm}." +$options = {} +OptionParser.new do |opts| + opts.banner = "Usage: asm.rb asmFile offsetsFile outputFileName [--assembler=<ASM>]" + # This option is currently only used to specify the masm assembler + opts.on("--assembler=[ASM]", "Specify an assembler to use.") do |assembler| + $options[:assembler] = assembler + end +end.parse! begin configurationList = offsetsAndConfigurationIndex(offsetsFile) @@ -234,10 +318,19 @@ rescue MissingMagicValuesException exit 0 end +# The MS compiler doesn't accept DWARF2 debug annotations. +if isMSVC + $enableDebugAnnotations = false +end + +$emitWinAsm = isMSVC ? outputFlnm.index(".asm") != nil : false +$commentPrefix = $emitWinAsm ? ";" : "//" + inputHash = - "// offlineasm input hash: " + parseHash(asmFile) + + $commentPrefix + " offlineasm input hash: " + parseHash(asmFile) + " " + Digest::SHA1.hexdigest(configurationList.map{|v| (v[0] + [v[1]]).join(' ')}.join(' ')) + - " " + selfHash + " " + selfHash + + " " + Digest::SHA1.hexdigest($options.has_key?(:assembler) ? $options[:assembler] : "") if FileTest.exist? outputFlnm File.open(outputFlnm, "r") { @@ -245,7 +338,6 @@ if FileTest.exist? outputFlnm firstLine = inp.gets if firstLine and firstLine.chomp == inputHash $stderr.puts "offlineasm: Nothing changed." - FileUtils.touch(outputFlnm) exit 0 end } @@ -255,11 +347,11 @@ File.open(outputFlnm, "w") { | outp | $output = outp $output.puts inputHash - + $asm = Assembler.new($output) ast = parse(asmFile) - + configurationList.each { | configuration | offsetsList = configuration[0] @@ -276,6 +368,3 @@ File.open(outputFlnm, "w") { } } } - -$stderr.puts "offlineasm: Assembly file #{outputFlnm} successfully generated." 
- diff --git a/Source/JavaScriptCore/offlineasm/ast.rb b/Source/JavaScriptCore/offlineasm/ast.rb index 74bccff56..1241b7fe5 100644 --- a/Source/JavaScriptCore/offlineasm/ast.rb +++ b/Source/JavaScriptCore/offlineasm/ast.rb @@ -229,6 +229,10 @@ class Immediate < NoChildren true end + def immediateOperand? + true + end + def register? false end @@ -255,6 +259,10 @@ class AddImmediates < Node "(#{left.dump} + #{right.dump})" end + def value + "#{left.value} + #{right.value}" + end + def address? false end @@ -267,6 +275,10 @@ class AddImmediates < Node true end + def immediateOperand? + true + end + def register? false end @@ -293,6 +305,10 @@ class SubImmediates < Node "(#{left.dump} - #{right.dump})" end + def value + "#{left.value} - #{right.value}" + end + def address? false end @@ -305,6 +321,10 @@ class SubImmediates < Node true end + def immediateOperand? + true + end + def register? false end @@ -343,6 +363,10 @@ class MulImmediates < Node true end + def immediateOperand? + false + end + def register? false end @@ -380,6 +404,10 @@ class NegImmediate < Node true end + def immediateOperand? + false + end + def register? false end @@ -418,6 +446,10 @@ class OrImmediates < Node true end + def immediateOperand? + false + end + def register? false end @@ -456,6 +488,10 @@ class AndImmediates < Node true end + def immediateOperand? + false + end + def register? false end @@ -494,6 +530,10 @@ class XorImmediates < Node true end + def immediateOperand? + false + end + def register? false end @@ -531,6 +571,48 @@ class BitnotImmediate < Node true end + def immediateOperand? + false + end + + def register? + false + end +end + +class StringLiteral < NoChildren + attr_reader :value + + def initialize(codeOrigin, value) + super(codeOrigin) + @value = value[1..-2] + raise "Bad string literal #{value.inspect} at #{codeOriginString}" unless value.is_a? String + end + + def dump + "#{value}" + end + + def ==(other) + other.is_a? StringLiteral and other.value == @value + end + + def address? + false + end + + def label? + false + end + + def immediate? + false + end + + def immediateOperand? + false + end + def register? false end @@ -607,6 +689,10 @@ class FPRegisterID < NoChildren false end + def immediateOperand? + false + end + def register? true end @@ -629,6 +715,10 @@ class SpecialRegister < NoChildren false end + def immediateOperand? + false + end + def register? true end @@ -699,6 +789,10 @@ class Address < Node false end + def immediateOperand? + true + end + def register? false end @@ -759,6 +853,10 @@ class BaseIndex < Node false end + def immediateOperand? + false + end + def register? false end @@ -792,6 +890,10 @@ class AbsoluteAddress < NoChildren false end + def immediateOperand? + true + end + def register? false end @@ -825,6 +927,8 @@ class Instruction < Node $asm.putLocalAnnotation when "globalAnnotation" $asm.putGlobalAnnotation + when "emit" + $asm.puts "#{operands[0].dump}" else raise "Unhandled opcode #{opcode} at #{codeOriginString}" end @@ -864,6 +968,7 @@ class ConstDecl < Node end $labelMapping = {} +$referencedExternLabels = Array.new class Label < NoChildren attr_reader :name @@ -871,17 +976,61 @@ class Label < NoChildren def initialize(codeOrigin, name) super(codeOrigin) @name = name + @extern = true + @global = false end - def self.forName(codeOrigin, name) + def self.forName(codeOrigin, name, definedInFile = false) if $labelMapping[name] raise "Label name collision: #{name}" unless $labelMapping[name].is_a? 
Label else $labelMapping[name] = Label.new(codeOrigin, name) end + if definedInFile + $labelMapping[name].clearExtern() + end $labelMapping[name] end - + + def self.setAsGlobal(codeOrigin, name) + if $labelMapping[name] + label = $labelMapping[name] + raise "Label: #{name} declared global multiple times" unless not label.global? + label.setGlobal() + else + newLabel = Label.new(codeOrigin, name) + newLabel.setGlobal() + $labelMapping[name] = newLabel + end + end + + def self.resetReferenced + $referencedExternLabels = Array.new + end + + def self.forReferencedExtern() + $referencedExternLabels.each { + | label | + yield "#{label.name}" + } + end + + def clearExtern + @extern = false + end + + def extern? + @extern + end + + def setGlobal + @global = true + end + + def global? + @global + end + def dump "#{name}:" end @@ -949,10 +1098,24 @@ class LabelReference < Node label.name end + def extern? + $labelMapping[name].is_a? Label and $labelMapping[name].extern? + end + + def used + if !$referencedExternLabels.include?(@label) and extern? + $referencedExternLabels.push(@label) + end + end + def dump label.name end + def value + asmLabel() + end + def address? false end @@ -964,6 +1127,10 @@ class LabelReference < Node def immediate? false end + + def immediateOperand? + true + end end class LocalLabelReference < NoChildren @@ -989,6 +1156,10 @@ class LocalLabelReference < NoChildren def dump label.name end + + def value + asmLabel() + end def address? false @@ -1001,6 +1172,10 @@ class LocalLabelReference < NoChildren def immediate? false end + + def immediateOperand? + true + end end class Sequence < Node diff --git a/Source/JavaScriptCore/offlineasm/backends.rb b/Source/JavaScriptCore/offlineasm/backends.rb index 902a764af..274441997 100644 --- a/Source/JavaScriptCore/offlineasm/backends.rb +++ b/Source/JavaScriptCore/offlineasm/backends.rb @@ -1,4 +1,4 @@ -# Copyright (C) 2011 Apple Inc. All rights reserved. +# Copyright (C) 2011, 2016 Apple Inc. All rights reserved. 
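One detail worth connecting across the files above: ast.rb's `AddImmediates` (and `SubImmediates`) gain a `value` method that folds to a *string* expression rather than a number, which is exactly what the new String branch in arm.rb's `armMoveImmediate` emits verbatim for the assembler to resolve. A simplified sketch of that interplay, with assumed class shapes:

```ruby
class Imm
  attr_reader :value
  def initialize(value); @value = value; end
end

class AddImms
  def initialize(left, right); @left, @right = left, right; end
  def value; "#{@left.value} + #{@right.value}"; end   # folds to a string expression
end

offset = AddImms.new(Imm.new("SomeAsmConstant"), Imm.new(8))
puts "mov r0, (#{offset.value})"   # => mov r0, (SomeAsmConstant + 8)
```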
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions @@ -23,6 +23,7 @@ require "config" require "arm" +require "arm64" require "ast" require "x86" require "mips" @@ -32,10 +33,13 @@ require "cloop" BACKENDS = [ "X86", + "X86_WIN", "X86_64", + "X86_64_WIN", "ARM", "ARMv7", "ARMv7_TRADITIONAL", + "ARM64", "MIPS", "SH4", "C_LOOP" @@ -49,10 +53,13 @@ BACKENDS = WORKING_BACKENDS = [ "X86", + "X86_WIN", "X86_64", + "X86_64_WIN", "ARM", "ARMv7", "ARMv7_TRADITIONAL", + "ARM64", "MIPS", "SH4", "C_LOOP" @@ -60,6 +67,37 @@ WORKING_BACKENDS = BACKEND_PATTERN = Regexp.new('\\A(' + BACKENDS.join(')|(') + ')\\Z') +$allBackends = {} +$validBackends = {} +BACKENDS.each { + | backend | + $validBackends[backend] = true + $allBackends[backend] = true +} + +def includeOnlyBackends(list) + newValidBackends = {} + list.each { + | backend | + if $validBackends[backend] + newValidBackends[backend] = true + end + } + $validBackends = newValidBackends +end + +def isBackend?(backend) + $allBackends[backend] +end + +def isValidBackend?(backend) + $validBackends[backend] +end + +def validBackends + $validBackends.keys +end + class Node def lower(name) begin @@ -76,7 +114,8 @@ end class Label def lower(name) - $asm.putsLabel(self.name[1..-1]) + $asm.debugAnnotation codeOrigin.debugDirective if $enableDebugAnnotations + $asm.putsLabel(self.name[1..-1], @global) end end @@ -88,8 +127,13 @@ end class LabelReference def asmLabel - Assembler.labelReference(name[1..-1]) + if extern? + Assembler.externLabelReference(name[1..-1]) + else + Assembler.labelReference(name[1..-1]) + end end + def cLabel Assembler.cLabelReference(name[1..-1]) end @@ -99,6 +143,7 @@ class LocalLabelReference def asmLabel Assembler.localLabelReference("_offlineasm_"+name[1..-1]) end + def cLabel Assembler.cLocalLabelReference("_offlineasm_"+name[1..-1]) end diff --git a/Source/JavaScriptCore/offlineasm/cloop.rb b/Source/JavaScriptCore/offlineasm/cloop.rb index 9e783fc39..7e939480e 100644 --- a/Source/JavaScriptCore/offlineasm/cloop.rb +++ b/Source/JavaScriptCore/offlineasm/cloop.rb @@ -1,4 +1,4 @@ -# Copyright (C) 2012 Apple Inc. All rights reserved. +# Copyright (C) 2012, 2014 Apple Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions @@ -68,18 +68,22 @@ C_LOOP_SCRATCH_FPR = SpecialRegister.new("d6") class RegisterID def clDump case name - when "t0" + # The cloop is modelled on the ARM implementation. Hence, the a0-a3 + # registers are aliases for r0-r3 i.e. t0-t3 in our case. + when "t0", "a0", "r0" "t0" - when "t1" + when "t1", "a1", "r1" "t1" - when "t2" + when "t2", "a2" "t2" - when "t3" + when "t3", "a3" "t3" when "t4" - "rPC" - when "t6" - "rBasePC" + "pc" + when "t5" + "t5" + when "csr0" + "pcBase" when "csr1" "tagTypeNumber" when "csr2" @@ -87,7 +91,7 @@ class RegisterID when "cfr" "cfr" when "lr" - "rRetVPC" + "lr" when "sp" "sp" else @@ -176,10 +180,7 @@ class Address end end def pointerExpr - if base.is_a? RegisterID and base.name == "sp" - offsetValue = "#{offset.value}" - "(ASSERT(#{offsetValue} == offsetof(JITStackFrame, vm)), &sp->vm)" - elsif offset.value == 0 + if offset.value == 0 "#{base.clValue(:int8Ptr)}" elsif offset.value > 0 "#{base.clValue(:int8Ptr)} + #{offset.value}" @@ -248,9 +249,8 @@ class BaseIndex end end def pointerExpr - if base.is_a? 
RegisterID and base.name == "sp" - offsetValue = "(#{index.clValue} << #{scaleShift}) + #{offset.clValue})" - "(ASSERT(#{offsetValue} == offsetof(JITStackFrame, vm)), &sp->vm)" + if offset.value == 0 + "#{base.clValue(:int8Ptr)} + (#{index.clValue} << #{scaleShift})" else "#{base.clValue(:int8Ptr)} + (#{index.clValue} << #{scaleShift}) + #{offset.clValue}" end @@ -356,7 +356,7 @@ def cloopEmitOperation(operands, type, operator) raise unless type == :int || type == :uint || type == :int32 || type == :uint32 || \ type == :int64 || type == :uint64 || type == :double if operands.size == 3 - $asm.putc "#{operands[2].clValue(type)} = #{operands[1].clValue(type)} #{operator} #{operands[0].clValue(type)};" + $asm.putc "#{operands[2].clValue(type)} = #{operands[0].clValue(type)} #{operator} #{operands[1].clValue(type)};" if operands[2].is_a? RegisterID and (type == :int32 or type == :uint32) $asm.putc "#{operands[2].clDump}.clearHighWord();" # Just clear it. It does nothing on the 32-bit port. end @@ -543,14 +543,18 @@ end # operands: callTarget, currentFrame, currentPC def cloopEmitCallSlowPath(operands) $asm.putc "{" - $asm.putc " ExecState* exec = CAST<ExecState*>(#{operands[1].clValue(:voidPtr)});" - $asm.putc " Instruction* pc = CAST<Instruction*>(#{operands[2].clValue(:voidPtr)});" - $asm.putc " SlowPathReturnType result = #{operands[0].cLabel}(exec, pc);" - $asm.putc " LLInt::decodeResult(result, t0.instruction, t1.execState);" + $asm.putc " SlowPathReturnType result = #{operands[0].cLabel}(#{operands[1].clDump}, #{operands[2].clDump});" + $asm.putc " decodeResult(result, t0.vp, t1.vp);" $asm.putc "}" end +def cloopEmitCallSlowPathVoid(operands) + $asm.putc "#{operands[0].cLabel}(#{operands[1].clDump}, #{operands[2].clDump});" +end + class Instruction + @@didReturnFromJSLabelCounter = 0 + def lowerC_LOOP $asm.codeOrigin codeOriginString if $enableCodeOriginComments $asm.annotation annotation if $enableInstrAnnotations && (opcode != "cloopDo") @@ -864,7 +868,8 @@ class Instruction when "break" $asm.putc "CRASH(); // break instruction not implemented." when "ret" - $asm.putc "goto doReturnHelper;" + $asm.putc "opcode = lr.opcode;" + $asm.putc "DISPATCH_OPCODE();" when "cbeq" cloopEmitCompareAndSet(operands, :uint8, "==") @@ -1083,6 +1088,20 @@ class Instruction cloopEmitOpAndBranch(operands, "|", :int32, "== 0") when "borrinz" cloopEmitOpAndBranch(operands, "|", :int32, "!= 0") + + when "memfence" + + when "push" + operands.each { + | op | + $asm.putc "PUSH(#{op.clDump});" + } + when "pop" + operands.each { + | op | + $asm.putc "POP(#{op.clDump});" + } + # A convenience and compact call to crash because we don't want to use # the generic llint crash mechanism which relies on the availability @@ -1096,8 +1115,11 @@ class Instruction # use of the call instruction. Instead, we just implement JS calls # as an opcode dispatch. when "cloopCallJSFunction" + @@didReturnFromJSLabelCounter += 1 + $asm.putc "lr.opcode = getOpcode(llint_cloop_did_return_from_js_#{@@didReturnFromJSLabelCounter});" $asm.putc "opcode = #{operands[0].clValue(:opcode)};" $asm.putc "DISPATCH_OPCODE();" + $asm.putsLabel("llint_cloop_did_return_from_js_#{@@didReturnFromJSLabelCounter}", false) # We can't do generic function calls with an arbitrary set of args, but # fortunately we don't have to here. All native function calls always @@ -1118,6 +1140,9 @@ class Instruction when "cloopCallSlowPath" cloopEmitCallSlowPath(operands) + when "cloopCallSlowPathVoid" + cloopEmitCallSlowPathVoid(operands) + # For debugging only. 
This is used to insert instrumentation into the # generated LLIntAssembly.h during llint development only. Do not use # for production code. diff --git a/Source/JavaScriptCore/offlineasm/config.rb b/Source/JavaScriptCore/offlineasm/config.rb index 4c86eeceb..468c5cdda 100644 --- a/Source/JavaScriptCore/offlineasm/config.rb +++ b/Source/JavaScriptCore/offlineasm/config.rb @@ -1,4 +1,4 @@ -# Copyright (C) 2012 Apple Inc. All rights reserved. +# Copyright (C) 2012, 2016 Apple Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions @@ -55,3 +55,8 @@ $enableCodeOriginComments = true # ... # $enableInstrAnnotations = false + +# Turns on generation of DWARF2 debug annotions for file and line numbers. +# Allows for source level debuging of the original .asm files in a debugger. +# +$enableDebugAnnotations = false diff --git a/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb index 81c28632c..aafa93416 100644 --- a/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb +++ b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb @@ -34,10 +34,16 @@ require "self_hash" require "settings" require "transform" +IncludeFile.processIncludeOptions() + inputFlnm = ARGV.shift outputFlnm = ARGV.shift -$stderr.puts "offlineasm: Parsing #{inputFlnm} and creating offset extractor #{outputFlnm}." +validBackends = ARGV.shift +if validBackends + $stderr.puts "Only dealing with backends: #{validBackends}" + includeOnlyBackends(validBackends.split(",")) +end def emitMagicNumber OFFSET_MAGIC_NUMBERS.each { @@ -141,6 +147,3 @@ File.open(outputFlnm, "w") { } outp.puts "};" } - -$stderr.puts "offlineasm: offset extractor #{outputFlnm} successfully generated." - diff --git a/Source/JavaScriptCore/offlineasm/instructions.rb b/Source/JavaScriptCore/offlineasm/instructions.rb index a54827ef8..244928746 100644 --- a/Source/JavaScriptCore/offlineasm/instructions.rb +++ b/Source/JavaScriptCore/offlineasm/instructions.rb @@ -22,6 +22,7 @@ # THE POSSIBILITY OF SUCH DAMAGE. require "config" +require "set" # Interesting invariant, which we take advantage of: branching instructions # always begin with "b", and no non-branching instructions begin with "b". @@ -29,6 +30,7 @@ require "config" MACRO_INSTRUCTIONS = [ + "emit", "addi", "andi", "lshifti", @@ -206,8 +208,6 @@ MACRO_INSTRUCTIONS = "tqs", "tqz", "tqnz", - "peekq", - "pokeq", "bqeq", "bqneq", "bqa", @@ -249,6 +249,7 @@ MACRO_INSTRUCTIONS = "bnz", "leai", "leap", + "memfence" ] X86_INSTRUCTIONS = @@ -259,10 +260,23 @@ X86_INSTRUCTIONS = ARM_INSTRUCTIONS = [ - "smulli", - "addis", - "subis", - "oris" + "clrbp", + "mvlbl" + ] + +ARM64_INSTRUCTIONS = + [ + "pcrtoaddr", # Address from PC relative offset - adr instruction + "nopFixCortexA53Err835769" # nop on Cortex-A53 (nothing otherwise) + ] + +RISC_INSTRUCTIONS = + [ + "smulli", # Multiply two 32-bit words and produce a 64-bit word + "addis", # Add integers and set a flag. + "subis", # Same, but for subtraction. + "oris", # Same, but for bitwise or. + "addps" # addis but for pointers. 
] MIPS_INSTRUCTIONS = @@ -270,6 +284,7 @@ MIPS_INSTRUCTIONS = "la", "movz", "movn", + "setcallreg", "slt", "sltu", "pichdr" @@ -277,6 +292,9 @@ MIPS_INSTRUCTIONS = SH4_INSTRUCTIONS = [ + "flushcp", + "alignformova", + "mova", "shllx", "shlrx", "shld", @@ -285,15 +303,17 @@ SH4_INSTRUCTIONS = "loaddReversedAndIncrementAddress", "storedReversedAndDecrementAddress", "ldspr", - "stspr" + "stspr", + "setargs" ] CXX_INSTRUCTIONS = [ - "cloopCrash", # no operands - "cloopCallJSFunction", # operands: callee - "cloopCallNative", # operands: callee - "cloopCallSlowPath", # operands: callTarget, currentFrame, currentPC + "cloopCrash", # no operands + "cloopCallJSFunction", # operands: callee + "cloopCallNative", # operands: callee + "cloopCallSlowPath", # operands: callTarget, currentFrame, currentPC + "cloopCallSlowPathVoid", # operands: callTarget, currentFrame, currentPC # For debugging only: # Takes no operands but simply emits whatever follows in // comments as @@ -304,9 +324,9 @@ CXX_INSTRUCTIONS = "cloopDo", # no operands ] -INSTRUCTIONS = MACRO_INSTRUCTIONS + X86_INSTRUCTIONS + ARM_INSTRUCTIONS + MIPS_INSTRUCTIONS + SH4_INSTRUCTIONS + CXX_INSTRUCTIONS +INSTRUCTIONS = MACRO_INSTRUCTIONS + X86_INSTRUCTIONS + ARM_INSTRUCTIONS + ARM64_INSTRUCTIONS + RISC_INSTRUCTIONS + MIPS_INSTRUCTIONS + SH4_INSTRUCTIONS + CXX_INSTRUCTIONS -INSTRUCTION_PATTERN = Regexp.new('\\A((' + INSTRUCTIONS.join(')|(') + '))\\Z') +INSTRUCTION_SET = INSTRUCTIONS.to_set def isBranch(instruction) instruction =~ /^b/ diff --git a/Source/JavaScriptCore/offlineasm/mips.rb b/Source/JavaScriptCore/offlineasm/mips.rb index 08fd02662..53bb9c143 100644 --- a/Source/JavaScriptCore/offlineasm/mips.rb +++ b/Source/JavaScriptCore/offlineasm/mips.rb @@ -24,6 +24,43 @@ require 'risc' +# GPR conventions, to match the baseline JIT +# +# $a0 => a0 +# $a1 => a1 +# $a2 => a2 +# $a3 => a3 +# $v0 => t0, r0 +# $v1 => t1, r1 +# $t0 => (scratch) +# $t1 => (scratch) +# $t2 => t2 +# $t3 => t3 +# $t4 => t4 +# $t5 => t5 +# $t6 => t6 +# $t7 => (scratch) +# $t8 => (scratch) +# $t9 => (stores the callee of a call opcode) +# $gp => (globals) +# $s4 => (callee-save used to preserve $gp across calls) +# $ra => lr +# $sp => sp +# $fp => cfr +# +# FPR conventions, to match the baseline JIT +# We don't have fa2 or fa3! 
+# $f0 => ft0, fr +# $f2 => ft1 +# $f4 => ft2 +# $f6 => ft3 +# $f8 => ft4 +# $f10 => ft5 +# $f12 => fa0 +# $f14 => fa1 +# $f16 => (scratch) +# $f18 => (scratch) + class Assembler def putStr(str) @outp.puts str @@ -57,8 +94,7 @@ class SpecialRegister < NoChildren end end -MIPS_TEMP_GPRS = [SpecialRegister.new("$t5"), SpecialRegister.new("$t6"), SpecialRegister.new("$t7"), - SpecialRegister.new("$t8")] +MIPS_TEMP_GPRS = [SpecialRegister.new("$t0"), SpecialRegister.new("$t1"), SpecialRegister.new("$t7"), SpecialRegister.new("$t8")] MIPS_ZERO_REG = SpecialRegister.new("$zero") MIPS_GP_REG = SpecialRegister.new("$gp") MIPS_GPSAVE_REG = SpecialRegister.new("$s4") @@ -81,26 +117,24 @@ class RegisterID "$a0" when "a1" "$a1" - when "r0", "t0" + when "a2" + "$a2" + when "a3" + "$a3" + when "t0", "r0" "$v0" - when "r1", "t1" + when "t1", "r1" "$v1" when "t2" "$t2" when "t3" - "$s3" - when "t4" # PC reg in llint - "$s2" + "$t3" + when "t4" + "$t4" when "t5" "$t5" - when "t6" - "$t6" - when "t7" - "$t7" - when "t8" - "$t8" when "cfr" - "$s0" + "$fp" when "lr" "$ra" when "sp" @@ -138,7 +172,7 @@ end class Immediate def mipsOperand - raise "Invalid immediate #{value} at #{codeOriginString}" if value < -0x7fff or value > 0x7fff + raise "Invalid immediate #{value} at #{codeOriginString}" if value < -0x7fff or value > 0xffff "#{value}" end end @@ -371,46 +405,93 @@ end # class Node - def mipsLowerMalformedAddressesRecurse(list, topLevelNode, &block) + def mipsLowerMalformedAddressesRecurse(list) mapChildren { | subNode | - subNode.mipsLowerMalformedAddressesRecurse(list, topLevelNode, &block) + subNode.mipsLowerMalformedAddressesRecurse(list) } end -end -class Address - def mipsLowerMalformedAddressesRecurse(list, node, &block) - riscLowerMalformedAddressesRecurse(list, node, &block) + def mipsLowerShiftedAddressesRecurse(list, isFirst, tmp) + mapChildren { + | subNode | + subNode.mipsLowerShiftedAddressesRecurse(list, isFirst, tmp) + } end end class BaseIndex - def mipsLowerMalformedAddressesRecurse(list, node, &block) + def mipsLowerMalformedAddressesRecurse(list) + tmp = Tmp.new(codeOrigin, :gpr) if scaleShift == 0 - tmp0 = Tmp.new(codeOrigin, :gpr) - list << Instruction.new(codeOrigin, "addp", [base, index, tmp0]) - Address.new(codeOrigin, tmp0, Immediate.new(codeOrigin, offset.value)); - else - tmp0 = Tmp.new(codeOrigin, :gpr) - list << Instruction.new(codeOrigin, "lshifti", [index, Immediate.new(codeOrigin, scaleShift), tmp0]); - list << Instruction.new(codeOrigin, "addp", [base, tmp0]) - Address.new(codeOrigin, tmp0, Immediate.new(codeOrigin, offset.value)); + list << Instruction.new(codeOrigin, "addp", [base, index, tmp]) + Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, offset.value)); end end -end -class AbsoluteAddress - def mipsLowerMalformedAddressesRecurse(list, node, &block) - riscLowerMalformedAddressesRecurse(list, node, &block) + def mipsLowerShiftedAddressesRecurse(list, isFirst, tmp) + if isFirst + list << Instruction.new(codeOrigin, "lshifti", [index, Immediate.new(codeOrigin, scaleShift), tmp]); + list << Instruction.new(codeOrigin, "addp", [base, tmp]) + end + Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, offset.value)); end end -def mipsLowerMalformedAddresses(list, &block) - newList = [] - list.each { - | node | - newList << node.mipsLowerMalformedAddressesRecurse(newList, node, &block) +# +# Lowering of BaseIndex addresses with optimization for MIPS. 
+# +# offline asm instruction pair: +# loadi 4[cfr, t0, 8], t2 +# loadi 0[cfr, t0, 8], t0 +# +# lowered instructions: +# lshifti t0, 3, tmp +# addp cfr, tmp +# loadi 4[tmp], t2 +# loadi 0[tmp], t0 +# + +def mipsHasShiftedBaseIndexAddress(instruction) + instruction.operands.each_with_index { + | operand, index | + if operand.is_a? BaseIndex and operand.scaleShift != 0 + return index + end + } + -1 +end + +def mipsScaleOfBaseIndexMatches(baseIndex0, baseIndex1) + baseIndex0.base == baseIndex1.base and + baseIndex0.index == baseIndex1.index and + baseIndex0.scale == baseIndex1.scale +end + +def mipsLowerBaseIndexAddresses(list) + newList = [ list[0] ] + tmp = nil + list.each_cons(2) { + | nodes | + if nodes[1].is_a? Instruction + ind = mipsHasShiftedBaseIndexAddress(nodes[1]) + if ind != -1 + if nodes[0].is_a? Instruction and + nodes[0].opcode == nodes[1].opcode and + ind == mipsHasShiftedBaseIndexAddress(nodes[0]) and + mipsScaleOfBaseIndexMatches(nodes[0].operands[ind], nodes[1].operands[ind]) + + newList << nodes[1].mipsLowerShiftedAddressesRecurse(newList, false, tmp) + else + tmp = Tmp.new(codeOrigin, :gpr) + newList << nodes[1].mipsLowerShiftedAddressesRecurse(newList, true, tmp) + end + else + newList << nodes[1].mipsLowerMalformedAddressesRecurse(newList) + end + else + newList << nodes[1] + end } newList end @@ -442,6 +523,10 @@ def mipsLowerMisplacedImmediates(list) else newList << node end + when /^(addi|subi)/ + newList << node.riscLowerMalformedImmediatesRecurse(newList, -0x7fff..0x7fff) + when "andi", "andp", "ori", "orp", "xori", "xorp" + newList << node.riscLowerMalformedImmediatesRecurse(newList, 0..0xffff) else newList << node end @@ -624,7 +709,8 @@ class Sequence result = riscLowerSimpleBranchOps(result) result = riscLowerHardBranchOps(result) result = riscLowerShiftOps(result) - result = mipsLowerMalformedAddresses(result) { + result = mipsLowerBaseIndexAddresses(result) + result = riscLowerMalformedAddresses(result) { | node, address | if address.is_a? Address (-0xffff..0xffff).include? address.offset.value @@ -864,11 +950,17 @@ class Instruction # FIXME: either support this or remove it. raise "MIPS does not support this opcode yet, #{codeOrigin}" when "pop" - $asm.puts "lw #{operands[0].mipsOperand}, 0($sp)" - $asm.puts "addiu $sp, $sp, 4" + operands.each { + | op | + $asm.puts "lw #{op.mipsOperand}, 0($sp)" + $asm.puts "addiu $sp, $sp, 4" + } when "push" - $asm.puts "addiu $sp, $sp, -4" - $asm.puts "sw #{operands[0].mipsOperand}, 0($sp)" + operands.each { + | op | + $asm.puts "addiu $sp, $sp, -4" + $asm.puts "sw #{op.mipsOperand}, 0($sp)" + } when "move", "sxi2p", "zxi2p" if operands[0].is_a? 
Immediate mipsMoveImmediate(operands[0].value, operands[1]) @@ -946,15 +1038,18 @@ class Instruction $asm.puts "movz #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].mipsOperand}" when "movn" $asm.puts "movn #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].mipsOperand}" + when "setcallreg" + $asm.puts "move #{MIPS_CALL_REG.mipsOperand}, #{operands[0].mipsOperand}" when "slt", "sltb" $asm.puts "slt #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].mipsOperand}" when "sltu", "sltub" $asm.puts "sltu #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].mipsOperand}" when "pichdr" $asm.putStr("OFFLINE_ASM_CPLOAD(#{MIPS_CALL_REG.mipsOperand})") - $asm.puts "move #{MIPS_GPSAVE_REG.mipsOperand}, #{MIPS_GP_REG.mipsOperand}" + when "memfence" + $asm.puts "sync" else - raise "Unhandled opcode #{opcode} at #{codeOriginString}" + lowerDefault end end end diff --git a/Source/JavaScriptCore/offlineasm/parser.rb b/Source/JavaScriptCore/offlineasm/parser.rb index 3b9c67bed..b44511245 100644 --- a/Source/JavaScriptCore/offlineasm/parser.rb +++ b/Source/JavaScriptCore/offlineasm/parser.rb @@ -1,4 +1,4 @@ -# Copyright (C) 2011 Apple Inc. All rights reserved. +# Copyright (C) 2011, 2016 Apple Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions @@ -28,19 +28,83 @@ require "pathname" require "registers" require "self_hash" +class SourceFile + @@fileNames = [] + + attr_reader :name, :fileNumber + + def SourceFile.outputDotFileList(outp) + @@fileNames.each_index { + | index | + outp.puts "\".file #{index+1} \\\"#{@@fileNames[index]}\\\"\\n\"" + } + end + + def initialize(fileName) + @name = Pathname.new(fileName) + pathName = "#{@name.realpath}" + fileNumber = @@fileNames.index(pathName) + if not fileNumber + @@fileNames << pathName + fileNumber = @@fileNames.length + else + fileNumber += 1 # File numbers are 1 based + end + @fileNumber = fileNumber + end +end + class CodeOrigin - attr_reader :fileName, :lineNumber + attr_reader :lineNumber - def initialize(fileName, lineNumber) - @fileName = fileName + def initialize(sourceFile, lineNumber) + @sourceFile = sourceFile @lineNumber = lineNumber end - + + def fileName + @sourceFile.name + end + + def debugDirective + $emitWinAsm ? nil : "\".loc #{@sourceFile.fileNumber} #{lineNumber}\\n\"" + end + def to_s "#{fileName}:#{lineNumber}" end end +class IncludeFile + @@includeDirs = [] + + attr_reader :fileName + + def initialize(moduleName, defaultDir) + directory = nil + @@includeDirs.each { + | includePath | + fileName = includePath + (moduleName + ".asm") + directory = includePath unless not File.file?(fileName) + } + if not directory + directory = defaultDir + end + + @fileName = directory + (moduleName + ".asm") + end + + def self.processIncludeOptions() + while ARGV[0][/^-I/] + path = ARGV.shift[2..-1] + if not path + path = ARGV.shift + end + @@includeDirs << (path + "/") + end + end +end + class Token attr_reader :codeOrigin, :string @@ -87,8 +151,7 @@ end # The lexer. Takes a string and returns an array of tokens. # -def lex(str, fileName) - fileName = Pathname.new(fileName) +def lex(str, file) result = [] lineNumber = 1 annotation = nil @@ -108,35 +171,37 @@ def lex(str, fileName) # use of this for its cloopDo debugging utility even if # enableInstrAnnotations is not enabled. 
if annotation - result << Annotation.new(CodeOrigin.new(fileName, lineNumber), + result << Annotation.new(CodeOrigin.new(file, lineNumber), annotationType, annotation) annotation = nil end - result << Token.new(CodeOrigin.new(fileName, lineNumber), $&) + result << Token.new(CodeOrigin.new(file, lineNumber), $&) lineNumber += 1 when /\A[a-zA-Z]([a-zA-Z0-9_.]*)/ - result << Token.new(CodeOrigin.new(fileName, lineNumber), $&) + result << Token.new(CodeOrigin.new(file, lineNumber), $&) when /\A\.([a-zA-Z0-9_]*)/ - result << Token.new(CodeOrigin.new(fileName, lineNumber), $&) + result << Token.new(CodeOrigin.new(file, lineNumber), $&) when /\A_([a-zA-Z0-9_]*)/ - result << Token.new(CodeOrigin.new(fileName, lineNumber), $&) + result << Token.new(CodeOrigin.new(file, lineNumber), $&) when /\A([ \t]+)/ # whitespace, ignore whitespaceFound = true str = $~.post_match next when /\A0x([0-9a-fA-F]+)/ - result << Token.new(CodeOrigin.new(fileName, lineNumber), $&.hex.to_s) + result << Token.new(CodeOrigin.new(file, lineNumber), $&.hex.to_s) when /\A0([0-7]+)/ - result << Token.new(CodeOrigin.new(fileName, lineNumber), $&.oct.to_s) + result << Token.new(CodeOrigin.new(file, lineNumber), $&.oct.to_s) when /\A([0-9]+)/ - result << Token.new(CodeOrigin.new(fileName, lineNumber), $&) + result << Token.new(CodeOrigin.new(file, lineNumber), $&) when /\A::/ - result << Token.new(CodeOrigin.new(fileName, lineNumber), $&) + result << Token.new(CodeOrigin.new(file, lineNumber), $&) when /\A[:,\(\)\[\]=\+\-~\|&^*]/ - result << Token.new(CodeOrigin.new(fileName, lineNumber), $&) + result << Token.new(CodeOrigin.new(file, lineNumber), $&) + when /\A".*"/ + result << Token.new(CodeOrigin.new(file, lineNumber), $&) else - raise "Lexer error at #{CodeOrigin.new(fileName, lineNumber).to_s}, unexpected sequence #{str[0..20].inspect}" + raise "Lexer error at #{CodeOrigin.new(file, lineNumber).to_s}, unexpected sequence #{str[0..20].inspect}" end whitespaceFound = false str = $~.post_match @@ -153,13 +218,13 @@ def isRegister(token) end def isInstruction(token) - token =~ INSTRUCTION_PATTERN + INSTRUCTION_SET.member? token.string end def isKeyword(token) - token =~ /\A((true)|(false)|(if)|(then)|(else)|(elsif)|(end)|(and)|(or)|(not)|(macro)|(const)|(sizeof)|(error)|(include))\Z/ or + token =~ /\A((true)|(false)|(if)|(then)|(else)|(elsif)|(end)|(and)|(or)|(not)|(global)|(macro)|(const)|(sizeof)|(error)|(include))\Z/ or token =~ REGISTER_PATTERN or - token =~ INSTRUCTION_PATTERN + isInstruction(token) end def isIdentifier(token) @@ -182,6 +247,10 @@ def isInteger(token) token =~ /\A[0-9]/ end +def isString(token) + token =~ /\A".*"/ +end + # # The parser. Takes an array of tokens and returns an AST. Methods # other than parse(tokens) are not for public consumption. 
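The SourceFile bookkeeping introduced above exists so that every token's CodeOrigin can name a stable, 1-based DWARF file number for the .loc directives emitted when $enableDebugAnnotations is on. As a minimal, self-contained sketch of that registry idea (class and method names here are illustrative, not part of the patch, and realpath resolution is omitted so the snippet runs without the files existing):

require "pathname"

class FileNumberRegistry
  def initialize
    @paths = []
  end

  # Return a 1-based file number for path, registering it on first
  # sight and reusing the previously assigned number afterwards,
  # which is the same invariant SourceFile maintains above.
  def number_for(path)
    normalized = Pathname.new(path).cleanpath.to_s
    index = @paths.index(normalized)
    unless index
      @paths << normalized
      index = @paths.length - 1
    end
    index + 1
  end
end

registry = FileNumberRegistry.new
registry.number_for("LowLevelInterpreter.asm")      # => 1
registry.number_for("LowLevelInterpreter32_64.asm") # => 2
registry.number_for("LowLevelInterpreter.asm")      # => 1, reused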
@@ -367,6 +436,10 @@ class Parser result = Immediate.new(@tokens[@idx].codeOrigin, @tokens[@idx].string.to_i) @idx += 1 result + elsif isString @tokens[@idx] + result = StringLiteral.new(@tokens[@idx].codeOrigin, @tokens[@idx].string) + @idx += 1 + result elsif isIdentifier @tokens[@idx] codeOrigin, names = parseColonColon if names.size > 1 @@ -380,6 +453,14 @@ class Parser @idx += 1 codeOrigin, names = parseColonColon Sizeof.forName(codeOrigin, names.join('::')) + elsif isLabel @tokens[@idx] + result = LabelReference.new(@tokens[@idx].codeOrigin, Label.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)) + @idx += 1 + result + elsif isLocalLabel @tokens[@idx] + result = LocalLabelReference.new(@tokens[@idx].codeOrigin, LocalLabel.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)) + @idx += 1 + result else parseError end @@ -400,7 +481,7 @@ class Parser end def couldBeExpression - @tokens[@idx] == "-" or @tokens[@idx] == "~" or @tokens[@idx] == "sizeof" or isInteger(@tokens[@idx]) or isVariable(@tokens[@idx]) or @tokens[@idx] == "(" + @tokens[@idx] == "-" or @tokens[@idx] == "~" or @tokens[@idx] == "sizeof" or isInteger(@tokens[@idx]) or isString(@tokens[@idx]) or isVariable(@tokens[@idx]) or @tokens[@idx] == "(" end def parseExpressionAdd @@ -573,6 +654,14 @@ class Parser body = parseSequence(/\Aend\Z/, "while inside of macro #{name}") @idx += 1 list << Macro.new(codeOrigin, name, variables, body) + elsif @tokens[@idx] == "global" + codeOrigin = @tokens[@idx].codeOrigin + @idx += 1 + skipNewLine + parseError unless isLabel(@tokens[@idx]) + name = @tokens[@idx].string + @idx += 1 + Label.setAsGlobal(codeOrigin, name) elsif isInstruction @tokens[@idx] codeOrigin = @tokens[@idx].codeOrigin name = @tokens[@idx].string @@ -677,7 +766,7 @@ class Parser parseError unless @tokens[@idx] == ":" # It's a label. 
if isLabel name - list << Label.forName(codeOrigin, name) + list << Label.forName(codeOrigin, name, true) else list << LocalLabel.forName(codeOrigin, name) end @@ -686,9 +775,8 @@ class Parser @idx += 1 parseError unless isIdentifier(@tokens[@idx]) moduleName = @tokens[@idx].string - fileName = @tokens[@idx].codeOrigin.fileName.dirname + (moduleName + ".asm") + fileName = IncludeFile.new(moduleName, @tokens[@idx].codeOrigin.fileName.dirname).fileName @idx += 1 - $stderr.puts "offlineasm: Including file #{fileName}" list << parse(fileName) else parseError "Expecting terminal #{final} #{comment}" @@ -696,10 +784,33 @@ class Parser } Sequence.new(firstCodeOrigin, list) end + + def parseIncludes(final, comment) + firstCodeOrigin = @tokens[@idx].codeOrigin + fileList = [] + fileList << @tokens[@idx].codeOrigin.fileName + loop { + if (@idx == @tokens.length and not final) or (final and @tokens[@idx] =~ final) + break + elsif @tokens[@idx] == "include" + @idx += 1 + parseError unless isIdentifier(@tokens[@idx]) + moduleName = @tokens[@idx].string + fileName = IncludeFile.new(moduleName, @tokens[@idx].codeOrigin.fileName.dirname).fileName + @idx += 1 + + fileList << fileName + else + @idx += 1 + end + } + + return fileList + end end def parseData(data, fileName) - parser = Parser.new(data, fileName) + parser = Parser.new(data, SourceFile.new(fileName)) parser.parseSequence(nil, "") end @@ -708,6 +819,8 @@ def parse(fileName) end def parseHash(fileName) - dirHash(Pathname.new(fileName).dirname, /\.asm$/) + parser = Parser.new(IO::read(fileName), SourceFile.new(fileName)) + fileList = parser.parseIncludes(nil, "") + fileListHash(fileList) end diff --git a/Source/JavaScriptCore/offlineasm/registers.rb b/Source/JavaScriptCore/offlineasm/registers.rb index f062ae6a5..b6ed36d00 100644 --- a/Source/JavaScriptCore/offlineasm/registers.rb +++ b/Source/JavaScriptCore/offlineasm/registers.rb @@ -30,19 +30,28 @@ GPRS = "t2", "t3", "t4", + "t5", "cfr", "a0", "a1", + "a2", + "a3", "r0", "r1", "sp", "lr", - + "pc", # 64-bit only registers: - "t5", - "t6", # r10 - "csr1", # r14, tag type number register - "csr2" # r15, tag mask register + "csr0", + "csr1", + "csr2", + "csr3", + "csr4", + "csr5", + "csr6", + "csr7", + "csr8", + "csr9" ] FPRS = @@ -57,6 +66,14 @@ FPRS = "fa1", "fa2", "fa3", + "csfr0", + "csfr1", + "csfr2", + "csfr3", + "csfr4", + "csfr5", + "csfr6", + "csfr7", "fr" ] diff --git a/Source/JavaScriptCore/offlineasm/risc.rb b/Source/JavaScriptCore/offlineasm/risc.rb index 1696a0c5c..3fbc07d0b 100644 --- a/Source/JavaScriptCore/offlineasm/risc.rb +++ b/Source/JavaScriptCore/offlineasm/risc.rb @@ -371,7 +371,7 @@ def riscLowerMalformedImmediates(list, validImmediates) case node.opcode when "move" newList << node - when "addi", "addp", "addis", "subi", "subp", "subis" + when "addi", "addp", "addq", "addis", "subi", "subp", "subq", "subis" if node.operands[0].is_a? Immediate and (not validImmediates.include? node.operands[0].value) and validImmediates.include? -node.operands[0].value @@ -387,7 +387,7 @@ def riscLowerMalformedImmediates(list, validImmediates) else newList << node.riscLowerMalformedImmediatesRecurse(newList, validImmediates) end - when "muli", "mulp" + when "muli", "mulp", "mulq" if node.operands[0].is_a? 
Immediate tmp = Tmp.new(codeOrigin, :gpr) newList << Instruction.new(node.codeOrigin, "move", [node.operands[0], tmp], annotation) @@ -466,6 +472,12 @@ def riscLowerMisplacedAddresses(list) node.opcode, riscAsRegisters(newList, postInstructions, node.operands, "p"), annotation) + when "addq", "andq", "lshiftq", "mulq", "negq", "orq", "rshiftq", "urshiftq", + "subq", "xorq", /^bq/, /^btq/, /^cq/ + newList << Instruction.new(node.codeOrigin, + node.opcode, + riscAsRegisters(newList, postInstructions, node.operands, "q"), + annotation) when "bbeq", "bbneq", "bba", "bbaeq", "bbb", "bbbeq", "btbz", "btbnz", "tbz", "tbnz", "cbeq", "cbneq", "cba", "cbaeq", "cbb", "cbbeq" newList << Instruction.new(node.codeOrigin, @@ -552,3 +558,173 @@ def riscLowerRegisterReuse(list) newList end +# +# Lowering of the not instruction. The following: +# +# noti t0 +# +# becomes: +# +# xori -1, t0 +# + +def riscLowerNot(list) + newList = [] + list.each { + | node | + if node.is_a? Instruction + case node.opcode + when "noti", "notp" + raise "Wrong number of operands at #{node.codeOriginString}" unless node.operands.size == 1 + suffix = node.opcode[-1..-1] + newList << Instruction.new(node.codeOrigin, "xor" + suffix, + [Immediate.new(node.codeOrigin, -1), node.operands[0]]) + else + newList << node + end + else + newList << node + end + } + return newList +end + +# +# Lowering of complex branch ops on 64-bit. For example: +# +# bmulio foo, bar, baz +# +# becomes: +# +# smulli foo, bar, bar +# rshiftp bar, 32, tmp1 +# rshifti bar, 31, tmp2 +# zxi2p bar, bar +# bineq tmp1, tmp2, baz +# + +def riscLowerHardBranchOps64(list) + newList = [] + list.each { + | node | + if node.is_a? Instruction and node.opcode == "bmulio" + tmp1 = Tmp.new(node.codeOrigin, :gpr) + tmp2 = Tmp.new(node.codeOrigin, :gpr) + newList << Instruction.new(node.codeOrigin, "smulli", [node.operands[0], node.operands[1], node.operands[1]]) + newList << Instruction.new(node.codeOrigin, "rshiftp", [node.operands[1], Immediate.new(node.codeOrigin, 32), tmp1]) + newList << Instruction.new(node.codeOrigin, "rshifti", [node.operands[1], Immediate.new(node.codeOrigin, 31), tmp2]) + newList << Instruction.new(node.codeOrigin, "zxi2p", [node.operands[1], node.operands[1]]) + newList << Instruction.new(node.codeOrigin, "bineq", [tmp1, tmp2, node.operands[2]]) + else + newList << node + end + } + newList +end + +# +# Lowering of test instructions. For example: +# +# btiz t0, t1, .foo +# +# becomes: +# +# andi t0, t1, tmp +# bieq tmp, 0, .foo +# +# and another example: +# +# tiz t0, t1, t2 +# +# becomes: +# +# andi t0, t1, tmp +# cieq tmp, 0, t2 +# + +def riscLowerTest(list) + def emit(newList, andOpcode, branchOpcode, node) + if node.operands.size == 2 + newList << Instruction.new(node.codeOrigin, branchOpcode, [node.operands[0], Immediate.new(node.codeOrigin, 0), node.operands[1]]) + return + end + + raise "Incorrect number of operands at #{node.codeOriginString}" unless node.operands.size == 3 + + if node.operands[0].immediate? and node.operands[0].value == -1 + newList << Instruction.new(node.codeOrigin, branchOpcode, [node.operands[1], Immediate.new(node.codeOrigin, 0), node.operands[2]]) + return + end + + if node.operands[1].immediate?
and node.operands[1].value == -1 + newList << Instruction.new(node.codeOrigin, branchOpcode, [node.operands[0], Immediate.new(node.codeOrigin, 0), node.operands[2]]) + return + end + + tmp = Tmp.new(node.codeOrigin, :gpr) + newList << Instruction.new(node.codeOrigin, andOpcode, [node.operands[0], node.operands[1], tmp]) + newList << Instruction.new(node.codeOrigin, branchOpcode, [tmp, Immediate.new(node.codeOrigin, 0), node.operands[2]]) + end + + newList = [] + list.each { + | node | + if node.is_a? Instruction + case node.opcode + when "btis" + emit(newList, "andi", "bilt", node) + when "btiz" + emit(newList, "andi", "bieq", node) + when "btinz" + emit(newList, "andi", "bineq", node) + when "btps" + emit(newList, "andp", "bplt", node) + when "btpz" + emit(newList, "andp", "bpeq", node) + when "btpnz" + emit(newList, "andp", "bpneq", node) + when "btqs" + emit(newList, "andq", "bqlt", node) + when "btqz" + emit(newList, "andq", "bqeq", node) + when "btqnz" + emit(newList, "andq", "bqneq", node) + when "btbs" + emit(newList, "andi", "bblt", node) + when "btbz" + emit(newList, "andi", "bbeq", node) + when "btbnz" + emit(newList, "andi", "bbneq", node) + when "tis" + emit(newList, "andi", "cilt", node) + when "tiz" + emit(newList, "andi", "cieq", node) + when "tinz" + emit(newList, "andi", "cineq", node) + when "tps" + emit(newList, "andp", "cplt", node) + when "tpz" + emit(newList, "andp", "cpeq", node) + when "tpnz" + emit(newList, "andp", "cpneq", node) + when "tqs" + emit(newList, "andq", "cqlt", node) + when "tqz" + emit(newList, "andq", "cqeq", node) + when "tqnz" + emit(newList, "andq", "cqneq", node) + when "tbs" + emit(newList, "andi", "cblt", node) + when "tbz" + emit(newList, "andi", "cbeq", node) + when "tbnz" + emit(newList, "andi", "cbneq", node) + else + newList << node + end + else + newList << node + end + } + return newList +end diff --git a/Source/JavaScriptCore/offlineasm/self_hash.rb b/Source/JavaScriptCore/offlineasm/self_hash.rb index b91057391..6c736ff5b 100644 --- a/Source/JavaScriptCore/offlineasm/self_hash.rb +++ b/Source/JavaScriptCore/offlineasm/self_hash.rb @@ -45,6 +45,21 @@ def dirHash(directory, regexp) end # +# fileListHash(fileList) -> SHA1 hexdigest +# +# Returns a hash of all files in the list. +# + +def fileListHash(fileList) + contents = "" + fileList.each { + | fileName | + contents += IO::read(fileName) + } + return Digest::SHA1.hexdigest(contents) +end + +# # selfHash -> SHA1 hexdigest # # Returns a hash of the offlineasm source code. This allows dependency diff --git a/Source/JavaScriptCore/offlineasm/settings.rb b/Source/JavaScriptCore/offlineasm/settings.rb index 601934f99..eec092584 100644 --- a/Source/JavaScriptCore/offlineasm/settings.rb +++ b/Source/JavaScriptCore/offlineasm/settings.rb @@ -54,7 +54,28 @@ def computeSettingsCombinations(ast) settingsCombinator(settingsCombinations, newMap, remaining[1..-1]) end - settingsCombinator(settingsCombinations, {}, (ast.filter(Setting).uniq.collect{|v| v.name} + BACKENDS).uniq) + nonBackendSettings = ast.filter(Setting).uniq.collect{ |v| v.name } + nonBackendSettings.delete_if { + | setting | + isBackend? setting + } + + allBackendsFalse = {} + BACKENDS.each { + | backend | + allBackendsFalse[backend] = false + } + + # This will create entries for invalid backends. That's fine. It's necessary + # because it ensures that generate_offsets_extractor (which knows about valid + # backends) has settings indices that are compatible with what asm will see + # (asm doesn't know about valid backends). 
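Concretely, the loop that follows starts every settings map from allBackendsFalse and flips exactly one backend on, after which settingsCombinator layers the nonBackendSettings combinations on top. A minimal sketch of just that backend-flipping shape, using a made-up backend list rather than the real BACKENDS constant:

backends = ["X86", "X86_64", "ARMv7", "C_LOOP"]
all_false = backends.each_with_object({}) { |backend, map| map[backend] = false }
combinations = backends.map { |backend| all_false.merge(backend => true) }
# combinations[2] == {"X86"=>false, "X86_64"=>false, "ARMv7"=>true, "C_LOOP"=>false}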
+ BACKENDS.each { + | backend | + map = allBackendsFalse.clone + map[backend] = true + settingsCombinator(settingsCombinations, map, nonBackendSettings) + } settingsCombinations end @@ -73,15 +94,13 @@ def forSettings(concreteSettings, ast) selectedBackend = nil BACKENDS.each { | backend | - isSupported = concreteSettings[backend] - raise unless isSupported != nil - numClaimedBackends += if isSupported then 1 else 0 end - if isSupported + if concreteSettings[backend] + raise if selectedBackend selectedBackend = backend end } - return if numClaimedBackends > 1 + return unless isValidBackend? selectedBackend # Resolve the AST down to a low-level form (no macros or conditionals). lowLevelAST = ast.resolveSettings(concreteSettings) @@ -172,7 +191,17 @@ end # def emitCodeInConfiguration(concreteSettings, ast, backend) - $output.puts cppSettingsTest(concreteSettings) + Label.resetReferenced + + if !$emitWinAsm + $output.puts cppSettingsTest(concreteSettings) + else + if backend == "X86_WIN" + $output.puts ".MODEL FLAT, C" + end + $output.puts "INCLUDE #{File.basename($output.path)}.sym" + $output.puts "_TEXT SEGMENT" + end if isASTErroneous(ast) $output.puts "#error \"Invalid configuration.\"" @@ -182,7 +211,21 @@ def emitCodeInConfiguration(concreteSettings, ast, backend) yield concreteSettings, ast, backend end - $output.puts "#endif" + if !$emitWinAsm + $output.puts "#endif" + else + $output.puts "_TEXT ENDS" + $output.puts "END" + + # Write symbols needed by MASM + File.open("#{File.basename($output.path)}.sym", "w") { + | outp | + Label.forReferencedExtern { + | name | + outp.puts "EXTERN #{name[1..-1]} : near" + } + } + end end # diff --git a/Source/JavaScriptCore/offlineasm/sh4.rb b/Source/JavaScriptCore/offlineasm/sh4.rb index 5721baee7..c847d564a 100644 --- a/Source/JavaScriptCore/offlineasm/sh4.rb +++ b/Source/JavaScriptCore/offlineasm/sh4.rb @@ -24,6 +24,33 @@ require 'risc' +# GPR conventions, to match the baseline JIT +# +# r0 => t0, r0 +# r1 => t1, r1 +# r2 => t4 +# r3 => t5 +# r4 => a0 +# r5 => a1 +# r6 => t2, a2 +# r7 => t3, a3 +# r10 => (scratch) +# r11 => (scratch) +# r13 => (scratch) +# r14 => cfr +# r15 => sp +# pr => lr + +# FPR conventions, to match the baseline JIT +# We don't have fa2 or fa3! 
+# dr0 => ft0, fr +# dr2 => ft1 +# dr4 => ft2, fa0 +# dr6 => ft3, fa1 +# dr8 => ft4 +# dr10 => ft5 +# dr12 => (scratch) + class Node def sh4SingleHi doubleOperand = sh4Operand @@ -51,8 +78,8 @@ class SpecialRegister < NoChildren end end -SH4_TMP_GPRS = [ SpecialRegister.new("r3"), SpecialRegister.new("r11"), SpecialRegister.new("r13") ] -SH4_TMP_FPRS = [ SpecialRegister.new("dr10") ] +SH4_TMP_GPRS = [ SpecialRegister.new("r10"), SpecialRegister.new("r11"), SpecialRegister.new("r13") ] +SH4_TMP_FPRS = [ SpecialRegister.new("dr12") ] class RegisterID def sh4Operand @@ -61,16 +88,18 @@ class RegisterID "r4" when "a1" "r5" - when "t0" + when "r0", "t0" "r0" - when "t1" + when "r1", "t1" "r1" - when "t2" - "r2" - when "t3" - "r10" - when "t4" + when "a2", "t2" "r6" + when "a3", "t3" + "r7" + when "t4" + "r2" + when "t5" + "r3" when "cfr" "r14" when "sp" @@ -90,14 +119,14 @@ class FPRegisterID "dr0" when "ft1" "dr2" - when "ft2" + when "ft2", "fa0" "dr4" - when "ft3" + when "ft3", "fa1" "dr6" when "ft4" "dr8" - when "fa0" - "dr12" + when "ft5" + "dr10" else raise "Bad register #{name} for SH4 at #{codeOriginString}" end @@ -144,6 +173,18 @@ class AbsoluteAddress end end +class LabelReference + def sh4Operand + value + end +end + +class SubImmediates < Node + def sh4Operand + "#{@left.sh4Operand} - #{@right.sh4Operand}" + end +end + class ConstPool < Node attr_reader :size attr_reader :entries @@ -154,23 +195,23 @@ class ConstPool < Node @size = size @entries = entries end - + def dump "#{size}: #{entries}" end - + def address? false end - + def label? false end - + def immediate? false end - + def register? false end @@ -198,7 +239,7 @@ class ConstPoolEntry < Node attr_reader :value attr_reader :label attr_reader :labelref - + def initialize(codeOrigin, value, size) super(codeOrigin) raise "Invalid size #{size} for ConstPoolEntry" unless size == 16 or size == 32 @@ -207,27 +248,27 @@ class ConstPoolEntry < Node @label = LocalLabel.unique("constpool#{size}") @labelref = LocalLabelReference.new(codeOrigin, label) end - + def dump "#{value} (#{size} @ #{label})" end - + def ==(other) other.is_a? ConstPoolEntry and other.value == @value end - + def address? false end - + def label? false end - + def immediate? false end - + def register? false end @@ -446,12 +487,65 @@ def sh4LowerMisplacedLabels(list) list.each { | node | if node.is_a? Instruction + operands = node.operands + newOperands = [] + operands.each { + | operand | + if operand.is_a? LabelReference and node.opcode != "mova" + tmp = Tmp.new(operand.codeOrigin, :gpr) + newList << Instruction.new(operand.codeOrigin, "move", [operand, tmp]) + newOperands << tmp + else + newOperands << operand + end + } + newList << Instruction.new(node.codeOrigin, node.opcode, newOperands, node.annotation) + else + newList << node + end + } + newList +end + + +# +# Lowering of misplaced special registers for SH4. For example: +# +# storep pr, foo +# +# becomes: +# +# stspr tmp +# storep tmp, foo +# + +def sh4LowerMisplacedSpecialRegisters(list) + newList = [] + list.each { + | node | + if node.is_a? Instruction case node.opcode - when "jmp", "call" - if node.operands[0].is_a? LabelReference + when "move" + if node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "pr" + newList << Instruction.new(codeOrigin, "stspr", [node.operands[1]]) + elsif node.operands[1].is_a? 
RegisterID and node.operands[1].sh4Operand == "pr" + newList << Instruction.new(codeOrigin, "ldspr", [node.operands[0]]) + else + newList << node + end + when "loadi", "loadis", "loadp" + if node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "pr" tmp = Tmp.new(codeOrigin, :gpr) - newList << Instruction.new(codeOrigin, "move", [node.operands[0], tmp]) - newList << Instruction.new(codeOrigin, node.opcode, [tmp]) + newList << Instruction.new(codeOrigin, node.opcode, [node.operands[0], tmp]) + newList << Instruction.new(codeOrigin, "ldspr", [tmp]) + else + newList << node + end + when "storei", "storep" + if node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "pr" + tmp = Tmp.new(codeOrigin, :gpr) + newList << Instruction.new(codeOrigin, "stspr", [tmp]) + newList << Instruction.new(codeOrigin, node.opcode, [tmp, node.operands[1]]) else newList << node end @@ -490,8 +584,13 @@ def sh4LowerConstPool(list) | node | if node.is_a? Instruction case node.opcode - when "jmp", "ret" - newList << node + when "jmp", "ret", "flushcp" + if node.opcode == "flushcp" + outlabel = LocalLabel.unique("flushcp") + newList << Instruction.new(codeOrigin, "jmp", [LocalLabelReference.new(codeOrigin, outlabel)]) + else + newList << node + end if not currentPool16.empty? newList << ConstPool.new(codeOrigin, currentPool16, 16) currentPool16 = [] @@ -500,6 +599,9 @@ def sh4LowerConstPool(list) newList << ConstPool.new(codeOrigin, currentPool32, 32) currentPool32 = [] end + if node.opcode == "flushcp" + newList << outlabel + end when "move" if node.operands[0].is_a? Immediate and not (-128..127).include? node.operands[0].value poolEntry = nil @@ -537,6 +639,10 @@ def sh4LowerConstPool(list) currentPool32 << poolEntry end newList << Instruction.new(codeOrigin, "move", [poolEntry, node.operands[1]]) + elsif node.operands[0].is_a? SubImmediates + poolEntry = ConstPoolEntry.new(codeOrigin, node.operands[0].sh4Operand, 32) + currentPool32 << poolEntry + newList << Instruction.new(codeOrigin, "move", [poolEntry, node.operands[1]]) else newList << node end @@ -557,6 +663,78 @@ def sh4LowerConstPool(list) end +# +# Lowering of argument setup for SH4. +# This phase avoids argument register trampling. For example, if a0 == t4: +# +# setargs t1, t4 +# +# becomes: +# +# move t4, a1 +# move t1, a0 +# + +def sh4LowerArgumentSetup(list) + a0 = RegisterID.forName(codeOrigin, "a0") + a1 = RegisterID.forName(codeOrigin, "a1") + a2 = RegisterID.forName(codeOrigin, "a2") + a3 = RegisterID.forName(codeOrigin, "a3") + newList = [] + list.each { + | node | + if node.is_a? Instruction + case node.opcode + when "setargs" + if node.operands.size == 2 + if node.operands[1].sh4Operand != a0.sh4Operand + newList << Instruction.new(codeOrigin, "move", [node.operands[0], a0]) + newList << Instruction.new(codeOrigin, "move", [node.operands[1], a1]) + elsif node.operands[0].sh4Operand != a1.sh4Operand + newList << Instruction.new(codeOrigin, "move", [node.operands[1], a1]) + newList << Instruction.new(codeOrigin, "move", [node.operands[0], a0]) + else + # As (operands[0] == a1) and (operands[1] == a0), we just need to swap a0 and a1. + newList << Instruction.new(codeOrigin, "xori", [a0, a1]) + newList << Instruction.new(codeOrigin, "xori", [a1, a0]) + newList << Instruction.new(codeOrigin, "xori", [a0, a1]) + end + elsif node.operands.size == 4 + # FIXME: We just raise an error if something is likely to go wrong for now. + # It would be better to implement a recovering algorithm. 
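For reference, the three xori instructions emitted above for the fully swapped two-operand case are the classic XOR swap. A minimal sketch of the same trick on plain Ruby integers (values arbitrary; offlineasm operand order is source then destination):

a0, a1 = 0b1010, 0b0101
a1 ^= a0   # xori a0, a1
a0 ^= a1   # xori a1, a0
a1 ^= a0   # xori a0, a1
# now a0 == 0b0101 and a1 == 0b1010, swapped with no scratch register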
+ if (node.operands[0].sh4Operand == a1.sh4Operand) or + (node.operands[0].sh4Operand == a2.sh4Operand) or + (node.operands[0].sh4Operand == a3.sh4Operand) or + (node.operands[1].sh4Operand == a0.sh4Operand) or + (node.operands[1].sh4Operand == a2.sh4Operand) or + (node.operands[1].sh4Operand == a3.sh4Operand) or + (node.operands[2].sh4Operand == a0.sh4Operand) or + (node.operands[2].sh4Operand == a1.sh4Operand) or + (node.operands[2].sh4Operand == a3.sh4Operand) or + (node.operands[3].sh4Operand == a0.sh4Operand) or + (node.operands[3].sh4Operand == a1.sh4Operand) or + (node.operands[3].sh4Operand == a2.sh4Operand) + raise "Potential argument register trampling detected." + end + + newList << Instruction.new(codeOrigin, "move", [node.operands[0], a0]) + newList << Instruction.new(codeOrigin, "move", [node.operands[1], a1]) + newList << Instruction.new(codeOrigin, "move", [node.operands[2], a2]) + newList << Instruction.new(codeOrigin, "move", [node.operands[3], a3]) + else + raise "Invalid operands number (#{node.operands.size}) for setargs" + end + else + newList << node + end + else + newList << node + end + } + newList +end + + class Sequence def getModifiedListSH4 result = @list @@ -600,13 +778,15 @@ class Sequence "bbeq", "bbneq", "bbb", "bieq", "bpeq", "bineq", "bpneq", "bia", "bpa", "biaeq", "bpaeq", "bib", "bpb", "bigteq", "bpgteq", "bilt", "bplt", "bigt", "bpgt", "bilteq", "bplteq", "btiz", "btpz", "btinz", "btpnz", "btbz", "btbnz"]) result = riscLowerMalformedImmediates(result, -128..127) - result = sh4LowerMisplacedLabels(result) result = riscLowerMisplacedAddresses(result) + result = sh4LowerMisplacedLabels(result) + result = sh4LowerMisplacedSpecialRegisters(result) result = assignRegistersToTemporaries(result, :gpr, SH4_TMP_GPRS) result = assignRegistersToTemporaries(result, :gpr, SH4_TMP_FPRS) result = sh4LowerConstPool(result) + result = sh4LowerArgumentSetup(result) return result end @@ -617,6 +797,7 @@ def sh4Operands(operands) end def emitSH4Branch(sh4opcode, operand) + raise "Invalid operand #{operand}" unless operand.is_a? RegisterID or operand.is_a? SpecialRegister $asm.puts "#{sh4opcode} @#{operand.sh4Operand}" $asm.puts "nop" end @@ -640,12 +821,16 @@ def emitSH4ShiftImm(val, operand, direction) end end -def emitSH4BranchIfT(label, neg) +def emitSH4BranchIfT(dest, neg) outlabel = LocalLabel.unique("branchIfT") sh4opcode = neg ? "bt" : "bf" $asm.puts "#{sh4opcode} #{LocalLabelReference.new(codeOrigin, outlabel).asmLabel}" - $asm.puts "bra #{label.asmLabel}" - $asm.puts "nop" + if dest.is_a? LocalLabelReference + $asm.puts "bra #{dest.asmLabel}" + $asm.puts "nop" + else + emitSH4Branch("jmp", dest) + end outlabel.lower("SH4") end @@ -705,7 +890,10 @@ class Instruction end when "subi", "subp" if operands.size == 3 - if operands[1].sh4Operand == operands[2].sh4Operand + if operands[1].is_a? Immediate + $asm.puts "mov #{sh4Operands([Immediate.new(codeOrigin, -1 * operands[1].value), operands[2]])}" + $asm.puts "add #{sh4Operands([operands[0], operands[2]])}" + elsif operands[1].sh4Operand == operands[2].sh4Operand $asm.puts "neg #{sh4Operands([operands[2], operands[2]])}" $asm.puts "add #{sh4Operands([operands[0], operands[2]])}" else @@ -739,6 +927,9 @@ class Instruction else $asm.puts "shl#{opcode[3, 1]}#{operands[0].value} #{operands[1].sh4Operand}" end + when "shalx", "sharx" + raise "Unhandled parameters for opcode #{opcode}" unless operands[0].is_a? 
Immediate and operands[0].value == 1 + $asm.puts "sha#{opcode[3, 1]} #{operands[1].sh4Operand}" when "shld", "shad" $asm.puts "#{opcode} #{sh4Operands(operands)}" when "loaddReversedAndIncrementAddress" @@ -870,6 +1061,10 @@ class Instruction $asm.puts "extu.w #{sh4Operands([operands[1], operands[1]])}" when "loadi", "loadis", "loadp", "storei", "storep" $asm.puts "mov.l #{sh4Operands(operands)}" + when "alignformova" + $asm.puts ".balign 4" # As balign directive is in a code section, fill value is 'nop' instruction. + when "mova" + $asm.puts "mova #{sh4Operands(operands)}" when "move" if operands[0].is_a? ConstPoolEntry if operands[0].size == 16 @@ -877,7 +1072,7 @@ class Instruction else $asm.puts "mov.l #{operands[0].labelref.asmLabel}, #{operands[1].sh4Operand}" end - else + elsif operands[0].sh4Operand != operands[1].sh4Operand $asm.puts "mov #{sh4Operands(operands)}" end when "leap" @@ -905,11 +1100,25 @@ class Instruction $asm.puts "lds #{sh4Operands(operands)}, pr" when "stspr" $asm.puts "sts pr, #{sh4Operands(operands)}" + when "memfence" + $asm.puts "synco" + when "pop" + if operands[0].sh4Operand == "pr" + $asm.puts "lds.l @r15+, #{sh4Operands(operands)}" + else + $asm.puts "mov.l @r15+, #{sh4Operands(operands)}" + end + when "push" + if operands[0].sh4Operand == "pr" + $asm.puts "sts.l #{sh4Operands(operands)}, @-r15" + else + $asm.puts "mov.l #{sh4Operands(operands)}, @-r15" + end when "break" # This special opcode always generates an illegal instruction exception. $asm.puts ".word 0xfffd" else - raise "Unhandled opcode #{opcode} at #{codeOriginString}" + lowerDefault end end end diff --git a/Source/JavaScriptCore/offlineasm/transform.rb b/Source/JavaScriptCore/offlineasm/transform.rb index c838629f0..84dd0413b 100644 --- a/Source/JavaScriptCore/offlineasm/transform.rb +++ b/Source/JavaScriptCore/offlineasm/transform.rb @@ -404,6 +404,17 @@ end class Sequence def validate validateChildren + + # Further verify that this list contains only instructions, labels, and skips. + @list.each { + | node | + unless node.is_a? Instruction or + node.is_a? Label or + node.is_a? LocalLabel or + node.is_a? Skip + raise "Unexpected #{node.inspect} at #{node.codeOrigin}" + end + } end end @@ -412,6 +423,11 @@ class Immediate end end +class StringLiteral + def validate + end +end + class RegisterID def validate end @@ -446,6 +462,13 @@ class Instruction end end +class SubImmediates + def validate + raise "Invalid operand #{left.dump} to immediate subtraction" unless left.immediateOperand? + raise "Invalid operand #{right.dump} to immediate subtraction" unless right.immediateOperand? + end +end + class Error def validate end diff --git a/Source/JavaScriptCore/offlineasm/x86.rb b/Source/JavaScriptCore/offlineasm/x86.rb index ac433f36b..0da7a0240 100644 --- a/Source/JavaScriptCore/offlineasm/x86.rb +++ b/Source/JavaScriptCore/offlineasm/x86.rb @@ -1,5 +1,5 @@ -# Copyright (C) 2012 Apple Inc. All rights reserved. -# Copyright (C) 2015 The Qt Company Ltd +# Copyright (C) 2012, 2014-2016 Apple Inc. All rights reserved. +# Copyright (C) 2013 Digia Plc. 
and/or its subsidiary(-ies) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions @@ -24,11 +24,86 @@ require "config" +# GPR conventions, to match the baseline JIT: +# +# +# On x86-32 bits (windows and non-windows) +# a0, a1, a2, a3 are only there for ease-of-use of offlineasm; they are not +# actually considered as such by the ABI and we need to push/pop our arguments +# on the stack. a0 and a1 are ecx and edx to follow fastcall. +# +# eax => t0, a2, r0 +# edx => t1, a1, r1 +# ecx => t2, a0 +# ebx => t3, a3 (callee-save) +# esi => t4 (callee-save) +# edi => t5 (callee-save) +# ebp => cfr +# esp => sp +# +# On x86-64 non-windows +# +# rax => t0, r0 +# rdi => a0 +# rsi => t1, a1 +# rdx => t2, a2, r1 +# rcx => t3, a3 +# r8 => t4 +# r10 => t5 +# rbx => csr0 (callee-save, PB, unused in baseline) +# r12 => csr1 (callee-save) +# r13 => csr2 (callee-save) +# r14 => csr3 (callee-save, tagTypeNumber) +# r15 => csr4 (callee-save, tagMask) +# rsp => sp +# rbp => cfr +# r11 => (scratch) +# +# On x86-64 windows +# Arguments need to be push/pop'd on the stack in addition to being stored in +# the registers. Also, >8 return types are returned in a weird way. +# +# rax => t0, r0 +# rcx => a0 +# rdx => t1, a1, r1 +# r8 => t2, a2 +# r9 => t3, a3 +# r10 => t4 +# rbx => csr0 (callee-save, PB, unused in baseline) +# rsi => csr1 (callee-save) +# rdi => csr2 (callee-save) +# r12 => csr3 (callee-save) +# r13 => csr4 (callee-save) +# r14 => csr5 (callee-save, tagTypeNumber) +# r15 => csr6 (callee-save, tagMask) +# rsp => sp +# rbp => cfr +# r11 => (scratch) + def isX64 case $activeBackend when "X86" false + when "X86_WIN" + false + when "X86_64" + true + when "X86_64_WIN" + true + else + raise "bad value for $activeBackend: #{$activeBackend}" + end +end + +def isWin + case $activeBackend + when "X86" + false + when "X86_WIN" + true when "X86_64" + false + when "X86_64_WIN" true else raise "bad value for $activeBackend: #{$activeBackend}" @@ -39,228 +114,222 @@ def useX87 case $activeBackend when "X86" true + when "X86_WIN" + true when "X86_64" false + when "X86_64_WIN" + false else raise "bad value for $activeBackend: #{$activeBackend}" end end +def isMSVC + $options.has_key?(:assembler) && $options[:assembler] == "MASM" +end + +def isIntelSyntax + $options.has_key?(:assembler) && $options[:assembler] == "MASM" +end + +def register(name) + isIntelSyntax ? name : "%" + name +end + +def offsetRegister(off, register) + isIntelSyntax ? "[#{off} + #{register}]" : "#{off}(#{register})" +end + +def callPrefix + isIntelSyntax ? "" : "*" +end + +def orderOperands(opA, opB) + isIntelSyntax ? "#{opB}, #{opA}" : "#{opA}, #{opB}" +end + +def const(c) + isIntelSyntax ? "#{c}" : "$#{c}" +end + +def getSizeString(kind) + if !isIntelSyntax + return "" + end + + size = "" + case kind + when :byte + size = "byte" + when :half + size = "word" + when :int + size = "dword" + when :ptr + size = isX64 ? 
"qword" : "dword" + when :double + size = "qword" + when :quad + size = "qword" + else + raise "Invalid kind #{kind}" + end + + return size + " " + "ptr" + " "; +end + class SpecialRegister < NoChildren def x86Operand(kind) raise unless @name =~ /^r/ raise unless isX64 case kind when :half - "%" + @name + "w" + register(@name + "w") when :int - "%" + @name + "d" + register(@name + "d") when :ptr - "%" + @name + register(@name) when :quad - "%" + @name + register(@name) else raise end end def x86CallOperand(kind) # Call operands are not allowed to be partial registers. - "*#{x86Operand(:quad)}" + "#{callPrefix}#{x86Operand(:quad)}" end end X64_SCRATCH_REGISTER = SpecialRegister.new("r11") +def x86GPRName(name, kind) + case name + when "eax", "ebx", "ecx", "edx" + name8 = name[1] + 'l' + name16 = name[1..2] + when "esi", "edi", "ebp", "esp" + name16 = name[1..2] + name8 = name16 + 'l' + when "rax", "rbx", "rcx", "rdx" + raise "bad GPR name #{name} in 32-bit X86" unless isX64 + name8 = name[1] + 'l' + name16 = name[1..2] + when "r8", "r9", "r10", "r12", "r13", "r14", "r15" + raise "bad GPR name #{name} in 32-bit X86" unless isX64 + case kind + when :half + return register(name + "w") + when :int + return register(name + "d") + when :ptr + return register(name) + when :quad + return register(name) + end + else + raise "bad GPR name #{name}" + end + case kind + when :byte + register(name8) + when :half + register(name16) + when :int + register("e" + name16) + when :ptr + register((isX64 ? "r" : "e") + name16) + when :quad + isX64 ? register("r" + name16) : raise + else + raise "invalid kind #{kind} for GPR #{name} in X86" + end +end + class RegisterID def supports8BitOnX86 - case name - when "t0", "a0", "r0", "t1", "a1", "r1", "t2", "t3" + case x86GPR + when "eax", "ebx", "ecx", "edx", "edi", "esi", "ebp", "esp" true - when "cfr", "ttnr", "tmr" + when "r8", "r9", "r10", "r12", "r13", "r14", "r15" false - when "t4", "t5" - isX64 else raise end end - - def x86Operand(kind) - case name - when "t0", "a0", "r0" - case kind - when :byte - "%al" - when :half - "%ax" - when :int - "%eax" - when :ptr - isX64 ? "%rax" : "%eax" - when :quad - isX64 ? "%rax" : raise - else - raise - end - when "t1", "a1", "r1" - case kind - when :byte - "%dl" - when :half - "%dx" - when :int - "%edx" - when :ptr - isX64 ? "%rdx" : "%edx" - when :quad - isX64 ? "%rdx" : raise - else - raise - end - when "t2" - case kind - when :byte - "%cl" - when :half - "%cx" - when :int - "%ecx" - when :ptr - isX64 ? "%rcx" : "%ecx" - when :quad - isX64 ? "%rcx" : raise - else - raise - end - when "t3" - case kind - when :byte - "%bl" - when :half - "%bx" - when :int - "%ebx" - when :ptr - isX64 ? "%rbx" : "%ebx" - when :quad - isX64 ? "%rbx" : raise - else - raise - end - when "t4" - case kind - when :byte - "%sil" - when :half - "%si" - when :int - "%esi" - when :ptr - isX64 ? "%rsi" : "%esi" - when :quad - isX64 ? "%rsi" : raise - else - raise - end - when "cfr" - if isX64 - case kind - when :half - "%r13w" - when :int - "%r13d" - when :ptr - "%r13" - when :quad - "%r13" - else - raise - end - else - case kind - when :byte - "%dil" - when :half - "%di" - when :int - "%edi" - when :ptr - "%edi" - else - raise - end - end - when "sp" - case kind - when :byte - "%spl" - when :half - "%sp" - when :int - "%esp" - when :ptr - isX64 ? "%rsp" : "%esp" - when :quad - isX64 ? "%rsp" : raise + + def x86GPR + if isX64 + case name + when "t0", "r0" + "eax" + when "r1" + "edx" # t1 = a1 when isWin, t2 = a2 otherwise + when "a0" + isWin ? 
"ecx" : "edi" + when "t1", "a1" + isWin ? "edx" : "esi" + when "t2", "a2" + isWin ? "r8" : "edx" + when "t3", "a3" + isWin ? "r9" : "ecx" + when "t4" + isWin ? "r10" : "r8" + when "t5" + raise "cannot use register #{name} on X86-64 Windows" unless not isWin + "r10" + when "csr0" + "ebx" + when "csr1" + isWin ? "esi" : "r12" + when "csr2" + isWin ? "edi" : "r13" + when "csr3" + isWin ? "r12" : "r14" + when "csr4" + isWin ? "r13" : "r15" + when "csr5" + raise "cannot use register #{name} on X86-64" unless isWin + "r14" + when "csr6" + raise "cannot use register #{name} on X86-64" unless isWin + "r15" + when "cfr" + "ebp" + when "sp" + "esp" else - raise - end - when "t5" - raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64 - case kind - when :byte - "%dil" - when :half - "%di" - when :int - "%edi" - when :ptr - "%rdi" - when :quad - "%rdi" - end - when "t6" - raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64 - case kind - when :half - "%r10w" - when :int - "%r10d" - when :ptr - "%r10" - when :quad - "%r10" - end - when "csr1" - raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64 - case kind - when :half - "%r14w" - when :int - "%r14d" - when :ptr - "%r14" - when :quad - "%r14" - end - when "csr2" - raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64 - case kind - when :half - "%r15w" - when :int - "%r15d" - when :ptr - "%r15" - when :quad - "%r15" + raise "cannot use register #{name} on X86" end else - raise "Bad register #{name} for X86 at #{codeOriginString}" + case name + when "t0", "r0", "a2" + "eax" + when "t1", "r1", "a1" + "edx" + when "t2", "a0" + "ecx" + when "t3", "a3" + "ebx" + when "t4" + "esi" + when "t5" + "edi" + when "cfr" + "ebp" + when "sp" + "esp" + end end end + + def x86Operand(kind) + x86GPRName(x86GPR, kind) + end + def x86CallOperand(kind) - isX64 ? 
"*#{x86Operand(:quad)}" : "*#{x86Operand(:ptr)}" + "#{callPrefix}#{x86Operand(:ptr)}" end end @@ -270,17 +339,17 @@ class FPRegisterID raise if useX87 case name when "ft0", "fa0", "fr" - "%xmm0" + register("xmm0") when "ft1", "fa1" - "%xmm1" + register("xmm1") when "ft2", "fa2" - "%xmm2" + register("xmm2") when "ft3", "fa3" - "%xmm3" + register("xmm3") when "ft4" - "%xmm4" + register("xmm4") when "ft5" - "%xmm5" + register("xmm5") else raise "Bad register #{name} for X86 at #{codeOriginString}" end @@ -300,10 +369,10 @@ class FPRegisterID def x87Operand(offset) raise unless useX87 raise unless offset == 0 or offset == 1 - "%st(#{x87DefaultStackPosition + offset})" + "#{register("st")}(#{x87DefaultStackPosition + offset})" end def x86CallOperand(kind) - "*#{x86Operand(kind)}" + "#{callPrefix}#{x86Operand(kind)}" end end @@ -316,7 +385,7 @@ class Immediate end end def x86Operand(kind) - "$#{value}" + "#{const(value)}" end def x86CallOperand(kind) "#{value}" @@ -329,13 +398,13 @@ class Address end def x86AddressOperand(addressKind) - "#{offset.value}(#{base.x86Operand(addressKind)})" + "#{offsetRegister(offset.value, base.x86Operand(addressKind))}" end def x86Operand(kind) - x86AddressOperand(:ptr) + "#{getSizeString(kind)}#{x86AddressOperand(:ptr)}" end def x86CallOperand(kind) - "*#{x86Operand(kind)}" + "#{callPrefix}#{x86Operand(kind)}" end end @@ -345,15 +414,23 @@ class BaseIndex end def x86AddressOperand(addressKind) - "#{offset.value}(#{base.x86Operand(addressKind)}, #{index.x86Operand(addressKind)}, #{scale})" + if !isIntelSyntax + "#{offset.value}(#{base.x86Operand(addressKind)}, #{index.x86Operand(addressKind)}, #{scale})" + else + "#{getSizeString(addressKind)}[#{offset.value} + #{base.x86Operand(addressKind)} + #{index.x86Operand(addressKind)} * #{scale}]" + end end def x86Operand(kind) - x86AddressOperand(:ptr) + if !isIntelSyntax + x86AddressOperand(:ptr) + else + "#{getSizeString(kind)}[#{offset.value} + #{base.x86Operand(:ptr)} + #{index.x86Operand(:ptr)} * #{scale}]" + end end def x86CallOperand(kind) - "*#{x86Operand(kind)}" + "#{callPrefix}#{x86Operand(kind)}" end end @@ -371,7 +448,7 @@ class AbsoluteAddress end def x86CallOperand(kind) - "*#{address.value}" + "#{callPrefix}#{address.value}" end end @@ -382,6 +459,9 @@ class LabelReference end class LocalLabelReference + def x86Operand(kind) + asmLabel + end def x86CallOperand(kind) asmLabel end @@ -426,20 +506,29 @@ class Sequence return newList end + def getModifiedListX86_64_WIN + getModifiedListX86_64 + end end class Instruction + def x86Operands(*kinds) raise unless kinds.size == operands.size result = [] kinds.size.times { | idx | - result << operands[idx].x86Operand(kinds[idx]) + i = isIntelSyntax ? (kinds.size - idx - 1) : idx + result << operands[i].x86Operand(kinds[i]) } result.join(", ") end def x86Suffix(kind) + if isIntelSyntax + return "" + end + case kind when :byte "b" @@ -476,19 +565,23 @@ class Instruction raise end end + + def getImplicitOperandString + isIntelSyntax ? 
"st(0), " : "" + end def handleX86OpWithNumOperands(opcode, kind, numOperands) if numOperands == 3 if operands[0] == operands[2] - $asm.puts "#{opcode} #{operands[1].x86Operand(kind)}, #{operands[2].x86Operand(kind)}" + $asm.puts "#{opcode} #{orderOperands(operands[1].x86Operand(kind), operands[2].x86Operand(kind))}" elsif operands[1] == operands[2] - $asm.puts "#{opcode} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}" + $asm.puts "#{opcode} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}" else - $asm.puts "mov#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}" - $asm.puts "#{opcode} #{operands[1].x86Operand(kind)}, #{operands[2].x86Operand(kind)}" + $asm.puts "mov#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}" + $asm.puts "#{opcode} #{orderOperands(operands[1].x86Operand(kind), operands[2].x86Operand(kind))}" end else - $asm.puts "#{opcode} #{operands[0].x86Operand(kind)}, #{operands[1].x86Operand(kind)}" + $asm.puts "#{opcode} #{orderOperands(operands[0].x86Operand(kind), operands[1].x86Operand(kind))}" end end @@ -497,13 +590,12 @@ class Instruction end def handleX86Shift(opcode, kind) - if operands[0].is_a? Immediate or operands[0] == RegisterID.forName(nil, "t2") - $asm.puts "#{opcode} #{operands[0].x86Operand(:byte)}, #{operands[1].x86Operand(kind)}" + if operands[0].is_a? Immediate or operands[0].x86GPR == "ecx" + $asm.puts "#{opcode} #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(kind))}" else - cx = RegisterID.forName(nil, "t2") - $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{cx.x86Operand(:ptr)}" - $asm.puts "#{opcode} %cl, #{operands[1].x86Operand(kind)}" - $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{cx.x86Operand(:ptr)}" + $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{x86GPRName("ecx", :ptr)}" + $asm.puts "#{opcode} #{orderOperands(register("cl"), operands[1].x86Operand(kind))}" + $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{x86GPRName("ecx", :ptr)}" end end @@ -513,9 +605,9 @@ class Instruction else case mode when :normal - $asm.puts "ucomisd #{operands[1].x86Operand(:double)}, #{operands[0].x86Operand(:double)}" + $asm.puts "ucomisd #{orderOperands(operands[1].x86Operand(:double), operands[0].x86Operand(:double))}" when :reverse - $asm.puts "ucomisd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:double)}" + $asm.puts "ucomisd #{orderOperands(operands[0].x86Operand(:double), operands[1].x86Operand(:double))}" else raise mode.inspect end @@ -525,11 +617,11 @@ class Instruction def handleX86IntCompare(opcodeSuffix, kind) if operands[0].is_a? Immediate and operands[0].value == 0 and operands[1].is_a? RegisterID and (opcodeSuffix == "e" or opcodeSuffix == "ne") - $asm.puts "test#{x86Suffix(kind)} #{operands[1].x86Operand(kind)}" + $asm.puts "test#{x86Suffix(kind)} #{orderOperands(operands[1].x86Operand(kind), operands[1].x86Operand(kind))}" elsif operands[1].is_a? Immediate and operands[1].value == 0 and operands[0].is_a? 
RegisterID and (opcodeSuffix == "e" or opcodeSuffix == "ne") - $asm.puts "test#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}" + $asm.puts "test#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[0].x86Operand(kind))}" else - $asm.puts "cmp#{x86Suffix(kind)} #{operands[1].x86Operand(kind)}, #{operands[0].x86Operand(kind)}" + $asm.puts "cmp#{x86Suffix(kind)} #{orderOperands(operands[1].x86Operand(kind), operands[0].x86Operand(kind))}" end end @@ -541,12 +633,20 @@ class Instruction def handleX86Set(setOpcode, operand) if operand.supports8BitOnX86 $asm.puts "#{setOpcode} #{operand.x86Operand(:byte)}" - $asm.puts "movzbl #{operand.x86Operand(:byte)}, #{operand.x86Operand(:int)}" + if !isIntelSyntax + $asm.puts "movzbl #{orderOperands(operand.x86Operand(:byte), operand.x86Operand(:int))}" + else + $asm.puts "movzx #{orderOperands(operand.x86Operand(:byte), operand.x86Operand(:int))}" + end else - ax = RegisterID.new(nil, "t0") + ax = RegisterID.new(nil, "r0") $asm.puts "xchg#{x86Suffix(:ptr)} #{operand.x86Operand(:ptr)}, #{ax.x86Operand(:ptr)}" - $asm.puts "#{setOpcode} %al" - $asm.puts "movzbl %al, %eax" + $asm.puts "#{setOpcode} #{ax.x86Operand(:byte)}" + if !isIntelSyntax + $asm.puts "movzbl #{ax.x86Operand(:byte)}, #{ax.x86Operand(:int)}" + else + $asm.puts "movzx #{ax.x86Operand(:int)}, #{ax.x86Operand(:byte)}" + end $asm.puts "xchg#{x86Suffix(:ptr)} #{operand.x86Operand(:ptr)}, #{ax.x86Operand(:ptr)}" end end @@ -571,10 +671,10 @@ class Instruction if value.is_a? RegisterID $asm.puts "test#{x86Suffix(kind)} #{value.x86Operand(kind)}, #{value.x86Operand(kind)}" else - $asm.puts "cmp#{x86Suffix(kind)} $0, #{value.x86Operand(kind)}" + $asm.puts "cmp#{x86Suffix(kind)} #{orderOperands(const(0), value.x86Operand(kind))}" end else - $asm.puts "test#{x86Suffix(kind)} #{mask.x86Operand(kind)}, #{value.x86Operand(kind)}" + $asm.puts "test#{x86Suffix(kind)} #{orderOperands(mask.x86Operand(kind), value.x86Operand(kind))}" end end @@ -604,7 +704,7 @@ class Instruction def handleX86SubBranch(branchOpcode, kind) if operands.size == 4 and operands[1] == operands[2] $asm.puts "neg#{x86Suffix(kind)} #{operands[2].x86Operand(kind)}" - $asm.puts "add#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}" + $asm.puts "add#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}" else handleX86OpWithNumOperands("sub#{x86Suffix(kind)}", kind, operands.size - 1) end @@ -622,25 +722,29 @@ class Instruction def handleX86Add(kind) if operands.size == 3 and operands[1] == operands[2] unless Immediate.new(nil, 0) == operands[0] - $asm.puts "add#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}" + $asm.puts "add#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}" end elsif operands.size == 3 and operands[0].is_a? Immediate raise unless operands[1].is_a? RegisterID raise unless operands[2].is_a? 
@@ -622,25 +722,29 @@ class Instruction
     def handleX86Add(kind)
         if operands.size == 3 and operands[1] == operands[2]
             unless Immediate.new(nil, 0) == operands[0]
-                $asm.puts "add#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+                $asm.puts "add#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}"
             end
         elsif operands.size == 3 and operands[0].is_a? Immediate
             raise unless operands[1].is_a? RegisterID
             raise unless operands[2].is_a? RegisterID
             if operands[0].value == 0
                 unless operands[1] == operands[2]
-                    $asm.puts "mov#{x86Suffix(kind)} #{operands[1].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+                    $asm.puts "mov#{x86Suffix(kind)} #{orderOperands(operands[1].x86Operand(kind), operands[2].x86Operand(kind))}"
                 end
             else
-                $asm.puts "lea#{x86Suffix(kind)} #{operands[0].value}(#{operands[1].x86Operand(kind)}), #{operands[2].x86Operand(kind)}"
+                $asm.puts "lea#{x86Suffix(kind)} #{orderOperands(offsetRegister(operands[0].value, operands[1].x86Operand(kind)), operands[2].x86Operand(kind))}"
             end
         elsif operands.size == 3 and operands[0].is_a? RegisterID
             raise unless operands[1].is_a? RegisterID
             raise unless operands[2].is_a? RegisterID
             if operands[0] == operands[2]
-                $asm.puts "add#{x86Suffix(kind)} #{operands[1].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+                $asm.puts "add#{x86Suffix(kind)} #{orderOperands(operands[1].x86Operand(kind), operands[2].x86Operand(kind))}"
             else
-                $asm.puts "lea#{x86Suffix(kind)} (#{operands[0].x86Operand(kind)}, #{operands[1].x86Operand(kind)}), #{operands[2].x86Operand(kind)}"
+                if !isIntelSyntax
+                    $asm.puts "lea#{x86Suffix(kind)} (#{operands[0].x86Operand(kind)}, #{operands[1].x86Operand(kind)}), #{operands[2].x86Operand(kind)}"
+                else
+                    $asm.puts "lea#{x86Suffix(kind)} #{operands[2].x86Operand(kind)}, [#{operands[0].x86Operand(kind)} + #{operands[1].x86Operand(kind)}]"
+                end
             end
         else
             unless Immediate.new(nil, 0) == operands[0]
@@ -652,7 +756,7 @@ class Instruction
     def handleX86Sub(kind)
         if operands.size == 3 and operands[1] == operands[2]
             $asm.puts "neg#{x86Suffix(kind)} #{operands[2].x86Operand(kind)}"
-            $asm.puts "add#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+            $asm.puts "add#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}"
         else
             handleX86Op("sub#{x86Suffix(kind)}", kind)
         end
@@ -668,6 +772,20 @@ class Instruction
         end
     end
 
+    def handleX86Peek()
+        sp = RegisterID.new(nil, "sp")
+        opA = offsetRegister(operands[0].value * x86Bytes(:ptr), sp.x86Operand(:ptr))
+        opB = operands[1].x86Operand(:ptr)
+        $asm.puts "mov#{x86Suffix(:ptr)} #{orderOperands(opA, opB)}"
+    end
+
+    def handleX86Poke()
+        sp = RegisterID.new(nil, "sp")
+        opA = operands[0].x86Operand(:ptr)
+        opB = offsetRegister(operands[1].value * x86Bytes(:ptr), sp.x86Operand(:ptr))
+        $asm.puts "mov#{x86Suffix(:ptr)} #{orderOperands(opA, opB)}"
+    end
+
     def handleMove
         if Immediate.new(nil, 0) == operands[0] and operands[1].is_a? RegisterID
             if isX64
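The new handleX86Peek and handleX86Poke above collapse what used to be four near-identical opcode arms (peek/peekq/poke/pokeq, removed further down) into one pair of helpers built on offsetRegister. That helper has to render a base-plus-displacement memory operand in either dialect; a sketch under the same assumptions:

    # Sketch: "-8(%esp)"-style operands for the GNU assembler versus
    # "[esp + -8]"-style operands for MASM.
    def offsetRegister(offset, register)
        if !isIntelSyntax
            "#{offset}(#{register})"
        else
            "[#{register} + #{offset}]"
        end
    end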
             if isX64
@@ -683,22 +801,23 @@ class Instruction
             end
         end
     end
-    
+
     def handleX87Compare(mode)
+        floatingPointCompareImplicitOperand = getImplicitOperandString
         case mode
         when :normal
             if (operands[0].x87DefaultStackPosition == 0)
-                $asm.puts "fucomi #{operands[1].x87Operand(0)}"
+                $asm.puts "fucomi #{floatingPointCompareImplicitOperand}#{operands[1].x87Operand(0)}"
             else
                 $asm.puts "fld #{operands[0].x87Operand(0)}"
-                $asm.puts "fucomip #{operands[1].x87Operand(1)}"
+                $asm.puts "fucomip #{floatingPointCompareImplicitOperand}#{operands[1].x87Operand(1)}"
             end
         when :reverse
             if (operands[1].x87DefaultStackPosition == 0)
-                $asm.puts "fucomi #{operands[0].x87Operand(0)}"
+                $asm.puts "fucomi #{floatingPointCompareImplicitOperand}#{operands[0].x87Operand(0)}"
             else
                 $asm.puts "fld #{operands[1].x87Operand(0)}"
-                $asm.puts "fucomip #{operands[0].x87Operand(1)}"
+                $asm.puts "fucomip #{floatingPointCompareImplicitOperand}#{operands[0].x87Operand(1)}"
             end
         else
             raise mode.inspect
@@ -707,12 +826,16 @@ class Instruction
 
     def handleX87BinOp(opcode, opcodereverse)
         if (operands[1].x87DefaultStackPosition == 0)
-            $asm.puts "#{opcode} #{operands[0].x87Operand(0)}, %st"
+            $asm.puts "#{opcode} #{orderOperands(operands[0].x87Operand(0), register("st"))}"
         elsif (operands[0].x87DefaultStackPosition == 0)
-            $asm.puts "#{opcodereverse} %st, #{operands[1].x87Operand(0)}"
+            if !isIntelSyntax
+                $asm.puts "#{opcodereverse} #{register("st")}, #{operands[1].x87Operand(0)}"
+            else
+                $asm.puts "#{opcode} #{operands[1].x87Operand(0)}, #{register("st")}"
+            end
         else
             $asm.puts "fld #{operands[0].x87Operand(0)}"
-            $asm.puts "#{opcodereverse}p %st, #{operands[1].x87Operand(1)}"
+            $asm.puts "#{opcodereverse}p #{orderOperands(register("st"), operands[1].x87Operand(1))}"
        end
    end
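floatingPointCompareImplicitOperand comes from getImplicitOperandString, whose tail ("st(0), " : "") is the fragment visible at the very top of this section. Reconstructed from that fragment, the helper is presumably just:

    # MASM wants fucomi/fucomip's implicit st(0) operand spelled out;
    # the GNU assembler rejects it.
    def getImplicitOperandString
        isIntelSyntax ? "st(0), " : ""
    end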
handleX86Shift("sar#{x86Suffix(:ptr)}", :ptr) when "rshiftq" handleX86Shift("sar#{x86Suffix(:quad)}", :quad) when "urshifti" - handleX86Shift("shrl", :int) + handleX86Shift("shr#{x86Suffix(:int)}", :int) when "urshiftp" handleX86Shift("shr#{x86Suffix(:ptr)}", :ptr) when "urshiftq" @@ -788,36 +922,52 @@ class Instruction when "subq" handleX86Sub(:quad) when "xori" - handleX86Op("xorl", :int) + handleX86Op("xor#{x86Suffix(:int)}", :int) when "xorp" handleX86Op("xor#{x86Suffix(:ptr)}", :ptr) when "xorq" handleX86Op("xor#{x86Suffix(:quad)}", :quad) when "loadi", "storei" - $asm.puts "movl #{x86Operands(:int, :int)}" + $asm.puts "mov#{x86Suffix(:int)} #{x86Operands(:int, :int)}" when "loadis" if isX64 - $asm.puts "movslq #{x86Operands(:int, :quad)}" + if !isIntelSyntax + $asm.puts "movslq #{x86Operands(:int, :quad)}" + else + $asm.puts "movsxd #{x86Operands(:int, :quad)}" + end else - $asm.puts "movl #{x86Operands(:int, :int)}" + $asm.puts "mov#{x86Suffix(:int)} #{x86Operands(:int, :int)}" end when "loadp", "storep" $asm.puts "mov#{x86Suffix(:ptr)} #{x86Operands(:ptr, :ptr)}" when "loadq", "storeq" $asm.puts "mov#{x86Suffix(:quad)} #{x86Operands(:quad, :quad)}" when "loadb" - $asm.puts "movzbl #{operands[0].x86Operand(:byte)}, #{operands[1].x86Operand(:int)}" + if !isIntelSyntax + $asm.puts "movzbl #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(:int))}" + else + $asm.puts "movzx #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(:int))}" + end when "loadbs" $asm.puts "movsbl #{operands[0].x86Operand(:byte)}, #{operands[1].x86Operand(:int)}" when "loadh" - $asm.puts "movzwl #{operands[0].x86Operand(:half)}, #{operands[1].x86Operand(:int)}" + if !isIntelSyntax + $asm.puts "movzwl #{orderOperands(operands[0].x86Operand(:half), operands[1].x86Operand(:int))}" + else + $asm.puts "movzx #{orderOperands(operands[0].x86Operand(:half), operands[1].x86Operand(:int))}" + end when "loadhs" $asm.puts "movswl #{operands[0].x86Operand(:half)}, #{operands[1].x86Operand(:int)}" when "storeb" - $asm.puts "movb #{x86Operands(:byte, :byte)}" + $asm.puts "mov#{x86Suffix(:byte)} #{x86Operands(:byte, :byte)}" when "loadd" if useX87 - $asm.puts "fldl #{operands[0].x86Operand(:double)}" + if !isIntelSyntax + $asm.puts "fldl #{operands[0].x86Operand(:double)}" + else + $asm.puts "fld #{operands[0].x86Operand(:double)}" + end $asm.puts "fstp #{operands[1].x87Operand(1)}" else $asm.puts "movsd #{x86Operands(:double, :double)}" @@ -836,10 +986,14 @@ class Instruction when "stored" if useX87 if (operands[0].x87DefaultStackPosition == 0) - $asm.puts "fstl #{operands[1].x86Operand(:double)}" + $asm.puts "fst#{x86Suffix(:int)} #{operands[1].x86Operand(:double)}" else $asm.puts "fld #{operands[0].x87Operand(0)}" - $asm.puts "fstpl #{operands[1].x86Operand(:double)}" + if !isIntelSyntax + $asm.puts "fstpl #{operands[1].x86Operand(:double)}" + else + $asm.puts "fstp #{operands[1].x86Operand(:double)}" + end end else $asm.puts "movsd #{x86Operands(:double, :double)}" @@ -879,17 +1033,17 @@ class Instruction when "ci2d" if useX87 sp = RegisterID.new(nil, "sp") - $asm.puts "movl #{operands[0].x86Operand(:int)}, -4(#{sp.x86Operand(:ptr)})" - $asm.puts "fildl -4(#{sp.x86Operand(:ptr)})" + $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(operands[0].x86Operand(:int), offsetRegister(-4, sp.x86Operand(:ptr)))}" + $asm.puts "fild#{x86Suffix(:ptr)} #{getSizeString(:ptr)}#{offsetRegister(-4, sp.x86Operand(:ptr))}" $asm.puts "fstp #{operands[1].x87Operand(1)}" else - $asm.puts "cvtsi2sd 
@@ -788,36 +922,52 @@ class Instruction
         when "subq"
             handleX86Sub(:quad)
         when "xori"
-            handleX86Op("xorl", :int)
+            handleX86Op("xor#{x86Suffix(:int)}", :int)
         when "xorp"
             handleX86Op("xor#{x86Suffix(:ptr)}", :ptr)
         when "xorq"
             handleX86Op("xor#{x86Suffix(:quad)}", :quad)
         when "loadi", "storei"
-            $asm.puts "movl #{x86Operands(:int, :int)}"
+            $asm.puts "mov#{x86Suffix(:int)} #{x86Operands(:int, :int)}"
         when "loadis"
             if isX64
-                $asm.puts "movslq #{x86Operands(:int, :quad)}"
+                if !isIntelSyntax
+                    $asm.puts "movslq #{x86Operands(:int, :quad)}"
+                else
+                    $asm.puts "movsxd #{x86Operands(:int, :quad)}"
+                end
             else
-                $asm.puts "movl #{x86Operands(:int, :int)}"
+                $asm.puts "mov#{x86Suffix(:int)} #{x86Operands(:int, :int)}"
             end
         when "loadp", "storep"
             $asm.puts "mov#{x86Suffix(:ptr)} #{x86Operands(:ptr, :ptr)}"
         when "loadq", "storeq"
             $asm.puts "mov#{x86Suffix(:quad)} #{x86Operands(:quad, :quad)}"
         when "loadb"
-            $asm.puts "movzbl #{operands[0].x86Operand(:byte)}, #{operands[1].x86Operand(:int)}"
+            if !isIntelSyntax
+                $asm.puts "movzbl #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(:int))}"
+            else
+                $asm.puts "movzx #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(:int))}"
+            end
         when "loadbs"
             $asm.puts "movsbl #{operands[0].x86Operand(:byte)}, #{operands[1].x86Operand(:int)}"
         when "loadh"
-            $asm.puts "movzwl #{operands[0].x86Operand(:half)}, #{operands[1].x86Operand(:int)}"
+            if !isIntelSyntax
+                $asm.puts "movzwl #{orderOperands(operands[0].x86Operand(:half), operands[1].x86Operand(:int))}"
+            else
+                $asm.puts "movzx #{orderOperands(operands[0].x86Operand(:half), operands[1].x86Operand(:int))}"
+            end
         when "loadhs"
             $asm.puts "movswl #{operands[0].x86Operand(:half)}, #{operands[1].x86Operand(:int)}"
         when "storeb"
-            $asm.puts "movb #{x86Operands(:byte, :byte)}"
+            $asm.puts "mov#{x86Suffix(:byte)} #{x86Operands(:byte, :byte)}"
         when "loadd"
             if useX87
-                $asm.puts "fldl #{operands[0].x86Operand(:double)}"
+                if !isIntelSyntax
+                    $asm.puts "fldl #{operands[0].x86Operand(:double)}"
+                else
+                    $asm.puts "fld #{operands[0].x86Operand(:double)}"
+                end
                 $asm.puts "fstp #{operands[1].x87Operand(1)}"
             else
                 $asm.puts "movsd #{x86Operands(:double, :double)}"
@@ -836,10 +986,14 @@ class Instruction
         when "stored"
             if useX87
                 if (operands[0].x87DefaultStackPosition == 0)
-                    $asm.puts "fstl #{operands[1].x86Operand(:double)}"
+                    $asm.puts "fst#{x86Suffix(:int)} #{operands[1].x86Operand(:double)}"
                 else
                     $asm.puts "fld #{operands[0].x87Operand(0)}"
-                    $asm.puts "fstpl #{operands[1].x86Operand(:double)}"
+                    if !isIntelSyntax
+                        $asm.puts "fstpl #{operands[1].x86Operand(:double)}"
+                    else
+                        $asm.puts "fstp #{operands[1].x86Operand(:double)}"
+                    end
                 end
             else
                 $asm.puts "movsd #{x86Operands(:double, :double)}"
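The loadb/loadh hunks show why a suffix swap alone is not enough: zero-extending loads are spelled movzbl/movzwl (source first) in AT&T syntax but movzx (destination first, with the size carried by the memory operand) in MASM, so both the mnemonic and the operand order change. The same pattern, pulled out as a standalone sketch (emitZeroExtendByteLoad and its src/dst strings are hypothetical names, not part of the diff):

    # Same lowering as the "loadb" arm above, for pre-formatted operands.
    def emitZeroExtendByteLoad(src, dst)
        if !isIntelSyntax
            $asm.puts "movzbl #{orderOperands(src, dst)}"  # e.g. movzbl 4(%edx), %eax
        else
            $asm.puts "movzx #{orderOperands(src, dst)}"   # e.g. movzx eax, byte ptr [edx + 4]
        end
    end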
@@ -879,17 +1033,17 @@ class Instruction
         when "ci2d"
             if useX87
                 sp = RegisterID.new(nil, "sp")
-                $asm.puts "movl #{operands[0].x86Operand(:int)}, -4(#{sp.x86Operand(:ptr)})"
-                $asm.puts "fildl -4(#{sp.x86Operand(:ptr)})"
+                $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(operands[0].x86Operand(:int), offsetRegister(-4, sp.x86Operand(:ptr)))}"
+                $asm.puts "fild#{x86Suffix(:ptr)} #{getSizeString(:ptr)}#{offsetRegister(-4, sp.x86Operand(:ptr))}"
                 $asm.puts "fstp #{operands[1].x87Operand(1)}"
             else
-                $asm.puts "cvtsi2sd #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:double)}"
+                $asm.puts "cvtsi2sd #{orderOperands(operands[0].x86Operand(:int), operands[1].x86Operand(:double))}"
             end
         when "bdeq"
             if useX87
                 handleX87Compare(:normal)
             else
-                $asm.puts "ucomisd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:double)}"
+                $asm.puts "ucomisd #{orderOperands(operands[0].x86Operand(:double), operands[1].x86Operand(:double))}"
             end
             if operands[0] == operands[1]
                 # This is just a jump ordered, which is a jnp.
@@ -916,7 +1070,7 @@ class Instruction
             if useX87
                 handleX87Compare(:normal)
             else
-                $asm.puts "ucomisd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:double)}"
+                $asm.puts "ucomisd #{orderOperands(operands[0].x86Operand(:double), operands[1].x86Operand(:double))}"
             end
             if operands[0] == operands[1]
                 # This is just a jump unordered, which is a jp.
@@ -950,23 +1104,24 @@ class Instruction
             $asm.puts "cvttsd2si #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
         when "bcd2i"
             if useX87
+                floatingPointCompareImplicitOperand = getImplicitOperandString
                 sp = RegisterID.new(nil, "sp")
                 if (operands[0].x87DefaultStackPosition == 0)
                     $asm.puts "fistl -4(#{sp.x86Operand(:ptr)})"
                 else
                     $asm.puts "fld #{operands[0].x87Operand(0)}"
-                    $asm.puts "fistpl -4(#{sp.x86Operand(:ptr)})"
+                    $asm.puts "fistp#{x86Suffix(:ptr)} #{getSizeString(:ptr)}#{offsetRegister(-4, sp.x86Operand(:ptr))}"
                 end
-                $asm.puts "movl -4(#{sp.x86Operand(:ptr)}), #{operands[1].x86Operand(:int)}"
-                $asm.puts "testl #{operands[1].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
+                $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(offsetRegister(-4, sp.x86Operand(:ptr)), operands[1].x86Operand(:int))}"
+                $asm.puts "test#{x86Suffix(:int)} #{operands[1].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
                 $asm.puts "je #{operands[2].asmLabel}"
-                $asm.puts "fildl -4(#{sp.x86Operand(:ptr)})"
-                $asm.puts "fucomip #{operands[0].x87Operand(1)}"
+                $asm.puts "fild#{x86Suffix(:int)} #{getSizeString(:int)}#{offsetRegister(-4, sp.x86Operand(:ptr))}"
+                $asm.puts "fucomip #{floatingPointCompareImplicitOperand}#{operands[0].x87Operand(1)}"
                 $asm.puts "jp #{operands[2].asmLabel}"
                 $asm.puts "jne #{operands[2].asmLabel}"
             else
                 $asm.puts "cvttsd2si #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
-                $asm.puts "testl #{operands[1].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
+                $asm.puts "test#{x86Suffix(:int)} #{operands[1].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
                 $asm.puts "je #{operands[2].asmLabel}"
                 $asm.puts "cvtsi2sd #{operands[1].x86Operand(:int)}, %xmm7"
                 $asm.puts "ucomisd #{operands[0].x86Operand(:double)}, %xmm7"
@@ -981,15 +1136,25 @@ class Instruction
                 $asm.puts "xorpd #{operands[0].x86Operand(:double)}, #{operands[0].x86Operand(:double)}"
             end
         when "pop"
-            $asm.puts "pop #{operands[0].x86Operand(:ptr)}"
+            operands.each {
+                | op |
+                $asm.puts "pop #{op.x86Operand(:ptr)}"
+            }
         when "push"
-            $asm.puts "push #{operands[0].x86Operand(:ptr)}"
+            operands.each {
+                | op |
+                $asm.puts "push #{op.x86Operand(:ptr)}"
+            }
         when "move"
             handleMove
         when "sxi2q"
-            $asm.puts "movslq #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:quad)}"
+            if !isIntelSyntax
+                $asm.puts "movslq #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:quad)}"
+            else
+                $asm.puts "movsxd #{orderOperands(operands[0].x86Operand(:int), operands[1].x86Operand(:quad))}"
+            end
         when "zxi2q"
-            $asm.puts "movl #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
+            $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(operands[0].x86Operand(:int), operands[1].x86Operand(:int))}"
         when "nop"
             $asm.puts "nop"
         when "bieq"
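The x87 paths above pair getSizeString with offsetRegister because MASM requires an explicit size qualifier on memory operands (dword ptr [...]) where the GNU assembler encodes the width in the fild/fistp suffix instead. A sketch of the helper's assumed shape:

    # Sketch: empty for AT&T output, "<size> ptr " for MASM.
    def getSizeString(kind)
        return "" if !isIntelSyntax
        size = case kind
               when :byte then "byte"
               when :half then "word"
               when :int then "dword"
               when :ptr then isX64 ? "qword" : "dword"
               when :quad, :double then "qword"
               else raise
               end
        "#{size} ptr "
    end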
@@ -1099,25 +1264,25 @@ class Instruction
         when "jmp"
             $asm.puts "jmp #{operands[0].x86CallOperand(:ptr)}"
         when "baddio"
-            handleX86OpBranch("addl", "jo", :int)
+            handleX86OpBranch("add#{x86Suffix(:int)}", "jo", :int)
         when "baddpo"
             handleX86OpBranch("add#{x86Suffix(:ptr)}", "jo", :ptr)
         when "baddqo"
             handleX86OpBranch("add#{x86Suffix(:quad)}", "jo", :quad)
         when "baddis"
-            handleX86OpBranch("addl", "js", :int)
+            handleX86OpBranch("add#{x86Suffix(:int)}", "js", :int)
         when "baddps"
             handleX86OpBranch("add#{x86Suffix(:ptr)}", "js", :ptr)
         when "baddqs"
             handleX86OpBranch("add#{x86Suffix(:quad)}", "js", :quad)
         when "baddiz"
-            handleX86OpBranch("addl", "jz", :int)
+            handleX86OpBranch("add#{x86Suffix(:int)}", "jz", :int)
         when "baddpz"
             handleX86OpBranch("add#{x86Suffix(:ptr)}", "jz", :ptr)
         when "baddqz"
             handleX86OpBranch("add#{x86Suffix(:quad)}", "jz", :quad)
         when "baddinz"
-            handleX86OpBranch("addl", "jnz", :int)
+            handleX86OpBranch("add#{x86Suffix(:int)}", "jnz", :int)
         when "baddpnz"
             handleX86OpBranch("add#{x86Suffix(:ptr)}", "jnz", :ptr)
         when "baddqnz"
@@ -1131,13 +1296,13 @@ class Instruction
         when "bsubinz"
             handleX86SubBranch("jnz", :int)
         when "bmulio"
-            handleX86OpBranch("imull", "jo", :int)
+            handleX86OpBranch("imul#{x86Suffix(:int)}", "jo", :int)
         when "bmulis"
-            handleX86OpBranch("imull", "js", :int)
+            handleX86OpBranch("imul#{x86Suffix(:int)}", "js", :int)
         when "bmuliz"
-            handleX86OpBranch("imull", "jz", :int)
+            handleX86OpBranch("imul#{x86Suffix(:int)}", "jz", :int)
         when "bmulinz"
-            handleX86OpBranch("imull", "jnz", :int)
+            handleX86OpBranch("imul#{x86Suffix(:int)}", "jnz", :int)
         when "borio"
             handleX86OpBranch("orl", "jo", :int)
         when "boris"
@@ -1147,15 +1312,19 @@ class Instruction
         when "borinz"
             handleX86OpBranch("orl", "jnz", :int)
         when "break"
-            $asm.puts "int $3"
+            $asm.puts "int #{const(3)}"
         when "call"
             if useX87
                 2.times {
                     | offset |
-                    $asm.puts "ffree %st(#{offset})"
+                    $asm.puts "ffree #{register("st")}(#{offset})"
                 }
             end
-            $asm.puts "call #{operands[0].x86CallOperand(:ptr)}"
+            op = operands[0].x86CallOperand(:ptr)
+            if operands[0].is_a? LabelReference
+                operands[0].used
+            end
+            $asm.puts "call #{op}"
         when "ret"
             $asm.puts "ret"
         when "cieq"
@@ -1263,27 +1432,19 @@ class Instruction
         when "tbnz"
             handleX86SetTest("setnz", :byte)
         when "peek"
-            sp = RegisterID.new(nil, "sp")
-            $asm.puts "mov#{x86Suffix(:ptr)} #{operands[0].value * x86Bytes(:ptr)}(#{sp.x86Operand(:ptr)}), #{operands[1].x86Operand(:ptr)}"
-        when "peekq"
-            sp = RegisterID.new(nil, "sp")
-            $asm.puts "mov#{x86Suffix(:quad)} #{operands[0].value * x86Bytes(:quad)}(#{sp.x86Operand(:ptr)}), #{operands[1].x86Operand(:quad)}"
+            handleX86Peek()
         when "poke"
-            sp = RegisterID.new(nil, "sp")
-            $asm.puts "mov#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{operands[1].value * x86Bytes(:ptr)}(#{sp.x86Operand(:ptr)})"
-        when "pokeq"
-            sp = RegisterID.new(nil, "sp")
-            $asm.puts "mov#{x86Suffix(:quad)} #{operands[0].x86Operand(:quad)}, #{operands[1].value * x86Bytes(:quad)}(#{sp.x86Operand(:ptr)})"
+            handleX86Poke()
         when "cdqi"
             $asm.puts "cdq"
         when "idivi"
-            $asm.puts "idivl #{operands[0].x86Operand(:int)}"
+            $asm.puts "idiv#{x86Suffix(:int)} #{operands[0].x86Operand(:int)}"
         when "fii2d"
             if useX87
                 sp = RegisterID.new(nil, "sp")
-                $asm.puts "movl #{operands[0].x86Operand(:int)}, -8(#{sp.x86Operand(:ptr)})"
-                $asm.puts "movl #{operands[1].x86Operand(:int)}, -4(#{sp.x86Operand(:ptr)})"
-                $asm.puts "fldl -8(#{sp.x86Operand(:ptr)})"
+                $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(operands[0].x86Operand(:int), offsetRegister(-8, sp.x86Operand(:ptr)))}"
+                $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(operands[1].x86Operand(:int), offsetRegister(-4, sp.x86Operand(:ptr)))}"
+                $asm.puts "fld#{x86Suffix(:ptr)} #{getSizeString(:double)}#{offsetRegister(-8, sp.x86Operand(:ptr))}"
                 $asm.puts "fstp #{operands[2].x87Operand(1)}"
             else
                 $asm.puts "movd #{operands[0].x86Operand(:int)}, #{operands[2].x86Operand(:double)}"
@@ -1295,13 +1456,13 @@ class Instruction
             if useX87
                 sp = RegisterID.new(nil, "sp")
                 if (operands[0].x87DefaultStackPosition == 0)
-                    $asm.puts "fstl -8(#{sp.x86Operand(:ptr)})"
+                    $asm.puts "fst#{x86Suffix(:ptr)} #{getSizeString(:double)}#{offsetRegister(-8, sp.x86Operand(:ptr))}"
                 else
                     $asm.puts "fld #{operands[0].x87Operand(0)}"
                     $asm.puts "fstpl -8(#{sp.x86Operand(:ptr)})"
                 end
-                $asm.puts "movl -8(#{sp.x86Operand(:ptr)}), #{operands[1].x86Operand(:int)}"
-                $asm.puts "movl -4(#{sp.x86Operand(:ptr)}), #{operands[2].x86Operand(:int)}"
+                $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(offsetRegister(-8, sp.x86Operand(:ptr)), operands[1].x86Operand(:int))}"
+                $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(offsetRegister(-4, sp.x86Operand(:ptr)), operands[2].x86Operand(:int))}"
             else
                 $asm.puts "movd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
                 $asm.puts "movsd #{operands[0].x86Operand(:double)}, %xmm7"
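The baddio/bmulio families above only swap the hard-coded mnemonic for a suffixed one; the branch plumbing itself is untouched. For context, a sketch of what handleX86OpBranch presumably does with those arguments: run the arithmetic through the shared operand handler, then emit the conditional jump to the label operand (the operand-count handling here is an assumption):

    # Sketch: arithmetic plus flag-based branch.
    def handleX86OpBranch(opcode, branchOpcode, kind)
        handleX86OpWithNumOperands(opcode, kind, operands.size - 1)
        jumpTarget = operands[-1]   # assumed: the label is the last operand
        $asm.puts "#{branchOpcode} #{jumpTarget.asmLabel}"
    end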
+ $asm.puts "movd #{operands[1].x86Operand(:double)}, #{operands[0].x86Operand(:quad)}" + end end when "fd2q" if useX87 sp = RegisterID.new(nil, "sp") if (operands[0].x87DefaultStackPosition == 0) - $asm.puts "fstl -8(#{sp.x86Operand(:ptr)})" + $asm.puts "fst#{x86Suffix(:int)} #{getSizeString(:int)}#{offsetRegister(-8, sp.x86Operand(:ptr))}" else $asm.puts "fld #{operands[0].x87Operand(0)}" $asm.puts "fstpl -8(#{sp.x86Operand(:ptr)})" end $asm.puts "movq -8(#{sp.x86Operand(:ptr)}), #{operands[1].x86Operand(:quad)}" else - $asm.puts "movq #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:quad)}" + if !isIntelSyntax + $asm.puts "movq #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:quad)}" + else + # MASM does not accept register operands with movq. + # Debugging shows that movd actually moves a qword when using MASM. + $asm.puts "movd #{operands[1].x86Operand(:quad)}, #{operands[0].x86Operand(:double)}" + end end when "bo" $asm.puts "jo #{operands[0].asmLabel}" @@ -1339,9 +1512,11 @@ class Instruction when "bnz" $asm.puts "jnz #{operands[0].asmLabel}" when "leai" - $asm.puts "leal #{operands[0].x86AddressOperand(:int)}, #{operands[1].x86Operand(:int)}" + $asm.puts "lea#{x86Suffix(:int)} #{orderOperands(operands[0].x86AddressOperand(:int), operands[1].x86Operand(:int))}" when "leap" - $asm.puts "lea#{x86Suffix(:ptr)} #{operands[0].x86AddressOperand(:ptr)}, #{operands[1].x86Operand(:ptr)}" + $asm.puts "lea#{x86Suffix(:ptr)} #{orderOperands(operands[0].x86AddressOperand(:ptr), operands[1].x86Operand(:ptr))}" + when "memfence" + $asm.puts "mfence" else lowerDefault end |