author | Lorry Tar Creator <lorry-tar-importer@lorry> | 2016-04-10 09:28:39 +0000 |
---|---|---|
committer | Lorry Tar Creator <lorry-tar-importer@lorry> | 2016-04-10 09:28:39 +0000 |
commit | 32761a6cee1d0dee366b885b7b9c777e67885688 (patch) | |
tree | d6bec92bebfb216f4126356e55518842c2f476a1 /Source/JavaScriptCore/llint/LowLevelInterpreter.asm | |
parent | a4e969f4965059196ca948db781e52f7cfebf19e (diff) | |
download | WebKitGtk-tarball-32761a6cee1d0dee366b885b7b9c777e67885688.tar.gz |
webkitgtk-2.4.11
Diffstat (limited to 'Source/JavaScriptCore/llint/LowLevelInterpreter.asm')
-rw-r--r-- | Source/JavaScriptCore/llint/LowLevelInterpreter.asm | 1279 |
1 file changed, 246 insertions, 1033 deletions
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
index 8e77c0e22..8f21f6d89 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
@@ -21,231 +21,56 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 # THE POSSIBILITY OF SUCH DAMAGE.

-# Crash course on the language that this is written in (which I just call
-# "assembly" even though it's more than that):
-#
-# - Mostly gas-style operand ordering. The last operand tends to be the
-#   destination. So "a := b" is written as "mov b, a". But unlike gas,
-#   comparisons are in-order, so "if (a < b)" is written as
-#   "bilt a, b, ...".
-#
-# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
-#   For 32-bit, "i" and "p" are interchangeable except when an op supports one
-#   but not the other.
-#
-# - In general, valid operands for macro invocations and instructions are
-#   registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
-#   (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
-#   (eg "_foo" or ".foo"). Macro invocations can also take anonymous
-#   macros as operands. Instructions cannot take anonymous macros.
-#
-# - Labels must have names that begin with either "_" or ".". A "." label
-#   is local and gets renamed before code gen to minimize namespace
-#   pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
-#   may or may not be removed during code gen depending on whether the asm
-#   conventions for C name mangling on the target platform mandate a "_"
-#   prefix.
-#
-# - A "macro" is a lambda expression, which may be either anonymous or
-#   named. But this has caveats. "macro" can take zero or more arguments,
-#   which may be macros or any valid operands, but it can only return
-#   code. But you can do Turing-complete things via continuation passing
-#   style: "macro foo (a, b) b(a, a) end foo(foo, foo)". Actually, don't do
-#   that, since you'll just crash the assembler.
-#
-# - An "if" is a conditional on settings. Any identifier supplied in the
-#   predicate of an "if" is assumed to be a #define that is available
-#   during code gen. So you can't use "if" for computation in a macro, but
-#   you can use it to select different pieces of code for different
-#   platforms.
-#
-# - Arguments to macros follow lexical scoping rather than dynamic scoping.
-#   Const's also follow lexical scoping and may override (hide) arguments
-#   or other consts. All variables (arguments and constants) can be bound
-#   to operands. Additionally, arguments (but not constants) can be bound
-#   to macros.
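
[Editorial note, not part of the diff: a minimal sketch of the offlineasm idioms the deleted crash course describes -- gas-style operand order, in-order comparisons, named macros, and the two label kinds. All names below are hypothetical.]

    macro addTwice(amount, value)      # named macro: a lambda over operands
        addp amount, value             # value := value + amount
        addp amount, value             # the last operand is the destination
    end

    _example_entry:                    # "_" label: extern symbol (.globl)
        move 1, t0                     # t0 := 1, i.e. "mov b, a" order
        addTwice(2, t0)                # t0 := 5
        bilt t0, 10, .done             # reads in order: if (t0 < 10) goto .done
        move 10, t0
    .done:                             # "." label: local, renamed at codegen
        ret
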
-
-# The following general-purpose registers are available:
-#
-# - cfr and sp hold the call frame and (native) stack pointer respectively.
-#   They are callee-save registers, and guaranteed to be distinct from all other
-#   registers on all architectures.
-#
-# - lr is defined on non-X86 architectures (ARM64, ARMv7, ARM,
-#   ARMv7_TRADITIONAL, MIPS, SH4 and CLOOP) and holds the return PC
-#
-# - pc holds the (native) program counter on 32-bits ARM architectures (ARM,
-#   ARMv7, ARMv7_TRADITIONAL)
-#
-# - t0, t1, t2, t3, t4 and optionally t5 are temporary registers that can get
-#   trashed on calls, and are pairwise distinct registers. t4 holds the JS
-#   program counter, so use with caution in opcodes (actually, don't use it in
-#   opcodes at all, except as PC).
-#
-# - r0 and r1 are the platform's customary return registers, and thus are
-#   two distinct registers
-#
-# - a0, a1, a2 and a3 are the platform's customary argument registers, and
-#   thus are pairwise distinct registers. Be mindful that:
-#   + On X86, there are no argument registers. a0 and a1 are edx and
-#     ecx following the fastcall convention, but you should still use the stack
-#     to pass your arguments. The cCall2 and cCall4 macros do this for you.
-#   + On X86_64_WIN, you should allocate space on the stack for the arguments,
-#     and the return convention is weird for > 8 bytes types. The only place we
-#     use > 8 bytes return values is on a cCall, and cCall2 and cCall4 handle
-#     this for you.
-#
-# - The only registers guaranteed to be caller-saved are r0, r1, a0, a1 and a2,
-#   and you should be mindful of that in functions that are called directly from
-#   C. If you need more registers, you should push and pop them like a good
-#   assembly citizen, because any other register will be callee-saved on X86.
-#
-# You can additionally assume:
-#
-# - a3, t2, t3, t4 and t5 are never return registers; t0, t1, a0, a1 and a2
-#   can be return registers.
-#
-# - t4 and t5 are never argument registers, t3 can only be a3, t1 can only be
-#   a1; but t0 and t2 can be either a0 or a2.
-#
-# - On 64 bits, there are callee-save registers named csr0, csr1, ... csrN.
-#   The last three csr registers are used to store the PC base and
-#   two special tag values. Don't use them for anything else.
-#
-# Additional platform-specific details (you shouldn't rely on this remaining
-# true):
-#
-# - For consistency with the baseline JIT, t0 is always r0 (and t1 is always
-#   r1 on 32 bits platforms). You should use the r version when you need return
-#   registers, and the t version otherwise: code using t0 (or t1) should still
-#   work if swapped with e.g. t3, while code using r0 (or r1) should not. There
-#   *may* be legacy code relying on this.
-#
-# - On all platforms other than X86, t0 can only be a0 and t2 can only be a2.
-#
-# - On all platforms other than X86 and X86_64, a2 is not a return register.
-#   a2 is r0 on X86 (because we have so few registers) and r1 on X86_64 (because
-#   the ABI enforces it).
-#
-# The following floating-point registers are available:
-#
-# - ft0-ft5 are temporary floating-point registers that get trashed on calls,
-#   and are pairwise distinct.
-#
-# - fa0 and fa1 are the platform's customary floating-point argument
-#   registers, and are both distinct. On 64-bits platforms, fa2 and fa3 are
-#   additional floating-point argument registers.
-#
-# - fr is the platform's customary floating-point return register
-#
-# You can assume that ft1-ft5 or fa1-fa3 are never fr, and that ftX is never
-# faY if X != Y.
-
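
[Editorial note, not part of the diff: a hedged sketch of the C-call convention the deleted comment describes, in the newer-side style where arguments are staged in a0/a1 and results arrive in r0. The macro name is hypothetical.]

    macro exampleSlowCall(slowPathFunction)
        move cfr, a0               # first C argument: the call frame
        move PC, a1                # second C argument: the instruction pointer
        cCall2(slowPathFunction)   # on X86, cCall2 passes these on the stack
        move r0, t0                # results come back in r0 (r0/r1 on 32-bit)
    end
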
 # First come the common protocols that both interpreters use. Note that each
 # of these must have an ASSERT() in LLIntData.cpp

-# Work-around for the fact that the toolchain's awareness of armv7k / armv7s
-# results in a separate slab in the fat binary, yet the offlineasm doesn't know
-# to expect it.
-if ARMv7k
-end
+# Work-around for the fact that the toolchain's awareness of armv7s results in
+# a separate slab in the fat binary, yet the offlineasm doesn't know to expect
+# it.
 if ARMv7s
 end

 # These declarations must match interpreter/JSStack.h.
 if JSVALUE64
-    const PtrSize = 8
-    const CallFrameHeaderSlots = 5
+const PtrSize = 8
+const CallFrameHeaderSlots = 6
 else
-    const PtrSize = 4
-    const CallFrameHeaderSlots = 4
-    const CallFrameAlignSlots = 1
+const PtrSize = 4
+const CallFrameHeaderSlots = 5
 end
 const SlotSize = 8

-const JSEnvironmentRecord_variables = (sizeof JSEnvironmentRecord + SlotSize - 1) & ~(SlotSize - 1)
-const DirectArguments_storage = (sizeof DirectArguments + SlotSize - 1) & ~(SlotSize - 1)
-
-const StackAlignment = 16
-const StackAlignmentSlots = 2
-const StackAlignmentMask = StackAlignment - 1
-
-const CallerFrameAndPCSize = 2 * PtrSize
-
 const CallerFrame = 0
 const ReturnPC = CallerFrame + PtrSize
 const CodeBlock = ReturnPC + PtrSize
-const Callee = CodeBlock + SlotSize
+const ScopeChain = CodeBlock + SlotSize
+const Callee = ScopeChain + SlotSize
 const ArgumentCount = Callee + SlotSize
 const ThisArgumentOffset = ArgumentCount + SlotSize
-const FirstArgumentOffset = ThisArgumentOffset + SlotSize
 const CallFrameHeaderSize = ThisArgumentOffset

 # Some value representation constants.
 if JSVALUE64
-    const TagBitTypeOther = 0x2
-    const TagBitBool = 0x4
-    const TagBitUndefined = 0x8
-    const ValueEmpty = 0x0
-    const ValueFalse = TagBitTypeOther | TagBitBool
-    const ValueTrue = TagBitTypeOther | TagBitBool | 1
-    const ValueUndefined = TagBitTypeOther | TagBitUndefined
-    const ValueNull = TagBitTypeOther
-    const TagTypeNumber = 0xffff000000000000
-    const TagMask = TagTypeNumber | TagBitTypeOther
+const TagBitTypeOther = 0x2
+const TagBitBool = 0x4
+const TagBitUndefined = 0x8
+const ValueEmpty = 0x0
+const ValueFalse = TagBitTypeOther | TagBitBool
+const ValueTrue = TagBitTypeOther | TagBitBool | 1
+const ValueUndefined = TagBitTypeOther | TagBitUndefined
+const ValueNull = TagBitTypeOther
 else
-    const Int32Tag = -1
-    const BooleanTag = -2
-    const NullTag = -3
-    const UndefinedTag = -4
-    const CellTag = -5
-    const EmptyValueTag = -6
-    const DeletedValueTag = -7
-    const LowestTag = DeletedValueTag
-end
-
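
[Editorial note, not part of the diff: a hedged sketch of how the JSVALUE64 tag constants above are typically consumed. The macro names are hypothetical; tagTypeNumber and tagMask are the tag registers defined further down in this file.]

    macro branchIfNotInt32(value, slow)
        bqb value, tagTypeNumber, slow   # unsigned-below TagTypeNumber => not a boxed int32
    end

    macro branchIfNotCell(value, slow)
        btqnz value, tagMask, slow       # any tag bit set => not a cell pointer
    end
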
-# NOTE: The values below must be in sync with what is in PutByIdFlags.h.
-const PutByIdPrimaryTypeMask = 0x6
-const PutByIdPrimaryTypeSecondary = 0x0
-const PutByIdPrimaryTypeObjectWithStructure = 0x2
-const PutByIdPrimaryTypeObjectWithStructureOrOther = 0x4
-const PutByIdSecondaryTypeMask = -0x8
-const PutByIdSecondaryTypeBottom = 0x0
-const PutByIdSecondaryTypeBoolean = 0x8
-const PutByIdSecondaryTypeOther = 0x10
-const PutByIdSecondaryTypeInt32 = 0x18
-const PutByIdSecondaryTypeNumber = 0x20
-const PutByIdSecondaryTypeString = 0x28
-const PutByIdSecondaryTypeSymbol = 0x30
-const PutByIdSecondaryTypeObject = 0x38
-const PutByIdSecondaryTypeObjectOrOther = 0x40
-const PutByIdSecondaryTypeTop = 0x48
-
-const CopyBarrierSpaceBits = 3
-
-const CallOpCodeSize = 9
-
-if X86_64 or ARM64 or C_LOOP
-    const maxFrameExtentForSlowPathCall = 0
-elsif ARM or ARMv7_TRADITIONAL or ARMv7 or SH4
-    const maxFrameExtentForSlowPathCall = 24
-elsif X86 or X86_WIN
-    const maxFrameExtentForSlowPathCall = 40
-elsif MIPS
-    const maxFrameExtentForSlowPathCall = 40
-elsif X86_64_WIN
-    const maxFrameExtentForSlowPathCall = 64
+const Int32Tag = -1
+const BooleanTag = -2
+const NullTag = -3
+const UndefinedTag = -4
+const CellTag = -5
+const EmptyValueTag = -6
+const DeletedValueTag = -7
+const LowestTag = DeletedValueTag
 end

-if X86_64 or X86_64_WIN or ARM64
-    const CalleeSaveSpaceAsVirtualRegisters = 3
-else
-    const CalleeSaveSpaceAsVirtualRegisters = 0
-end
-
-const CalleeSaveSpaceStackAligned = (CalleeSaveSpaceAsVirtualRegisters * SlotSize + StackAlignment - 1) & ~StackAlignmentMask
-
-
 # Watchpoint states
 const ClearWatchpoint = 0
 const IsWatched = 1
@@ -255,28 +80,16 @@ const IsInvalidated = 2

 if JSVALUE64
     # - Use a pair of registers to represent the PC: one register for the
     #   base of the bytecodes, and one register for the index.
-    # - The PC base (or PB for short) must be stored in a callee-save register.
+    # - The PC base (or PB for short) should be stored in the csr. It will
+    #   get clobbered on calls to other JS code, but will get saved on calls
+    #   to C functions.
     # - C calls are still given the Instruction* rather than the PC index.
     #   This requires an add before the call, and a sub after.
-    const PC = t4 # When changing this, make sure LLIntPC is up to date in LLIntPCRanges.h
-    if ARM64
-        const PB = csr7
-        const tagTypeNumber = csr8
-        const tagMask = csr9
-    elsif X86_64
-        const PB = csr2
-        const tagTypeNumber = csr3
-        const tagMask = csr4
-    elsif X86_64_WIN
-        const PB = csr4
-        const tagTypeNumber = csr5
-        const tagMask = csr6
-    elsif C_LOOP
-        const PB = csr0
-        const tagTypeNumber = csr1
-        const tagMask = csr2
-    end
-
+    const PC = t4
+    const PB = t6
+    const tagTypeNumber = csr1
+    const tagMask = csr2
+
     macro loadisFromInstruction(offset, dest)
         loadis offset * 8[PB, PC, 8], dest
     end
@@ -290,7 +103,7 @@ if JSVALUE64
     end
 else
-    const PC = t4 # When changing this, make sure LLIntPC is up to date in LLIntPCRanges.h
+    const PC = t4
     macro loadisFromInstruction(offset, dest)
         loadis offset * 4[PC], dest
     end
@@ -300,12 +113,6 @@ else
     end
 end
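
[Editorial note, not part of the diff: a hypothetical usage sketch. An opcode body fetches its operands through loadisFromInstruction, so the same code works whether the PC is the split PB/PC pair (64-bit) or a single pointer (32-bit). For an op_mov-style instruction the operands sit at offsets 1 and 2.]

    macro exampleFetchTwoOperands()
        loadisFromInstruction(1, t2)   # operand 1: destination virtual register
        loadisFromInstruction(2, t3)   # operand 2: source virtual register
    end
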
-if X86_64_WIN
-    const extraTempReg = t0
-else
-    const extraTempReg = t5
-end
-
 # Constants for reasoning about value representation.
 if BIG_ENDIAN
     const TagOffset = 0
@@ -326,14 +133,14 @@ const ArrayStorageShape = 28
 const SlowPutArrayStorageShape = 30

 # Type constants.
-const StringType = 6
-const SymbolType = 7
-const ObjectType = 21
-const FinalObjectType = 22
+const StringType = 5
+const ObjectType = 17
+const FinalObjectType = 18

 # Type flags constants.
 const MasqueradesAsUndefined = 1
-const ImplementsDefaultHasInstance = 2
+const ImplementsHasInstance = 2
+const ImplementsDefaultHasInstance = 8

 # Bytecode operand constants.
 const FirstConstantRegisterIndex = 0x40000000
@@ -342,13 +149,12 @@ const FirstConstantRegisterIndex = 0x40000000
 const GlobalCode = 0
 const EvalCode = 1
 const FunctionCode = 2
-const ModuleCode = 3

 # The interpreter steals the tag word of the argument count.
 const LLIntReturnPC = ArgumentCount + TagOffset

 # String flags.
-const HashFlags8BitBuffer = 8
+const HashFlags8BitBuffer = 32

 # Copied from PropertyOffset.h
 const firstOutOfLineOffset = 100
@@ -356,22 +162,19 @@ const firstOutOfLineOffset = 100

 # ResolveType
 const GlobalProperty = 0
 const GlobalVar = 1
-const GlobalLexicalVar = 2
-const ClosureVar = 3
-const LocalClosureVar = 4
-const ModuleVar = 5
-const GlobalPropertyWithVarInjectionChecks = 6
-const GlobalVarWithVarInjectionChecks = 7
-const GlobalLexicalVarWithVarInjectionChecks = 8
-const ClosureVarWithVarInjectionChecks = 9
-
-const ResolveTypeMask = 0x3ff
-const InitializationModeMask = 0xffc00
-const InitializationModeShift = 10
-const Initialization = 0
-
-const MarkedBlockSize = 16 * 1024
+const ClosureVar = 2
+const GlobalPropertyWithVarInjectionChecks = 3
+const GlobalVarWithVarInjectionChecks = 4
+const ClosureVarWithVarInjectionChecks = 5
+const Dynamic = 6
+
+const ResolveModeMask = 0xffff
+
+const MarkedBlockSize = 64 * 1024
 const MarkedBlockMask = ~(MarkedBlockSize - 1)
+
+# Constants for checking mark bits.
+const AtomNumberShift = 3
+const BitMapWordShift = 4

 # Allocation constants
 if JSVALUE64
@@ -393,7 +196,9 @@ macro crash()
     if C_LOOP
         cloopCrash
     else
-        call _llint_crash
+        storei t0, 0xbbadbeef[]
+        move 0, t0
+        call t0
     end
 end

@@ -405,306 +210,25 @@ macro assert(assertion)
     end
 end

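
[Editorial note, not part of the diff: one consequence of the FirstConstantRegisterIndex constant above, sketched as a hypothetical helper. Operand indices at or above 0x40000000 name entries in the CodeBlock's constant pool rather than stack slots, so operand decoding branches on that boundary.]

    macro branchIfConstantOperand(index, constantCase)
        bigteq index, FirstConstantRegisterIndex, constantCase   # constant pool, not a local
    end
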
-macro checkStackPointerAlignment(tempReg, location)
-    if ARM64 or C_LOOP or SH4
-        # ARM64 will check for us!
-        # C_LOOP does not need the alignment, and can use a little perf
-        # improvement from avoiding useless work.
-        # SH4 does not need specific alignment (4 bytes).
-    else
-        if ARM or ARMv7 or ARMv7_TRADITIONAL
-            # ARM can't do logical ops with the sp as a source
-            move sp, tempReg
-            andp StackAlignmentMask, tempReg
-        else
-            andp sp, StackAlignmentMask, tempReg
-        end
-        btpz tempReg, .stackPointerOkay
-        move location, tempReg
-        break
-    .stackPointerOkay:
-    end
-end
-
-if C_LOOP or ARM64 or X86_64 or X86_64_WIN
-    const CalleeSaveRegisterCount = 0
-elsif ARM or ARMv7_TRADITIONAL or ARMv7
-    const CalleeSaveRegisterCount = 7
-elsif SH4
-    const CalleeSaveRegisterCount = 5
-elsif MIPS
-    const CalleeSaveRegisterCount = 1
-elsif X86 or X86_WIN
-    const CalleeSaveRegisterCount = 3
-end
-
-const CalleeRegisterSaveSize = CalleeSaveRegisterCount * PtrSize
-
-# VMEntryTotalFrameSize includes the space for struct VMEntryRecord and the
-# callee save registers rounded up to keep the stack aligned
-const VMEntryTotalFrameSize = (CalleeRegisterSaveSize + sizeof VMEntryRecord + StackAlignment - 1) & ~StackAlignmentMask
-
-macro pushCalleeSaves()
-    if C_LOOP or ARM64 or X86_64 or X86_64_WIN
-    elsif ARM or ARMv7_TRADITIONAL
-        emit "push {r4-r10}"
-    elsif ARMv7
-        emit "push {r4-r6, r8-r11}"
-    elsif MIPS
-        emit "addiu $sp, $sp, -4"
-        emit "sw $s4, 0($sp)"
-        # save $gp to $s4 so that we can restore it after a function call
-        emit "move $s4, $gp"
-    elsif SH4
-        emit "mov.l r13, @-r15"
-        emit "mov.l r11, @-r15"
-        emit "mov.l r10, @-r15"
-        emit "mov.l r9, @-r15"
-        emit "mov.l r8, @-r15"
-    elsif X86
-        emit "push %esi"
-        emit "push %edi"
-        emit "push %ebx"
-    elsif X86_WIN
-        emit "push esi"
-        emit "push edi"
-        emit "push ebx"
-    end
-end
-
-macro popCalleeSaves()
-    if C_LOOP or ARM64 or X86_64 or X86_64_WIN
-    elsif ARM or ARMv7_TRADITIONAL
-        emit "pop {r4-r10}"
-    elsif ARMv7
-        emit "pop {r4-r6, r8-r11}"
-    elsif MIPS
-        emit "lw $s4, 0($sp)"
-        emit "addiu $sp, $sp, 4"
-    elsif SH4
-        emit "mov.l @r15+, r8"
-        emit "mov.l @r15+, r9"
-        emit "mov.l @r15+, r10"
-        emit "mov.l @r15+, r11"
-        emit "mov.l @r15+, r13"
-    elsif X86
-        emit "pop %ebx"
-        emit "pop %edi"
-        emit "pop %esi"
-    elsif X86_WIN
-        emit "pop ebx"
-        emit "pop edi"
-        emit "pop esi"
-    end
-end
-
-macro preserveCallerPCAndCFR()
-    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
-        push lr
-        push cfr
-    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
-        push cfr
-    elsif ARM64
-        push cfr, lr
-    else
-        error
-    end
-    move sp, cfr
-end
-
-macro restoreCallerPCAndCFR()
-    move cfr, sp
-    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
-        pop cfr
-        pop lr
-    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
-        pop cfr
-    elsif ARM64
-        pop lr, cfr
-    end
-end
-
-macro preserveCalleeSavesUsedByLLInt()
-    subp CalleeSaveSpaceStackAligned, sp
-    if C_LOOP
-    elsif ARM or ARMv7_TRADITIONAL
-    elsif ARMv7
-    elsif ARM64
-        emit "stp x27, x28, [x29, #-16]"
-        emit "stp xzr, x26, [x29, #-32]"
-    elsif MIPS
-    elsif SH4
-    elsif X86
-    elsif X86_WIN
-    elsif X86_64
-        storep csr4, -8[cfr]
-        storep csr3, -16[cfr]
-        storep csr2, -24[cfr]
-    elsif X86_64_WIN
-        storep csr6, -8[cfr]
-        storep csr5, -16[cfr]
-        storep csr4, -24[cfr]
-    end
-end
-
-macro restoreCalleeSavesUsedByLLInt()
-    if C_LOOP
-    elsif ARM or ARMv7_TRADITIONAL
-    elsif ARMv7
-    elsif ARM64
-        emit "ldp xzr, x26, [x29, #-32]"
-        emit "ldp x27, x28, [x29, #-16]"
-    elsif MIPS
-    elsif SH4
-    elsif X86
-    elsif X86_WIN
-    elsif X86_64
-        loadp -24[cfr], csr2
-        loadp -16[cfr], csr3
-        loadp -8[cfr], csr4
-    elsif X86_64_WIN
-        loadp -24[cfr], csr4
-        loadp -16[cfr], csr5
-        loadp -8[cfr], csr6
-    end
-end
-
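
[Editorial note, not part of the diff: a worked instance of the round-up-to-alignment idiom used for VMEntryTotalFrameSize above, with example values only. With StackAlignment = 16 and StackAlignmentMask = 15, a raw size of 40 bytes becomes (40 + 15) & ~15 = 55 & ~15 = 48, the next multiple of 16.]

    const ExampleRawSize = 40
    const ExampleAlignedSize = (ExampleRawSize + StackAlignment - 1) & ~StackAlignmentMask   # = 48
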
-macro copyCalleeSavesToVMCalleeSavesBuffer(vm, temp)
-    if ARM64 or X86_64 or X86_64_WIN
-        leap VM::calleeSaveRegistersBuffer[vm], temp
-        if ARM64
-            storep csr0, [temp]
-            storep csr1, 8[temp]
-            storep csr2, 16[temp]
-            storep csr3, 24[temp]
-            storep csr4, 32[temp]
-            storep csr5, 40[temp]
-            storep csr6, 48[temp]
-            storep csr7, 56[temp]
-            storep csr8, 64[temp]
-            storep csr9, 72[temp]
-            stored csfr0, 80[temp]
-            stored csfr1, 88[temp]
-            stored csfr2, 96[temp]
-            stored csfr3, 104[temp]
-            stored csfr4, 112[temp]
-            stored csfr5, 120[temp]
-            stored csfr6, 128[temp]
-            stored csfr7, 136[temp]
-        elsif X86_64
-            storep csr0, [temp]
-            storep csr1, 8[temp]
-            storep csr2, 16[temp]
-            storep csr3, 24[temp]
-            storep csr4, 32[temp]
-        elsif X86_64_WIN
-            storep csr0, [temp]
-            storep csr1, 8[temp]
-            storep csr2, 16[temp]
-            storep csr3, 24[temp]
-            storep csr4, 32[temp]
-            storep csr5, 40[temp]
-            storep csr6, 48[temp]
-        end
-    end
-end
-
-macro restoreCalleeSavesFromVMCalleeSavesBuffer(vm, temp)
-    if ARM64 or X86_64 or X86_64_WIN
-        leap VM::calleeSaveRegistersBuffer[vm], temp
-        if ARM64
-            loadp [temp], csr0
-            loadp 8[temp], csr1
-            loadp 16[temp], csr2
-            loadp 24[temp], csr3
-            loadp 32[temp], csr4
-            loadp 40[temp], csr5
-            loadp 48[temp], csr6
-            loadp 56[temp], csr7
-            loadp 64[temp], csr8
-            loadp 72[temp], csr9
-            loadd 80[temp], csfr0
-            loadd 88[temp], csfr1
-            loadd 96[temp], csfr2
-            loadd 104[temp], csfr3
-            loadd 112[temp], csfr4
-            loadd 120[temp], csfr5
-            loadd 128[temp], csfr6
-            loadd 136[temp], csfr7
-        elsif X86_64
-            loadp [temp], csr0
-            loadp 8[temp], csr1
-            loadp 16[temp], csr2
-            loadp 24[temp], csr3
-            loadp 32[temp], csr4
-        elsif X86_64_WIN
-            loadp [temp], csr0
-            loadp 8[temp], csr1
-            loadp 16[temp], csr2
-            loadp 24[temp], csr3
-            loadp 32[temp], csr4
-            loadp 40[temp], csr5
-            loadp 48[temp], csr6
-        end
-    end
-end
-
 macro preserveReturnAddressAfterCall(destinationRegister)
     if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
         # In C_LOOP case, we're only preserving the bytecode vPC.
         move lr, destinationRegister
-    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
+    elsif X86 or X86_64
         pop destinationRegister
     else
         error
     end
 end

-macro copyBarrier(value, slow)
-    btpnz value, CopyBarrierSpaceBits, slow
-end
-
-macro functionPrologue()
-    if X86 or X86_WIN or X86_64 or X86_64_WIN
-        push cfr
-    elsif ARM64
-        push cfr, lr
-    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
-        push lr
-        push cfr
-    end
-    move sp, cfr
-end
-
-macro functionEpilogue()
-    if X86 or X86_WIN or X86_64 or X86_64_WIN
-        pop cfr
-    elsif ARM64
-        pop lr, cfr
-    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
-        pop cfr
-        pop lr
-    end
-end
-
-macro vmEntryRecord(entryFramePointer, resultReg)
-    subp entryFramePointer, VMEntryTotalFrameSize, resultReg
-end
-
-macro getFrameRegisterSizeForCodeBlock(codeBlock, size)
-    loadi CodeBlock::m_numCalleeLocals[codeBlock], size
-    lshiftp 3, size
-    addp maxFrameExtentForSlowPathCall, size
-end
-
-macro restoreStackPointerAfterCall()
-    loadp CodeBlock[cfr], t2
-    getFrameRegisterSizeForCodeBlock(t2, t2)
-    if ARMv7
-        subp cfr, t2, t2
-        move t2, sp
+macro restoreReturnAddressBeforeReturn(sourceRegister)
+    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
+        # In C_LOOP case, we're only restoring the bytecode vPC.
+        move sourceRegister, lr
+    elsif X86 or X86_64
+        push sourceRegister
     else
-        subp cfr, t2, sp
+        error
     end
 end

@@ -714,102 +238,50 @@ macro traceExecution()
     end
 end

-macro callTargetFunction(callee)
+macro callTargetFunction(callLinkInfo)
     if C_LOOP
-        cloopCallJSFunction callee
+        cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
     else
-        call callee
+        call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
+        dispatchAfterCall()
     end
-    restoreStackPointerAfterCall()
-    dispatchAfterCall()
-end
-
-macro prepareForRegularCall(callee, temp1, temp2, temp3)
-    addp CallerFrameAndPCSize, sp
-end
-
-# sp points to the new frame
-macro prepareForTailCall(callee, temp1, temp2, temp3)
-    restoreCalleeSavesUsedByLLInt()
-
-    loadi PayloadOffset + ArgumentCount[cfr], temp2
-    loadp CodeBlock[cfr], temp1
-    loadp CodeBlock::m_numParameters[temp1], temp1
-    bilteq temp1, temp2, .noArityFixup
-    move temp1, temp2
-
-.noArityFixup:
-    # We assume < 2^28 arguments
-    muli SlotSize, temp2
-    addi StackAlignment - 1 + CallFrameHeaderSize, temp2
-    andi ~StackAlignmentMask, temp2
-
-    move cfr, temp1
-    addp temp2, temp1
-
-    loadi PayloadOffset + ArgumentCount[sp], temp2
-    # We assume < 2^28 arguments
-    muli SlotSize, temp2
-    addi StackAlignment - 1 + CallFrameHeaderSize, temp2
-    andi ~StackAlignmentMask, temp2
-
-    if ARM or ARMv7_TRADITIONAL or ARMv7 or SH4 or ARM64 or C_LOOP or MIPS
-        addp 2 * PtrSize, sp
-        subi 2 * PtrSize, temp2
-        loadp PtrSize[cfr], lr
-    else
-        addp PtrSize, sp
-        subi PtrSize, temp2
-        loadp PtrSize[cfr], temp3
-        storep temp3, [sp]
-    end
-
-    subp temp2, temp1
-    loadp [cfr], cfr
-
-.copyLoop:
-    subi PtrSize, temp2
-    loadp [sp, temp2, 1], temp3
-    storep temp3, [temp1, temp2, 1]
-    btinz temp2, .copyLoop
-
-    move temp1, sp
-    jmp callee
 end

-macro slowPathForCall(slowPath, prepareCall)
+macro slowPathForCall(slowPath)
     callCallSlowPath(
         slowPath,
-        # Those are r0 and r1
-        macro (callee, calleeFramePtr)
-            btpz calleeFramePtr, .dontUpdateSP
-            move calleeFramePtr, sp
-            prepareCall(callee, t2, t3, t4)
-        .dontUpdateSP:
-            callTargetFunction(callee)
+        macro (callee)
+            if C_LOOP
+                cloopCallJSFunction callee
+            else
+                call callee
+                dispatchAfterCall()
+            end
         end)
 end

-macro arrayProfile(cellAndIndexingType, profile, scratch)
-    const cell = cellAndIndexingType
-    const indexingType = cellAndIndexingType
-    loadi JSCell::m_structureID[cell], scratch
-    storei scratch, ArrayProfile::m_lastSeenStructureID[profile]
-    loadb JSCell::m_indexingType[cell], indexingType
+macro arrayProfile(structureAndIndexingType, profile, scratch)
+    const structure = structureAndIndexingType
+    const indexingType = structureAndIndexingType
+    storep structure, ArrayProfile::m_lastSeenStructure[profile]
+    loadb Structure::m_indexingType[structure], indexingType
 end

-macro skipIfIsRememberedOrInEden(cell, scratch1, scratch2, continuation)
-    loadb JSCell::m_cellState[cell], scratch1
-    continuation(scratch1)
-end
+macro checkMarkByte(cell, scratch1, scratch2, continuation)
+    move cell, scratch1
+    move cell, scratch2
+
+    andp MarkedBlockMask, scratch1
+    andp ~MarkedBlockMask, scratch2

-macro notifyWrite(set, slow)
-    bbneq WatchpointSet::m_state[set], IsInvalidated, slow
+    rshiftp AtomNumberShift + BitMapWordShift, scratch2
+    loadb MarkedBlock::m_marks[scratch1, scratch2, 1], scratch1
+    continuation(scratch1)
 end

 macro checkSwitchToJIT(increment, action)
     loadp CodeBlock[cfr], t0
-    baddis increment, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t0], .continue
+    baddis increment, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t0], .continue
     action()
     .continue:
 end

@@ -861,51 +333,26 @@ end

 # Do the bare minimum required to execute code. Sets up the PC, leave the CodeBlock*
 # in t1. May also trigger prologue entry OSR.
 macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
+    preserveReturnAddressAfterCall(t2)
+
     # Set up the call frame and check if we should OSR.
-    preserveCallerPCAndCFR()
-
+    storep t2, ReturnPC[cfr]
     if EXECUTION_TRACING
-        subp maxFrameExtentForSlowPathCall, sp
         callSlowPath(traceSlowPath)
-        addp maxFrameExtentForSlowPathCall, sp
     end
     codeBlockGetter(t1)
-    if not C_LOOP
-        baddis 5, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t1], .continue
-        if JSVALUE64
-            move cfr, a0
-            move PC, a1
-            cCall2(osrSlowPath)
-        else
-            # We are after the function prologue, but before we have set up sp from the CodeBlock.
-            # Temporarily align stack pointer for this call.
-            subp 8, sp
-            move cfr, a0
-            move PC, a1
-            cCall2(osrSlowPath)
-            addp 8, sp
-        end
-        btpz r0, .recover
-        move cfr, sp # restore the previous sp
-        # pop the callerFrame since we will jump to a function that wants to save it
-        if ARM64
-            pop lr, cfr
-        elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
-            pop cfr
-            pop lr
-        else
-            pop cfr
-        end
-        jmp r0
-    .recover:
-        codeBlockGetter(t1)
-    .continue:
-    end
-
+    baddis 5, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t1], .continue
+    cCall2(osrSlowPath, cfr, PC)
+    move t1, cfr
+    btpz t0, .recover
+    loadp ReturnPC[cfr], t2
+    restoreReturnAddressBeforeReturn(t2)
+    jmp t0
+.recover:
+    codeBlockGetter(t1)
+.continue:
     codeBlockSetter(t1)
-
-    preserveCalleeSavesUsedByLLInt()
-
+
     # Set up the PC.
     if JSVALUE64
         loadp CodeBlock::m_instructions[t1], PB
@@ -913,35 +360,6 @@ macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
     else
         loadp CodeBlock::m_instructions[t1], PC
     end
-
-    # Get new sp in t0 and check stack height.
-    getFrameRegisterSizeForCodeBlock(t1, t0)
-    subp cfr, t0, t0
-    loadp CodeBlock::m_vm[t1], t2
-    bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK
-
-    # Stack height check failed - need to call a slow_path.
-    # Set up temporary stack pointer for call including callee saves
-    subp maxFrameExtentForSlowPathCall, sp
-    callSlowPath(_llint_stack_check)
-    bpeq r1, 0, .stackHeightOKGetCodeBlock
-    move r1, cfr
-    dispatch(0) # Go to exception handler in PC
-
-.stackHeightOKGetCodeBlock:
-    # Stack check slow path returned that the stack was ok.
-    # Since they were clobbered, need to get CodeBlock and new sp
-    codeBlockGetter(t1)
-    getFrameRegisterSizeForCodeBlock(t1, t0)
-    subp cfr, t0, t0
-
-.stackHeightOK:
-    move t0, sp
-
-    if JSVALUE64
-        move TagTypeNumber, tagTypeNumber
-        addp TagBitTypeOther, tagTypeNumber, tagMask
-    end
 end

 # Expects that CodeBlock is in t1, which is what prologue() leaves behind.
@@ -976,187 +394,73 @@ macro functionInitialization(profileArgSkip)
     end
     baddpnz -8, t0, .argumentProfileLoop
 .argumentProfileDone:
+
+    # Check stack height.
+    loadi CodeBlock::m_numCalleeRegisters[t1], t0
+    addi 1, t0 # Account that local0 goes at slot -1
+    loadp CodeBlock::m_vm[t1], t2
+    lshiftp 3, t0
+    subp cfr, t0, t0
+    bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK
+
+    # Stack height check failed - need to call a slow_path.
+    callSlowPath(_llint_stack_check)
+.stackHeightOK:
 end

 macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
-    const offsetOfFirstFreeCell =
-        MarkedAllocator::m_freeList +
-        MarkedBlock::FreeList::head
-
-    # Get the object from the free list.
-    loadp offsetOfFirstFreeCell[allocator], result
-    btpz result, slowCase
+    if ALWAYS_ALLOCATE_SLOW
+        jmp slowCase
+    else
+        const offsetOfFirstFreeCell =
+            MarkedAllocator::m_freeList +
+            MarkedBlock::FreeList::head
+
+        # Get the object from the free list.
+        loadp offsetOfFirstFreeCell[allocator], result
+        btpz result, slowCase
+
+        # Remove the object from the free list.
+        loadp [result], scratch1
+        storep scratch1, offsetOfFirstFreeCell[allocator]
-
-    # Remove the object from the free list.
-    loadp [result], scratch1
-    storep scratch1, offsetOfFirstFreeCell[allocator]
-
-    # Initialize the object.
-    storep 0, JSObject::m_butterfly[result]
-    storeStructureWithTypeInfo(result, structure, scratch1)
+
+        # Initialize the object.
+        storep structure, JSCell::m_structure[result]
+        storep 0, JSObject::m_butterfly[result]
+    end
 end

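
[Editorial note, not part of the diff: a hypothetical call site for allocateJSObject. The surrounding opcode is assumed to have loaded a MarkedAllocator into t1 and a Structure into t2; the slow label falls back to the C++ allocator.]

    allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)   # t0 := new cell, t3 is scratch
    # ...initialize inline storage here; .opNewObjectSlow would call into C++.
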
 macro doReturn()
-    restoreCalleeSavesUsedByLLInt()
-    restoreCallerPCAndCFR()
+    loadp ReturnPC[cfr], t2
+    loadp CallerFrame[cfr], cfr
+    restoreReturnAddressBeforeReturn(t2)
     ret
 end

-# stub to call into JavaScript or Native functions
-# EncodedJSValue vmEntryToJavaScript(void* code, VM* vm, ProtoCallFrame* protoFrame)
-# EncodedJSValue vmEntryToNativeFunction(void* code, VM* vm, ProtoCallFrame* protoFrame)
-
-if C_LOOP
-    _llint_vm_entry_to_javascript:
-else
-    global _vmEntryToJavaScript
-    _vmEntryToJavaScript:
-end
-    doVMEntry(makeJavaScriptCall)
-
-
 if C_LOOP
-    _llint_vm_entry_to_native:
 else
-    global _vmEntryToNative
-    _vmEntryToNative:
-end
-    doVMEntry(makeHostFunctionCall)
-
-
-if not C_LOOP
-    # void sanitizeStackForVMImpl(VM* vm)
-    global _sanitizeStackForVMImpl
-    _sanitizeStackForVMImpl:
-        # We need three non-aliased caller-save registers. We are guaranteed
-        # this for a0, a1 and a2 on all architectures.
-        if X86 or X86_WIN
-            loadp 4[sp], a0
-        end
-        const vm = a0
-        const address = a1
-        const zeroValue = a2
-
-        loadp VM::m_lastStackTop[vm], address
-        bpbeq sp, address, .zeroFillDone
-
-        move 0, zeroValue
-    .zeroFillLoop:
-        storep zeroValue, [address]
-        addp PtrSize, address
-        bpa sp, address, .zeroFillLoop
-
-    .zeroFillDone:
-        move sp, address
-        storep address, VM::m_lastStackTop[vm]
-        ret
-
-    # VMEntryRecord* vmEntryRecord(const VMEntryFrame* entryFrame)
-    global _vmEntryRecord
-    _vmEntryRecord:
-        if X86 or X86_WIN
-            loadp 4[sp], a0
-        end
-
-        vmEntryRecord(a0, r0)
-        ret
-end
-
-if C_LOOP
-    # Dummy entry point the C Loop uses to initialize.
-    _llint_entry:
-        crash()
-else
-    macro initPCRelative(pcBase)
-        if X86_64 or X86_64_WIN or X86 or X86_WIN
-            call _relativePCBase
-        _relativePCBase:
-            pop pcBase
-        elsif ARM64
-        elsif ARMv7
-        _relativePCBase:
-            move pc, pcBase
-            subp 3, pcBase   # Need to back up the PC and set the Thumb2 bit
-        elsif ARM or ARMv7_TRADITIONAL
-        _relativePCBase:
-            move pc, pcBase
-            subp 8, pcBase
-        elsif MIPS
-            la _relativePCBase, pcBase
-            setcallreg pcBase # needed to set $t9 to the right value for the .cpload created by the label.
-        _relativePCBase:
-        elsif SH4
-            mova _relativePCBase, t0
-            move t0, pcBase
-            alignformova
-        _relativePCBase:
-        end
-end
+# stub to call into JavaScript or Native functions
+# EncodedJSValue callToJavaScript(void* code, ExecState** vm, ProtoCallFrame* protoFrame, Register* topOfStack)
+# EncodedJSValue callToNativeFunction(void* code, ExecState** vm, ProtoCallFrame* protoFrame, Register* topOfStack)
+# Note, if these stubs or one of their related macros are changed, make the
+# equivalent changes in jit/JITStubsX86.h and/or jit/JITStubsMSVC64.asm
+_callToJavaScript:
+    doCallToJavaScript(makeJavaScriptCall, doReturnFromJavaScript)

-# The PC base is in t1, as this is what _llint_entry leaves behind through
-# initPCRelative(t1)
-macro setEntryAddress(index, label)
-    if X86_64 or X86_64_WIN
-        leap (label - _relativePCBase)[t1], t3
-        move index, t4
-        storep t3, [a0, t4, 8]
-    elsif X86 or X86_WIN
-        leap (label - _relativePCBase)[t1], t3
-        move index, t4
-        storep t3, [a0, t4, 4]
-    elsif ARM64
-        pcrtoaddr label, t1
-        move index, t4
-        storep t1, [a0, t4, 8]
-    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
-        mvlbl (label - _relativePCBase), t4
-        addp t4, t1, t4
-        move index, t3
-        storep t4, [a0, t3, 4]
-    elsif SH4
-        move (label - _relativePCBase), t4
-        addp t4, t1, t4
-        move index, t3
-        storep t4, [a0, t3, 4]
-        flushcp # Force constant pool flush to avoid "pcrel too far" link error.
-    elsif MIPS
-        la label, t4
-        la _relativePCBase, t3
-        subp t3, t4
-        addp t4, t1, t4
-        move index, t3
-        storep t4, [a0, t3, 4]
-    end
+_callToNativeFunction:
+    doCallToJavaScript(makeHostFunctionCall, doReturnFromHostFunction)
 end

-global _llint_entry
-# Entry point for the llint to initialize.
-_llint_entry:
-    functionPrologue()
-    pushCalleeSaves()
-    if X86 or X86_WIN
-        loadp 20[sp], a0
-    end
-    initPCRelative(t1)
-
-    # Include generated bytecode initialization file.
-    include InitBytecodes
+# Indicate the beginning of LLInt.
+_llint_begin:
+    crash()

-    popCalleeSaves()
-    functionEpilogue()
-    ret
-end

 _llint_program_prologue:
     prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
     dispatch(0)


-_llint_module_program_prologue:
-    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
-    dispatch(0)
-
-
 _llint_eval_prologue:
     prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
     dispatch(0)
@@ -1164,12 +468,14 @@ _llint_eval_prologue:

 _llint_function_for_call_prologue:
     prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
+.functionForCallBegin:
     functionInitialization(0)
     dispatch(0)


 _llint_function_for_construct_prologue:
     prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
+.functionForConstructBegin:
     functionInitialization(1)
     dispatch(0)

@@ -1177,17 +483,11 @@ _llint_function_for_construct_prologue:
 _llint_function_for_call_arity_check:
     prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
     functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck)
-.functionForCallBegin:
-    functionInitialization(0)
-    dispatch(0)


 _llint_function_for_construct_arity_check:
     prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
     functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck)
-.functionForConstructBegin:
-    functionInitialization(1)
-    dispatch(0)


 # Value-representation-specific code.
@@ -1199,34 +499,10 @@ end

 # Value-representation-agnostic code.

-_llint_op_create_direct_arguments:
+_llint_op_touch_entry:
     traceExecution()
-    callSlowPath(_slow_path_create_direct_arguments)
-    dispatch(2)
-
-
-_llint_op_create_scoped_arguments:
-    traceExecution()
-    callSlowPath(_slow_path_create_scoped_arguments)
-    dispatch(3)
-
-
-_llint_op_create_out_of_band_arguments:
-    traceExecution()
-    callSlowPath(_slow_path_create_out_of_band_arguments)
-    dispatch(2)
-
-
-_llint_op_new_func:
-    traceExecution()
-    callSlowPath(_llint_slow_path_new_func)
-    dispatch(4)
-
-
-_llint_op_new_generator_func:
-    traceExecution()
-    callSlowPath(_llint_slow_path_new_generator_func)
-    dispatch(4)
+    callSlowPath(_slow_path_touch_entry)
+    dispatch(1)


 _llint_op_new_array:
     traceExecution()
@@ -1289,11 +565,12 @@ _llint_op_typeof:
     dispatch(3)


-_llint_op_is_object_or_null:
+_llint_op_is_object:
     traceExecution()
-    callSlowPath(_slow_path_is_object_or_null)
+    callSlowPath(_slow_path_is_object)
     dispatch(3)

+
 _llint_op_is_function:
     traceExecution()
     callSlowPath(_slow_path_is_function)
@@ -1305,6 +582,20 @@ _llint_op_in:
     callSlowPath(_slow_path_in)
     dispatch(4)

+macro withInlineStorage(object, propertyStorage, continuation)
+    # Indicate that the object is the property storage, and that the
+    # property storage register is unused.
+    continuation(object, propertyStorage)
+end
+
+macro withOutOfLineStorage(object, propertyStorage, continuation)
+    loadp JSObject::m_butterfly[object], propertyStorage
+    # Indicate that the propertyStorage register now points to the
+    # property storage, and that the object register may be reused
+    # if the object pointer is not needed anymore.
+    continuation(propertyStorage, object)
+end
+
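
[Editorial note, not part of the diff: a hedged usage sketch of the two storage adaptors above. A property access written once against the continuation's arguments can be instantiated for either inline or out-of-line storage; the registers and the slot offset here are illustrative assumptions only.]

    withOutOfLineStorage(t0, t3, macro (propertyStorage, scratch)
        # propertyStorage now points at the butterfly; a real accessor would
        # load the property slot at its precomputed offset from here.
        loadp 8[propertyStorage], t2   # assumed offset, for illustration
    end)
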

 _llint_op_del_by_id:
     traceExecution()
@@ -1324,33 +615,9 @@ _llint_op_put_by_index:
     dispatch(4)


-_llint_op_put_getter_by_id:
-    traceExecution()
-    callSlowPath(_llint_slow_path_put_getter_by_id)
-    dispatch(5)
-
-
-_llint_op_put_setter_by_id:
-    traceExecution()
-    callSlowPath(_llint_slow_path_put_setter_by_id)
-    dispatch(5)
-
-
-_llint_op_put_getter_setter_by_id:
-    traceExecution()
-    callSlowPath(_llint_slow_path_put_getter_setter_by_id)
-    dispatch(6)
-
-
-_llint_op_put_getter_by_val:
+_llint_op_put_getter_setter:
     traceExecution()
-    callSlowPath(_llint_slow_path_put_getter_by_val)
-    dispatch(5)
-
-
-_llint_op_put_setter_by_val:
-    traceExecution()
-    callSlowPath(_llint_slow_path_put_setter_by_val)
+    callSlowPath(_llint_slow_path_put_getter_setter)
     dispatch(5)


@@ -1434,27 +701,19 @@ _llint_op_jngreatereq:

 _llint_op_loop_hint:
     traceExecution()
-    checkSwitchToJITForLoop()
-    dispatch(1)
-
-
-_llint_op_watchdog:
-    traceExecution()
     loadp CodeBlock[cfr], t1
     loadp CodeBlock::m_vm[t1], t1
-    loadp VM::m_watchdog[t1], t0
-    btpnz t0, .handleWatchdogTimer
+    loadb VM::watchdog+Watchdog::m_timerDidFire[t1], t0
+    btbnz t0, .handleWatchdogTimer
 .afterWatchdogTimerCheck:
+    checkSwitchToJITForLoop()
     dispatch(1)
 .handleWatchdogTimer:
-    loadb Watchdog::m_timerDidFire[t0], t0
-    btbz t0, .afterWatchdogTimerCheck
     callWatchdogTimerHandler(.throwHandler)
     jmp .afterWatchdogTimerCheck
 .throwHandler:
     jmp _llint_throw_from_slow_path_trampoline

-
 _llint_op_switch_string:
     traceExecution()
     callSlowPath(_llint_slow_path_switch_string)
@@ -1464,65 +723,25 @@ _llint_op_switch_string:

 _llint_op_new_func_exp:
     traceExecution()
     callSlowPath(_llint_slow_path_new_func_exp)
-    dispatch(4)
-
-_llint_op_new_generator_func_exp:
-    traceExecution()
-    callSlowPath(_llint_slow_path_new_generator_func_exp)
-    dispatch(4)
+    dispatch(3)

-_llint_op_new_arrow_func_exp:
-    traceExecution()
-    callSlowPath(_llint_slow_path_new_arrow_func_exp)
-    dispatch(4)

 _llint_op_call:
     traceExecution()
     arrayProfileForCall()
-    doCall(_llint_slow_path_call, prepareForRegularCall)
+    doCall(_llint_slow_path_call)

-_llint_op_tail_call:
-    traceExecution()
-    arrayProfileForCall()
-    checkSwitchToJITForEpilogue()
-    doCall(_llint_slow_path_call, prepareForTailCall)

 _llint_op_construct:
     traceExecution()
-    doCall(_llint_slow_path_construct, prepareForRegularCall)
+    doCall(_llint_slow_path_construct)

-macro doCallVarargs(slowPath, prepareCall)
-    callSlowPath(_llint_slow_path_size_frame_for_varargs)
-    branchIfException(_llint_throw_from_slow_path_trampoline)
-    # calleeFrame in r1
-    if JSVALUE64
-        move r1, sp
-    else
-        # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align
-        if ARMv7
-            subp r1, CallerFrameAndPCSize, t2
-            move t2, sp
-        else
-            subp r1, CallerFrameAndPCSize, sp
-        end
-    end
-    slowPathForCall(slowPath, prepareCall)
-end

 _llint_op_call_varargs:
     traceExecution()
-    doCallVarargs(_llint_slow_path_call_varargs, prepareForRegularCall)
-
-_llint_op_tail_call_varargs:
-    traceExecution()
-    checkSwitchToJITForEpilogue()
-    # We lie and perform the tail call instead of preparing it since we can't
-    # prepare the frame for a call opcode
-    doCallVarargs(_llint_slow_path_call_varargs, prepareForTailCall)
-
-_llint_op_construct_varargs:
-    traceExecution()
-    doCallVarargs(_llint_slow_path_construct_varargs, prepareForRegularCall)
+    callSlowPath(_llint_slow_path_size_and_alloc_frame_for_varargs)
+    branchIfException(_llint_throw_from_slow_path_trampoline)
+    slowPathForCall(_llint_slow_path_call_varargs)

 _llint_op_call_eval:
@@ -1561,7 +780,7 @@ _llint_op_call_eval:
     # and a PC to call, and that PC may be a dummy thunk that just
     # returns the JS value that the eval returned.

-    slowPathForCall(_llint_slow_path_call_eval, prepareForRegularCall)
+    slowPathForCall(_llint_slow_path_call_eval)


 _llint_generic_return_point:
@@ -1574,34 +793,28 @@ _llint_op_strcat:
     dispatch(4)


-_llint_op_push_with_scope:
-    traceExecution()
-    callSlowPath(_slow_path_push_with_scope)
-    dispatch(4)
-
-
-_llint_op_assert:
+_llint_op_get_pnames:
     traceExecution()
-    callSlowPath(_slow_path_assert)
-    dispatch(3)
+    callSlowPath(_llint_slow_path_get_pnames)
+    dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else.


-_llint_op_save:
+_llint_op_push_with_scope:
     traceExecution()
-    callSlowPath(_slow_path_save)
-    dispatch(4)
+    callSlowPath(_llint_slow_path_push_with_scope)
+    dispatch(2)


-_llint_op_resume:
+_llint_op_pop_scope:
     traceExecution()
-    callSlowPath(_slow_path_resume)
-    dispatch(3)
+    callSlowPath(_llint_slow_path_pop_scope)
+    dispatch(1)


-_llint_op_create_lexical_environment:
+_llint_op_push_name_scope:
     traceExecution()
-    callSlowPath(_slow_path_create_lexical_environment)
-    dispatch(5)
+    callSlowPath(_llint_slow_path_push_name_scope)
+    dispatch(4)


 _llint_op_throw:
@@ -1655,56 +868,6 @@ _llint_native_call_trampoline:
 _llint_native_construct_trampoline:
     nativeCallTrampoline(NativeExecutable::m_constructor)

-_llint_op_get_enumerable_length:
-    traceExecution()
-    callSlowPath(_slow_path_get_enumerable_length)
-    dispatch(3)
-
-_llint_op_has_indexed_property:
-    traceExecution()
-    callSlowPath(_slow_path_has_indexed_property)
-    dispatch(5)
-
-_llint_op_has_structure_property:
-    traceExecution()
-    callSlowPath(_slow_path_has_structure_property)
-    dispatch(5)
-
-_llint_op_has_generic_property:
-    traceExecution()
-    callSlowPath(_slow_path_has_generic_property)
-    dispatch(4)
-
-_llint_op_get_direct_pname:
-    traceExecution()
-    callSlowPath(_slow_path_get_direct_pname)
-    dispatch(7)
-
-_llint_op_get_property_enumerator:
-    traceExecution()
-    callSlowPath(_slow_path_get_property_enumerator)
-    dispatch(3)
-
-_llint_op_enumerator_structure_pname:
-    traceExecution()
-    callSlowPath(_slow_path_next_structure_enumerator_pname)
-    dispatch(4)
-
-_llint_op_enumerator_generic_pname:
-    traceExecution()
-    callSlowPath(_slow_path_next_generic_enumerator_pname)
-    dispatch(4)
-
-_llint_op_to_index_string:
-    traceExecution()
-    callSlowPath(_slow_path_to_index_string)
-    dispatch(3)
-
-_llint_op_copy_rest:
-    traceExecution()
-    callSlowPath(_slow_path_copy_rest)
-    dispatch(4)
-
 # Lastly, make sure that we can link even though we don't support all opcodes.
 # These opcodes should never arise when using LLInt or either JIT. We assert
@@ -1723,3 +886,53 @@ macro notSupported()
         break
     end
 end
+
+_llint_op_get_by_id_chain:
+    notSupported()
+
+_llint_op_get_by_id_custom_chain:
+    notSupported()
+
+_llint_op_get_by_id_custom_proto:
+    notSupported()
+
+_llint_op_get_by_id_custom_self:
+    notSupported()
+
+_llint_op_get_by_id_generic:
+    notSupported()
+
+_llint_op_get_by_id_getter_chain:
+    notSupported()
+
+_llint_op_get_by_id_getter_proto:
+    notSupported()
+
+_llint_op_get_by_id_getter_self:
+    notSupported()
+
+_llint_op_get_by_id_proto:
+    notSupported()
+
+_llint_op_get_by_id_self:
+    notSupported()
+
+_llint_op_get_string_length:
+    notSupported()
+
+_llint_op_put_by_id_generic:
+    notSupported()
+
+_llint_op_put_by_id_replace:
+    notSupported()
+
+_llint_op_put_by_id_transition:
+    notSupported()
+
+_llint_op_init_global_const_nop:
+    dispatch(5)
+
+# Indicate the end of LLInt.
+_llint_end:
+    crash()