author    Lorry Tar Creator <lorry-tar-importer@lorry>  2016-04-10 09:28:39 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>  2016-04-10 09:28:39 +0000
commit    32761a6cee1d0dee366b885b7b9c777e67885688 (patch)
tree      d6bec92bebfb216f4126356e55518842c2f476a1 /Source/JavaScriptCore/jit/ThunkGenerators.cpp
parent    a4e969f4965059196ca948db781e52f7cfebf19e (diff)
download  WebKitGtk-tarball-32761a6cee1d0dee366b885b7b9c777e67885688.tar.gz
tag       webkitgtk-2.4.11
Diffstat (limited to 'Source/JavaScriptCore/jit/ThunkGenerators.cpp')
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerators.cpp | 662
1 file changed, 347 insertions(+), 315 deletions(-)
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
index 4a71dfeb2..f8f5cbaf5 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,14 +27,11 @@
#include "ThunkGenerators.h"
#include "CodeBlock.h"
-#include "DFGSpeculativeJIT.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSArrayIterator.h"
#include "JSStack.h"
-#include "MathCommon.h"
-#include "MaxFrameExtentForSlowPathCall.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
@@ -46,14 +43,17 @@ namespace JSC {
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
- if (ASSERT_DISABLED)
- return;
+#if !ASSERT_DISABLED
CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
- jit.abortWithReason(TGInvalidPointer);
+ jit.breakpoint();
isNonZero.link(&jit);
jit.pushToSave(pointerGPR);
jit.load8(pointerGPR, pointerGPR);
jit.popToRestore(pointerGPR);
+#else
+ UNUSED_PARAM(jit);
+ UNUSED_PARAM(pointerGPR);
+#endif
}
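
The validation above is debug-only: it breakpoints on a null pointer, then forces a one-byte load so a wild pointer faults immediately rather than at some distant later use. A minimal C++ analogue of the check (validatePointer is an illustrative name, not a JSC symbol):

#include <cassert>
#include <cstdint>

static void validatePointer(const void* p)
{
    assert(p); // corresponds to the branchTestPtr / breakpoint pair above
    // The load8 probe: a bogus address faults here, close to the bug.
    volatile std::uint8_t probe = *static_cast<const std::uint8_t*>(p);
    (void)probe;
}

int main()
{
    int x = 42;
    validatePointer(&x); // passes; validatePointer(nullptr) would assert
}
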
// We will jump here if the JIT code tries to make a call, but the
@@ -66,100 +66,86 @@ MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
// even though we won't use it.
jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
- jit.copyCalleeSavesToVMCalleeSavesBuffer();
+ // The CallFrame register points to the (failed) callee frame, so we need to pop back one frame.
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::callFrameRegister);
- jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
+ jit.setupArgumentsExecState();
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
emitPointerValidation(jit, GPRInfo::nonArgGPR0);
jit.call(GPRInfo::nonArgGPR0);
jit.jumpToExceptionHandler();
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}
static void slowPathFor(
- CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
+ CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction)
{
- jit.emitFunctionPrologue();
+ jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR2);
+ emitPointerValidation(jit, GPRInfo::nonArgGPR2);
+ jit.emitPutReturnPCToCallFrameHeader(GPRInfo::nonArgGPR2);
jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
-#if OS(WINDOWS) && CPU(X86_64)
- // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
- // Other argument values are shift by 1. Use space on the stack for our two return values.
- // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
- // and space for the 16 byte return area.
- jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
- jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
- jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
- jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
- jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
- emitPointerValidation(jit, GPRInfo::nonArgGPR0);
- jit.call(GPRInfo::nonArgGPR0);
- jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
- jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
- jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
-#else
- if (maxFrameExtentForSlowPathCall)
- jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
- jit.setupArgumentsWithExecState(GPRInfo::regT2);
+ jit.setupArgumentsExecState();
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
emitPointerValidation(jit, GPRInfo::nonArgGPR0);
jit.call(GPRInfo::nonArgGPR0);
- if (maxFrameExtentForSlowPathCall)
- jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
-#endif
-
+
// This slow call will return the address of one of the following:
// 1) Exception throwing thunk.
// 2) Host call return value returner thingy.
// 3) The function to call.
- // The second return value GPR will hold a non-zero value for tail calls.
-
+ jit.emitGetReturnPCFromCallFrameHeaderPtr(GPRInfo::nonPreservedNonReturnGPR);
+ jit.emitPutReturnPCToCallFrameHeader(CCallHelpers::TrustedImmPtr(0));
+ emitPointerValidation(jit, GPRInfo::nonPreservedNonReturnGPR);
+ jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR);
emitPointerValidation(jit, GPRInfo::returnValueGPR);
- jit.emitFunctionEpilogue();
-
- RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
- CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);
-
- jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
- jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);
-
- doNotTrash.link(&jit);
jit.jump(GPRInfo::returnValueGPR);
}
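
Both versions of slowPathFor follow the same contract: call a C++ operation that returns the address of machine code, validate that address, and jump to it. A sketch of the contract in plain C++ (ExecState is left opaque, and the typedef mirrors the P_JITOperation_E shape used in this version):

struct ExecState; // JSC's call frame type, opaque for this sketch

typedef char* (*P_JITOperation_E)(ExecState*); // operation returning a code address

// The operation picks one of the three targets listed in the comment
// above (exception thunk, host-call return thunk, or the linked callee),
// and the generated code tail-jumps to whatever address comes back.
static void* resolveSlowPathTarget(ExecState* exec, P_JITOperation_E operation)
{
    char* target = operation(exec); // e.g. operationLinkCall
    return target;                  // jit.jump(GPRInfo::returnValueGPR) goes here
}
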
-MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
+static MacroAssemblerCodeRef linkForThunkGenerator(
+ VM* vm, CodeSpecializationKind kind)
{
// The return address is on the stack or in the link register. We will hence
// save the return address to the call frame while we make a C++ function call
// to perform linking and lazy compilation if necessary. We expect the callee
// to be in regT0/regT1 (payload/tag), the CallFrame to have already
// been adjusted, and all other registers to be available for use.
+
CCallHelpers jit(vm);
- slowPathFor(jit, vm, operationLinkCall);
+ slowPathFor(jit, vm, kind == CodeForCall ? operationLinkCall : operationLinkConstruct);
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, ("Link call slow path thunk"));
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(
+ patchBuffer,
+ ("Link %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
+}
+
+MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
+{
+ return linkForThunkGenerator(vm, CodeForCall);
+}
+
+MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
+{
+ return linkForThunkGenerator(vm, CodeForConstruct);
}
// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
-MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
+MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm)
{
CCallHelpers jit(vm);
- slowPathFor(jit, vm, operationLinkPolymorphicCall);
+ slowPathFor(jit, vm, operationLinkClosureCall);
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, ("Link polymorphic call slow path thunk"));
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("Link closure call slow path thunk"));
}
-// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
-// path virtual call so that we can enable fast tail calls for megamorphic
-// virtual calls by using the shuffler.
-// https://bugs.webkit.org/show_bug.cgi?id=148831
-MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
+static MacroAssemblerCodeRef virtualForThunkGenerator(
+ VM* vm, CodeSpecializationKind kind)
{
// The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
// The return address is on the stack, or in the link register. We will hence
@@ -169,149 +155,203 @@ MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
CCallHelpers jit(vm);
CCallHelpers::JumpList slowCase;
-
- // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
- // slow path execution for the profiler.
- jit.add32(
- CCallHelpers::TrustedImm32(1),
- CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));
// FIXME: we should have a story for eliminating these checks. In many cases,
// the DFG knows that the value is definitely a cell, or definitely a function.
#if USE(JSVALUE64)
- jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);
-
slowCase.append(
jit.branchTest64(
- CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
+ CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::tagMaskRegister));
#else
slowCase.append(
jit.branch32(
CCallHelpers::NotEqual, GPRInfo::regT1,
CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
- AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
+ jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSCell::structureOffset()), GPRInfo::nonArgGPR2);
slowCase.append(
jit.branchPtr(
CCallHelpers::NotEqual,
- CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
+ CCallHelpers::Address(GPRInfo::nonArgGPR2, Structure::classInfoOffset()),
CCallHelpers::TrustedImmPtr(JSFunction::info())));
// Now we know we have a JSFunction.
jit.loadPtr(
CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
- GPRInfo::regT4);
- jit.loadPtr(
- CCallHelpers::Address(
- GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
- callLinkInfo.specializationKind())),
- GPRInfo::regT4);
- slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));
+ GPRInfo::nonArgGPR2);
+ slowCase.append(
+ jit.branch32(
+ CCallHelpers::LessThan,
+ CCallHelpers::Address(
+ GPRInfo::nonArgGPR2, ExecutableBase::offsetOfNumParametersFor(kind)),
+ CCallHelpers::TrustedImm32(0)));
// Now we know that we have a CodeBlock, and we're committed to making a fast
// call.
+ jit.loadPtr(
+ CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()),
+ GPRInfo::regT1);
+#if USE(JSVALUE64)
+ jit.store64(
+ GPRInfo::regT1,
+ CCallHelpers::Address(
+ GPRInfo::callFrameRegister,
+ static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));
+#else
+ jit.storePtr(
+ GPRInfo::regT1,
+ CCallHelpers::Address(
+ GPRInfo::callFrameRegister,
+ static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain +
+ OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ jit.store32(
+ CCallHelpers::TrustedImm32(JSValue::CellTag),
+ CCallHelpers::Address(
+ GPRInfo::callFrameRegister,
+ static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain +
+ OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+#endif
+
+ jit.loadPtr(
+ CCallHelpers::Address(GPRInfo::nonArgGPR2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)),
+ GPRInfo::regT0);
+
// Make a tail call. This will return back to JIT code.
- emitPointerValidation(jit, GPRInfo::regT4);
- if (callLinkInfo.isTailCall()) {
- jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
- jit.prepareForTailCallSlow(GPRInfo::regT4);
- }
- jit.jump(GPRInfo::regT4);
+ emitPointerValidation(jit, GPRInfo::regT0);
+ jit.jump(GPRInfo::regT0);
slowCase.link(&jit);
// Here we don't know anything, so revert to the full slow path.
- slowPathFor(jit, vm, operationVirtualCall);
+ slowPathFor(jit, vm, kind == CodeForCall ? operationVirtualCall : operationVirtualConstruct);
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(
patchBuffer,
- ("Virtual %s slow path thunk",
- callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct"));
+ ("Virtual %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
}
-enum ThunkEntryType { EnterViaCall, EnterViaJump };
+MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
+{
+ return virtualForThunkGenerator(vm, CodeForCall);
+}
-static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
+MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
+{
+ return virtualForThunkGenerator(vm, CodeForConstruct);
+}
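
The fast path in virtualForThunkGenerator reduces to a chain of type checks ahead of an indirect jump: the callee must be a cell, its class must be JSFunction, and its executable must already have compiled code for the requested specialization (a negative parameter count means none). A semantic model with stand-in types, not JSC's:

#include <optional>

struct Executable { void* jitCodeWithArityCheck = nullptr; int numParametersForCall = -1; };
struct JSFunction { Executable* executable = nullptr; };

// Returns the code address to jump to, or nullopt to take the slow path.
static std::optional<void*> virtualCallFastPath(JSFunction* callee)
{
    if (!callee || !callee->executable)
        return std::nullopt;                          // cell / classInfo checks failed
    if (callee->executable->numParametersForCall < 0)
        return std::nullopt;                          // no compiled code yet
    return callee->executable->jitCodeWithArityCheck; // tail-jump target
}
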
+
+static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind)
{
int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);
JSInterfaceJIT jit(vm);
-
- if (entryType == EnterViaCall)
- jit.emitFunctionPrologue();
-#if USE(JSVALUE64)
- else if (entryType == EnterViaJump) {
- // We're coming from a specialized thunk that has saved the prior tag registers' contents.
- // Restore them now.
-#if CPU(ARM64)
- jit.popPair(JSInterfaceJIT::tagTypeNumberRegister, JSInterfaceJIT::tagMaskRegister);
-#else
- jit.pop(JSInterfaceJIT::tagMaskRegister);
- jit.pop(JSInterfaceJIT::tagTypeNumberRegister);
-#endif
- }
-#endif
-
- jit.emitPutToCallFrameHeader(0, JSStack::CodeBlock);
+
+ jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
#if CPU(X86)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.peek(JSInterfaceJIT::regT1);
+ jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT1);
+
// Calling convention: f(ecx, edx, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
- jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.
// call the function
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));
- jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister);
#elif CPU(X86_64)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.peek(JSInterfaceJIT::regT1);
+ jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT1);
+
#if !OS(WINDOWS)
// Calling convention: f(edi, esi, edx, ecx, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.
+
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#else
// Calling convention: f(ecx, edx, r8, r9, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
- // Leave space for the callee parameter home addresses.
- // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
- jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+ // Leave space for the callee parameter home addresses and align the stack.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
- jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
#elif CPU(ARM64)
+ COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
+ COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(ARM64Registers::x3);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
+ jit.emitPutReturnPCToCallFrameHeader(ARM64Registers::lr);
+
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
+ jit.move(ARM64Registers::x3, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
+
+ jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
+
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
+ // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data.
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT2);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
+ jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT3);
+
#if CPU(MIPS)
// Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
@@ -322,6 +362,7 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
+ jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));
@@ -329,10 +370,12 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k
// Restore stack space
jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
+
+ jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
#else
#error "JIT not supported on this platform."
UNUSED_PARAM(executableOffsetToFunction);
- abortWithReason(TGNotSupported);
+ breakpoint();
#endif
// Check for an exception
@@ -342,43 +385,40 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k
#else
JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
JSInterfaceJIT::NotEqual,
- JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
- JSInterfaceJIT::TrustedImm32(0));
+ JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+ JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif
- jit.emitFunctionEpilogue();
// Return.
jit.ret();
// Handle an exception
exceptionHandler.link(&jit);
- jit.copyCalleeSavesToVMCalleeSavesBuffer();
+ // Grab the return address.
+ jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1);
+
+ jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2);
+ jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
+
jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
#if CPU(X86) && USE(JSVALUE32_64)
jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
- jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT0);
- jit.push(JSInterfaceJIT::regT0);
+ jit.push(JSInterfaceJIT::callFrameRegister);
#else
-#if OS(WINDOWS)
- // Allocate space on stack for the 4 parameter registers.
- jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
-#endif
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
#endif
jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
-#elif OS(WINDOWS)
- jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
jit.jumpToExceptionHandler();
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data()));
}
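
Two details of the trampoline are easy to miss. The stack adjustments such as 16 - sizeof(void*) exist because the targeted ABIs require 16-byte stack alignment at call sites, and the return address pushed by the incoming call leaves the stack 8 bytes short of that. And every per-CPU arm does the same work: publish the frame through vm->topCallFrame, call the host function with the ExecState as its only argument, then check for a pending exception. A sketch of that calling contract (the typedefs are assumptions for illustration):

struct ExecState;                 // opaque call frame (stand-in)
typedef long long EncodedJSValue; // assumption: 64-bit encoded JSValue
typedef EncodedJSValue (*NativeFunction)(ExecState*);

static EncodedJSValue callHostFunction(ExecState* exec, NativeFunction function)
{
    // topCallFrame publication and the exception check are elided; the
    // generated trampoline performs both around this single call.
    return function(exec); // calling convention: f(ExecState*) on every port
}
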
MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
@@ -386,72 +426,43 @@ MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
return nativeForGenerator(vm, CodeForCall);
}
-MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
-{
- return nativeForGenerator(vm, CodeForCall, EnterViaJump);
-}
-
MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
return nativeForGenerator(vm, CodeForConstruct);
}
-MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
+MacroAssemblerCodeRef arityFixup(VM* vm)
{
JSInterfaceJIT jit(vm);
- // We enter with fixup count in argumentGPR0
- // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)
+ // We enter with fixup count in regT0
#if USE(JSVALUE64)
-#if OS(WINDOWS)
- const GPRReg extraTemp = JSInterfaceJIT::regT0;
-#else
- const GPRReg extraTemp = JSInterfaceJIT::regT5;
-#endif
# if CPU(X86_64)
jit.pop(JSInterfaceJIT::regT4);
# endif
+ jit.neg64(JSInterfaceJIT::regT0);
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
- jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
- jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);
-
- // Check to see if we have extra slots we can use
- jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
- jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
- JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
- jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
- JSInterfaceJIT::Label fillExtraSlots(jit.label());
- jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
- jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
- jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
- jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
- JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
- noExtraSlot.link(&jit);
-
- jit.neg64(JSInterfaceJIT::argumentGPR0);
-
- // Move current frame down argumentGPR0 number of slots
+ jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * 8), JSInterfaceJIT::regT2);
+ jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
+
+ // Move current frame down regT0 number of slots
JSInterfaceJIT::Label copyLoop(jit.label());
- jit.load64(JSInterfaceJIT::regT3, extraTemp);
- jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
+ jit.load64(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
+ jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
- jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
+ jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
- // Fill in argumentGPR0 missing arg slots with undefined
- jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
- jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
+ // Fill in regT0 missing arg slots with undefined
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
+ jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
- jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
+ jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
- jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
-
- // Adjust call frame register and stack pointer to account for missing args
- jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
- jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
- jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
- jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);
+ jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
- done.link(&jit);
+ // Adjust call frame register to account for missing args
+ jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT0);
+ jit.addPtr(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister);
# if CPU(X86_64)
jit.push(JSInterfaceJIT::regT4);
@@ -461,54 +472,34 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
# if CPU(X86)
jit.pop(JSInterfaceJIT::regT4);
# endif
+ jit.neg32(JSInterfaceJIT::regT0);
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
- jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
- jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);
-
- // Check to see if we have extra slots we can use
- jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
- jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
- JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
- JSInterfaceJIT::Label fillExtraSlots(jit.label());
- jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
- jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
- jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
- jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
- jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
- jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
- jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
- JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
- noExtraSlot.link(&jit);
-
- jit.neg32(JSInterfaceJIT::argumentGPR0);
-
- // Move current frame down argumentGPR0 number of slots
+ jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * 8), JSInterfaceJIT::regT2);
+ jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
+
+ // Move current frame down regT0 number of slots
JSInterfaceJIT::Label copyLoop(jit.label());
- jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
- jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
- jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
- jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
+ jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
+ jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+ jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
+ jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
- jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
+ jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
- // Fill in argumentGPR0 missing arg slots with undefined
- jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
+ // Fill in regT0 missing arg slots with undefined
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
- jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
- jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
- jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
- jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
+ jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
+ jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+ jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
+ jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
- jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
-
- // Adjust call frame register and stack pointer to account for missing args
- jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
- jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
- jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
- jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);
+ jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
- done.link(&jit);
+ // Adjust call frame register to account for missing args
+ jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT0);
+ jit.addPtr(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister);
# if CPU(X86)
jit.push(JSInterfaceJIT::regT4);
@@ -516,20 +507,10 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
jit.ret();
#endif
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}
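
Both the JSVALUE64 and JSVALUE32_64 arms of the arity fixup implement one idea: when a call supplies fewer arguments than the callee declares, slide the frame down by the shortfall and fill the vacated slots with undefined, so the callee sees a well-formed frame. A compact model of the observable effect (stand-in value type; the real code moves the live frame in place rather than padding a vector):

#include <cassert>
#include <vector>

struct Value { bool undefined; int payload; }; // stand-in for a JSValue

static std::vector<Value> fixupArity(std::vector<Value> args, size_t expected)
{
    while (args.size() < expected)
        args.push_back(Value{true, 0}); // missing trailing args read as undefined
    return args;
}

int main()
{
    auto fixed = fixupArity({{false, 1}, {false, 2}}, 4);
    assert(fixed.size() == 4 && fixed[3].undefined);
}
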
-MacroAssemblerCodeRef unreachableGenerator(VM* vm)
-{
- JSInterfaceJIT jit(vm);
-
- jit.breakpoint();
-
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, ("unreachable thunk"));
-}
-
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
// load string
@@ -573,7 +554,7 @@ MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
SpecializedThunkJIT jit(vm, 1);
stringCharLoad(jit, vm);
jit.returnInt32(SpecializedThunkJIT::regT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "charCodeAt");
}
MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
@@ -582,7 +563,7 @@ MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
stringCharLoad(jit, vm);
charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "charAt");
}
MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
@@ -592,28 +573,7 @@ MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
-}
-
-MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
-{
- SpecializedThunkJIT jit(vm, 1);
- MacroAssembler::Jump nonIntArgJump;
- jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);
-
- SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
- jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
- jit.returnInt32(SpecializedThunkJIT::regT1);
-
- if (jit.supportsFloatingPointTruncate()) {
- nonIntArgJump.link(&jit);
- jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
- jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
- jit.appendFailure(jit.jump());
- } else
- jit.appendFailure(nonIntArgJump);
-
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "fromCharCode");
}
MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
@@ -625,15 +585,25 @@ MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "sqrt");
}
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
+extern "C" {
+
+double jsRound(double) REFERENCED_FROM_ASM;
+double jsRound(double d)
+{
+ double integer = ceil(d);
+ return integer - (integer - d > 0.5);
+}
+
+}
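
jsRound above implements JavaScript's Math.round, where exact halves round toward positive infinity, using ceil plus one comparison. A quick self-contained check, with the function body copied from the patch:

#include <cassert>
#include <cmath>

static double jsRound(double d)
{
    double integer = ceil(d);
    return integer - (integer - d > 0.5);
}

int main()
{
    assert(jsRound(2.4) == 2.0);   // ceil gives 3.0; 3.0 - 2.4 = 0.6 > 0.5, subtract 1
    assert(jsRound(2.5) == 3.0);   // 3.0 - 2.5 = 0.5 is not > 0.5, keep the ceiling
    assert(jsRound(-2.5) == -2.0); // halves round toward +infinity, as in JS
}
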
-#if CPU(X86_64) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
+#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
asm( \
@@ -641,9 +611,7 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
".globl " SYMBOL_STRING(function##Thunk) "\n" \
HIDE_SYMBOL(function##Thunk) "\n" \
SYMBOL_STRING(function##Thunk) ":" "\n" \
- "pushq %rax\n" \
"call " GLOBAL_REFERENCE(function) "\n" \
- "popq %rcx\n" \
"ret\n" \
);\
extern "C" { \
@@ -651,7 +619,7 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && OS(LINUX) && defined(__PIC__)
+#elif CPU(X86) && COMPILER(GCC) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
asm( \
".text\n" \
@@ -675,19 +643,19 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
+#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
asm( \
".text\n" \
".globl " SYMBOL_STRING(function##Thunk) "\n" \
HIDE_SYMBOL(function##Thunk) "\n" \
SYMBOL_STRING(function##Thunk) ":" "\n" \
- "subl $20, %esp\n" \
+ "subl $8, %esp\n" \
"movsd %xmm0, (%esp) \n" \
"call " GLOBAL_REFERENCE(function) "\n" \
"fstpl (%esp) \n" \
"movsd (%esp), %xmm0 \n" \
- "addl $20, %esp\n" \
+ "addl $8, %esp\n" \
"ret\n" \
);\
extern "C" { \
@@ -695,7 +663,7 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-#elif CPU(ARM_THUMB2) && COMPILER(GCC_OR_CLANG) && PLATFORM(IOS)
+#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)
#define defineUnaryDoubleOpWrapper(function) \
asm( \
@@ -728,38 +696,12 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
HIDE_SYMBOL(function##Thunk) "\n" \
SYMBOL_STRING(function##Thunk) ":" "\n" \
"b " GLOBAL_REFERENCE(function) "\n" \
- ".previous" \
); \
extern "C" { \
MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)
-
-// MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions.
-static double (_cdecl *floorFunction)(double) = floor;
-static double (_cdecl *ceilFunction)(double) = ceil;
-static double (_cdecl *expFunction)(double) = exp;
-static double (_cdecl *logFunction)(double) = log;
-static double (_cdecl *jsRoundFunction)(double) = jsRound;
-
-#define defineUnaryDoubleOpWrapper(function) \
- extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
- { \
- __asm \
- { \
- __asm sub esp, 20 \
- __asm movsd mmword ptr [esp], xmm0 \
- __asm call function##Function \
- __asm fstp qword ptr [esp] \
- __asm movsd xmm0, mmword ptr [esp] \
- __asm add esp, 20 \
- __asm ret \
- } \
- } \
- static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-
#else
#define defineUnaryDoubleOpWrapper(function) \
@@ -774,6 +716,7 @@ defineUnaryDoubleOpWrapper(ceil);
static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
+static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
@@ -797,7 +740,7 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
SpecializedThunkJIT::Jump intResult;
SpecializedThunkJIT::JumpList doubleResult;
if (jit.supportsFloatingPointTruncate()) {
- jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
SpecializedThunkJIT::JumpList slowPath;
// Handle the negative doubles in the slow path for now.
@@ -814,7 +757,7 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "floor");
}
MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
@@ -827,17 +770,17 @@ MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
jit.returnInt32(SpecializedThunkJIT::regT0);
nonIntJump.link(&jit);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
- if (jit.supportsFloatingPointRounding())
- jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
- else
- jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
-
+#if CPU(ARM64)
+ jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+#else
+ jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
+#endif // CPU(ARM64)
SpecializedThunkJIT::JumpList doubleResult;
jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "ceil");
}
MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
@@ -853,12 +796,12 @@ MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
SpecializedThunkJIT::Jump intResult;
SpecializedThunkJIT::JumpList doubleResult;
if (jit.supportsFloatingPointTruncate()) {
- jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
SpecializedThunkJIT::JumpList slowPath;
// Handle the negative doubles in the slow path for now.
slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
- jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1);
jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
intResult = jit.jump();
@@ -871,7 +814,7 @@ MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "round");
}
MacroAssemblerCodeRef expThunkGenerator(VM* vm)
@@ -884,7 +827,7 @@ MacroAssemblerCodeRef expThunkGenerator(VM* vm)
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "exp");
}
MacroAssemblerCodeRef logThunkGenerator(VM* vm)
@@ -897,7 +840,7 @@ MacroAssemblerCodeRef logThunkGenerator(VM* vm)
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "log");
}
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
@@ -910,14 +853,14 @@ MacroAssemblerCodeRef absThunkGenerator(VM* vm)
jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
- jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0));
+ jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
jit.returnInt32(SpecializedThunkJIT::regT0);
nonIntJump.link(&jit);
// Shame about the double int conversion here.
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
jit.returnDouble(SpecializedThunkJIT::fpRegT1);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "abs");
}
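
The integer fast path of the abs thunk uses the classic branch-free absolute value: broadcast the sign bit, add, xor. The only input it cannot handle is INT32_MIN, whose magnitude is unrepresentable, and that is exactly what the Equal-to-(1 << 31) failure branch catches. In plain C++ (relies on arithmetic right shift of negative values, which the compilers targeted here guarantee):

#include <cassert>
#include <cstdint>

static std::int32_t branchFreeAbs(std::int32_t x)
{
    std::int32_t sign = x >> 31; // 0 if x >= 0, -1 (all ones) if x < 0
    return (x + sign) ^ sign;    // identity for sign == 0, two's-complement negate otherwise
}

int main()
{
    assert(branchFreeAbs(7) == 7);
    assert(branchFreeAbs(-7) == 7);
    // branchFreeAbs(INT32_MIN) yields INT32_MIN again; the thunk routes
    // exactly that case to the slow path.
}
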
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
@@ -926,7 +869,7 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm)
if (!jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
- jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
MacroAssembler::Jump nonIntExponent;
jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
@@ -954,7 +897,7 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm)
if (jit.supportsFloatingPointSqrt()) {
nonIntExponent.link(&jit);
- jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
+ jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
@@ -969,7 +912,7 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm)
} else
jit.appendFailure(nonIntExponent);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "pow");
}
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
@@ -988,7 +931,8 @@ MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
nonIntArg0Jump.link(&jit);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
- jit.appendFailure(jit.jump());
+ jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0);
+ jit.jump(doneLoadingArg0);
} else
jit.appendFailure(nonIntArg0Jump);
@@ -996,29 +940,117 @@ MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
nonIntArg1Jump.link(&jit);
jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
- jit.appendFailure(jit.jump());
+ jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1);
+ jit.jump(doneLoadingArg1);
} else
jit.appendFailure(nonIntArg1Jump);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "imul");
}
-MacroAssemblerCodeRef randomThunkGenerator(VM* vm)
+static MacroAssemblerCodeRef arrayIteratorNextThunkGenerator(VM* vm, ArrayIterationKind kind)
{
- SpecializedThunkJIT jit(vm, 0);
- if (!jit.supportsFloatingPoint())
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
+ typedef SpecializedThunkJIT::TrustedImm32 TrustedImm32;
+ typedef SpecializedThunkJIT::TrustedImmPtr TrustedImmPtr;
+ typedef SpecializedThunkJIT::Address Address;
+ typedef SpecializedThunkJIT::BaseIndex BaseIndex;
+ typedef SpecializedThunkJIT::Jump Jump;
+
+ SpecializedThunkJIT jit(vm);
+ // Make sure we're being called on an array iterator, and load m_iteratedObject, and m_nextIndex into regT0 and regT1 respectively
+ jit.loadArgumentWithSpecificClass(JSArrayIterator::info(), SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT4, SpecializedThunkJIT::regT1);
-#if USE(JSVALUE64)
- jit.emitRandomThunk(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
- jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ // Early exit if we don't have a thunk for this form of iteration
+ jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIterationKind()), TrustedImm32(ArrayIterateKeyValue)));
+
+ jit.loadPtr(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIteratedObject()), SpecializedThunkJIT::regT0);
+
+ jit.load32(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()), SpecializedThunkJIT::regT1);
+
+ // Pull out the butterfly from iteratedObject
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSCell::structureOffset()), SpecializedThunkJIT::regT2);
+
+ jit.load8(Address(SpecializedThunkJIT::regT2, Structure::indexingTypeOffset()), SpecializedThunkJIT::regT3);
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+ Jump nullButterfly = jit.branchTestPtr(SpecializedThunkJIT::Zero, SpecializedThunkJIT::regT2);
+
+ Jump notDone = jit.branch32(SpecializedThunkJIT::Below, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfPublicLength()));
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "random");
+ nullButterfly.link(&jit);
+
+ // Return the termination signal to indicate that we've finished
+ jit.move(TrustedImmPtr(vm->iterationTerminator.get()), SpecializedThunkJIT::regT0);
+ jit.returnJSCell(SpecializedThunkJIT::regT0);
+
+ notDone.link(&jit);
+
+ if (kind == ArrayIterateKey) {
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnInt32(SpecializedThunkJIT::regT1);
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "array-iterator-next-key");
+
+ }
+ ASSERT(kind == ArrayIterateValue);
+
+ // Okay, now we're returning a value so make sure we're inside the vector size
+ jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfVectorLength())));
+
+ // So now we perform inline loads for int32, value/undecided, and double storage
+ Jump undecidedStorage = jit.branch32(SpecializedThunkJIT::Equal, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithUndecided));
+ Jump notContiguousStorage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithContiguous));
+
+ undecidedStorage.link(&jit);
+
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+
+#if USE(JSVALUE64)
+ jit.load64(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::regT0);
+ Jump notHole = jit.branchTest64(SpecializedThunkJIT::NonZero, SpecializedThunkJIT::regT0);
+ jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT0);
+ notHole.link(&jit);
+ jit.addPtr(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnJSValue(SpecializedThunkJIT::regT0);
#else
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
+ jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfTag()), SpecializedThunkJIT::regT3);
+ Jump notHole = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(JSValue::EmptyValueTag));
+ jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
+ jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT0);
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnJSValue(SpecializedThunkJIT::regT0, JSInterfaceJIT::regT1);
+ notHole.link(&jit);
+ jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.move(SpecializedThunkJIT::regT3, SpecializedThunkJIT::regT1);
+ jit.returnJSValue(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
#endif
+ notContiguousStorage.link(&jit);
+
+ Jump notInt32Storage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithInt32));
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+ jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ notInt32Storage.link(&jit);
+
+ jit.appendFailure(jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithDouble)));
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+ jit.loadDouble(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::fpRegT0);
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "array-iterator-next-value");
}
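
Stripped of the indexing-type dispatch, the iterator thunk implements a bounds-checked next(): when m_nextIndex reaches the butterfly's public length (or the butterfly is null) it returns the shared termination sentinel, otherwise it returns the element at the current index and bumps m_nextIndex, materializing holes in contiguous storage as undefined. A semantic model with illustrative types:

#include <cassert>
#include <cstddef>
#include <optional>
#include <vector>

// nullopt plays the role of vm->iterationTerminator in this sketch.
template<typename T>
static std::optional<T> iteratorNext(const std::vector<T>& array, std::size_t& nextIndex)
{
    if (nextIndex >= array.size()) // the publicLength check in the thunk
        return std::nullopt;
    return array[nextIndex++];     // load the element, then ++m_nextIndex
}

int main()
{
    std::vector<int> a{10, 20};
    std::size_t i = 0;
    assert(*iteratorNext(a, i) == 10);
    assert(*iteratorNext(a, i) == 20);
    assert(!iteratorNext(a, i).has_value()); // done
}
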
+MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM* vm)
+{
+ return arrayIteratorNextThunkGenerator(vm, ArrayIterateKey);
+}
+
+MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM* vm)
+{
+ return arrayIteratorNextThunkGenerator(vm, ArrayIterateValue);
+}
+
}
#endif // ENABLE(JIT)