author    | Lorry Tar Creator <lorry-tar-importer@lorry> | 2015-05-20 09:56:07 +0000
committer | Lorry Tar Creator <lorry-tar-importer@lorry> | 2015-05-20 09:56:07 +0000
commit    | 41386e9cb918eed93b3f13648cbef387e371e451 (patch)
tree      | a97f9d7bd1d9d091833286085f72da9d83fd0606 /Source/JavaScriptCore/jit/ThunkGenerators.cpp
parent    | e15dd966d523731101f70ccf768bba12435a0208 (diff)
download  | WebKitGtk-tarball-41386e9cb918eed93b3f13648cbef387e371e451.tar.gz
webkitgtk-2.4.9
Diffstat (limited to 'Source/JavaScriptCore/jit/ThunkGenerators.cpp')
-rw-r--r-- | Source/JavaScriptCore/jit/ThunkGenerators.cpp | 602
1 file changed, 324 insertions(+), 278 deletions(-)
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
index 34c6504dd..f8f5cbaf5 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -27,14 +27,11 @@
 #include "ThunkGenerators.h"
 
 #include "CodeBlock.h"
-#include "DFGSpeculativeJIT.h"
 #include "JITOperations.h"
 #include "JSArray.h"
 #include "JSArrayIterator.h"
 #include "JSStack.h"
-#include "MathCommon.h"
-#include "MaxFrameExtentForSlowPathCall.h"
-#include "JSCInlines.h"
+#include "Operations.h"
 #include "SpecializedThunkJIT.h"
 #include <wtf/InlineASM.h>
 #include <wtf/StringPrintStream.h>
@@ -46,14 +43,17 @@ namespace JSC {
 
 inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
 {
-    if (ASSERT_DISABLED)
-        return;
+#if !ASSERT_DISABLED
     CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
-    jit.abortWithReason(TGInvalidPointer);
+    jit.breakpoint();
     isNonZero.link(&jit);
     jit.pushToSave(pointerGPR);
     jit.load8(pointerGPR, pointerGPR);
     jit.popToRestore(pointerGPR);
+#else
+    UNUSED_PARAM(jit);
+    UNUSED_PARAM(pointerGPR);
+#endif
 }
 
 // We will jump here if the JIT code tries to make a call, but the
@@ -66,40 +66,45 @@ MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
     // even though we won't use it.
     jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
 
-    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
+    // The CallFrame register points to the (failed) callee frame, so we need to pop back one frame.
+    jit.emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::callFrameRegister);
+
+    jit.setupArgumentsExecState();
     jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
     emitPointerValidation(jit, GPRInfo::nonArgGPR0);
     jit.call(GPRInfo::nonArgGPR0);
     jit.jumpToExceptionHandler();
 
-    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
     return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
 }
 
 static void slowPathFor(
-    CCallHelpers& jit, VM* vm, P_JITOperation_ECli slowPathFunction)
+    CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction)
 {
-    jit.emitFunctionPrologue();
+    jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR2);
+    emitPointerValidation(jit, GPRInfo::nonArgGPR2);
+    jit.emitPutReturnPCToCallFrameHeader(GPRInfo::nonArgGPR2);
     jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
-    if (maxFrameExtentForSlowPathCall)
-        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
-    jit.setupArgumentsWithExecState(GPRInfo::regT2);
+    jit.setupArgumentsExecState();
     jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
     emitPointerValidation(jit, GPRInfo::nonArgGPR0);
     jit.call(GPRInfo::nonArgGPR0);
-    if (maxFrameExtentForSlowPathCall)
-        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
 
     // This slow call will return the address of one of the following:
     // 1) Exception throwing thunk.
     // 2) Host call return value returner thingy.
// 3) The function to call. + jit.emitGetReturnPCFromCallFrameHeaderPtr(GPRInfo::nonPreservedNonReturnGPR); + jit.emitPutReturnPCToCallFrameHeader(CCallHelpers::TrustedImmPtr(0)); + emitPointerValidation(jit, GPRInfo::nonPreservedNonReturnGPR); + jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR); emitPointerValidation(jit, GPRInfo::returnValueGPR); - jit.emitFunctionEpilogue(); jit.jump(GPRInfo::returnValueGPR); } -MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm) +static MacroAssemblerCodeRef linkForThunkGenerator( + VM* vm, CodeSpecializationKind kind) { // The return address is on the stack or in the link register. We will hence // save the return address to the call frame while we make a C++ function call @@ -109,25 +114,38 @@ MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm) CCallHelpers jit(vm); - slowPathFor(jit, vm, operationLinkCall); + slowPathFor(jit, vm, kind == CodeForCall ? operationLinkCall : operationLinkConstruct); - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); - return FINALIZE_CODE(patchBuffer, ("Link call slow path thunk")); + LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); + return FINALIZE_CODE( + patchBuffer, + ("Link %s slow path thunk", kind == CodeForCall ? "call" : "construct")); +} + +MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm) +{ + return linkForThunkGenerator(vm, CodeForCall); +} + +MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm) +{ + return linkForThunkGenerator(vm, CodeForConstruct); } // For closure optimizations, we only include calls, since if you're using closures for // object construction then you're going to lose big time anyway. -MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm) +MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm) { CCallHelpers jit(vm); - slowPathFor(jit, vm, operationLinkPolymorphicCall); + slowPathFor(jit, vm, operationLinkClosureCall); - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); - return FINALIZE_CODE(patchBuffer, ("Link polymorphic call slow path thunk")); + LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); + return FINALIZE_CODE(patchBuffer, ("Link closure call slow path thunk")); } -MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo) +static MacroAssemblerCodeRef virtualForThunkGenerator( + VM* vm, CodeSpecializationKind kind) { // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1). // The return address is on the stack, or in the link register. We will hence @@ -137,121 +155,166 @@ MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo) CCallHelpers jit(vm); CCallHelpers::JumpList slowCase; - - // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the - // slow path execution for the profiler. - jit.add32( - CCallHelpers::TrustedImm32(1), - CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount())); // FIXME: we should have a story for eliminating these checks. In many cases, // the DFG knows that the value is definitely a cell, or definitely a function. 
#if USE(JSVALUE64) - jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4); - slowCase.append( jit.branchTest64( - CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4)); + CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::tagMaskRegister)); #else slowCase.append( jit.branch32( CCallHelpers::NotEqual, GPRInfo::regT1, CCallHelpers::TrustedImm32(JSValue::CellTag))); #endif - AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1); + jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSCell::structureOffset()), GPRInfo::nonArgGPR2); slowCase.append( jit.branchPtr( CCallHelpers::NotEqual, - CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()), + CCallHelpers::Address(GPRInfo::nonArgGPR2, Structure::classInfoOffset()), CCallHelpers::TrustedImmPtr(JSFunction::info()))); // Now we know we have a JSFunction. jit.loadPtr( CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()), - GPRInfo::regT4); - jit.loadPtr( - CCallHelpers::Address( - GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor( - callLinkInfo.specializationKind(), callLinkInfo.registerPreservationMode())), - GPRInfo::regT4); - slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4)); + GPRInfo::nonArgGPR2); + slowCase.append( + jit.branch32( + CCallHelpers::LessThan, + CCallHelpers::Address( + GPRInfo::nonArgGPR2, ExecutableBase::offsetOfNumParametersFor(kind)), + CCallHelpers::TrustedImm32(0))); // Now we know that we have a CodeBlock, and we're committed to making a fast // call. + jit.loadPtr( + CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()), + GPRInfo::regT1); +#if USE(JSVALUE64) + jit.store64( + GPRInfo::regT1, + CCallHelpers::Address( + GPRInfo::callFrameRegister, + static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain)); +#else + jit.storePtr( + GPRInfo::regT1, + CCallHelpers::Address( + GPRInfo::callFrameRegister, + static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + jit.store32( + CCallHelpers::TrustedImm32(JSValue::CellTag), + CCallHelpers::Address( + GPRInfo::callFrameRegister, + static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); +#endif + + jit.loadPtr( + CCallHelpers::Address(GPRInfo::nonArgGPR2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)), + GPRInfo::regT0); + // Make a tail call. This will return back to JIT code. - emitPointerValidation(jit, GPRInfo::regT4); - jit.jump(GPRInfo::regT4); + emitPointerValidation(jit, GPRInfo::regT0); + jit.jump(GPRInfo::regT0); slowCase.link(&jit); // Here we don't know anything, so revert to the full slow path. - slowPathFor(jit, vm, operationVirtualCall); + slowPathFor(jit, vm, kind == CodeForCall ? operationVirtualCall : operationVirtualConstruct); - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); + LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); return FINALIZE_CODE( patchBuffer, - ("Virtual %s%s slow path thunk at CodePtr(%p)", - callLinkInfo.specializationKind() == CodeForCall ? "call" : "construct", - callLinkInfo.registerPreservationMode() == MustPreserveRegisters ? " that preserves registers" : "", - callLinkInfo.callReturnLocation().dataLocation())); + ("Virtual %s slow path thunk", kind == CodeForCall ? 
"call" : "construct")); } -enum ThunkEntryType { EnterViaCall, EnterViaJump }; +MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm) +{ + return virtualForThunkGenerator(vm, CodeForCall); +} + +MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm) +{ + return virtualForThunkGenerator(vm, CodeForConstruct); +} -static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall) +static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind) { int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind); JSInterfaceJIT jit(vm); - - if (entryType == EnterViaCall) - jit.emitFunctionPrologue(); - + jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock); jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame); #if CPU(X86) + // Load caller frame's scope chain into this callframe so that whatever we call can + // get to its global data. + jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0); + jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0); + jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain); + + jit.peek(JSInterfaceJIT::regT1); + jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT1); + // Calling convention: f(ecx, edx, ...); // Host function signature: f(ExecState*); jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx); - jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue. + jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call. // call the function jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1); jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1); + jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction)); - jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); + jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); #elif CPU(X86_64) + // Load caller frame's scope chain into this callframe so that whatever we call can + // get to its global data. + jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0); + jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0); + jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain); + + jit.peek(JSInterfaceJIT::regT1); + jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT1); + #if !OS(WINDOWS) // Calling convention: f(edi, esi, edx, ecx, ...); // Host function signature: f(ExecState*); jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi); + jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call. + jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi); jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9); + jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. 
jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction)); + jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); #else // Calling convention: f(ecx, edx, r8, r9, ...); // Host function signature: f(ExecState*); jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx); - // Leave space for the callee parameter home addresses. - // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it. - jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); + // Leave space for the callee parameter home addresses and align the stack. + jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx); jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9); + jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction)); - jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); + jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); #endif #elif CPU(ARM64) @@ -261,13 +324,34 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1); COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2); + // Load caller frame's scope chain into this callframe so that whatever we call can + // get to its global data. + jit.emitGetCallerFrameFromCallFrameHeaderPtr(ARM64Registers::x3); + jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3); + jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain); + + jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved + jit.emitPutReturnPCToCallFrameHeader(ARM64Registers::lr); + // Host function signature: f(ExecState*); jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0); jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1); jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2); + jit.move(ARM64Registers::x3, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction)); + + jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3); + #elif CPU(ARM) || CPU(SH4) || CPU(MIPS) + // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data. + jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT2); + jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2); + jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain); + + jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved + jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT3); + #if CPU(MIPS) // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments. 
jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister); @@ -278,6 +362,7 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0); jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1); + jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2); jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction)); @@ -285,10 +370,12 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k // Restore stack space jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister); #endif + + jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3); #else #error "JIT not supported on this platform." UNUSED_PARAM(executableOffsetToFunction); - abortWithReason(TGNotSupported); + breakpoint(); #endif // Check for an exception @@ -298,42 +385,40 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k #else JSInterfaceJIT::Jump exceptionHandler = jit.branch32( JSInterfaceJIT::NotEqual, - JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()), - JSInterfaceJIT::TrustedImm32(0)); + JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), + JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag)); #endif - jit.emitFunctionEpilogue(); // Return. jit.ret(); // Handle an exception exceptionHandler.link(&jit); + // Grab the return address. + jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1); + + jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2); + jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2); + jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame); #if CPU(X86) && USE(JSVALUE32_64) jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister); - jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0); - jit.push(JSInterfaceJIT::regT0); + jit.push(JSInterfaceJIT::callFrameRegister); #else -#if OS(WINDOWS) - // Allocate space on stack for the 4 parameter registers. - jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); -#endif - jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0); + jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0); #endif jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3); jit.call(JSInterfaceJIT::regT3); #if CPU(X86) && USE(JSVALUE32_64) jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister); -#elif OS(WINDOWS) - jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); #endif jit.jumpToExceptionHandler(); - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); - return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? 
"Tail " : "", toCString(kind).data())); + LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); + return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data())); } MacroAssemblerCodeRef nativeCallGenerator(VM* vm) @@ -341,60 +426,43 @@ MacroAssemblerCodeRef nativeCallGenerator(VM* vm) return nativeForGenerator(vm, CodeForCall); } -MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm) -{ - return nativeForGenerator(vm, CodeForCall, EnterViaJump); -} - MacroAssemblerCodeRef nativeConstructGenerator(VM* vm) { return nativeForGenerator(vm, CodeForConstruct); } -MacroAssemblerCodeRef arityFixupGenerator(VM* vm) +MacroAssemblerCodeRef arityFixup(VM* vm) { JSInterfaceJIT jit(vm); - // We enter with fixup count, in aligned stack units, in regT0 and the return thunk in - // regT5 on 32-bit and regT7 on 64-bit. + // We enter with fixup count in regT0 #if USE(JSVALUE64) # if CPU(X86_64) jit.pop(JSInterfaceJIT::regT4); # endif - jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0); jit.neg64(JSInterfaceJIT::regT0); - jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6); - jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2); + jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3); + jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * 8), JSInterfaceJIT::regT2); jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2); // Move current frame down regT0 number of slots JSInterfaceJIT::Label copyLoop(jit.label()); - jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1); - jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); - jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6); + jit.load64(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1); + jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); + jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3); jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit); - // Fill in regT0 - 1 missing arg slots with undefined + // Fill in regT0 missing arg slots with undefined jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2); jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1); - jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2); JSInterfaceJIT::Label fillUndefinedLoop(jit.label()); - jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); - jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6); + jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); + jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3); jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit); - - // Adjust call frame register and stack pointer to account for missing args - jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1); - jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1); - jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister); - 
jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister); - // Save the original return PC. - jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1); - jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); - - // Install the new return PC. - jit.storePtr(GPRInfo::regT7, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset())); + // Adjust call frame register to account for missing args + jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT0); + jit.addPtr(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); # if CPU(X86_64) jit.push(JSInterfaceJIT::regT4); @@ -404,10 +472,9 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm) # if CPU(X86) jit.pop(JSInterfaceJIT::regT4); # endif - jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0); jit.neg32(JSInterfaceJIT::regT0); jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3); - jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2); + jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * 8), JSInterfaceJIT::regT2); jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2); // Move current frame down regT0 number of slots @@ -419,9 +486,8 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm) jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3); jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit); - // Fill in regT0 - 1 missing arg slots with undefined + // Fill in regT0 missing arg slots with undefined jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2); - jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2); JSInterfaceJIT::Label fillUndefinedLoop(jit.label()); jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1); jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); @@ -431,106 +497,20 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm) jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3); jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit); - // Adjust call frame register and stack pointer to account for missing args - jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1); - jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1); - jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister); - jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister); + // Adjust call frame register to account for missing args + jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT0); + jit.addPtr(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); - // Save the original return PC. - jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1); - jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); - - // Install the new return PC. 
- jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset())); - # if CPU(X86) jit.push(JSInterfaceJIT::regT4); # endif jit.ret(); #endif - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); + LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); return FINALIZE_CODE(patchBuffer, ("fixup arity")); } -MacroAssemblerCodeRef baselineGetterReturnThunkGenerator(VM* vm) -{ - JSInterfaceJIT jit(vm); - -#if USE(JSVALUE64) - jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); -#else - jit.setupResults(GPRInfo::regT0, GPRInfo::regT1); -#endif - - unsigned numberOfParameters = 0; - numberOfParameters++; // The 'this' argument. - numberOfParameters++; // The true return PC. - - unsigned numberOfRegsForCall = - JSStack::CallFrameHeaderSize + numberOfParameters; - - unsigned numberOfBytesForCall = - numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC); - - unsigned alignedNumberOfBytesForCall = - WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall); - - // The real return address is stored above the arguments. We passed one argument, which is - // 'this'. So argument at index 1 is the return address. - jit.loadPtr( - AssemblyHelpers::Address( - AssemblyHelpers::stackPointerRegister, - (virtualRegisterForArgument(1).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)), - GPRInfo::regT2); - - jit.addPtr( - AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall), - AssemblyHelpers::stackPointerRegister); - - jit.jump(GPRInfo::regT2); - - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); - return FINALIZE_CODE(patchBuffer, ("baseline getter return thunk")); -} - -MacroAssemblerCodeRef baselineSetterReturnThunkGenerator(VM* vm) -{ - JSInterfaceJIT jit(vm); - - unsigned numberOfParameters = 0; - numberOfParameters++; // The 'this' argument. - numberOfParameters++; // The value to set. - numberOfParameters++; // The true return PC. - - unsigned numberOfRegsForCall = - JSStack::CallFrameHeaderSize + numberOfParameters; - - unsigned numberOfBytesForCall = - numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC); - - unsigned alignedNumberOfBytesForCall = - WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall); - - // The real return address is stored above the arguments. We passed two arguments, so - // the argument at index 2 is the return address. 
- jit.loadPtr( - AssemblyHelpers::Address( - AssemblyHelpers::stackPointerRegister, - (virtualRegisterForArgument(2).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)), - GPRInfo::regT2); - - jit.addPtr( - AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall), - AssemblyHelpers::stackPointerRegister); - - jit.jump(GPRInfo::regT2); - - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); - return FINALIZE_CODE(patchBuffer, ("baseline setter return thunk")); -} - static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm) { // load string @@ -574,7 +554,7 @@ MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm) SpecializedThunkJIT jit(vm, 1); stringCharLoad(jit, vm); jit.returnInt32(SpecializedThunkJIT::regT0); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "charCodeAt"); } MacroAssemblerCodeRef charAtThunkGenerator(VM* vm) @@ -583,7 +563,7 @@ MacroAssemblerCodeRef charAtThunkGenerator(VM* vm) stringCharLoad(jit, vm); charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1); jit.returnJSCell(SpecializedThunkJIT::regT0); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "charAt"); } MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm) @@ -593,28 +573,7 @@ MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm) jit.loadInt32Argument(0, SpecializedThunkJIT::regT0); charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1); jit.returnJSCell(SpecializedThunkJIT::regT0); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode"); -} - -MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm) -{ - SpecializedThunkJIT jit(vm, 1); - MacroAssembler::Jump nonIntArgJump; - jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump); - - SpecializedThunkJIT::Label convertedArgumentReentry(&jit); - jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1); - jit.returnInt32(SpecializedThunkJIT::regT1); - - if (jit.supportsFloatingPointTruncate()) { - nonIntArgJump.link(&jit); - jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); - jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit); - jit.appendFailure(jit.jump()); - } else - jit.appendFailure(nonIntArgJump); - - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "fromCharCode"); } MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm) @@ -626,15 +585,25 @@ MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm) jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0); jit.returnDouble(SpecializedThunkJIT::fpRegT0); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "sqrt"); } #define UnaryDoubleOpWrapper(function) function##Wrapper enum MathThunkCallingConvention { }; typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention); +extern "C" { -#if CPU(X86_64) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX)) +double jsRound(double) REFERENCED_FROM_ASM; +double jsRound(double d) +{ + double integer = ceil(d); + return integer - (integer - d > 
0.5); +} + +} + +#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX)) #define defineUnaryDoubleOpWrapper(function) \ asm( \ @@ -642,9 +611,7 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention); ".globl " SYMBOL_STRING(function##Thunk) "\n" \ HIDE_SYMBOL(function##Thunk) "\n" \ SYMBOL_STRING(function##Thunk) ":" "\n" \ - "pushq %rax\n" \ "call " GLOBAL_REFERENCE(function) "\n" \ - "popq %rcx\n" \ "ret\n" \ );\ extern "C" { \ @@ -652,7 +619,7 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention); } \ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; -#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && OS(LINUX) && defined(__PIC__) +#elif CPU(X86) && COMPILER(GCC) && OS(LINUX) && defined(__PIC__) #define defineUnaryDoubleOpWrapper(function) \ asm( \ ".text\n" \ @@ -676,19 +643,19 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention); } \ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; -#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX)) +#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX)) #define defineUnaryDoubleOpWrapper(function) \ asm( \ ".text\n" \ ".globl " SYMBOL_STRING(function##Thunk) "\n" \ HIDE_SYMBOL(function##Thunk) "\n" \ SYMBOL_STRING(function##Thunk) ":" "\n" \ - "subl $20, %esp\n" \ + "subl $8, %esp\n" \ "movsd %xmm0, (%esp) \n" \ "call " GLOBAL_REFERENCE(function) "\n" \ "fstpl (%esp) \n" \ "movsd (%esp), %xmm0 \n" \ - "addl $20, %esp\n" \ + "addl $8, %esp\n" \ "ret\n" \ );\ extern "C" { \ @@ -696,7 +663,7 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention); } \ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; -#elif CPU(ARM_THUMB2) && COMPILER(GCC_OR_CLANG) && PLATFORM(IOS) +#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS) #define defineUnaryDoubleOpWrapper(function) \ asm( \ @@ -729,38 +696,12 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention); HIDE_SYMBOL(function##Thunk) "\n" \ SYMBOL_STRING(function##Thunk) ":" "\n" \ "b " GLOBAL_REFERENCE(function) "\n" \ - ".previous" \ ); \ extern "C" { \ MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \ } \ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; -#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS) - -// MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions. 
-static double (_cdecl *floorFunction)(double) = floor; -static double (_cdecl *ceilFunction)(double) = ceil; -static double (_cdecl *expFunction)(double) = exp; -static double (_cdecl *logFunction)(double) = log; -static double (_cdecl *jsRoundFunction)(double) = jsRound; - -#define defineUnaryDoubleOpWrapper(function) \ - extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \ - { \ - __asm \ - { \ - __asm sub esp, 20 \ - __asm movsd mmword ptr [esp], xmm0 \ - __asm call function##Function \ - __asm fstp qword ptr [esp] \ - __asm movsd xmm0, mmword ptr [esp] \ - __asm add esp, 20 \ - __asm ret \ - } \ - } \ - static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; - #else #define defineUnaryDoubleOpWrapper(function) \ @@ -799,7 +740,7 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm) SpecializedThunkJIT::Jump intResult; SpecializedThunkJIT::JumpList doubleResult; if (jit.supportsFloatingPointTruncate()) { - jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1); + jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1); doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1)); SpecializedThunkJIT::JumpList slowPath; // Handle the negative doubles in the slow path for now. @@ -816,7 +757,7 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm) doubleResult.link(&jit); jit.returnDouble(SpecializedThunkJIT::fpRegT0); #endif // CPU(ARM64) - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "floor"); } MacroAssemblerCodeRef ceilThunkGenerator(VM* vm) @@ -839,7 +780,7 @@ MacroAssemblerCodeRef ceilThunkGenerator(VM* vm) jit.returnInt32(SpecializedThunkJIT::regT0); doubleResult.link(&jit); jit.returnDouble(SpecializedThunkJIT::fpRegT0); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "ceil"); } MacroAssemblerCodeRef roundThunkGenerator(VM* vm) @@ -855,12 +796,12 @@ MacroAssemblerCodeRef roundThunkGenerator(VM* vm) SpecializedThunkJIT::Jump intResult; SpecializedThunkJIT::JumpList doubleResult; if (jit.supportsFloatingPointTruncate()) { - jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1); + jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1); doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1)); SpecializedThunkJIT::JumpList slowPath; // Handle the negative doubles in the slow path for now. 
slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1)); - jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1); + jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1); jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1); slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0)); intResult = jit.jump(); @@ -873,7 +814,7 @@ MacroAssemblerCodeRef roundThunkGenerator(VM* vm) jit.returnInt32(SpecializedThunkJIT::regT0); doubleResult.link(&jit); jit.returnDouble(SpecializedThunkJIT::fpRegT0); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "round"); } MacroAssemblerCodeRef expThunkGenerator(VM* vm) @@ -886,7 +827,7 @@ MacroAssemblerCodeRef expThunkGenerator(VM* vm) jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp)); jit.returnDouble(SpecializedThunkJIT::fpRegT0); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "exp"); } MacroAssemblerCodeRef logThunkGenerator(VM* vm) @@ -899,7 +840,7 @@ MacroAssemblerCodeRef logThunkGenerator(VM* vm) jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log)); jit.returnDouble(SpecializedThunkJIT::fpRegT0); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "log"); } MacroAssemblerCodeRef absThunkGenerator(VM* vm) @@ -919,7 +860,7 @@ MacroAssemblerCodeRef absThunkGenerator(VM* vm) jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1); jit.returnDouble(SpecializedThunkJIT::fpRegT1); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "abs"); } MacroAssemblerCodeRef powThunkGenerator(VM* vm) @@ -928,7 +869,7 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm) if (!jit.supportsFloatingPoint()) return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm)); - jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1); + jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1); jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); MacroAssembler::Jump nonIntExponent; jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent); @@ -956,7 +897,7 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm) if (jit.supportsFloatingPointSqrt()) { nonIntExponent.link(&jit); - jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3); + jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3); jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0); jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1)); jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3)); @@ -971,7 +912,7 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm) } else jit.appendFailure(nonIntExponent); - return 
jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "pow"); } MacroAssemblerCodeRef imulThunkGenerator(VM* vm) @@ -990,7 +931,8 @@ MacroAssemblerCodeRef imulThunkGenerator(VM* vm) nonIntArg0Jump.link(&jit); jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit); - jit.appendFailure(jit.jump()); + jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0); + jit.jump(doneLoadingArg0); } else jit.appendFailure(nonIntArg0Jump); @@ -998,13 +940,117 @@ MacroAssemblerCodeRef imulThunkGenerator(VM* vm) nonIntArg1Jump.link(&jit); jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1); jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit); - jit.appendFailure(jit.jump()); + jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1); + jit.jump(doneLoadingArg1); } else jit.appendFailure(nonIntArg1Jump); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "imul"); } +static MacroAssemblerCodeRef arrayIteratorNextThunkGenerator(VM* vm, ArrayIterationKind kind) +{ + typedef SpecializedThunkJIT::TrustedImm32 TrustedImm32; + typedef SpecializedThunkJIT::TrustedImmPtr TrustedImmPtr; + typedef SpecializedThunkJIT::Address Address; + typedef SpecializedThunkJIT::BaseIndex BaseIndex; + typedef SpecializedThunkJIT::Jump Jump; + + SpecializedThunkJIT jit(vm); + // Make sure we're being called on an array iterator, and load m_iteratedObject, and m_nextIndex into regT0 and regT1 respectively + jit.loadArgumentWithSpecificClass(JSArrayIterator::info(), SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT4, SpecializedThunkJIT::regT1); + + // Early exit if we don't have a thunk for this form of iteration + jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIterationKind()), TrustedImm32(ArrayIterateKeyValue))); + + jit.loadPtr(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIteratedObject()), SpecializedThunkJIT::regT0); + + jit.load32(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()), SpecializedThunkJIT::regT1); + + // Pull out the butterfly from iteratedObject + jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSCell::structureOffset()), SpecializedThunkJIT::regT2); + + jit.load8(Address(SpecializedThunkJIT::regT2, Structure::indexingTypeOffset()), SpecializedThunkJIT::regT3); + jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2); + Jump nullButterfly = jit.branchTestPtr(SpecializedThunkJIT::Zero, SpecializedThunkJIT::regT2); + + Jump notDone = jit.branch32(SpecializedThunkJIT::Below, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfPublicLength())); + + nullButterfly.link(&jit); + + // Return the termination signal to indicate that we've finished + jit.move(TrustedImmPtr(vm->iterationTerminator.get()), SpecializedThunkJIT::regT0); + jit.returnJSCell(SpecializedThunkJIT::regT0); + + notDone.link(&jit); + + if (kind == ArrayIterateKey) { + jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, 
JSArrayIterator::offsetOfNextIndex())); + jit.returnInt32(SpecializedThunkJIT::regT1); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "array-iterator-next-key"); + + } + ASSERT(kind == ArrayIterateValue); + + // Okay, now we're returning a value so make sure we're inside the vector size + jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfVectorLength()))); + + // So now we perform inline loads for int32, value/undecided, and double storage + Jump undecidedStorage = jit.branch32(SpecializedThunkJIT::Equal, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithUndecided)); + Jump notContiguousStorage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithContiguous)); + + undecidedStorage.link(&jit); + + jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2); + +#if USE(JSVALUE64) + jit.load64(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::regT0); + Jump notHole = jit.branchTest64(SpecializedThunkJIT::NonZero, SpecializedThunkJIT::regT0); + jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT0); + notHole.link(&jit); + jit.addPtr(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex())); + jit.returnJSValue(SpecializedThunkJIT::regT0); +#else + jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfTag()), SpecializedThunkJIT::regT3); + Jump notHole = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(JSValue::EmptyValueTag)); + jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1); + jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT0); + jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex())); + jit.returnJSValue(SpecializedThunkJIT::regT0, JSInterfaceJIT::regT1); + notHole.link(&jit); + jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0); + jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex())); + jit.move(SpecializedThunkJIT::regT3, SpecializedThunkJIT::regT1); + jit.returnJSValue(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1); +#endif + notContiguousStorage.link(&jit); + + Jump notInt32Storage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithInt32)); + jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2); + jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0); + jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex())); + jit.returnInt32(SpecializedThunkJIT::regT0); + notInt32Storage.link(&jit); + + jit.appendFailure(jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithDouble))); + jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2); + jit.loadDouble(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), 
SpecializedThunkJIT::fpRegT0);
+    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+
+    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "array-iterator-next-value");
+}
+
+MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM* vm)
+{
+    return arrayIteratorNextThunkGenerator(vm, ArrayIterateKey);
+}
+
+MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM* vm)
+{
+    return arrayIteratorNextThunkGenerator(vm, ArrayIterateValue);
+}
+
 }
 
 #endif // ENABLE(JIT)
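
A few of the code paths this diff touches, restated below as small standalone C++ sketches for readability. None of this code is part of the patch, and helper names ending in `Sketch` are hypothetical. First, `emitPointerValidation`: under `!ASSERT_DISABLED` the generated code tests the code pointer for null, hits a breakpoint if it is null, and then loads one byte through it so that a wild pointer faults at a predictable spot rather than at the later call. A host-side analogue of the same idea:

```cpp
// Host-side analogue (illustration only; hypothetical helper, not JSC API) of what
// emitPointerValidation makes the generated code do under !ASSERT_DISABLED:
// trap on null, then touch one byte so a bogus pointer faults here, not at the call.
#include <cassert>

static void validateCodePointerSketch(const void* pointer)
{
    assert(pointer); // mirrors the branchTestPtr(NonZero) + breakpoint() pair
    volatile char probe = *static_cast<const volatile char*>(pointer); // mirrors the load8
    (void)probe;
}
```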
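
The native-call thunks adjust the C stack before calling out to a host function; the constants in the `+` lines follow from keeping the stack 16-byte aligned at the call site once the incoming return address has been pushed, plus 32 bytes of register home space on Windows x64. A worked restatement of that arithmetic (illustration only, assuming the usual 16-byte call-site alignment rule; these asserts are not JSC code):

```cpp
// Worked restatement of the stack-adjustment constants used before calling a host
// function, assuming the C ABI wants the stack 16-byte aligned at the call and the
// caller's return address has already been pushed.
#include <cstdint>

constexpr unsigned kAlign = 16;

// 32-bit x86: a 4-byte return address was pushed, so pad by 16 - sizeof(void*) == 12.
static_assert(kAlign - sizeof(uint32_t) == 12, "x86 pad");

// x86-64 (SysV): an 8-byte return address was pushed, so pad by 16 - sizeof(int64_t) == 8.
static_assert(kAlign - sizeof(int64_t) == 8, "x86-64 pad");

// x86-64 (Windows): 4 register home slots (32 bytes) plus the same 8-byte pad == 40.
static_assert(4 * sizeof(int64_t) + kAlign - sizeof(int64_t) == 40, "win64 pad");
```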
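
The `arityFixup` thunk copies the call-frame header and the arguments that were actually passed down by the number of missing arguments, fills the vacated slots with `undefined`, and then moves the call frame register by the same amount. A deliberately simplified model of that slot arithmetic (illustration only; the real thunk operates on the live stack in registers, and the undefined encoding below is a placeholder):

```cpp
// Simplified model of the JSVALUE64 arity-fixup loops: shift the frame down by
// `missing` slots, then fill the newly exposed argument slots with undefined.
#include <cstdint>
#include <vector>

static const uint64_t ValueUndefinedSketch = 0xa; // placeholder, not JSC's real encoding

// Preconditions: frameStart >= missing and slots.size() >= frameStart + frameSize.
static void arityFixupSketch(std::vector<uint64_t>& slots, size_t frameStart,
                             size_t frameSize, size_t missing)
{
    // Copy loop: move the header + passed arguments down by `missing` slots.
    for (size_t i = 0; i < frameSize; ++i)
        slots[frameStart - missing + i] = slots[frameStart + i];
    // Fill loop: the tail of the old frame region becomes the extra argument slots.
    for (size_t i = 0; i < missing; ++i)
        slots[frameStart - missing + frameSize + i] = ValueUndefinedSketch;
}
```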
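
Finally, the `jsRound` helper the diff adds implements Math.round's round-half-toward-positive-infinity semantics with a single `ceil`: `ceil(d)` overshoots by more than 0.5 exactly when the value should round down, in which case one is subtracted. A standalone check of the same formula (illustration only, not the patched function):

```cpp
// Standalone illustration of the jsRound formula added by this diff.
#include <cmath>
#include <cstdio>

static double jsRoundSketch(double d)
{
    double integer = std::ceil(d);
    // Subtract 1 when ceil overshot by more than 0.5, i.e. when d should round down.
    return integer - (integer - d > 0.5);
}

int main()
{
    // Expected: 1 0 -0 -1
    std::printf("%g %g %g %g\n",
        jsRoundSketch(0.5), jsRoundSketch(0.4), jsRoundSketch(-0.5), jsRoundSketch(-0.6));
    return 0;
}
```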