author      Lorry Tar Creator <lorry-tar-importer@lorry>   2015-05-20 09:56:07 +0000
committer   Lorry Tar Creator <lorry-tar-importer@lorry>   2015-05-20 09:56:07 +0000
commit      41386e9cb918eed93b3f13648cbef387e371e451 (patch)
tree        a97f9d7bd1d9d091833286085f72da9d83fd0606 /Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
parent      e15dd966d523731101f70ccf768bba12435a0208 (diff)
download    WebKitGtk-tarball-41386e9cb918eed93b3f13648cbef387e371e451.tar.gz
webkitgtk-2.4.9
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp')
-rw-r--r--   Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp   183
1 file changed, 39 insertions, 144 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
index 39b5bb5fc..9f84a2968 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,11 +28,10 @@

 #if ENABLE(DFG_JIT)

-#include "DFGJITCode.h"
+#include "Arguments.h"
 #include "DFGOperations.h"
-#include "JIT.h"
 #include "JSCJSValueInlines.h"
-#include "JSCInlines.h"
+#include "Operations.h"

 namespace JSC { namespace DFG {

@@ -53,55 +52,20 @@ void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
         AssemblyHelpers::GreaterThanOrEqual,
         AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
         AssemblyHelpers::TrustedImm32(0));
-
-    // We want to figure out if there's a possibility that we're in a loop. For the outermost
-    // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
-    // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
-    // problem is the inlined functions, which might also have loops, but whose baseline versions
-    // don't know where to look for the exit count. Figure out if those loops are severe enough
-    // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
-    // Otherwise, we should use the normal reoptimization trigger.
-
-    AssemblyHelpers::JumpList loopThreshold;
-
-    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
-        loopThreshold.append(
-            jit.branchTest8(
-                AssemblyHelpers::NonZero,
-                AssemblyHelpers::AbsoluteAddress(
-                    inlineCallFrame->executable->addressOfDidTryToEnterInLoop())));
-    }
-
-    jit.move(
-        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
-        GPRInfo::regT1);
-
-    if (!loopThreshold.empty()) {
-        AssemblyHelpers::Jump done = jit.jump();
-
-        loopThreshold.link(&jit);
-        jit.move(
-            AssemblyHelpers::TrustedImm32(
-                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
-            GPRInfo::regT1);
-        done.link(&jit);
-    }
-
-    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
+    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()));

     reoptimizeNow.link(&jit);

     // Reoptimize as soon as possible.
 #if !NUMBER_OF_ARGUMENT_REGISTERS
     jit.poke(GPRInfo::regT0);
-    jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1);
 #else
     jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
-    jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
+    ASSERT(GPRInfo::argumentGPR0 != GPRInfo::regT1);
 #endif
-    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
-    jit.call(GPRInfo::nonArgGPR0);
+    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::regT1);
+    jit.call(GPRInfo::regT1);
     AssemblyHelpers::Jump doneAdjusting = jit.jump();

     tooFewFails.link(&jit);

@@ -110,26 +74,13 @@ void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)

     int32_t activeThreshold = jit.baselineCodeBlock()->adjustedCounterValue(
         Options::thresholdForOptimizeAfterLongWarmUp());
-    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
+    int32_t targetValue = ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
         activeThreshold, jit.baselineCodeBlock());
-    int32_t clippedValue;
-    switch (jit.codeBlock()->jitType()) {
-    case JITCode::DFGJIT:
-        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
-        break;
-    case JITCode::FTLJIT:
-        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
-        break;
-    default:
-        RELEASE_ASSERT_NOT_REACHED();
-#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
-        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
-#endif
-        break;
-    }
+    int32_t clippedValue =
+        ExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
     jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
     jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
-    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
+    jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

     doneAdjusting.link(&jit);
 }
@@ -144,46 +95,10 @@ void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
         InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
         CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
         CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
-        void* jumpTarget = nullptr;
-        void* trueReturnPC = nullptr;
-
         unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;
+        CallLinkInfo& callLinkInfo = baselineCodeBlockForCaller->getCallLinkInfo(callBytecodeIndex);
-
-        switch (inlineCallFrame->kind) {
-        case InlineCallFrame::Call:
-        case InlineCallFrame::Construct:
-        case InlineCallFrame::CallVarargs:
-        case InlineCallFrame::ConstructVarargs: {
-            CallLinkInfo* callLinkInfo =
-                baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
-            RELEASE_ASSERT(callLinkInfo);
-
-            jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
-            break;
-        }
-
-        case InlineCallFrame::GetterCall:
-        case InlineCallFrame::SetterCall: {
-            StructureStubInfo* stubInfo =
-                baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
-            RELEASE_ASSERT(stubInfo);
-
-            switch (inlineCallFrame->kind) {
-            case InlineCallFrame::GetterCall:
-                jumpTarget = jit.vm()->getCTIStub(baselineGetterReturnThunkGenerator).code().executableAddress();
-                break;
-            case InlineCallFrame::SetterCall:
-                jumpTarget = jit.vm()->getCTIStub(baselineSetterReturnThunkGenerator).code().executableAddress();
-                break;
-            default:
-                RELEASE_ASSERT_NOT_REACHED();
-                break;
-            }
-
-            trueReturnPC = stubInfo->callReturnLocation.labelAtOffset(
-                stubInfo->patch.deltaCallToDone).executableAddress();
-            break;
-        } }
+
+        void* jumpTarget = callLinkInfo.callReturnLocation.executableAddress();

         GPRReg callerFrameGPR;
         if (inlineCallFrame->caller.inlineCallFrame) {
@@ -192,28 +107,47 @@ void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
         } else
             callerFrameGPR = GPRInfo::callFrameRegister;

-        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
-        if (trueReturnPC)
-            jit.storePtr(AssemblyHelpers::TrustedImmPtr(trueReturnPC), AssemblyHelpers::addressFor(inlineCallFrame->stackOffset + virtualRegisterForArgument(inlineCallFrame->arguments.size()).offset()));
-
-        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
-        if (!inlineCallFrame->isVarargs())
-            jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
 #if USE(JSVALUE64)
+        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
+        if (!inlineCallFrame->isClosureCall)
+            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
         jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
+        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
         uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
         jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
+        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
         if (!inlineCallFrame->isClosureCall)
             jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
+
+        // Leave the captured arguments in regT3.
+        if (baselineCodeBlock->usesArguments())
+            jit.loadPtr(AssemblyHelpers::addressFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
 #else // USE(JSVALUE64) // so this is the 32-bit part
+        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
+        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
+        if (!inlineCallFrame->isClosureCall)
+            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
         jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
+        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
         Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
         uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
         jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
+        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
         jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
         if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
+
+        // Leave the captured arguments in regT3.
+        if (baselineCodeBlock->usesArguments())
+            jit.loadPtr(AssemblyHelpers::payloadFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
 #endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
+
+        if (baselineCodeBlock->usesArguments()) {
+            AssemblyHelpers::Jump noArguments = jit.branchTestPtr(AssemblyHelpers::Zero, GPRInfo::regT3);
+            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
+            jit.storePtr(GPRInfo::regT0, AssemblyHelpers::Address(GPRInfo::regT3, Arguments::offsetOfRegisters()));
+            noArguments.link(&jit);
+        }
     }

 #if USE(JSVALUE64)
@@ -225,43 +159,8 @@
     jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
 }

-#if ENABLE(GGC)
-static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
-{
-    AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(owner);
-
-    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
-#if CPU(X86)
-    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
-#endif
-
-    jit.setupArgumentsWithExecState(owner);
-    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
-    jit.call(scratch);
-
-#if CPU(X86)
-    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
-#endif
-
-    ownerIsRememberedOrInEden.link(&jit);
-}
-#endif // ENABLE(GGC)
-
 void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
 {
-#if ENABLE(GGC)
-    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()->ownerExecutable()), GPRInfo::nonArgGPR0);
-    osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
-    InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
-    if (inlineCallFrames) {
-        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
-            ScriptExecutable* ownerExecutable = inlineCallFrame->executable.get();
-            jit.move(AssemblyHelpers::TrustedImmPtr(ownerExecutable), GPRInfo::nonArgGPR0);
-            osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
-        }
-    }
-#endif
-
     if (exit.m_codeOrigin.inlineCallFrame)
         jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

@@ -274,11 +173,7 @@ void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
     ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

     void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);
-
-    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(baselineCodeBlock) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
-
-    jit.jitAssertTagsInPlace();

     jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
     jit.jump(GPRInfo::regT2);
 }
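
For readers skimming the hunks above: the block removed from handleExitCounts() is easier to follow as ordinary C++ than as emitted assembly. The sketch below is illustrative only and is not JSC source; InlineFrame and Thresholds are invented stand-ins for the real InlineCallFrame and CodeBlock accessors named in the diff.

#include <cstdint>

// Stand-in for InlineCallFrame: whether this inlined function's executable
// ever tried to OSR-enter inside a loop, plus the caller chain.
struct InlineFrame {
    bool didTryToEnterInLoop;
    const InlineFrame* caller;
};

// Stand-in for the two CodeBlock thresholds named in the diff.
struct Thresholds {
    int32_t normal;   // exitCountThresholdForReoptimization()
    int32_t fromLoop; // exitCountThresholdForReoptimizationFromLoop()
};

// Returns true when the OSR exit count is still at or below the relevant
// threshold, i.e. the exit path only adjusts counters instead of triggering
// reoptimization. The newer (removed) code walks the inline stack and uses
// the loop threshold if any frame ever tried to OSR-enter in a loop; the
// webkitgtk-2.4.9 side always compares against the normal threshold.
static bool tooFewFailsToReoptimize(int32_t osrExitCount, const Thresholds& t, const InlineFrame* innermost)
{
    bool sawLoopEntry = false;
    for (const InlineFrame* frame = innermost; frame; frame = frame->caller) {
        if (frame->didTryToEnterInLoop) {
            sawLoopEntry = true;
            break;
        }
    }
    return osrExitCount <= (sawLoopEntry ? t.fromLoop : t.normal);
}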
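
Likewise, the switch removed from reifyInlinedCallFrames() chooses where a reconstructed inline frame should return to, depending on the kind of frame. The restatement below is a hypothetical simplification, not JSC source; FrameKind and ReturnTargets are invented, and the four address parameters stand in for what the real code reads from the caller's baseline CallLinkInfo, the getter/setter return thunks, and the StructureStubInfo.

enum class FrameKind { Call, Construct, CallVarargs, ConstructVarargs, GetterCall, SetterCall };

struct ReturnTargets {
    void* jumpTarget;   // return address stored into the reified frame
    void* trueReturnPC; // extra slot used only by accessor frames
};

static ReturnTargets selectReturnTargets(
    FrameKind kind,
    void* callReturnLocation, // caller's CallLinkInfo::callReturnLocation()
    void* getterReturnThunk,  // baselineGetterReturnThunkGenerator stub
    void* setterReturnThunk,  // baselineSetterReturnThunkGenerator stub
    void* stubDoneLocation)   // stubInfo callReturnLocation + deltaCallToDone
{
    switch (kind) {
    case FrameKind::Call:
    case FrameKind::Construct:
    case FrameKind::CallVarargs:
    case FrameKind::ConstructVarargs:
        // Ordinary calls return straight to the caller's call site.
        return { callReturnLocation, nullptr };
    case FrameKind::GetterCall:
        // Accessor frames return through a thunk; the real continuation point
        // is stashed separately so the thunk knows where to resume.
        return { getterReturnThunk, stubDoneLocation };
    case FrameKind::SetterCall:
        return { setterReturnThunk, stubDoneLocation };
    }
    return { nullptr, nullptr }; // unreachable
}

The webkitgtk-2.4.9 side of the diff has no such switch: every inlined frame is a plain call, so the return address always comes from the caller's CallLinkInfo.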