author     Lorry Tar Creator <lorry-tar-importer@lorry>  2016-04-10 09:28:39 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>  2016-04-10 09:28:39 +0000
commit     32761a6cee1d0dee366b885b7b9c777e67885688 (patch)
tree       d6bec92bebfb216f4126356e55518842c2f476a1 /Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
parent     a4e969f4965059196ca948db781e52f7cfebf19e (diff)
download   WebKitGtk-tarball-32761a6cee1d0dee366b885b7b9c777e67885688.tar.gz
webkitgtk-2.4.11
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp  247
1 file changed, 138 insertions(+), 109 deletions(-)
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
index 6999e5cfb..219a5e68a 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -31,7 +31,7 @@
 #include "DFGOperations.h"
 #include "DFGOSRExitCompilerCommon.h"
 #include "DFGSpeculativeJIT.h"
-#include "JSCInlines.h"
+#include "Operations.h"
 #include "VirtualRegister.h"
 
 #include <wtf/DataLog.h>
@@ -40,20 +40,16 @@ namespace JSC { namespace DFG {
 
 void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
 {
-    m_jit.jitAssertTagsInPlace();
-
-    // Pro-forma stuff.
+    // 1) Pro-forma stuff.
     if (Options::printEachOSRExit()) {
         SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
         debugInfo->codeBlock = m_jit.codeBlock();
-        debugInfo->kind = exit.m_kind;
-        debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
 
         m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
     }
 
-    // Perform speculation recovery. This only comes into play when an operation
-    // starts mutating state before verifying the speculation it has already made.
+    // 2) Perform speculation recovery. This only comes into play when an operation
+    //    starts mutating state before verifying the speculation it has already made.
 
     if (recovery) {
         switch (recovery->type()) {
@@ -71,7 +67,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
         }
     }
 
-    // Refine some array and/or value profile, if appropriate.
+    // 3) Refine some array and/or value profile, if appropriate.
 
     if (!!exit.m_jsValueSource) {
         if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
@@ -97,13 +93,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
                 scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
                 scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);
 
-                if (isARM64()) {
-                    m_jit.pushToSave(scratch1);
-                    m_jit.pushToSave(scratch2);
-                } else {
-                    m_jit.push(scratch1);
-                    m_jit.push(scratch2);
-                }
+#if CPU(ARM64)
+                m_jit.pushToSave(scratch1);
+                m_jit.pushToSave(scratch2);
+#else
+                m_jit.push(scratch1);
+                m_jit.push(scratch2);
+#endif
 
                 GPRReg value;
                 if (exit.m_jsValueSource.isAddress()) {
@@ -112,20 +108,20 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
                 } else
                     value = exit.m_jsValueSource.gpr();
 
-                m_jit.load32(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
-                m_jit.store32(scratch1, arrayProfile->addressOfLastSeenStructureID());
-                m_jit.load8(AssemblyHelpers::Address(value, JSCell::indexingTypeOffset()), scratch1);
+                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
+                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
+                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                 m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                 m_jit.lshift32(scratch1, scratch2);
                 m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
 
-                if (isARM64()) {
-                    m_jit.popToRestore(scratch2);
-                    m_jit.popToRestore(scratch1);
-                } else {
-                    m_jit.pop(scratch2);
-                    m_jit.pop(scratch1);
-                }
+#if CPU(ARM64)
+                m_jit.popToRestore(scratch2);
+                m_jit.popToRestore(scratch1);
+#else
+                m_jit.pop(scratch2);
+                m_jit.pop(scratch1);
+#endif
             }
         }
 
@@ -179,7 +175,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
    // variable" from "how was it represented", which will make it more difficult to add
    // features in the future and it will make it harder to reason about bugs.
 
-    // Save all state from GPRs into the scratch buffer.
+    // 4) Save all state from GPRs into the scratch buffer.
 
    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
@@ -202,17 +198,16 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
    }
 
    // And voila, all GPRs are free to reuse.
-
-    // Save all state from FPRs into the scratch buffer.
+
+    // 5) Save all state from FPRs into the scratch buffer.
 
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
 
        switch (recovery.technique()) {
-        case UnboxedDoubleInFPR:
        case InFPR:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
-            m_jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
+            m_jit.storeDouble(recovery.fpr(), GPRInfo::regT0);
            break;
 
        default:
@@ -222,9 +217,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
        }
    }
 
    // Now, all FPRs are also free.
 
-    // Save all state from the stack into the scratch buffer. For simplicity we
-    // do this even for state that's already in the right place on the stack.
-    // It makes things simpler later.
+    // 6) Save all state from the stack into the scratch buffer. For simplicity we
+    //    do this even for state that's already in the right place on the stack.
+    //    It makes things simpler later.
 
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
@@ -245,43 +240,21 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
            break;
        }
    }
-
-    // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
-    // could toast some stack that the DFG used. We need to do it before storing to stack offsets
-    // used by baseline.
-    m_jit.addPtr(
-        CCallHelpers::TrustedImm32(
-            -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
-        CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
-    // Restore the DFG callee saves and then save the ones the baseline JIT uses.
-    m_jit.emitRestoreCalleeSaves();
-    m_jit.emitSaveCalleeSavesFor(m_jit.baselineCodeBlock());
-
-    // The tag registers are needed to materialize recoveries below.
-    m_jit.emitMaterializeTagCheckRegisters();
-
-    if (exit.isExceptionHandler())
-        m_jit.copyCalleeSavesToVMCalleeSavesBuffer();
-
-    // Do all data format conversions and store the results into the stack.
+    // 7) Do all data format conversions and store the results into the stack.
+
+    bool haveArguments = false;
 
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
-        VirtualRegister reg = operands.virtualRegisterForIndex(index);
-
-        if (reg.isLocal() && reg.toLocal() < static_cast<int>(m_jit.baselineCodeBlock()->calleeSaveSpaceAsVirtualRegisters()))
-            continue;
-
-        int operand = reg.offset();
-
+        int operand = operands.operandForIndex(index);
+
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedCellInGPR:
        case DisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
-        case InFPR:
            m_jit.load64(scratch + index, GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;
@@ -310,11 +283,10 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;
 
-        case UnboxedDoubleInFPR:
+        case InFPR:
        case DoubleDisplacedInJSStack:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
-            m_jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
-            m_jit.purifyNaN(FPRInfo::fpRegT0);
+            m_jit.loadDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
            m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;
@@ -325,68 +297,125 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
                AssemblyHelpers::addressFor(operand));
            break;
 
-        case DirectArgumentsThatWereNotCreated:
-        case ClonedArgumentsThatWereNotCreated:
-            // Don't do this, yet.
+        case ArgumentsThatWereNotCreated:
+            haveArguments = true;
+            // We can't restore this yet but we can make sure that the stack appears
+            // sane.
+            m_jit.store64(
+                AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue())),
+                AssemblyHelpers::addressFor(operand));
            break;
 
        default:
-            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
-
-    // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
-    // recoveries don't recursively refer to each other. But, we don't try to assume that they only
-    // refer to certain ranges of locals. Hence why we need to do this here, once the stack is sensible.
-    // Note that we also roughly assume that the arguments might still be materialized outside of its
-    // inline call frame scope - but for now the DFG wouldn't do that.
-
-    emitRestoreArguments(operands);
 
-    // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
-    // that all new calls into this code will go to the new JIT, so the execute
-    // counter only affects call frames that performed OSR exit and call frames
-    // that were still executing the old JIT at the time of another call frame's
-    // OSR exit. We want to ensure that the following is true:
+    // 8) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+    //    that all new calls into this code will go to the new JIT, so the execute
+    //    counter only affects call frames that performed OSR exit and call frames
+    //    that were still executing the old JIT at the time of another call frame's
+    //    OSR exit. We want to ensure that the following is true:
    //
-    // (a) Code the performs an OSR exit gets a chance to reenter optimized
-    //     code eventually, since optimized code is faster. But we don't
-    //     want to do such reentery too aggressively (see (c) below).
+    //    (a) Code the performs an OSR exit gets a chance to reenter optimized
+    //        code eventually, since optimized code is faster. But we don't
+    //        want to do such reentery too aggressively (see (c) below).
    //
-    // (b) If there is code on the call stack that is still running the old
-    //     JIT's code and has never OSR'd, then it should get a chance to
-    //     perform OSR entry despite the fact that we've exited.
+    //    (b) If there is code on the call stack that is still running the old
+    //        JIT's code and has never OSR'd, then it should get a chance to
+    //        perform OSR entry despite the fact that we've exited.
    //
-    // (c) Code the performs an OSR exit should not immediately retry OSR
-    //     entry, since both forms of OSR are expensive. OSR entry is
-    //     particularly expensive.
+    //    (c) Code the performs an OSR exit should not immediately retry OSR
+    //        entry, since both forms of OSR are expensive. OSR entry is
+    //        particularly expensive.
    //
-    // (d) Frequent OSR failures, even those that do not result in the code
-    //     running in a hot loop, result in recompilation getting triggered.
+    //    (d) Frequent OSR failures, even those that do not result in the code
+    //        running in a hot loop, result in recompilation getting triggered.
    //
-    // To ensure (c), we'd like to set the execute counter to
-    // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
-    // (a) and (b), since then every OSR exit would delay the opportunity for
-    // every call frame to perform OSR entry. Essentially, if OSR exit happens
-    // frequently and the function has few loops, then the counter will never
-    // become non-negative and OSR entry will never be triggered. OSR entry
-    // will only happen if a loop gets hot in the old JIT, which does a pretty
-    // good job of ensuring (a) and (b). But that doesn't take care of (d),
-    // since each speculation failure would reset the execute counter.
-    // So we check here if the number of speculation failures is significantly
-    // larger than the number of successes (we want 90% success rate), and if
-    // there have been a large enough number of failures. If so, we set the
-    // counter to 0; otherwise we set the counter to
-    // counterValueForOptimizeAfterWarmUp().
+    //    To ensure (c), we'd like to set the execute counter to
+    //    counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
+    //    (a) and (b), since then every OSR exit would delay the opportunity for
+    //    every call frame to perform OSR entry. Essentially, if OSR exit happens
+    //    frequently and the function has few loops, then the counter will never
+    //    become non-negative and OSR entry will never be triggered. OSR entry
+    //    will only happen if a loop gets hot in the old JIT, which does a pretty
+    //    good job of ensuring (a) and (b). But that doesn't take care of (d),
+    //    since each speculation failure would reset the execute counter.
+    //    So we check here if the number of speculation failures is significantly
+    //    larger than the number of successes (we want 90% success rate), and if
+    //    there have been a large enough number of failures. If so, we set the
+    //    counter to 0; otherwise we set the counter to
+    //    counterValueForOptimizeAfterWarmUp().
 
    handleExitCounts(m_jit, exit);
 
-    // Reify inlined call frames.
+    // 9) Reify inlined call frames.
 
    reifyInlinedCallFrames(m_jit, exit);
+
+    // 10) Create arguments if necessary and place them into the appropriate aliased
+    //     registers.
+
+    if (haveArguments) {
+        HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
+            NullableHashTraits<InlineCallFrame*>> didCreateArgumentsObject;
+
+        for (size_t index = 0; index < operands.size(); ++index) {
+            const ValueRecovery& recovery = operands[index];
+            if (recovery.technique() != ArgumentsThatWereNotCreated)
+                continue;
+            int operand = operands.operandForIndex(index);
+            // Find the right inline call frame.
+            InlineCallFrame* inlineCallFrame = 0;
+            for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
+                 current;
+                 current = current->caller.inlineCallFrame) {
+                if (current->stackOffset >= operand) {
+                    inlineCallFrame = current;
+                    break;
+                }
+            }
+
+            if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
+                continue;
+            VirtualRegister argumentsRegister = m_jit.baselineArgumentsRegisterFor(inlineCallFrame);
+            if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
+                // We know this call frame optimized out an arguments object that
+                // the baseline JIT would have created. Do that creation now.
+                if (inlineCallFrame) {
+                    m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
+                    m_jit.setupArguments(GPRInfo::regT0);
+                } else
+                    m_jit.setupArgumentsExecState();
+                m_jit.move(
+                    AssemblyHelpers::TrustedImmPtr(
+                        bitwise_cast<void*>(operationCreateArguments)),
+                    GPRInfo::nonArgGPR0);
+                m_jit.call(GPRInfo::nonArgGPR0);
+                m_jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
+                m_jit.store64(
+                    GPRInfo::returnValueGPR,
+                    AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
+                m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
+            }
+
+            m_jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
+            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
+        }
+    }
+
+#if ENABLE(GGC)
+    // 11) Write barrier the owner executable because we're jumping into a different block.
+    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
+        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
+        m_jit.move(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock->ownerExecutable()), GPRInfo::nonArgGPR0);
+        SpeculativeJIT::osrWriteBarrier(m_jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1, GPRInfo::nonArgGPR2);
+        if (!codeOrigin.inlineCallFrame)
+            break;
+    }
+#endif
 
-    // And finish.
+    // 12) And finish.
    adjustAndJumpToTarget(m_jit, exit);
 }
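Note on hunk 3 (the array-profile refinement): the scratch-register sequence emitted there (loadPtr/load8/lshift32/or32) is just an inlined read-modify-write of the profile; the surrounding push/pop pairs exist only because the two scratch registers may hold live values at exit time. A minimal C++ sketch of the equivalent update, with illustrative stand-in types rather than JSC's real ArrayProfile declaration:

    #include <cstdint>

    // Stand-in for the two profile fields the emitted code touches; the real
    // declaration lives in ArrayProfile.h.
    struct ArrayProfileSketch {
        void* lastSeenStructure;  // this (older) scheme stores a full Structure*
        uint32_t arrayModes;      // bitmask with one bit per indexing type
    };

    // What the emitted machine code computes: remember the structure that
    // failed the speculation, and set the bit for its indexing type.
    inline void recordArrayMode(ArrayProfileSketch& profile, void* structure, uint8_t indexingType)
    {
        profile.lastSeenStructure = structure;
        profile.arrayModes |= 1u << indexingType; // TrustedImm32(1), lshift32, or32
    }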
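In step 7, a recovered double is reloaded from the scratch buffer, boxed with boxDouble(), and stored back to the baseline stack slot. Under the JSVALUE64 representation used on 64-bit platforms, boxing a double is a single integer addition. A sketch, assuming this era's DoubleEncodeOffset of 2^48 (worth checking against JSCJSValue.h for the exact revision):

    #include <cstdint>
    #include <cstring>

    static const uint64_t kDoubleEncodeOffset = 1ull << 48; // assumed era value

    // Boxing: shift the double's bit pattern up so it can never collide with
    // the pointer and integer ranges of the 64-bit JSValue encoding.
    inline uint64_t boxDoubleSketch(double value)
    {
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof bits); // type-pun without UB
        return bits + kDoubleEncodeOffset;
    }

    // Unboxing is the inverse subtraction.
    inline double unboxDoubleSketch(uint64_t boxed)
    {
        uint64_t bits = boxed - kDoubleEncodeOffset;
        double value;
        std::memcpy(&value, &bits, sizeof value);
        return value;
    }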
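The long comment in step 8 is the rationale for handleExitCounts(m_jit, exit). Distilled into a hypothetical helper (the names and thresholds are illustrative; the real counters and policy live on CodeBlock and in DFGOSRExitCompilerCommon.cpp):

    #include <cstdint>

    int32_t executeCounterAfterOSRExit(
        uint32_t exitCount,        // speculation failures so far
        uint32_t successCount,     // successful optimized executions
        uint32_t minimumFailures,  // the "large enough number of failures"
        int32_t counterValueForOptimizeAfterWarmUp)
    {
        // Below a ~90% success rate (failures exceed a ninth of successes),
        // and past the failure floor, request recompilation right away: (d).
        if (exitCount >= minimumFailures && exitCount * 9 > successCount)
            return 0;

        // Otherwise back off: force a fresh warm-up before the next OSR entry
        // attempt, which keeps reentry from being too aggressive (c) while
        // still allowing it eventually (a, b).
        return counterValueForOptimizeAfterWarmUp;
    }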