author     Lorry Tar Creator <lorry-tar-importer@lorry>   2016-04-10 09:28:39 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>   2016-04-10 09:28:39 +0000
commit     32761a6cee1d0dee366b885b7b9c777e67885688 (patch)
tree       d6bec92bebfb216f4126356e55518842c2f476a1 /Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
parent     a4e969f4965059196ca948db781e52f7cfebf19e (diff)
download   WebKitGtk-tarball-32761a6cee1d0dee366b885b7b9c777e67885688.tar.gz

webkitgtk-2.4.11
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGJITCompiler.cpp')
-rw-r--r--   Source/JavaScriptCore/dfg/DFGJITCompiler.cpp   381
1 file changed, 82 insertions, 299 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index 758f5cdd4..2934d2ba9 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -40,10 +40,8 @@
 #include "DFGSpeculativeJIT.h"
 #include "DFGThunks.h"
 #include "JSCJSValueInlines.h"
-#include "LinkBuffer.h"
-#include "MaxFrameExtentForSlowPathCall.h"
-#include "JSCInlines.h"
 #include "VM.h"
+#include "LinkBuffer.h"
 
 namespace JSC { namespace DFG {
 
@@ -52,10 +50,9 @@ JITCompiler::JITCompiler(Graph& dfg)
     , m_graph(dfg)
     , m_jitCode(adoptRef(new JITCode()))
     , m_blockHeads(dfg.numBlocks())
-    , m_pcToCodeOriginMapBuilder(dfg.m_vm)
 {
-    if (shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
-        m_disassembler = std::make_unique<Disassembler>(dfg);
+    if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
+        m_disassembler = adoptPtr(new Disassembler(dfg));
 }
 
 JITCompiler::~JITCompiler()
@@ -86,7 +83,6 @@ void JITCompiler::linkOSRExits()
             failureJumps.link(this);
         else
             info.m_replacementDestination = label();
 
-        jitAssertHasValidCallFrame();
         store32(TrustedImm32(i), &vm()->osrExitIndex);
         exit.setPatchableCodeOffset(patchableJump());
@@ -96,27 +92,15 @@ void JITCompiler::linkOSRExits()
 void JITCompiler::compileEntry()
 {
     // This code currently matches the old JIT. In the function header we need to
-    // save return address and call frame via the prologue and perform a fast stack check.
+    // pop the return address (since we do not allow any recursion on the machine
+    // stack), and perform a fast stack check.
     // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
     // We'll need to convert the remaining cti_ style calls (specifically the stack
     // check) which will be dependent on stack layout. (We'd need to account for this in
     // both normal return code and when jumping to an exception handler).
-    emitFunctionPrologue();
-    emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
-}
-
-void JITCompiler::compileSetupRegistersForEntry()
-{
-    emitSaveCalleeSaves();
-    emitMaterializeTagCheckRegisters();
-}
-
-void JITCompiler::compileEntryExecutionFlag()
-{
-#if ENABLE(FTL_JIT)
-    if (m_graph.m_plan.canTierUpAndOSREnter)
-        store8(TrustedImm32(0), &m_jitCode->neverExecutedEntry);
-#endif // ENABLE(FTL_JIT)
+    preserveReturnAddressAfterCall(GPRInfo::regT2);
+    emitPutReturnPCToCallFrameHeader(GPRInfo::regT2);
+    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
 }
 
 void JITCompiler::compileBody()
@@ -130,44 +114,32 @@ void JITCompiler::compileBody()
 void JITCompiler::compileExceptionHandlers()
 {
-    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
-        m_exceptionChecksWithCallFrameRollback.link(this);
-
-        copyCalleeSavesToVMCalleeSavesBuffer();
-
-        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
-        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
-        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
-        addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+    if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
+        return;
 
-#if CPU(X86)
-        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
-        poke(GPRInfo::argumentGPR0);
-        poke(GPRInfo::argumentGPR1, 1);
-#endif
-        m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));
+    Jump doLookup;
 
-        jumpToExceptionHandler();
+    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
+        m_exceptionChecksWithCallFrameRollback.link(this);
+        emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR0);
+        doLookup = jump();
     }
 
-    if (!m_exceptionChecks.empty()) {
+    if (!m_exceptionChecks.empty())
         m_exceptionChecks.link(this);
 
-        copyCalleeSavesToVMCalleeSavesBuffer();
+    // lookupExceptionHandler is passed one argument, the exec (the CallFrame*).
+    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
 
-        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
-        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
-        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+    if (doLookup.isSet())
+        doLookup.link(this);
 
 #if CPU(X86)
-        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
-        poke(GPRInfo::argumentGPR0);
-        poke(GPRInfo::argumentGPR1, 1);
+    // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+    poke(GPRInfo::argumentGPR0);
 #endif
-        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
-
-        jumpToExceptionHandler();
-    }
+
+    m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
+    jumpToExceptionHandler();
 }
 
 void JITCompiler::link(LinkBuffer& linkBuffer)
@@ -176,18 +148,15 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
     m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
     m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();
 
-    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
-        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;
+    if (!m_graph.m_inlineCallFrames->isEmpty())
+        m_jitCode->common.inlineCallFrames = m_graph.m_inlineCallFrames.release();
 
-#if USE(JSVALUE32_64)
-    m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
-#endif
-
-    m_graph.registerFrozenValues();
+    m_jitCode->common.machineCaptureStart = m_graph.m_machineCaptureStart;
+    m_jitCode->common.slowArguments = std::move(m_graph.m_slowArguments);
 
     BitVector usedJumpTables;
-    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
-        SwitchData& data = **iter;
+    for (unsigned i = m_graph.m_switchData.size(); i--;) {
+        SwitchData& data = m_graph.m_switchData[i];
         if (!data.didUseJumpTable)
             continue;
 
@@ -198,14 +167,14 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
         usedJumpTables.set(data.switchTableIndex);
         SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
-        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
         table.ctiOffsets.grow(table.branchOffsets.size());
         for (unsigned j = table.ctiOffsets.size(); j--;)
             table.ctiOffsets[j] = table.ctiDefault;
         for (unsigned j = data.cases.size(); j--;) {
             SwitchCase& myCase = data.cases[j];
-            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
-                linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
+            table.ctiOffsets[myCase.value.switchLookupValue() - table.min] =
+                linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
         }
     }
 
@@ -219,8 +188,8 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
     // NOTE: we cannot clear string switch tables because (1) we're running concurrently
     // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
-    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
-        SwitchData& data = **switchDataIter;
+    for (unsigned i = m_graph.m_switchData.size(); i--;) {
+        SwitchData& data = m_graph.m_switchData[i];
         if (!data.didUseJumpTable)
             continue;
 
@@ -228,7 +197,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
            continue;
 
         StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
-        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
         StringJumpTable::StringOffsetTable::iterator iter;
         StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
         for (iter = table.offsetTable.begin(); iter != end; ++iter)
@@ -237,7 +206,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
             SwitchCase& myCase = data.cases[j];
             iter = table.offsetTable.find(myCase.value.stringImpl());
             RELEASE_ASSERT(iter != end);
-            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
+            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
         }
     }
 
@@ -259,13 +228,17 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
            info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
     }
 
+    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
     for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
-        JSCallRecord& record = m_jsCalls[i];
-        CallLinkInfo& info = *record.m_info;
-        linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(linkCallThunkGenerator).code().executableAddress()));
-        info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall),
-            linkBuffer.locationOf(record.m_targetToCheck),
-            linkBuffer.locationOfNearCall(record.m_fastCall));
+        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
+        info.callType = m_jsCalls[i].m_callType;
+        info.isDFG = true;
+        info.codeOrigin = m_jsCalls[i].m_codeOrigin;
+        linkBuffer.link(m_jsCalls[i].m_slowCall, FunctionPtr((m_vm->getCTIStub(info.callType == CallLinkInfo::Construct ? linkConstructThunkGenerator : linkCallThunkGenerator)).code().executableAddress()));
+        info.callReturnLocation = linkBuffer.locationOfNearCall(m_jsCalls[i].m_slowCall);
+        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
+        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
+        info.calleeGPR = static_cast<unsigned>(m_jsCalls[i].m_callee);
     }
 
     MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
@@ -293,30 +266,9 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
         }
     } else
         ASSERT(!m_exitSiteLabels.size());
-    
+
     m_jitCode->common.compilation = m_graph.compilation();
-
-    // Link new DFG exception handlers and remove baseline JIT handlers.
-    m_codeBlock->clearExceptionHandlers();
-    for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) {
-        OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo;
-        if (info.m_replacementDestination.isSet()) {
-            // If this is *not* set, it means that we already jumped to the OSR exit in pure generated control flow.
-            // i.e., we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
-            // If this *is* set, it means we will be landing at this code location from genericUnwind from an
-            // exception thrown in a child call frame.
-            CodeLocationLabel catchLabel = linkBuffer.locationOf(info.m_replacementDestination);
-            HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
-            CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
-            newExceptionHandler.start = callSite.bits();
-            newExceptionHandler.end = callSite.bits() + 1;
-            newExceptionHandler.nativeCode = catchLabel;
-            m_codeBlock->appendExceptionHandler(newExceptionHandler);
-        }
-    }
-
-    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
-        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer));
 }
 
 void JITCompiler::compile()
@@ -325,35 +277,12 @@
     setStartOfCode();
     compileEntry();
-    m_speculative = std::make_unique<SpeculativeJIT>(*this);
-
-    // Plant a check that sufficient space is available in the JSStack.
-    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
-    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
-
-    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
-    checkStackPointerAlignment();
-    compileSetupRegistersForEntry();
-    compileEntryExecutionFlag();
+    m_speculative = adoptPtr(new SpeculativeJIT(*this));
     compileBody();
     setEndOfMainPath();
 
-    // === Footer code generation ===
-    //
-    // Generate the stack overflow handling; if the stack check in the entry head fails,
-    // we need to call out to a helper function to throw the StackOverflowError.
-    stackOverflow.link(this);
-
-    emitStoreCodeOrigin(CodeOrigin(0));
-
-    if (maxFrameExtentForSlowPathCall)
-        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
-
-    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
-
     // Generate slow path code.
-    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
-    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
+    m_speculative->runSlowPathGenerators();
 
     compileExceptionHandlers();
     linkOSRExits();
@@ -361,23 +290,26 @@
     // Create OSR entry trampolines if necessary.
     m_speculative->createOSREntries();
     setEndOfCode();
+}
 
-    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+void JITCompiler::link()
+{
+    OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
     if (linkBuffer->didFailToAllocate()) {
-        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+        m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
         return;
     }
     link(*linkBuffer);
     m_speculative->linkOSREntries(*linkBuffer);
-    
+
     m_jitCode->shrinkToFit();
     codeBlock()->shrinkToFit(CodeBlock::LateShrink);
 
     disassemble(*linkBuffer);
 
-    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
-        m_graph.m_plan, m_jitCode.release(), WTFMove(linkBuffer));
+    m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
+        m_graph.m_plan, m_jitCode.release(), linkBuffer.release()));
 }
 
 void JITCompiler::compileFunction()
@@ -393,36 +325,30 @@
     // so enter after this.
     Label fromArityCheck(this);
     // Plant a check that sufficient space is available in the JSStack.
-    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
-    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
-
-    // Move the stack pointer down to accommodate locals
-    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
-    checkStackPointerAlignment();
+    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit()).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+    Jump stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), GPRInfo::regT1);
+    // Return here after stack check.
+    Label fromStackCheck = label();
 
-    compileSetupRegistersForEntry();
-    compileEntryExecutionFlag();
 
     // === Function body code generation ===
-    m_speculative = std::make_unique<SpeculativeJIT>(*this);
+    m_speculative = adoptPtr(new SpeculativeJIT(*this));
     compileBody();
     setEndOfMainPath();
 
     // === Function footer code generation ===
     //
-    // Generate code to perform the stack overflow handling (if the stack check in
+    // Generate code to perform the slow stack check (if the fast one in
     // the function header fails), and generate the entry point with arity check.
     //
-    // Generate the stack overflow handling; if the stack check in the function head fails,
-    // we need to call out to a helper function to throw the StackOverflowError.
-    stackOverflow.link(this);
+    // Generate the stack check; if the fast check in the function head fails,
+    // we need to call out to a helper function to check whether more space is available.
+    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
+    stackCheck.link(this);
     emitStoreCodeOrigin(CodeOrigin(0));
-
-    if (maxFrameExtentForSlowPathCall)
-        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
-
-    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
+    m_speculative->callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock);
+    jump(fromStackCheck);
 
     // The fast entry point into a function does not check the correct number of arguments
     // have been passed to the call (we only use the fast entry point where we can statically
@@ -435,20 +361,14 @@
     load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
     branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
     emitStoreCodeOrigin(CodeOrigin(0));
-    if (maxFrameExtentForSlowPathCall)
-        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
     m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
-    if (maxFrameExtentForSlowPathCall)
-        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
-    branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
+    branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
     emitStoreCodeOrigin(CodeOrigin(0));
-    move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
     m_callArityFixup = call();
     jump(fromArityCheck);
 
     // Generate slow path code.
-    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
-    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
+    m_speculative->runSlowPathGenerators();
 
     compileExceptionHandlers();
     linkOSRExits();
@@ -456,11 +376,14 @@
     // Create OSR entry trampolines if necessary.
     m_speculative->createOSREntries();
     setEndOfCode();
+}
 
+void JITCompiler::linkFunction()
+{
     // === Link ===
-    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+    OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
     if (linkBuffer->didFailToAllocate()) {
-        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+        m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
         return;
     }
     link(*linkBuffer);
@@ -469,165 +392,25 @@
     m_speculative->linkOSREntries(*linkBuffer);
 
     m_jitCode->shrinkToFit();
     codeBlock()->shrinkToFit(CodeBlock::LateShrink);
 
-    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));
+    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixup)).code().executableAddress()));
 
     disassemble(*linkBuffer);
 
     MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);
 
-    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
-        m_graph.m_plan, m_jitCode.release(), WTFMove(linkBuffer), withArityCheck);
+    m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
+        m_graph.m_plan, m_jitCode.release(), linkBuffer.release(), withArityCheck));
 }
 
 void JITCompiler::disassemble(LinkBuffer& linkBuffer)
 {
-    if (shouldDumpDisassembly()) {
+    if (shouldShowDisassembly())
         m_disassembler->dump(linkBuffer);
-        linkBuffer.didAlreadyDisassemble();
-    }
 
     if (m_graph.m_plan.compilation)
         m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
 }
 
-#if USE(JSVALUE32_64)
-void* JITCompiler::addressOfDoubleConstant(Node* node)
-{
-    double value = node->asNumber();
-    int64_t valueBits = bitwise_cast<int64_t>(value);
-    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
-    if (it != m_graph.m_doubleConstantsMap.end())
-        return it->second;
-
-    if (!m_graph.m_doubleConstants)
-        m_graph.m_doubleConstants = std::make_unique<Bag<double>>();
-
-    double* addressInConstantPool = m_graph.m_doubleConstants->add();
-    *addressInConstantPool = value;
-    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
-    return addressInConstantPool;
-}
-#endif
-
-void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
-{
-    // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
-    if (!basicBlock.intersectionOfCFAHasVisited)
-        return;
-
-    OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));
-
-    entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;
-
-    // Fix the expected values: in our protocol, a dead variable will have an expected
-    // value of (None, []). But the old JIT may stash some values there. So we really
-    // need (Top, TOP).
-    for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
-        Node* node = basicBlock.variablesAtHead.argument(argument);
-        if (!node || !node->shouldGenerate())
-            entry->m_expectedValues.argument(argument).makeHeapTop();
-    }
-    for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
-        Node* node = basicBlock.variablesAtHead.local(local);
-        if (!node || !node->shouldGenerate())
-            entry->m_expectedValues.local(local).makeHeapTop();
-        else {
-            VariableAccessData* variable = node->variableAccessData();
-            entry->m_machineStackUsed.set(variable->machineLocal().toLocal());
-
-            switch (variable->flushFormat()) {
-            case FlushedDouble:
-                entry->m_localsForcedDouble.set(local);
-                break;
-            case FlushedInt52:
-                entry->m_localsForcedMachineInt.set(local);
-                break;
-            default:
-                break;
-            }
-
-            if (variable->local() != variable->machineLocal()) {
-                entry->m_reshufflings.append(
-                    OSREntryReshuffling(
-                        variable->local().offset(), variable->machineLocal().offset()));
-            }
-        }
-    }
-
-    entry->m_reshufflings.shrinkToFit();
-}
-
-void JITCompiler::appendExceptionHandlingOSRExit(ExitKind kind, unsigned eventStreamIndex, CodeOrigin opCatchOrigin, HandlerInfo* exceptionHandler, CallSiteIndex callSite, MacroAssembler::JumpList jumpsToFail)
-{
-    OSRExit exit(kind, JSValueRegs(), graph().methodOfGettingAValueProfileFor(nullptr), m_speculative.get(), eventStreamIndex);
-    exit.m_codeOrigin = opCatchOrigin;
-    exit.m_exceptionHandlerCallSiteIndex = callSite;
-    OSRExitCompilationInfo& exitInfo = appendExitInfo(jumpsToFail);
-    jitCode()->appendOSRExit(exit);
-    m_exceptionHandlerOSRExitCallSites.append(ExceptionHandlingOSRExitInfo { exitInfo, *exceptionHandler, callSite });
-}
-
-void JITCompiler::exceptionCheck()
-{
-    // It's important that we use origin.forExit here. Consider if we hoist string
-    // addition outside a loop, and that we exit at the point of that concatenation
-    // from an out of memory exception.
-    // If the original loop had a try/catch around string concatenation, if we "catch"
-    // that exception inside the loop, then the loop's induction variable will be undefined
-    // in the OSR exit value recovery. It's more defensible for the string concatenation,
-    // then, to not be caught by the for loop's try/catch.
-    // Here is the program I'm speaking about:
-    //
-    // >>>> let's presume "c = a + b" gets hoisted here.
-    // for (var i = 0; i < length; i++) {
-    //     try {
-    //         c = a + b
-    //     } catch(e) {
-    //         If we threw an out of memory error, and we caught the exception
-    //         right here, then "i" would almost certainly be undefined, which
-    //         would make no sense.
-    //         ...
-    //     }
-    // }
-    CodeOrigin opCatchOrigin;
-    HandlerInfo* exceptionHandler;
-    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_speculative->m_currentNode->origin.forExit, opCatchOrigin, exceptionHandler);
-    if (willCatchException) {
-        unsigned streamIndex = m_speculative->m_outOfLineStreamIndex != UINT_MAX ? m_speculative->m_outOfLineStreamIndex : m_speculative->m_stream->size();
-        MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck();
-        // We assume here that this is called after callOperation()/appendCall() is called.
-        appendExceptionHandlingOSRExit(ExceptionCheck, streamIndex, opCatchOrigin, exceptionHandler, m_jitCode->common.lastCallSite(), hadException);
-    } else
-        m_exceptionChecks.append(emitExceptionCheck());
-}
-
-CallSiteIndex JITCompiler::recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin& callSiteCodeOrigin, unsigned eventStreamIndex)
-{
-    CodeOrigin opCatchOrigin;
-    HandlerInfo* exceptionHandler;
-    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(callSiteCodeOrigin, opCatchOrigin, exceptionHandler);
-    CallSiteIndex callSite = addCallSite(callSiteCodeOrigin);
-    if (willCatchException)
-        appendExceptionHandlingOSRExit(GenericUnwind, eventStreamIndex, opCatchOrigin, exceptionHandler, callSite);
-    return callSite;
-}
-
-void JITCompiler::setEndOfMainPath()
-{
-    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), m_speculative->m_origin.semantic);
-    if (LIKELY(!m_disassembler))
-        return;
-    m_disassembler->setEndOfMainPath(labelIgnoringWatchpoints());
-}
-
-void JITCompiler::setEndOfCode()
-{
-    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
-    if (LIKELY(!m_disassembler))
-        return;
-    m_disassembler->setEndOfCode(labelIgnoringWatchpoints());
-}
-
 } } // namespace JSC::DFG
 
 #endif // ENABLE(DFG_JIT)