path: root/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
author    Konstantin Tokarev <annulen@yandex.ru>  2016-08-25 19:20:41 +0300
committer Konstantin Tokarev <annulen@yandex.ru>  2017-02-02 12:30:55 +0000
commit    6882a04fb36642862b11efe514251d32070c3d65 (patch)
tree      b7959826000b061fd5ccc7512035c7478742f7b0 /Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
parent    ab6df191029eeeb0b0f16f127d553265659f739e (diff)
download  qtwebkit-6882a04fb36642862b11efe514251d32070c3d65.tar.gz
Imported QtWebKit TP3 (git b57bc6801f1876c3220d5a4bfea33d620d477443)
Change-Id: I3b1d8a2808782c9f34d50240000e20cb38d3680f
Reviewed-by: Konstantin Tokarev <annulen@yandex.ru>
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGJITCompiler.cpp')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGJITCompiler.cpp  |  615
1 file changed, 436 insertions(+), 179 deletions(-)
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index 2ba9ea709..758f5cdd4 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,6 +29,10 @@
#if ENABLE(DFG_JIT)
#include "CodeBlock.h"
+#include "DFGFailedFinalizer.h"
+#include "DFGInlineCacheWrapperInlines.h"
+#include "DFGJITCode.h"
+#include "DFGJITFinalizer.h"
#include "DFGOSRExitCompiler.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
@@ -36,45 +40,53 @@
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCJSValueInlines.h"
-#include "VM.h"
#include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "JSCInlines.h"
+#include "VM.h"
namespace JSC { namespace DFG {
JITCompiler::JITCompiler(Graph& dfg)
: CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
, m_graph(dfg)
- , m_currentCodeOriginIndex(0)
+ , m_jitCode(adoptRef(new JITCode()))
+ , m_blockHeads(dfg.numBlocks())
+ , m_pcToCodeOriginMapBuilder(dfg.m_vm)
+{
+ if (shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
+ m_disassembler = std::make_unique<Disassembler>(dfg);
+}
+
+JITCompiler::~JITCompiler()
{
- if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
- m_disassembler = adoptPtr(new Disassembler(dfg));
}
void JITCompiler::linkOSRExits()
{
- ASSERT(codeBlock()->numberOfOSRExits() == m_exitCompilationInfo.size());
- if (m_graph.m_compilation) {
- for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
- OSRExit& exit = codeBlock()->osrExit(i);
+ ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
+ if (m_graph.compilation()) {
+ for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
+ OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
Vector<Label> labels;
- if (exit.m_watchpointIndex == std::numeric_limits<unsigned>::max()) {
- OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
+ if (!info.m_failureJumps.empty()) {
for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
labels.append(info.m_failureJumps.jumps()[j].label());
} else
- labels.append(codeBlock()->watchpoint(exit.m_watchpointIndex).sourceLabel());
+ labels.append(info.m_replacementSource);
m_exitSiteLabels.append(labels);
}
}
- for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
- OSRExit& exit = codeBlock()->osrExit(i);
- JumpList& failureJumps = m_exitCompilationInfo[i].m_failureJumps;
- ASSERT(failureJumps.empty() == (exit.m_watchpointIndex != std::numeric_limits<unsigned>::max()));
- if (exit.m_watchpointIndex == std::numeric_limits<unsigned>::max())
+ for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
+ OSRExit& exit = m_jitCode->osrExit[i];
+ OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
+ JumpList& failureJumps = info.m_failureJumps;
+ if (!failureJumps.empty())
failureJumps.link(this);
else
- codeBlock()->watchpoint(exit.m_watchpointIndex).setDestination(label());
+ info.m_replacementDestination = label();
+
jitAssertHasValidCallFrame();
store32(TrustedImm32(i), &vm()->osrExitIndex);
exit.setPatchableCodeOffset(patchableJump());
@@ -84,199 +96,291 @@ void JITCompiler::linkOSRExits()
void JITCompiler::compileEntry()
{
// This code currently matches the old JIT. In the function header we need to
- // pop the return address (since we do not allow any recursion on the machine
- // stack), and perform a fast stack check.
+ // save return address and call frame via the prologue and perform a fast stack check.
// FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
// We'll need to convert the remaining cti_ style calls (specifically the stack
// check) which will be dependent on stack layout. (We'd need to account for this in
// both normal return code and when jumping to an exception handler).
- preserveReturnAddressAfterCall(GPRInfo::regT2);
- emitPutToCallFrameHeader(GPRInfo::regT2, JSStack::ReturnPC);
- emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+ emitFunctionPrologue();
+ emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
}
-void JITCompiler::compileBody(SpeculativeJIT& speculative)
+void JITCompiler::compileSetupRegistersForEntry()
+{
+ emitSaveCalleeSaves();
+ emitMaterializeTagCheckRegisters();
+}
+
+void JITCompiler::compileEntryExecutionFlag()
+{
+#if ENABLE(FTL_JIT)
+ if (m_graph.m_plan.canTierUpAndOSREnter)
+ store8(TrustedImm32(0), &m_jitCode->neverExecutedEntry);
+#endif // ENABLE(FTL_JIT)
+}
+
+void JITCompiler::compileBody()
{
// We generate the speculative code path, followed by OSR exit code to return
// to the old JIT code if speculations fail.
-#if DFG_ENABLE(JIT_BREAK_ON_EVERY_FUNCTION)
- // Handy debug tool!
- breakpoint();
-#endif
-
- bool compiledSpeculative = speculative.compile();
+ bool compiledSpeculative = m_speculative->compile();
ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}
void JITCompiler::compileExceptionHandlers()
{
- // Iterate over the m_calls vector, checking for jumps to link.
- bool didLinkExceptionCheck = false;
- for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
- Jump& exceptionCheck = m_exceptionChecks[i].m_exceptionCheck;
- if (exceptionCheck.isSet()) {
- exceptionCheck.link(this);
- didLinkExceptionCheck = true;
- }
+ if (!m_exceptionChecksWithCallFrameRollback.empty()) {
+ m_exceptionChecksWithCallFrameRollback.link(this);
+
+ copyCalleeSavesToVMCalleeSavesBuffer();
+
+ // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+ addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+
+#if CPU(X86)
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
+#endif
+ m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));
+
+ jumpToExceptionHandler();
}
- // If any exception checks were linked, generate code to lookup a handler.
- if (didLinkExceptionCheck) {
- // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
- // the index into the CodeBlock's callReturnIndexVector corresponding to the
- // call that threw the exception (this was set in nonPreservedNonReturnGPR, when
- // the exception check was planted).
- move(GPRInfo::nonPreservedNonReturnGPR, GPRInfo::argumentGPR1);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ if (!m_exceptionChecks.empty()) {
+ m_exceptionChecks.link(this);
+
+ copyCalleeSavesToVMCalleeSavesBuffer();
+
+ // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+
#if CPU(X86)
// FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
poke(GPRInfo::argumentGPR0);
poke(GPRInfo::argumentGPR1, 1);
#endif
m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
- // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
- // and the address of the handler in returnValueGPR2.
- jump(GPRInfo::returnValueGPR2);
+
+ jumpToExceptionHandler();
}
}
void JITCompiler::link(LinkBuffer& linkBuffer)
{
// Link the code, populate data in CodeBlock data structures.
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
+ m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
+ m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();
+
+ if (!m_graph.m_plan.inlineCallFrames->isEmpty())
+ m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;
+
+#if USE(JSVALUE32_64)
+ m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
#endif
+
+ m_graph.registerFrozenValues();
+
+ BitVector usedJumpTables;
+ for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
+ SwitchData& data = **iter;
+ if (!data.didUseJumpTable)
+ continue;
+
+ if (data.kind == SwitchString)
+ continue;
+
+ RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);
+
+ usedJumpTables.set(data.switchTableIndex);
+ SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
+ table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+ table.ctiOffsets.grow(table.branchOffsets.size());
+ for (unsigned j = table.ctiOffsets.size(); j--;)
+ table.ctiOffsets[j] = table.ctiDefault;
+ for (unsigned j = data.cases.size(); j--;) {
+ SwitchCase& myCase = data.cases[j];
+ table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
+ linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
+ }
+ }
+
+ for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
+ if (usedJumpTables.get(i))
+ continue;
+
+ m_codeBlock->switchJumpTable(i).clear();
+ }
+
+ // NOTE: we cannot clear string switch tables because (1) we're running concurrently
+ // and we cannot deref StringImpl's and (2) it would be weird to deref those
+ // StringImpl's since we refer to them.
+ for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
+ SwitchData& data = **switchDataIter;
+ if (!data.didUseJumpTable)
+ continue;
+
+ if (data.kind != SwitchString)
+ continue;
+
+ StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
+ table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+ StringJumpTable::StringOffsetTable::iterator iter;
+ StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
+ for (iter = table.offsetTable.begin(); iter != end; ++iter)
+ iter->value.ctiOffset = table.ctiDefault;
+ for (unsigned j = data.cases.size(); j--;) {
+ SwitchCase& myCase = data.cases[j];
+ iter = table.offsetTable.find(myCase.value.stringImpl());
+ RELEASE_ASSERT(iter != end);
+ iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
+ }
+ }
// Link all calls out from the JIT code to their respective functions.
for (unsigned i = 0; i < m_calls.size(); ++i)
linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);
- m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionChecks.size());
- for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
- unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
- CodeOrigin codeOrigin = m_exceptionChecks[i].m_codeOrigin;
- while (codeOrigin.inlineCallFrame)
- codeOrigin = codeOrigin.inlineCallFrame->caller;
- unsigned exceptionInfo = codeOrigin.bytecodeIndex;
- m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
- }
+ for (unsigned i = m_getByIds.size(); i--;)
+ m_getByIds[i].finalize(linkBuffer);
+ for (unsigned i = m_putByIds.size(); i--;)
+ m_putByIds[i].finalize(linkBuffer);
- Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow>& codeOrigins = m_codeBlock->codeOrigins();
- codeOrigins.resize(m_exceptionChecks.size());
-
- for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
- CallExceptionRecord& record = m_exceptionChecks[i];
- unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
- codeOrigins[i].codeOrigin = record.m_codeOrigin;
- codeOrigins[i].callReturnOffset = returnAddressOffset;
- }
-
- m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
- for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
- StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
- CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->call());
- info.codeOrigin = m_propertyAccesses[i].m_codeOrigin;
+ for (unsigned i = 0; i < m_ins.size(); ++i) {
+ StructureStubInfo& info = *m_ins[i].m_stubInfo;
+ CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
+ info.patch.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_done));
+ info.patch.deltaCallToJump = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_jump));
info.callReturnLocation = callReturnLocation;
- info.patch.dfg.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_structureImm), callReturnLocation);
- info.patch.dfg.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_structureCheck));
-#if USE(JSVALUE64)
- info.patch.dfg.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_loadOrStore));
-#else
- info.patch.dfg.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_tagLoadOrStore));
- info.patch.dfg.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_payloadLoadOrStore));
-#endif
- info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->label()));
- info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_done));
- info.patch.dfg.deltaCallToStorageLoad = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_propertyStorageLoad));
- info.patch.dfg.baseGPR = m_propertyAccesses[i].m_baseGPR;
-#if USE(JSVALUE64)
- info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
-#else
- info.patch.dfg.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
- info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
-#endif
- m_propertyAccesses[i].m_usedRegisters.copyInfo(info.patch.dfg.usedRegisters);
- info.patch.dfg.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
+ info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
}
- m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
- CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
- info.callType = m_jsCalls[i].m_callType;
- info.isDFG = true;
- info.codeOrigin = m_jsCalls[i].m_codeOrigin;
- linkBuffer.link(m_jsCalls[i].m_slowCall, FunctionPtr((m_vm->getCTIStub(info.callType == CallLinkInfo::Construct ? linkConstructThunkGenerator : linkCallThunkGenerator)).code().executableAddress()));
- info.callReturnLocation = linkBuffer.locationOfNearCall(m_jsCalls[i].m_slowCall);
- info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
- info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
- info.calleeGPR = static_cast<unsigned>(m_jsCalls[i].m_callee);
+ JSCallRecord& record = m_jsCalls[i];
+ CallLinkInfo& info = *record.m_info;
+ linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(linkCallThunkGenerator).code().executableAddress()));
+ info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall),
+ linkBuffer.locationOf(record.m_targetToCheck),
+ linkBuffer.locationOfNearCall(record.m_fastCall));
}
MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
- for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
- OSRExit& exit = codeBlock()->osrExit(i);
+ for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
+ OSRExit& exit = m_jitCode->osrExit[i];
+ OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
exit.correctJump(linkBuffer);
- if (exit.m_watchpointIndex != std::numeric_limits<unsigned>::max())
- codeBlock()->watchpoint(exit.m_watchpointIndex).correctLabels(linkBuffer);
+ if (info.m_replacementSource.isSet()) {
+ m_jitCode->common.jumpReplacements.append(JumpReplacement(
+ linkBuffer.locationOf(info.m_replacementSource),
+ linkBuffer.locationOf(info.m_replacementDestination)));
+ }
}
- if (m_graph.m_compilation) {
- ASSERT(m_exitSiteLabels.size() == codeBlock()->numberOfOSRExits());
+ if (m_graph.compilation()) {
+ ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
Vector<Label>& labels = m_exitSiteLabels[i];
Vector<const void*> addresses;
for (unsigned j = 0; j < labels.size(); ++j)
addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
- m_graph.m_compilation->addOSRExitSite(addresses);
+ m_graph.compilation()->addOSRExitSite(addresses);
}
} else
ASSERT(!m_exitSiteLabels.size());
+
+ m_jitCode->common.compilation = m_graph.compilation();
- codeBlock()->saveCompilation(m_graph.m_compilation);
+ // Link new DFG exception handlers and remove baseline JIT handlers.
+ m_codeBlock->clearExceptionHandlers();
+ for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) {
+ OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo;
+ if (info.m_replacementDestination.isSet()) {
+            // If this is *not* set, it means that we already jumped to the OSR exit in pure generated control flow.
+            // i.e., we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
+ // If this *is set*, it means we will be landing at this code location from genericUnwind from an
+ // exception thrown in a child call frame.
+ CodeLocationLabel catchLabel = linkBuffer.locationOf(info.m_replacementDestination);
+ HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
+ CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
+ newExceptionHandler.start = callSite.bits();
+ newExceptionHandler.end = callSite.bits() + 1;
+ newExceptionHandler.nativeCode = catchLabel;
+ m_codeBlock->appendExceptionHandler(newExceptionHandler);
+ }
+ }
+
+ if (m_pcToCodeOriginMapBuilder.didBuildMapping())
+ m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer));
}
-bool JITCompiler::compile(JITCode& entry)
+void JITCompiler::compile()
{
SamplingRegion samplingRegion("DFG Backend");
setStartOfCode();
compileEntry();
- SpeculativeJIT speculative(*this);
- compileBody(speculative);
+ m_speculative = std::make_unique<SpeculativeJIT>(*this);
+
+ // Plant a check that sufficient space is available in the JSStack.
+ addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+ Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
+
+ addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
+ compileSetupRegistersForEntry();
+ compileEntryExecutionFlag();
+ compileBody();
setEndOfMainPath();
+ // === Footer code generation ===
+ //
+ // Generate the stack overflow handling; if the stack check in the entry head fails,
+ // we need to call out to a helper function to throw the StackOverflowError.
+ stackOverflow.link(this);
+
+ emitStoreCodeOrigin(CodeOrigin(0));
+
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+
+ m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
+
// Generate slow path code.
- speculative.runSlowPathGenerators();
+ m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
+ m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
compileExceptionHandlers();
linkOSRExits();
// Create OSR entry trampolines if necessary.
- speculative.createOSREntries();
+ m_speculative->createOSREntries();
setEndOfCode();
- LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
- if (linkBuffer.didFailToAllocate())
- return false;
- link(linkBuffer);
- speculative.linkOSREntries(linkBuffer);
- codeBlock()->shrinkToFit(CodeBlock::LateShrink);
+ auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+ if (linkBuffer->didFailToAllocate()) {
+ m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+ return;
+ }
+
+ link(*linkBuffer);
+ m_speculative->linkOSREntries(*linkBuffer);
- if (shouldShowDisassembly())
- m_disassembler->dump(linkBuffer);
- if (m_graph.m_compilation)
- m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);
+ m_jitCode->shrinkToFit();
+ codeBlock()->shrinkToFit(CodeBlock::LateShrink);
- entry = JITCode(
- linkBuffer.finalizeCodeWithoutDisassembly(),
- JITCode::DFGJIT);
- return true;
+ disassemble(*linkBuffer);
+
+ m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
+ m_graph.m_plan, m_jitCode.release(), WTFMove(linkBuffer));
}
-bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
+void JITCompiler::compileFunction()
{
SamplingRegion samplingRegion("DFG Backend");
@@ -289,86 +393,239 @@ bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWi
// so enter after this.
Label fromArityCheck(this);
// Plant a check that sufficient space is available in the JSStack.
- // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
- addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
- Jump stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), GPRInfo::regT1);
- // Return here after stack check.
- Label fromStackCheck = label();
+ addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+ Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
+ // Move the stack pointer down to accommodate locals
+ addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
+
+ compileSetupRegistersForEntry();
+ compileEntryExecutionFlag();
// === Function body code generation ===
- SpeculativeJIT speculative(*this);
- compileBody(speculative);
+ m_speculative = std::make_unique<SpeculativeJIT>(*this);
+ compileBody();
setEndOfMainPath();
// === Function footer code generation ===
//
- // Generate code to perform the slow stack check (if the fast one in
+ // Generate code to perform the stack overflow handling (if the stack check in
// the function header fails), and generate the entry point with arity check.
//
- // Generate the stack check; if the fast check in the function head fails,
- // we need to call out to a helper function to check whether more space is available.
- // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
- stackCheck.link(this);
- move(stackPointerRegister, GPRInfo::argumentGPR0);
- poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
- CallBeginToken token;
- beginCall(CodeOrigin(0), token);
- Call callStackCheck = call();
- notifyCall(callStackCheck, CodeOrigin(0), token);
- jump(fromStackCheck);
+ // Generate the stack overflow handling; if the stack check in the function head fails,
+ // we need to call out to a helper function to throw the StackOverflowError.
+ stackOverflow.link(this);
+
+ emitStoreCodeOrigin(CodeOrigin(0));
+
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+
+ m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
// The fast entry point into a function does not check the correct number of arguments
// have been passed to the call (we only use the fast entry point where we can statically
// determine the correct number of arguments have been passed, or have already checked).
// In cases where an arity check is necessary, we enter here.
// FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
- Label arityCheck = label();
+ m_arityCheck = label();
compileEntry();
load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
- move(stackPointerRegister, GPRInfo::argumentGPR0);
- poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- beginCall(CodeOrigin(0), token);
- Call callArityCheck = call();
- notifyCall(callArityCheck, CodeOrigin(0), token);
- move(GPRInfo::regT0, GPRInfo::callFrameRegister);
+ emitStoreCodeOrigin(CodeOrigin(0));
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+ m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
+ branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
+ emitStoreCodeOrigin(CodeOrigin(0));
+ move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
+ m_callArityFixup = call();
jump(fromArityCheck);
// Generate slow path code.
- speculative.runSlowPathGenerators();
+ m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
+ m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
compileExceptionHandlers();
linkOSRExits();
// Create OSR entry trampolines if necessary.
- speculative.createOSREntries();
+ m_speculative->createOSREntries();
setEndOfCode();
// === Link ===
- LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
- if (linkBuffer.didFailToAllocate())
- return false;
- link(linkBuffer);
- speculative.linkOSREntries(linkBuffer);
+ auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+ if (linkBuffer->didFailToAllocate()) {
+ m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+ return;
+ }
+ link(*linkBuffer);
+ m_speculative->linkOSREntries(*linkBuffer);
+
+ m_jitCode->shrinkToFit();
codeBlock()->shrinkToFit(CodeBlock::LateShrink);
- // FIXME: switch the stack check & arity check over to DFGOpertaion style calls, not JIT stubs.
- linkBuffer.link(callStackCheck, cti_stack_check);
- linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);
+ linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));
- if (shouldShowDisassembly())
+ disassemble(*linkBuffer);
+
+ MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);
+
+ m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
+ m_graph.m_plan, m_jitCode.release(), WTFMove(linkBuffer), withArityCheck);
+}
+
+void JITCompiler::disassemble(LinkBuffer& linkBuffer)
+{
+ if (shouldDumpDisassembly()) {
m_disassembler->dump(linkBuffer);
- if (m_graph.m_compilation)
- m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);
-
- entryWithArityCheck = linkBuffer.locationOf(arityCheck);
- entry = JITCode(
- linkBuffer.finalizeCodeWithoutDisassembly(),
- JITCode::DFGJIT);
- return true;
+ linkBuffer.didAlreadyDisassemble();
+ }
+
+ if (m_graph.m_plan.compilation)
+ m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
+}
+
+#if USE(JSVALUE32_64)
+void* JITCompiler::addressOfDoubleConstant(Node* node)
+{
+ double value = node->asNumber();
+ int64_t valueBits = bitwise_cast<int64_t>(value);
+ auto it = m_graph.m_doubleConstantsMap.find(valueBits);
+ if (it != m_graph.m_doubleConstantsMap.end())
+ return it->second;
+
+ if (!m_graph.m_doubleConstants)
+ m_graph.m_doubleConstants = std::make_unique<Bag<double>>();
+
+ double* addressInConstantPool = m_graph.m_doubleConstants->add();
+ *addressInConstantPool = value;
+ m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
+ return addressInConstantPool;
+}
+#endif
+
+void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
+{
+ // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
+ if (!basicBlock.intersectionOfCFAHasVisited)
+ return;
+
+ OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));
+
+ entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;
+
+ // Fix the expected values: in our protocol, a dead variable will have an expected
+ // value of (None, []). But the old JIT may stash some values there. So we really
+ // need (Top, TOP).
+ for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
+ Node* node = basicBlock.variablesAtHead.argument(argument);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.argument(argument).makeHeapTop();
+ }
+ for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
+ Node* node = basicBlock.variablesAtHead.local(local);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.local(local).makeHeapTop();
+ else {
+ VariableAccessData* variable = node->variableAccessData();
+ entry->m_machineStackUsed.set(variable->machineLocal().toLocal());
+
+ switch (variable->flushFormat()) {
+ case FlushedDouble:
+ entry->m_localsForcedDouble.set(local);
+ break;
+ case FlushedInt52:
+ entry->m_localsForcedMachineInt.set(local);
+ break;
+ default:
+ break;
+ }
+
+ if (variable->local() != variable->machineLocal()) {
+ entry->m_reshufflings.append(
+ OSREntryReshuffling(
+ variable->local().offset(), variable->machineLocal().offset()));
+ }
+ }
+ }
+
+ entry->m_reshufflings.shrinkToFit();
+}
+
+void JITCompiler::appendExceptionHandlingOSRExit(ExitKind kind, unsigned eventStreamIndex, CodeOrigin opCatchOrigin, HandlerInfo* exceptionHandler, CallSiteIndex callSite, MacroAssembler::JumpList jumpsToFail)
+{
+ OSRExit exit(kind, JSValueRegs(), graph().methodOfGettingAValueProfileFor(nullptr), m_speculative.get(), eventStreamIndex);
+ exit.m_codeOrigin = opCatchOrigin;
+ exit.m_exceptionHandlerCallSiteIndex = callSite;
+ OSRExitCompilationInfo& exitInfo = appendExitInfo(jumpsToFail);
+ jitCode()->appendOSRExit(exit);
+ m_exceptionHandlerOSRExitCallSites.append(ExceptionHandlingOSRExitInfo { exitInfo, *exceptionHandler, callSite });
+}
+
+void JITCompiler::exceptionCheck()
+{
+ // It's important that we use origin.forExit here. Consider if we hoist string
+ // addition outside a loop, and that we exit at the point of that concatenation
+ // from an out of memory exception.
+ // If the original loop had a try/catch around string concatenation, if we "catch"
+    // that exception inside the loop, then the loop's induction variable will be undefined
+    // in the OSR exit value recovery. It's more defensible for the string concatenation,
+    // then, to not be caught by the for loop's try/catch.
+ // Here is the program I'm speaking about:
+ //
+    // >>>> let's presume "c = a + b" gets hoisted here.
+ // for (var i = 0; i < length; i++) {
+ // try {
+ // c = a + b
+ // } catch(e) {
+    //        If we threw an out of memory error, and we caught the exception
+    //        right here, then "i" would almost certainly be undefined, which
+    //        would make no sense.
+ // ...
+ // }
+ // }
+ CodeOrigin opCatchOrigin;
+ HandlerInfo* exceptionHandler;
+ bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_speculative->m_currentNode->origin.forExit, opCatchOrigin, exceptionHandler);
+ if (willCatchException) {
+ unsigned streamIndex = m_speculative->m_outOfLineStreamIndex != UINT_MAX ? m_speculative->m_outOfLineStreamIndex : m_speculative->m_stream->size();
+ MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck();
+        // We assume here that this is called after callOperation()/appendCall() is called.
+ appendExceptionHandlingOSRExit(ExceptionCheck, streamIndex, opCatchOrigin, exceptionHandler, m_jitCode->common.lastCallSite(), hadException);
+ } else
+ m_exceptionChecks.append(emitExceptionCheck());
+}
+
+CallSiteIndex JITCompiler::recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin& callSiteCodeOrigin, unsigned eventStreamIndex)
+{
+ CodeOrigin opCatchOrigin;
+ HandlerInfo* exceptionHandler;
+ bool willCatchException = m_graph.willCatchExceptionInMachineFrame(callSiteCodeOrigin, opCatchOrigin, exceptionHandler);
+ CallSiteIndex callSite = addCallSite(callSiteCodeOrigin);
+ if (willCatchException)
+ appendExceptionHandlingOSRExit(GenericUnwind, eventStreamIndex, opCatchOrigin, exceptionHandler, callSite);
+ return callSite;
+}
+
+void JITCompiler::setEndOfMainPath()
+{
+ m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), m_speculative->m_origin.semantic);
+ if (LIKELY(!m_disassembler))
+ return;
+ m_disassembler->setEndOfMainPath(labelIgnoringWatchpoints());
+}
+
+void JITCompiler::setEndOfCode()
+{
+ m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
+ if (LIKELY(!m_disassembler))
+ return;
+ m_disassembler->setEndOfCode(labelIgnoringWatchpoints());
}
} } // namespace JSC::DFG
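
Editor's note on the new control flow: compile() and compileFunction() no longer return a JITCode through an out-parameter plus a success flag; they record the outcome on the compilation plan through a finalizer (JITFinalizer on success, FailedFinalizer when the LinkBuffer allocation fails), as visible in the hunks above. The following is a minimal, self-contained C++ sketch of that hand-off pattern only; Plan, Finalizer, JITFinalizer, FailedFinalizer and LinkBuffer here are simplified stand-ins, not the real JSC classes.

// Simplified sketch of the finalizer hand-off adopted by the patched
// JITCompiler::compile()/compileFunction(). All types are stand-ins for the
// real JSC classes (DFG::Plan, DFG::Finalizer, JSC::LinkBuffer).
#include <iostream>
#include <memory>
#include <utility>

struct LinkBuffer {
    bool didFailToAllocate() const { return failed; }
    bool failed = false;
};

struct Plan;

struct Finalizer {
    explicit Finalizer(Plan& plan) : plan(plan) { }
    virtual ~Finalizer() = default;
    virtual bool finalize() = 0; // Runs later, when the plan is finalized.
    Plan& plan;
};

struct Plan {
    std::unique_ptr<Finalizer> finalizer;
};

struct FailedFinalizer : Finalizer {
    using Finalizer::Finalizer;
    bool finalize() override { return false; } // Compilation is abandoned.
};

struct JITFinalizer : Finalizer {
    JITFinalizer(Plan& plan, std::unique_ptr<LinkBuffer> linkBuffer)
        : Finalizer(plan), linkBuffer(std::move(linkBuffer)) { }
    bool finalize() override { return true; } // The real class installs the generated code here.
    std::unique_ptr<LinkBuffer> linkBuffer;
};

// Mirrors the tail of JITCompiler::compile(): no return value; success or
// failure is recorded on the plan for the caller to act on afterwards.
void compile(Plan& plan, bool simulateAllocationFailure)
{
    auto linkBuffer = std::make_unique<LinkBuffer>();
    linkBuffer->failed = simulateAllocationFailure;
    if (linkBuffer->didFailToAllocate()) {
        plan.finalizer = std::make_unique<FailedFinalizer>(plan);
        return;
    }
    // ... link(*linkBuffer), linkOSREntries(), shrinkToFit(), disassemble() ...
    plan.finalizer = std::make_unique<JITFinalizer>(plan, std::move(linkBuffer));
}

int main()
{
    Plan plan;
    compile(plan, /* simulateAllocationFailure */ false);
    std::cout << (plan.finalizer->finalize() ? "code installed" : "compilation failed") << "\n";
    return 0;
}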
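The new addressOfDoubleConstant() (JSVALUE32_64 builds only) keys its lookup map on the raw bit pattern of the double (via bitwise_cast<int64_t>) rather than on floating-point equality, so +0.0/-0.0 and distinct NaN encodings each get their own pool slot with a stable address. Below is a small stand-alone sketch of that idea; std::deque and std::unordered_map are used here as assumed stand-ins for the real Bag<double> and m_doubleConstantsMap.

// Sketch of a bit-pattern-keyed double constant pool (stand-ins for
// Graph::m_doubleConstants / m_doubleConstantsMap introduced by the patch).
#include <cstdint>
#include <cstring>
#include <deque>
#include <iostream>
#include <unordered_map>

class DoubleConstantPool {
public:
    double* address(double value)
    {
        int64_t bits;
        std::memcpy(&bits, &value, sizeof bits); // same role as bitwise_cast<int64_t>
        auto it = m_map.find(bits);
        if (it != m_map.end())
            return it->second;
        m_pool.push_back(value); // deque keeps element addresses stable
        double* slot = &m_pool.back();
        m_map.emplace(bits, slot);
        return slot;
    }

private:
    std::deque<double> m_pool;
    std::unordered_map<int64_t, double*> m_map;
};

int main()
{
    DoubleConstantPool pool;
    std::cout << (pool.address(1.5) == pool.address(1.5)) << "\n";  // 1: deduplicated
    std::cout << (pool.address(0.0) == pool.address(-0.0)) << "\n"; // 0: distinct bit patterns
    return 0;
}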