Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGJITCompiler.cpp')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGJITCompiler.cpp  261
1 file changed, 76 insertions, 185 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index b58d67e1f..2934d2ba9 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,7 +28,6 @@
 
 #if ENABLE(DFG_JIT)
 
-#include "ArityCheckFailReturnThunks.h"
 #include "CodeBlock.h"
 #include "DFGFailedFinalizer.h"
 #include "DFGInlineCacheWrapperInlines.h"
@@ -41,10 +40,8 @@
 #include "DFGSpeculativeJIT.h"
 #include "DFGThunks.h"
 #include "JSCJSValueInlines.h"
-#include "LinkBuffer.h"
-#include "MaxFrameExtentForSlowPathCall.h"
-#include "JSCInlines.h"
 #include "VM.h"
+#include "LinkBuffer.h"
 
 namespace JSC { namespace DFG {
 
@@ -55,7 +52,7 @@ JITCompiler::JITCompiler(Graph& dfg)
     , m_blockHeads(dfg.numBlocks())
 {
     if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
-        m_disassembler = std::make_unique<Disassembler>(dfg);
+        m_disassembler = adoptPtr(new Disassembler(dfg));
 }
 
 JITCompiler::~JITCompiler()
@@ -95,14 +92,15 @@ void JITCompiler::linkOSRExits()
 void JITCompiler::compileEntry()
 {
     // This code currently matches the old JIT. In the function header we need to
-    // save return address and call frame via the prologue and perform a fast stack check.
+    // pop the return address (since we do not allow any recursion on the machine
+    // stack), and perform a fast stack check.
     // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
     // We'll need to convert the remaining cti_ style calls (specifically the stack
     // check) which will be dependent on stack layout. (We'd need to account for this in
     // both normal return code and when jumping to an exception handler).
-    emitFunctionPrologue();
+    preserveReturnAddressAfterCall(GPRInfo::regT2);
+    emitPutReturnPCToCallFrameHeader(GPRInfo::regT2);
     emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
-    jitAssertTagsInPlace();
 }
 
 void JITCompiler::compileBody()
@@ -116,40 +114,32 @@ void JITCompiler::compileBody()
 
 void JITCompiler::compileExceptionHandlers()
 {
-    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
-        m_exceptionChecksWithCallFrameRollback.link(this);
-
-        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
-        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
-        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
-        addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+    if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
+        return;
 
-#if CPU(X86)
-        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
-        poke(GPRInfo::argumentGPR0);
-        poke(GPRInfo::argumentGPR1, 1);
-#endif
-        m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));
+    Jump doLookup;
 
-        jumpToExceptionHandler();
+    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
+        m_exceptionChecksWithCallFrameRollback.link(this);
+        emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR0);
+        doLookup = jump();
     }
 
-    if (!m_exceptionChecks.empty()) {
+    if (!m_exceptionChecks.empty())
         m_exceptionChecks.link(this);
 
-        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
-        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
-        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+    // lookupExceptionHandler is passed one argument, the exec (the CallFrame*).
+    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+
+    if (doLookup.isSet())
+        doLookup.link(this);
 
 #if CPU(X86)
-        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
-        poke(GPRInfo::argumentGPR0);
-        poke(GPRInfo::argumentGPR1, 1);
+    // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+    poke(GPRInfo::argumentGPR0);
#endif
-        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
-
-        jumpToExceptionHandler();
-    }
+    m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
+    jumpToExceptionHandler();
 }
 
 void JITCompiler::link(LinkBuffer& linkBuffer)
@@ -158,18 +148,15 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
     m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
     m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();
 
-    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
-        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;
-
-#if USE(JSVALUE32_64)
-    m_jitCode->common.doubleConstants = WTF::move(m_graph.m_doubleConstants);
-#endif
+    if (!m_graph.m_inlineCallFrames->isEmpty())
+        m_jitCode->common.inlineCallFrames = m_graph.m_inlineCallFrames.release();
 
-    m_graph.registerFrozenValues();
+    m_jitCode->common.machineCaptureStart = m_graph.m_machineCaptureStart;
+    m_jitCode->common.slowArguments = std::move(m_graph.m_slowArguments);
 
     BitVector usedJumpTables;
-    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
-        SwitchData& data = **iter;
+    for (unsigned i = m_graph.m_switchData.size(); i--;) {
+        SwitchData& data = m_graph.m_switchData[i];
         if (!data.didUseJumpTable)
             continue;
 
@@ -180,14 +167,14 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
         usedJumpTables.set(data.switchTableIndex);
         SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
-        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
         table.ctiOffsets.grow(table.branchOffsets.size());
         for (unsigned j = table.ctiOffsets.size(); j--;)
             table.ctiOffsets[j] = table.ctiDefault;
         for (unsigned j = data.cases.size(); j--;) {
             SwitchCase& myCase = data.cases[j];
-            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
-                linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
+            table.ctiOffsets[myCase.value.switchLookupValue() - table.min] =
+                linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
         }
     }
 
@@ -201,8 +188,8 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
     // NOTE: we cannot clear string switch tables because (1) we're running concurrently
     // and we cannot deref StringImpl's and (2) it would be weird to deref those
     // StringImpl's since we refer to them.
-    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
-        SwitchData& data = **switchDataIter;
+    for (unsigned i = m_graph.m_switchData.size(); i--;) {
+        SwitchData& data = m_graph.m_switchData[i];
         if (!data.didUseJumpTable)
             continue;
 
@@ -210,7 +197,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
             continue;
 
         StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
-        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
         StringJumpTable::StringOffsetTable::iterator iter;
         StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
         for (iter = table.offsetTable.begin(); iter != end; ++iter)
@@ -219,7 +206,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
             SwitchCase& myCase = data.cases[j];
             iter = table.offsetTable.find(myCase.value.stringImpl());
             RELEASE_ASSERT(iter != end);
-            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
+            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
         }
     }
 
@@ -241,13 +228,17 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
             info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
     }
 
+    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
     for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
-        JSCallRecord& record = m_jsCalls[i];
-        CallLinkInfo& info = *record.m_info;
-        linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(linkCallThunkGenerator).code().executableAddress()));
-        info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall),
-            linkBuffer.locationOf(record.m_targetToCheck),
-            linkBuffer.locationOfNearCall(record.m_fastCall));
+        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
+        info.callType = m_jsCalls[i].m_callType;
+        info.isDFG = true;
+        info.codeOrigin = m_jsCalls[i].m_codeOrigin;
+        linkBuffer.link(m_jsCalls[i].m_slowCall, FunctionPtr((m_vm->getCTIStub(info.callType == CallLinkInfo::Construct ? linkConstructThunkGenerator : linkCallThunkGenerator)).code().executableAddress()));
+        info.callReturnLocation = linkBuffer.locationOfNearCall(m_jsCalls[i].m_slowCall);
+        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
+        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
+        info.calleeGPR = static_cast<unsigned>(m_jsCalls[i].m_callee);
     }
 
     MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
@@ -286,30 +277,10 @@ void JITCompiler::compile()
     setStartOfCode();
     compileEntry();
-    m_speculative = std::make_unique<SpeculativeJIT>(*this);
-
-    // Plant a check that sufficient space is available in the JSStack.
-    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
-    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
-
-    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
-    checkStackPointerAlignment();
+    m_speculative = adoptPtr(new SpeculativeJIT(*this));
     compileBody();
     setEndOfMainPath();
 
-    // === Footer code generation ===
-    //
-    // Generate the stack overflow handling; if the stack check in the entry head fails,
-    // we need to call out to a helper function to throw the StackOverflowError.
-    stackOverflow.link(this);
-
-    emitStoreCodeOrigin(CodeOrigin(0));
-
-    if (maxFrameExtentForSlowPathCall)
-        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
-
-    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
-
     // Generate slow path code.
     m_speculative->runSlowPathGenerators();
 
@@ -319,10 +290,13 @@ void JITCompiler::compile()
     // Create OSR entry trampolines if necessary.
     m_speculative->createOSREntries();
     setEndOfCode();
+}
 
-    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+void JITCompiler::link()
+{
+    OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
     if (linkBuffer->didFailToAllocate()) {
-        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+        m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
         return;
     }
 
@@ -334,8 +308,8 @@ void JITCompiler::compile()
 
     disassemble(*linkBuffer);
 
-    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
-        m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer));
+    m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
+        m_graph.m_plan, m_jitCode.release(), linkBuffer.release()));
 }
 
 void JITCompiler::compileFunction()
@@ -351,33 +325,30 @@ void JITCompiler::compileFunction()
     // so enter after this.
     Label fromArityCheck(this);
     // Plant a check that sufficient space is available in the JSStack.
-    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
-    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
+    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit()).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+    Jump stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), GPRInfo::regT1);
+    // Return here after stack check.
+    Label fromStackCheck = label();
 
-    // Move the stack pointer down to accommodate locals
-    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
-    checkStackPointerAlignment();
 
     // === Function body code generation ===
-    m_speculative = std::make_unique<SpeculativeJIT>(*this);
+    m_speculative = adoptPtr(new SpeculativeJIT(*this));
     compileBody();
     setEndOfMainPath();
 
     // === Function footer code generation ===
     //
-    // Generate code to perform the stack overflow handling (if the stack check in
+    // Generate code to perform the slow stack check (if the fast one in
     // the function header fails), and generate the entry point with arity check.
     //
-    // Generate the stack overflow handling; if the stack check in the function head fails,
-    // we need to call out to a helper function to throw the StackOverflowError.
-    stackOverflow.link(this);
+    // Generate the stack check; if the fast check in the function head fails,
+    // we need to call out to a helper function to check whether more space is available.
+    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
+    stackCheck.link(this);
 
     emitStoreCodeOrigin(CodeOrigin(0));
-
-    if (maxFrameExtentForSlowPathCall)
-        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
-
-    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
+    m_speculative->callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock);
+    jump(fromStackCheck);
 
     // The fast entry point into a function does not check the correct number of arguments
     // have been passed to the call (we only use the fast entry point where we can statically
@@ -390,23 +361,9 @@ void JITCompiler::compileFunction()
     load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
     branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
     emitStoreCodeOrigin(CodeOrigin(0));
-    if (maxFrameExtentForSlowPathCall)
-        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
     m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
-    if (maxFrameExtentForSlowPathCall)
-        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
     branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
     emitStoreCodeOrigin(CodeOrigin(0));
-    GPRReg thunkReg;
-#if USE(JSVALUE64)
-    thunkReg = GPRInfo::regT7;
-#else
-    thunkReg = GPRInfo::regT5;
-#endif
-    CodeLocationLabel* arityThunkLabels =
-        m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
-    move(TrustedImmPtr(arityThunkLabels), thunkReg);
-    loadPtr(BaseIndex(thunkReg, GPRInfo::regT0, timesPtr()), thunkReg);
     m_callArityFixup = call();
     jump(fromArityCheck);
 
@@ -419,11 +376,14 @@ void JITCompiler::compileFunction()
     // Create OSR entry trampolines if necessary.
     m_speculative->createOSREntries();
     setEndOfCode();
+}
 
+void JITCompiler::linkFunction()
+{
     // === Link ===
-    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+    OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
     if (linkBuffer->didFailToAllocate()) {
-        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+        m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
         return;
     }
     link(*linkBuffer);
@@ -432,94 +392,25 @@ void JITCompiler::compileFunction()
     m_jitCode->shrinkToFit();
     codeBlock()->shrinkToFit(CodeBlock::LateShrink);
 
-    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));
+    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixup)).code().executableAddress()));
 
     disassemble(*linkBuffer);
 
     MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);
 
-    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
-        m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer), withArityCheck);
+    m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
+        m_graph.m_plan, m_jitCode.release(), linkBuffer.release(), withArityCheck));
 }
 
 void JITCompiler::disassemble(LinkBuffer& linkBuffer)
 {
-    if (shouldShowDisassembly()) {
+    if (shouldShowDisassembly())
         m_disassembler->dump(linkBuffer);
-        linkBuffer.didAlreadyDisassemble();
-    }
 
     if (m_graph.m_plan.compilation)
         m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
 }
 
-#if USE(JSVALUE32_64)
-void* JITCompiler::addressOfDoubleConstant(Node* node)
-{
-    double value = node->asNumber();
-    int64_t valueBits = bitwise_cast<int64_t>(value);
-    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
-    if (it != m_graph.m_doubleConstantsMap.end())
-        return it->second;
-
-    if (!m_graph.m_doubleConstants)
-        m_graph.m_doubleConstants = std::make_unique<Bag<double>>();
-
-    double* addressInConstantPool = m_graph.m_doubleConstants->add();
-    *addressInConstantPool = value;
-    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
-    return addressInConstantPool;
-}
-#endif
-
-void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
-{
-    // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
-    if (!basicBlock.intersectionOfCFAHasVisited)
-        return;
-
-    OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));
-
-    entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;
-
-    // Fix the expected values: in our protocol, a dead variable will have an expected
-    // value of (None, []). But the old JIT may stash some values there. So we really
-    // need (Top, TOP).
-    for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
-        Node* node = basicBlock.variablesAtHead.argument(argument);
-        if (!node || !node->shouldGenerate())
-            entry->m_expectedValues.argument(argument).makeHeapTop();
-    }
-    for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
-        Node* node = basicBlock.variablesAtHead.local(local);
-        if (!node || !node->shouldGenerate())
-            entry->m_expectedValues.local(local).makeHeapTop();
-        else {
-            VariableAccessData* variable = node->variableAccessData();
-            entry->m_machineStackUsed.set(variable->machineLocal().toLocal());
-
-            switch (variable->flushFormat()) {
-            case FlushedDouble:
-                entry->m_localsForcedDouble.set(local);
-                break;
-            case FlushedInt52:
-                entry->m_localsForcedMachineInt.set(local);
-                break;
-            default:
-                break;
-            }
-
-            if (variable->local() != variable->machineLocal()) {
-                entry->m_reshufflings.append(
-                    OSREntryReshuffling(
-                        variable->local().offset(), variable->machineLocal().offset()));
-            }
-        }
-    }
-
-    entry->m_reshufflings.shrinkToFit();
-}
-
 } } // namespace JSC::DFG
 
 #endif // ENABLE(DFG_JIT)
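Note on the compileExceptionHandlers() rewrite above: instead of emitting a complete handler-lookup sequence per failure path, the new code funnels both paths (plain exception checks, and checks that must first roll the call frame back to the caller) into one shared lookupExceptionHandler call tail; the rollback path only fixes up its frame argument before joining the common code. The stand-alone C++ sketch below models just that control-flow shape; Frame and the functions here are hypothetical stand-ins, not JSC API (the real code emits machine code through the macro assembler):

    #include <cstdio>

    // Hypothetical stand-ins for JSC's CallFrame and the runtime handler lookup;
    // the real compiler emits machine code that performs these steps.
    struct Frame { Frame* caller; const char* name; };

    static void lookupExceptionHandler(Frame* exec) // shared lookup tail
    {
        std::printf("looking up handler from frame '%s'\n", exec->name);
    }

    static void handleException(Frame* exec, bool needsCallFrameRollback)
    {
        if (needsCallFrameRollback)
            exec = exec->caller; // models emitGetCallerFrameFromCallFrameHeaderPtr, then the doLookup jump
        lookupExceptionHandler(exec); // both paths funnel into one call
    }

    int main()
    {
        Frame outer = { nullptr, "outer" };
        Frame inner = { &outer, "inner" };
        handleException(&inner, false); // m_exceptionChecks path
        handleException(&inner, true);  // m_exceptionChecksWithCallFrameRollback path
        return 0;
    }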

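A similar shape governs the arity-check entry point in compileFunction() above: callers with enough arguments branch straight to fromArityCheck, otherwise a slow-path operation decides whether the call may proceed, and only then does the arity-fixup thunk run. A minimal C++ model of that branch structure follows; the helper names are invented for illustration (the real path is emitted assembly calling operationCallArityCheck/operationConstructArityCheck and the arityFixup thunk):

    #include <cstdio>

    // Invented stand-in for the arity-check operation: returns nonzero when the
    // frame must be fixed up before entry; here it simply reports fixup whenever
    // arguments are missing, so the sketch exercises that path.
    static int slowPathArityCheck(int argumentCount, int numParameters)
    {
        return argumentCount < numParameters;
    }

    static void arityFixupThunk() // stands in for the m_callArityFixup thunk
    {
        std::printf("fixing up frame for missing arguments\n");
    }

    static void enterWithArityCheck(int argumentCount, int numParameters)
    {
        if (argumentCount >= numParameters)
            return; // models branch32(AboveOrEqual, ...).linkTo(fromArityCheck, this)
        if (!slowPathArityCheck(argumentCount, numParameters))
            return; // models branchTest32(Zero, regT0).linkTo(fromArityCheck, this)
        arityFixupThunk(); // models m_callArityFixup = call(), then jump(fromArityCheck)
    }

    int main()
    {
        enterWithArityCheck(2, 2); // enough arguments: straight to fromArityCheck
        enterWithArityCheck(1, 2); // too few: slow path, then fixup
        return 0;
    }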