Diffstat (limited to 'Source/JavaScriptCore/jit/JIT.cpp')
 Source/JavaScriptCore/jit/JIT.cpp | 353
 1 file changed, 172 insertions, 181 deletions
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index 038f5d7db..c3508b01d 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2009, 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,12 +26,15 @@
 #include "config.h"
 
 #if ENABLE(JIT)
-
 #include "JIT.h"
 
-#include "ArityCheckFailReturnThunks.h"
+// This probably does not belong here; adding here for now as a quick Windows build fix.
+#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
+#include "MacroAssembler.h"
+JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
+#endif
+
 #include "CodeBlock.h"
-#include "CodeBlockWithJITType.h"
 #include "DFGCapabilities.h"
 #include "Interpreter.h"
 #include "JITInlines.h"
@@ -39,15 +42,11 @@
 #include "JSArray.h"
 #include "JSFunction.h"
 #include "LinkBuffer.h"
-#include "MaxFrameExtentForSlowPathCall.h"
-#include "JSCInlines.h"
-#include "ProfilerDatabase.h"
+#include "Operations.h"
 #include "RepatchBuffer.h"
 #include "ResultType.h"
 #include "SamplingTool.h"
 #include "SlowPathCall.h"
-#include "StackAlignment.h"
-#include "TypeProfilerLog.h"
 #include <wtf/CryptographicallyRandomNumber.h>
 
 using namespace std;
@@ -76,7 +75,7 @@ JIT::JIT(VM* vm, CodeBlock* codeBlock)
     : JSInterfaceJIT(vm, codeBlock)
     , m_interpreter(vm->interpreter)
     , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
-    , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
+    , m_bytecodeOffset((unsigned)-1)
     , m_getByIdIndex(UINT_MAX)
     , m_putByIdIndex(UINT_MAX)
     , m_byValInstructionIndex(UINT_MAX)
@@ -99,31 +98,11 @@ void JIT::emitEnterOptimizationCheck()
     ASSERT(!m_bytecodeOffset);
     callOperation(operationOptimize, m_bytecodeOffset);
     skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
-    move(returnValueGPR2, stackPointerRegister);
     jump(returnValueGPR);
     skipOptimize.link(this);
 }
 #endif
 
-void JIT::emitNotifyWrite(WatchpointSet* set)
-{
-    if (!set || set->state() == IsInvalidated)
-        return;
-
-    addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
-}
-
-void JIT::assertStackPointerOffset()
-{
-    if (ASSERT_DISABLED)
-        return;
-
-    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
-    Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
-    breakpoint();
-    ok.link(this);
-}
-
 #define NEXT_OPCODE(name) \
     m_bytecodeOffset += OPCODE_LENGTH(name); \
     break;
@@ -149,9 +128,6 @@ void JIT::assertStackPointerOffset()
 
 void JIT::privateCompileMainPass()
 {
-    jitAssertTagsInPlace();
-    jitAssertArgumentCountSane();
-
     Instruction* instructionsBegin = m_codeBlock->instructions().begin();
     unsigned instructionCount = m_codeBlock->instructions().size();
 
@@ -182,10 +158,7 @@ void JIT::privateCompileMainPass()
                 AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                     m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
         }
-
-        if (Options::eagerlyUpdateTopCallFrame())
-            updateTopCallFrame();
-
+
         switch (opcodeID) {
         DEFINE_SLOW_OP(del_by_val)
         DEFINE_SLOW_OP(in)
@@ -194,9 +167,10 @@
         DEFINE_SLOW_OP(greater)
         DEFINE_SLOW_OP(greatereq)
         DEFINE_SLOW_OP(is_function)
-        DEFINE_SLOW_OP(is_object_or_null)
+        DEFINE_SLOW_OP(is_object)
         DEFINE_SLOW_OP(typeof)
 
+        DEFINE_OP(op_touch_entry)
         DEFINE_OP(op_add)
         DEFINE_OP(op_bitand)
         DEFINE_OP(op_bitor)
@@ -204,34 +178,35 @@
         DEFINE_OP(op_call)
         DEFINE_OP(op_call_eval)
         DEFINE_OP(op_call_varargs)
-        DEFINE_OP(op_construct_varargs)
         DEFINE_OP(op_catch)
         DEFINE_OP(op_construct)
+        DEFINE_OP(op_get_callee)
         DEFINE_OP(op_create_this)
         DEFINE_OP(op_to_this)
-        DEFINE_OP(op_create_direct_arguments)
-        DEFINE_OP(op_create_scoped_arguments)
-        DEFINE_OP(op_create_out_of_band_arguments)
-        DEFINE_OP(op_check_tdz)
+        DEFINE_OP(op_init_lazy_reg)
+        DEFINE_OP(op_create_arguments)
         DEFINE_OP(op_debug)
         DEFINE_OP(op_del_by_id)
         DEFINE_OP(op_div)
         DEFINE_OP(op_end)
         DEFINE_OP(op_enter)
-        DEFINE_OP(op_get_scope)
+        DEFINE_OP(op_create_activation)
         DEFINE_OP(op_eq)
         DEFINE_OP(op_eq_null)
         case op_get_by_id_out_of_line:
         case op_get_array_length:
         DEFINE_OP(op_get_by_id)
+        DEFINE_OP(op_get_arguments_length)
         DEFINE_OP(op_get_by_val)
+        DEFINE_OP(op_get_argument_by_val)
+        DEFINE_OP(op_get_by_pname)
+        DEFINE_OP(op_get_pnames)
         DEFINE_OP(op_check_has_instance)
         DEFINE_OP(op_instanceof)
         DEFINE_OP(op_is_undefined)
         DEFINE_OP(op_is_boolean)
         DEFINE_OP(op_is_number)
         DEFINE_OP(op_is_string)
-        DEFINE_OP(op_is_object)
         DEFINE_OP(op_jeq_null)
         DEFINE_OP(op_jfalse)
         DEFINE_OP(op_jmp)
@@ -249,6 +224,7 @@ void JIT::privateCompileMainPass()
         DEFINE_OP(op_loop_hint)
         DEFINE_OP(op_lshift)
         DEFINE_OP(op_mod)
+        DEFINE_OP(op_captured_mov)
         DEFINE_OP(op_mov)
         DEFINE_OP(op_mul)
         DEFINE_OP(op_negate)
@@ -258,20 +234,20 @@ void JIT::privateCompileMainPass()
         DEFINE_OP(op_new_array_with_size)
         DEFINE_OP(op_new_array_buffer)
         DEFINE_OP(op_new_func)
+        DEFINE_OP(op_new_captured_func)
         DEFINE_OP(op_new_func_exp)
         DEFINE_OP(op_new_object)
         DEFINE_OP(op_new_regexp)
+        DEFINE_OP(op_next_pname)
         DEFINE_OP(op_not)
         DEFINE_OP(op_nstricteq)
+        DEFINE_OP(op_pop_scope)
         DEFINE_OP(op_dec)
         DEFINE_OP(op_inc)
         DEFINE_OP(op_profile_did_call)
         DEFINE_OP(op_profile_will_call)
-        DEFINE_OP(op_profile_type)
-        DEFINE_OP(op_profile_control_flow)
+        DEFINE_OP(op_push_name_scope)
         DEFINE_OP(op_push_with_scope)
-        DEFINE_OP(op_create_lexical_environment)
-        DEFINE_OP(op_get_parent_scope)
         case op_put_by_id_out_of_line:
         case op_put_by_id_transition_direct:
         case op_put_by_id_transition_normal:
@@ -281,11 +257,13 @@ void JIT::privateCompileMainPass()
         DEFINE_OP(op_put_by_index)
         case op_put_by_val_direct:
         DEFINE_OP(op_put_by_val)
-        DEFINE_OP(op_put_getter_by_id)
-        DEFINE_OP(op_put_setter_by_id)
         DEFINE_OP(op_put_getter_setter)
+        case op_init_global_const_nop:
+            NEXT_OPCODE(op_init_global_const_nop);
+        DEFINE_OP(op_init_global_const)
 
         DEFINE_OP(op_ret)
+        DEFINE_OP(op_ret_object_or_this)
         DEFINE_OP(op_rshift)
         DEFINE_OP(op_unsigned)
         DEFINE_OP(op_urshift)
@@ -295,37 +273,40 @@ void JIT::privateCompileMainPass()
         DEFINE_OP(op_switch_char)
         DEFINE_OP(op_switch_imm)
         DEFINE_OP(op_switch_string)
+        DEFINE_OP(op_tear_off_activation)
+        DEFINE_OP(op_tear_off_arguments)
         DEFINE_OP(op_throw)
         DEFINE_OP(op_throw_static_error)
         DEFINE_OP(op_to_number)
-        DEFINE_OP(op_to_string)
         DEFINE_OP(op_to_primitive)
 
         DEFINE_OP(op_resolve_scope)
         DEFINE_OP(op_get_from_scope)
         DEFINE_OP(op_put_to_scope)
-        DEFINE_OP(op_get_from_arguments)
-        DEFINE_OP(op_put_to_arguments)
-
-        DEFINE_OP(op_get_enumerable_length)
-        DEFINE_OP(op_has_generic_property)
-        DEFINE_OP(op_has_structure_property)
-        DEFINE_OP(op_has_indexed_property)
-        DEFINE_OP(op_get_direct_pname)
-        DEFINE_OP(op_get_property_enumerator)
-        DEFINE_OP(op_enumerator_structure_pname)
-        DEFINE_OP(op_enumerator_generic_pname)
-        DEFINE_OP(op_to_index_string)
-        default:
+
+        case op_get_by_id_chain:
+        case op_get_by_id_generic:
+        case op_get_by_id_proto:
+        case op_get_by_id_self:
+        case op_get_by_id_getter_chain:
+        case op_get_by_id_getter_proto:
+        case op_get_by_id_getter_self:
+        case op_get_by_id_custom_chain:
+        case op_get_by_id_custom_proto:
+        case op_get_by_id_custom_self:
+        case op_get_string_length:
+        case op_put_by_id_generic:
+        case op_put_by_id_replace:
+        case op_put_by_id_transition:
             RELEASE_ASSERT_NOT_REACHED();
         }
     }
 
-    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
+    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
 
 #ifndef NDEBUG
     // Reset this, in order to guard its use with ASSERTs.
-    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
+    m_bytecodeOffset = (unsigned)-1;
 #endif
 }
 
@@ -380,17 +361,20 @@ void JIT::privateCompileSlowCases()
         DEFINE_SLOWCASE_OP(op_call)
         DEFINE_SLOWCASE_OP(op_call_eval)
         DEFINE_SLOWCASE_OP(op_call_varargs)
-        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
         DEFINE_SLOWCASE_OP(op_to_this)
-        DEFINE_SLOWCASE_OP(op_check_tdz)
         DEFINE_SLOWCASE_OP(op_create_this)
+        DEFINE_SLOWCASE_OP(op_captured_mov)
         DEFINE_SLOWCASE_OP(op_div)
         DEFINE_SLOWCASE_OP(op_eq)
+        DEFINE_SLOWCASE_OP(op_get_callee)
         case op_get_by_id_out_of_line:
         case op_get_array_length:
         DEFINE_SLOWCASE_OP(op_get_by_id)
+        DEFINE_SLOWCASE_OP(op_get_arguments_length)
         DEFINE_SLOWCASE_OP(op_get_by_val)
+        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
+        DEFINE_SLOWCASE_OP(op_get_by_pname)
         DEFINE_SLOWCASE_OP(op_check_has_instance)
         DEFINE_SLOWCASE_OP(op_instanceof)
         DEFINE_SLOWCASE_OP(op_jfalse)
@@ -428,11 +412,7 @@ void JIT::privateCompileSlowCases()
         DEFINE_SLOWCASE_OP(op_stricteq)
         DEFINE_SLOWCASE_OP(op_sub)
         DEFINE_SLOWCASE_OP(op_to_number)
-        DEFINE_SLOWCASE_OP(op_to_string)
         DEFINE_SLOWCASE_OP(op_to_primitive)
-        DEFINE_SLOWCASE_OP(op_has_indexed_property)
-        DEFINE_SLOWCASE_OP(op_has_structure_property)
-        DEFINE_SLOWCASE_OP(op_get_direct_pname)
 
         DEFINE_SLOWCASE_OP(op_resolve_scope)
         DEFINE_SLOWCASE_OP(op_get_from_scope)
@@ -453,12 +433,12 @@ void JIT::privateCompileSlowCases()
     RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
     RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
-    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
+    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
     RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
 
 #ifndef NDEBUG
     // Reset this, in order to guard its use with ASSERTs.
-    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
+    m_bytecodeOffset = (unsigned)-1;
 #endif
 }
 
@@ -471,6 +451,11 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
         m_canBeOptimizedOrInlined = false;
         m_shouldEmitProfiling = false;
         break;
+    case DFG::CanInline:
+        m_canBeOptimized = false;
+        m_canBeOptimizedOrInlined = true;
+        m_shouldEmitProfiling = true;
+        break;
     case DFG::CanCompile:
     case DFG::CanCompileAndInline:
         m_canBeOptimized = true;
@@ -493,13 +478,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
         m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
         break;
     }
-
-    // This ensures that we have the most up to date type information when performing typecheck optimizations for op_profile_type.
-    if (m_vm->typeProfiler())
-        m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));
 
     if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
-        m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
+        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
     if (m_vm->m_perBytecodeProfiler) {
         m_compilation = adoptRef(
             new Profiler::Compilation(
@@ -515,7 +496,8 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
     if (m_randomGenerator.getUint32() & 1)
         nop();
 
-    emitFunctionPrologue();
+    preserveReturnAddressAfterCall(regT2);
+    emitPutReturnPCToCallFrameHeader(regT2);
     emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
 
     Label beginLabel(this);
@@ -525,8 +507,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
     sampleInstruction(m_codeBlock->instructions().begin());
 #endif
 
+    Jump stackCheck;
     if (m_codeBlock->codeType() == FunctionCode) {
-        ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
+        ASSERT(m_bytecodeOffset == (unsigned)-1);
         if (shouldEmitProfiling()) {
             for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                 // If this is a constructor, then we want to put in a dummy profiling site (to
@@ -543,14 +526,13 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
                 emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
             }
         }
-    }
-
-    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
-    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);
-
-    move(regT1, stackPointerRegister);
-    checkStackPointerAlignment();
+        addPtr(TrustedImm32(virtualRegisterForLocal(frameRegisterCountFor(m_codeBlock)).offset() * sizeof(Register)), callFrameRegister, regT1);
+        stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1);
+    }
+
+    Label functionBody = label();
+
     privateCompileMainPass();
     privateCompileLinkPass();
     privateCompileSlowCases();
@@ -558,17 +540,20 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
     if (m_disassembler)
         m_disassembler->setEndOfSlowPath(label());
 
-    stackOverflow.link(this);
-    m_bytecodeOffset = 0;
-    if (maxFrameExtentForSlowPathCall)
-        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
-    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
-
     Label arityCheck;
     if (m_codeBlock->codeType() == FunctionCode) {
+        stackCheck.link(this);
+        m_bytecodeOffset = 0;
+        callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock);
+#ifndef NDEBUG
+        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
+#endif
+        jump(functionBody);
+
         arityCheck = label();
         store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
-        emitFunctionPrologue();
+        preserveReturnAddressAfterCall(regT2);
+        emitPutReturnPCToCallFrameHeader(regT2);
         emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
 
         load32(payloadFor(JSStack::ArgumentCount), regT1);
@@ -576,28 +561,14 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
 
         m_bytecodeOffset = 0;
 
-        if (maxFrameExtentForSlowPathCall)
-            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
         callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
-        if (maxFrameExtentForSlowPathCall)
-            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
 
         if (returnValueGPR != regT0)
             move(returnValueGPR, regT0);
         branchTest32(Zero, regT0).linkTo(beginLabel, this);
 
-        GPRReg thunkReg;
-#if USE(JSVALUE64)
-        thunkReg = GPRInfo::regT7;
-#else
-        thunkReg = GPRInfo::regT5;
-#endif
-        CodeLocationLabel* failThunkLabels =
-            m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
-        move(TrustedImmPtr(failThunkLabels), thunkReg);
-        loadPtr(BaseIndex(thunkReg, regT0, timesPtr()), thunkReg);
-        emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());
+        emitNakedCall(m_vm->getCTIStub(arityFixup).code());
 
 #if !ASSERT_DISABLED
-        m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
+        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
 #endif
 
         jump(beginLabel);
@@ -610,7 +581,7 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
     if (m_disassembler)
         m_disassembler->setEndOfCode(label());
 
-    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
+    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort);
     if (patchBuffer.didFailToAllocate())
         return CompilationFailed;
 
@@ -657,58 +628,58 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
     for (unsigned i = m_putByIds.size(); i--;)
         m_putByIds[i].finalize(patchBuffer);
 
-    for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
-        PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
-        CodeLocationJump notIndexJump = CodeLocationJump();
-        if (Jump(patchableNotIndexJump).isSet())
-            notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
-        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
-        CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
-        CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
-        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
-        CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);
-
-        *byValCompilationInfo.byValInfo = ByValInfo(
-            byValCompilationInfo.bytecodeIndex,
-            notIndexJump,
+    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
+    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
+        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
+        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
+        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
+        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);
+
+        m_codeBlock->byValInfo(i) = ByValInfo(
+            m_byValCompilationInfo[i].bytecodeIndex,
             badTypeJump,
-            byValCompilationInfo.arrayMode,
-            byValCompilationInfo.arrayProfile,
+            m_byValCompilationInfo[i].arrayMode,
             differenceBetweenCodePtr(badTypeJump, doneTarget),
-            differenceBetweenCodePtr(badTypeJump, nextHotPathTarget),
             differenceBetweenCodePtr(returnAddress, slowPathTarget));
     }
 
-    for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
-        CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
-        CallLinkInfo& info = *compilationInfo.callLinkInfo;
-        info.setCallLocations(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation),
-            patchBuffer.locationOf(compilationInfo.hotPathBegin),
-            patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
+    m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
+    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
+        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
+        info.callType = m_callStructureStubCompilationInfo[i].callType;
+        info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex);
+        info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
+        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
+        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
+        info.calleeGPR = regT0;
     }
 
-    CompactJITCodeMap::Encoder jitCodeMapEncoder;
-    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
-        if (m_labels[bytecodeOffset].isSet())
-            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
+#if ENABLE(DFG_JIT) || ENABLE(LLINT)
+    if (canBeOptimizedOrInlined()
+#if ENABLE(LLINT)
+        || true
+#endif
+        ) {
+        CompactJITCodeMap::Encoder jitCodeMapEncoder;
+        for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
+            if (m_labels[bytecodeOffset].isSet())
+                jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
+        }
+        m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
     }
-    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
+#endif
 
     MacroAssemblerCodePtr withArityCheck;
     if (m_codeBlock->codeType() == FunctionCode)
         withArityCheck = patchBuffer.locationOf(arityCheck);
 
-    if (Options::showDisassembly()) {
+    if (Options::showDisassembly())
         m_disassembler->dump(patchBuffer);
-        patchBuffer.didAlreadyDisassemble();
-    }
     if (m_compilation) {
         m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
         m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
     }
 
-    CodeRef result = FINALIZE_CODE(
-        patchBuffer,
-        ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()));
+    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();
 
     m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
         static_cast<double>(result.size()) /
@@ -716,7 +687,8 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
 
     m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
     m_codeBlock->setJITCode(
-        adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));
+        adoptRef(new DirectJITCode(result, JITCode::BaselineJIT)),
+        withArityCheck);
 
 #if ENABLE(JIT_VERBOSE)
     dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
@@ -725,53 +697,72 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
 
     return CompilationSuccessful;
 }
 
-void JIT::privateCompileExceptionHandlers()
+void JIT::linkFor(ExecState* exec, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind)
 {
-    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
-        m_exceptionChecksWithCallFrameRollback.link(this);
+    RepatchBuffer repatchBuffer(callerCodeBlock);
+
+    ASSERT(!callLinkInfo->isLinked());
+    callLinkInfo->callee.set(*vm, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
+    callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee);
+    repatchBuffer.relink(callLinkInfo->hotPathOther, code);
+
+    if (calleeCodeBlock)
+        calleeCodeBlock->linkIncomingCall(exec, callLinkInfo);
+
+    // Patch the slow patch so we do not continue to try to link.
+    if (kind == CodeForCall) {
+        ASSERT(callLinkInfo->callType == CallLinkInfo::Call
+            || callLinkInfo->callType == CallLinkInfo::CallVarargs);
+        if (callLinkInfo->callType == CallLinkInfo::Call) {
+            repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code());
+            return;
+        }
+
+        repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code());
+        return;
+    }
 
-        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
+    ASSERT(kind == CodeForConstruct);
+    repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code());
+}
 
-        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
-        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo)
+{
+    RepatchBuffer repatchBuffer(callerCodeBlock);
 
-#if CPU(X86)
-        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
-        poke(GPRInfo::argumentGPR0);
-        poke(GPRInfo::argumentGPR1, 1);
-#endif
-        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
-        jumpToExceptionHandler();
+    repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallThunkGenerator).code());
+}
+
+void JIT::privateCompileExceptionHandlers()
+{
+    if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
+        return;
+
+    Jump doLookup;
+
+    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
+        m_exceptionChecksWithCallFrameRollback.link(this);
+        emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR0);
+        doLookup = jump();
     }
 
-    if (!m_exceptionChecks.empty()) {
+    if (!m_exceptionChecks.empty())
         m_exceptionChecks.link(this);
+
+    // lookupExceptionHandler is passed one argument, the exec (the CallFrame*).
+    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
 
-        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
-        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
-        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+    if (doLookup.isSet())
+        doLookup.link(this);
 
 #if CPU(X86)
-        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
-        poke(GPRInfo::argumentGPR0);
-        poke(GPRInfo::argumentGPR1, 1);
+    // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+    poke(GPRInfo::argumentGPR0);
 #endif
-        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
-        jumpToExceptionHandler();
-    }
-}
-
-unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
-{
-    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeRegisters) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeRegisters)));
-
-    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeRegisters + maxFrameExtentForSlowPathCallInRegisters);
+    m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
+    jumpToExceptionHandler();
 }
 
-int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
-{
-    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
-}
-
 } // namespace JSC