| author | Lorry Tar Creator <lorry-tar-importer@lorry> | 2016-04-10 09:28:39 +0000 |
|---|---|---|
| committer | Lorry Tar Creator <lorry-tar-importer@lorry> | 2016-04-10 09:28:39 +0000 |
| commit | 32761a6cee1d0dee366b885b7b9c777e67885688 (patch) | |
| tree | d6bec92bebfb216f4126356e55518842c2f476a1 /Source/JavaScriptCore/jit/JIT.cpp | |
| parent | a4e969f4965059196ca948db781e52f7cfebf19e (diff) | |
| download | WebKitGtk-tarball-32761a6cee1d0dee366b885b7b9c777e67885688.tar.gz | |

webkitgtk-2.4.11
Diffstat (limited to 'Source/JavaScriptCore/jit/JIT.cpp')
-rw-r--r-- | Source/JavaScriptCore/jit/JIT.cpp | 446
1 file changed, 208 insertions(+), 238 deletions(-)
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index ac8c132aa..c3508b01d 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2009, 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,11 +26,15 @@
 #include "config.h"
 
 #if ENABLE(JIT)
-
 #include "JIT.h"
 
+// This probably does not belong here; adding here for now as a quick Windows build fix.
+#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
+#include "MacroAssembler.h"
+JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
+#endif
+
 #include "CodeBlock.h"
-#include "CodeBlockWithJITType.h"
 #include "DFGCapabilities.h"
 #include "Interpreter.h"
 #include "JITInlines.h"
@@ -38,39 +42,45 @@
 #include "JSArray.h"
 #include "JSFunction.h"
 #include "LinkBuffer.h"
-#include "MaxFrameExtentForSlowPathCall.h"
-#include "JSCInlines.h"
-#include "PCToCodeOriginMap.h"
-#include "ProfilerDatabase.h"
+#include "Operations.h"
+#include "RepatchBuffer.h"
 #include "ResultType.h"
 #include "SamplingTool.h"
 #include "SlowPathCall.h"
-#include "StackAlignment.h"
-#include "TypeProfilerLog.h"
 #include <wtf/CryptographicallyRandomNumber.h>
 
 using namespace std;
 
 namespace JSC {
 
-void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
+void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+{
+    RepatchBuffer repatchBuffer(codeblock);
+    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
+}
+
+void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
 {
-    MacroAssembler::repatchCall(
-        CodeLocationCall(MacroAssemblerCodePtr(returnAddress)),
-        newCalleeFunction);
+    RepatchBuffer repatchBuffer(codeblock);
+    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
+}
+
+void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
+{
+    RepatchBuffer repatchBuffer(codeblock);
+    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
 }
 
 JIT::JIT(VM* vm, CodeBlock* codeBlock)
     : JSInterfaceJIT(vm, codeBlock)
     , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
-    , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
+    , m_bytecodeOffset((unsigned)-1)
     , m_getByIdIndex(UINT_MAX)
     , m_putByIdIndex(UINT_MAX)
     , m_byValInstructionIndex(UINT_MAX)
     , m_callLinkInfoIndex(UINT_MAX)
     , m_randomGenerator(cryptographicallyRandomNumber())
-    , m_pcToCodeOriginMapBuilder(*vm)
     , m_canBeOptimized(false)
     , m_shouldEmitProfiling(false)
 {
@@ -86,41 +96,13 @@ void JIT::emitEnterOptimizationCheck()
     skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
 
     ASSERT(!m_bytecodeOffset);
-
-    copyCalleeSavesFromFrameOrRegisterToVMCalleeSavesBuffer();
-
     callOperation(operationOptimize, m_bytecodeOffset);
     skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
-    move(returnValueGPR2, stackPointerRegister);
     jump(returnValueGPR);
     skipOptimize.link(this);
 }
 #endif
 
-void JIT::emitNotifyWrite(WatchpointSet* set)
-{
-    if (!set || set->state() == IsInvalidated)
-        return;
-
-    addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
-}
-
-void JIT::emitNotifyWrite(GPRReg pointerToSet)
-{
-    addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
-}
-
-void JIT::assertStackPointerOffset()
-{
-    if (ASSERT_DISABLED)
-        return;
-
-    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
-    Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
-    breakpoint();
-    ok.link(this);
-}
-
 #define NEXT_OPCODE(name) \
     m_bytecodeOffset += OPCODE_LENGTH(name); \
     break;
@@ -146,9 +128,6 @@ void JIT::assertStackPointerOffset()
 
 void JIT::privateCompileMainPass()
 {
-    jitAssertTagsInPlace();
-    jitAssertArgumentCountSane();
-
     Instruction* instructionsBegin = m_codeBlock->instructions().begin();
     unsigned instructionCount = m_codeBlock->instructions().size();
 
@@ -160,8 +139,6 @@ void JIT::privateCompileMainPass()
         Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
         ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
 
-        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));
-
 #if ENABLE(OPCODE_SAMPLING)
         if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
             sampleInstruction(currentInstruction);
@@ -181,12 +158,7 @@
                 AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                     m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
         }
-
-        if (Options::eagerlyUpdateTopCallFrame())
-            updateTopCallFrame();
-        unsigned bytecodeOffset = m_bytecodeOffset;
-
         switch (opcodeID) {
         DEFINE_SLOW_OP(del_by_val)
         DEFINE_SLOW_OP(in)
@@ -195,51 +167,46 @@
         DEFINE_SLOW_OP(greater)
         DEFINE_SLOW_OP(greatereq)
         DEFINE_SLOW_OP(is_function)
-        DEFINE_SLOW_OP(is_object_or_null)
+        DEFINE_SLOW_OP(is_object)
        DEFINE_SLOW_OP(typeof)
 
+        DEFINE_OP(op_touch_entry)
         DEFINE_OP(op_add)
         DEFINE_OP(op_bitand)
         DEFINE_OP(op_bitor)
         DEFINE_OP(op_bitxor)
         DEFINE_OP(op_call)
-        DEFINE_OP(op_tail_call)
         DEFINE_OP(op_call_eval)
         DEFINE_OP(op_call_varargs)
-        DEFINE_OP(op_tail_call_varargs)
-        DEFINE_OP(op_construct_varargs)
         DEFINE_OP(op_catch)
         DEFINE_OP(op_construct)
+        DEFINE_OP(op_get_callee)
         DEFINE_OP(op_create_this)
         DEFINE_OP(op_to_this)
-        DEFINE_OP(op_create_direct_arguments)
-        DEFINE_OP(op_create_scoped_arguments)
-        DEFINE_OP(op_create_out_of_band_arguments)
-        DEFINE_OP(op_copy_rest)
-        DEFINE_OP(op_get_rest_length)
-        DEFINE_OP(op_check_tdz)
-        DEFINE_OP(op_assert)
-        DEFINE_OP(op_save)
-        DEFINE_OP(op_resume)
+        DEFINE_OP(op_init_lazy_reg)
+        DEFINE_OP(op_create_arguments)
         DEFINE_OP(op_debug)
         DEFINE_OP(op_del_by_id)
         DEFINE_OP(op_div)
         DEFINE_OP(op_end)
         DEFINE_OP(op_enter)
-        DEFINE_OP(op_get_scope)
+        DEFINE_OP(op_create_activation)
         DEFINE_OP(op_eq)
         DEFINE_OP(op_eq_null)
+        case op_get_by_id_out_of_line:
         case op_get_array_length:
         DEFINE_OP(op_get_by_id)
+        DEFINE_OP(op_get_arguments_length)
         DEFINE_OP(op_get_by_val)
-        DEFINE_OP(op_overrides_has_instance)
+        DEFINE_OP(op_get_argument_by_val)
+        DEFINE_OP(op_get_by_pname)
+        DEFINE_OP(op_get_pnames)
+        DEFINE_OP(op_check_has_instance)
         DEFINE_OP(op_instanceof)
-        DEFINE_OP(op_instanceof_custom)
         DEFINE_OP(op_is_undefined)
         DEFINE_OP(op_is_boolean)
         DEFINE_OP(op_is_number)
         DEFINE_OP(op_is_string)
-        DEFINE_OP(op_is_object)
         DEFINE_OP(op_jeq_null)
         DEFINE_OP(op_jfalse)
         DEFINE_OP(op_jmp)
@@ -255,9 +222,9 @@
         DEFINE_OP(op_jngreatereq)
         DEFINE_OP(op_jtrue)
         DEFINE_OP(op_loop_hint)
-        DEFINE_OP(op_watchdog)
         DEFINE_OP(op_lshift)
         DEFINE_OP(op_mod)
+        DEFINE_OP(op_captured_mov)
         DEFINE_OP(op_mov)
         DEFINE_OP(op_mul)
         DEFINE_OP(op_negate)
@@ -267,34 +234,36 @@
         DEFINE_OP(op_new_array_with_size)
         DEFINE_OP(op_new_array_buffer)
         DEFINE_OP(op_new_func)
+        DEFINE_OP(op_new_captured_func)
         DEFINE_OP(op_new_func_exp)
-        DEFINE_OP(op_new_generator_func)
-        DEFINE_OP(op_new_generator_func_exp)
-        DEFINE_OP(op_new_arrow_func_exp)
         DEFINE_OP(op_new_object)
         DEFINE_OP(op_new_regexp)
+        DEFINE_OP(op_next_pname)
         DEFINE_OP(op_not)
         DEFINE_OP(op_nstricteq)
+        DEFINE_OP(op_pop_scope)
         DEFINE_OP(op_dec)
         DEFINE_OP(op_inc)
         DEFINE_OP(op_profile_did_call)
         DEFINE_OP(op_profile_will_call)
-        DEFINE_OP(op_profile_type)
-        DEFINE_OP(op_profile_control_flow)
+        DEFINE_OP(op_push_name_scope)
         DEFINE_OP(op_push_with_scope)
-        DEFINE_OP(op_create_lexical_environment)
-        DEFINE_OP(op_get_parent_scope)
+        case op_put_by_id_out_of_line:
+        case op_put_by_id_transition_direct:
+        case op_put_by_id_transition_normal:
+        case op_put_by_id_transition_direct_out_of_line:
+        case op_put_by_id_transition_normal_out_of_line:
         DEFINE_OP(op_put_by_id)
         DEFINE_OP(op_put_by_index)
         case op_put_by_val_direct:
         DEFINE_OP(op_put_by_val)
-        DEFINE_OP(op_put_getter_by_id)
-        DEFINE_OP(op_put_setter_by_id)
-        DEFINE_OP(op_put_getter_setter_by_id)
-        DEFINE_OP(op_put_getter_by_val)
-        DEFINE_OP(op_put_setter_by_val)
+        DEFINE_OP(op_put_getter_setter)
+        case op_init_global_const_nop:
+            NEXT_OPCODE(op_init_global_const_nop);
+        DEFINE_OP(op_init_global_const)
 
         DEFINE_OP(op_ret)
+        DEFINE_OP(op_ret_object_or_this)
         DEFINE_OP(op_rshift)
         DEFINE_OP(op_unsigned)
         DEFINE_OP(op_urshift)
@@ -304,40 +273,40 @@
         DEFINE_OP(op_switch_char)
         DEFINE_OP(op_switch_imm)
         DEFINE_OP(op_switch_string)
+        DEFINE_OP(op_tear_off_activation)
+        DEFINE_OP(op_tear_off_arguments)
         DEFINE_OP(op_throw)
         DEFINE_OP(op_throw_static_error)
         DEFINE_OP(op_to_number)
-        DEFINE_OP(op_to_string)
         DEFINE_OP(op_to_primitive)
 
         DEFINE_OP(op_resolve_scope)
         DEFINE_OP(op_get_from_scope)
         DEFINE_OP(op_put_to_scope)
-        DEFINE_OP(op_get_from_arguments)
-        DEFINE_OP(op_put_to_arguments)
-
-        DEFINE_OP(op_get_enumerable_length)
-        DEFINE_OP(op_has_generic_property)
-        DEFINE_OP(op_has_structure_property)
-        DEFINE_OP(op_has_indexed_property)
-        DEFINE_OP(op_get_direct_pname)
-        DEFINE_OP(op_get_property_enumerator)
-        DEFINE_OP(op_enumerator_structure_pname)
-        DEFINE_OP(op_enumerator_generic_pname)
-        DEFINE_OP(op_to_index_string)
-        default:
+
+        case op_get_by_id_chain:
+        case op_get_by_id_generic:
+        case op_get_by_id_proto:
+        case op_get_by_id_self:
+        case op_get_by_id_getter_chain:
+        case op_get_by_id_getter_proto:
+        case op_get_by_id_getter_self:
+        case op_get_by_id_custom_chain:
+        case op_get_by_id_custom_proto:
+        case op_get_by_id_custom_self:
+        case op_get_string_length:
+        case op_put_by_id_generic:
+        case op_put_by_id_replace:
+        case op_put_by_id_transition:
            RELEASE_ASSERT_NOT_REACHED();
         }
-
-        if (false)
-            dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n");
     }
 
-    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
+    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
 
 #ifndef NDEBUG
     // Reset this, in order to guard its use with ASSERTs.
-    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
+    m_bytecodeOffset = (unsigned)-1;
 #endif
 }
 
@@ -369,8 +338,6 @@ void JIT::privateCompileSlowCases()
     for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
         m_bytecodeOffset = iter->to;
 
-        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));
-
         unsigned firstTo = m_bytecodeOffset;
 
         Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
@@ -392,22 +359,24 @@
         DEFINE_SLOWCASE_OP(op_bitor)
         DEFINE_SLOWCASE_OP(op_bitxor)
         DEFINE_SLOWCASE_OP(op_call)
-        DEFINE_SLOWCASE_OP(op_tail_call)
         DEFINE_SLOWCASE_OP(op_call_eval)
         DEFINE_SLOWCASE_OP(op_call_varargs)
-        DEFINE_SLOWCASE_OP(op_tail_call_varargs)
-        DEFINE_SLOWCASE_OP(op_construct_varargs)
         DEFINE_SLOWCASE_OP(op_construct)
         DEFINE_SLOWCASE_OP(op_to_this)
-        DEFINE_SLOWCASE_OP(op_check_tdz)
         DEFINE_SLOWCASE_OP(op_create_this)
+        DEFINE_SLOWCASE_OP(op_captured_mov)
         DEFINE_SLOWCASE_OP(op_div)
         DEFINE_SLOWCASE_OP(op_eq)
+        DEFINE_SLOWCASE_OP(op_get_callee)
+        case op_get_by_id_out_of_line:
        case op_get_array_length:
         DEFINE_SLOWCASE_OP(op_get_by_id)
+        DEFINE_SLOWCASE_OP(op_get_arguments_length)
         DEFINE_SLOWCASE_OP(op_get_by_val)
+        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
+        DEFINE_SLOWCASE_OP(op_get_by_pname)
+        DEFINE_SLOWCASE_OP(op_check_has_instance)
         DEFINE_SLOWCASE_OP(op_instanceof)
-        DEFINE_SLOWCASE_OP(op_instanceof_custom)
         DEFINE_SLOWCASE_OP(op_jfalse)
         DEFINE_SLOWCASE_OP(op_jless)
         DEFINE_SLOWCASE_OP(op_jlesseq)
@@ -419,7 +388,6 @@
         DEFINE_SLOWCASE_OP(op_jngreatereq)
         DEFINE_SLOWCASE_OP(op_jtrue)
         DEFINE_SLOWCASE_OP(op_loop_hint)
-        DEFINE_SLOWCASE_OP(op_watchdog)
         DEFINE_SLOWCASE_OP(op_lshift)
         DEFINE_SLOWCASE_OP(op_mod)
         DEFINE_SLOWCASE_OP(op_mul)
@@ -430,6 +398,11 @@
         DEFINE_SLOWCASE_OP(op_nstricteq)
         DEFINE_SLOWCASE_OP(op_dec)
         DEFINE_SLOWCASE_OP(op_inc)
+        case op_put_by_id_out_of_line:
+        case op_put_by_id_transition_direct:
+        case op_put_by_id_transition_normal:
+        case op_put_by_id_transition_direct_out_of_line:
+        case op_put_by_id_transition_normal_out_of_line:
         DEFINE_SLOWCASE_OP(op_put_by_id)
         case op_put_by_val_direct:
         DEFINE_SLOWCASE_OP(op_put_by_val)
@@ -439,11 +412,7 @@
         DEFINE_SLOWCASE_OP(op_stricteq)
         DEFINE_SLOWCASE_OP(op_sub)
         DEFINE_SLOWCASE_OP(op_to_number)
-        DEFINE_SLOWCASE_OP(op_to_string)
         DEFINE_SLOWCASE_OP(op_to_primitive)
-        DEFINE_SLOWCASE_OP(op_has_indexed_property)
-        DEFINE_SLOWCASE_OP(op_has_structure_property)
-        DEFINE_SLOWCASE_OP(op_get_direct_pname)
 
         DEFINE_SLOWCASE_OP(op_resolve_scope)
         DEFINE_SLOWCASE_OP(op_get_from_scope)
@@ -453,9 +422,6 @@
             RELEASE_ASSERT_NOT_REACHED();
         }
 
-        if (false)
-            dataLog("At ", firstTo, " slow: ", iter - m_slowCases.begin(), "\n");
-
         RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
         RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
@@ -467,12 +433,12 @@
     RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
     RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
-    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
+    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
     RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
 
 #ifndef NDEBUG
     // Reset this, in order to guard its use with ASSERTs.
-    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
+    m_bytecodeOffset = (unsigned)-1;
 #endif
 }
 
@@ -485,6 +451,11 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
         m_canBeOptimizedOrInlined = false;
         m_shouldEmitProfiling = false;
         break;
+    case DFG::CanInline:
+        m_canBeOptimized = false;
+        m_canBeOptimizedOrInlined = true;
+        m_shouldEmitProfiling = true;
+        break;
     case DFG::CanCompile:
     case DFG::CanCompileAndInline:
         m_canBeOptimized = true;
@@ -498,7 +469,6 @@
     switch (m_codeBlock->codeType()) {
     case GlobalCode:
-    case ModuleCode:
     case EvalCode:
         m_codeBlock->m_shouldAlwaysBeInlined = false;
         break;
@@ -508,15 +478,9 @@
         m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
         break;
     }
-
-    m_codeBlock->setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters()); // Might be able to remove as this is probably already set to this value.
-
-    // This ensures that we have the most up to date type information when performing typecheck optimizations for op_profile_type.
-    if (m_vm->typeProfiler())
-        m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));
-
-    if (Options::dumpDisassembly() || m_vm->m_perBytecodeProfiler)
-        m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
+    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
+        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
     if (m_vm->m_perBytecodeProfiler) {
         m_compilation = adoptRef(
             new Profiler::Compilation(
@@ -525,8 +489,6 @@
         m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
     }
 
-    m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr));
-
     if (m_disassembler)
         m_disassembler->setStartOfCode(label());
@@ -534,8 +496,9 @@
     if (m_randomGenerator.getUint32() & 1)
         nop();
 
-    emitFunctionPrologue();
-    emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+    preserveReturnAddressAfterCall(regT2);
+    emitPutReturnPCToCallFrameHeader(regT2);
+    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
 
     Label beginLabel(this);
@@ -544,8 +507,9 @@
     sampleCodeBlock(m_codeBlock);
 #if ENABLE(OPCODE_SAMPLING)
     sampleInstruction(m_codeBlock->instructions().begin());
 #endif
 
+    Jump stackCheck;
     if (m_codeBlock->codeType() == FunctionCode) {
-        ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
+        ASSERT(m_bytecodeOffset == (unsigned)-1);
         if (shouldEmitProfiling()) {
             for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                 // If this is a constructor, then we want to put in a dummy profiling site (to
@@ -562,54 +526,49 @@
                 emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
             }
         }
-    }
-
-    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
-    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);
-    move(regT1, stackPointerRegister);
-    checkStackPointerAlignment();
-
-    emitSaveCalleeSaves();
-    emitMaterializeTagCheckRegisters();
+        addPtr(TrustedImm32(virtualRegisterForLocal(frameRegisterCountFor(m_codeBlock)).offset() * sizeof(Register)), callFrameRegister, regT1);
+        stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1);
+    }
+    Label functionBody = label();
+
     privateCompileMainPass();
     privateCompileLinkPass();
     privateCompileSlowCases();
 
     if (m_disassembler)
         m_disassembler->setEndOfSlowPath(label());
-    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
-
-    stackOverflow.link(this);
-    m_bytecodeOffset = 0;
-    if (maxFrameExtentForSlowPathCall)
-        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
-    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
 
     Label arityCheck;
     if (m_codeBlock->codeType() == FunctionCode) {
+        stackCheck.link(this);
+        m_bytecodeOffset = 0;
+        callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock);
+#ifndef NDEBUG
+        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
+#endif
+        jump(functionBody);
+
         arityCheck = label();
         store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
-        emitFunctionPrologue();
-        emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+        preserveReturnAddressAfterCall(regT2);
+        emitPutReturnPCToCallFrameHeader(regT2);
+        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
 
         load32(payloadFor(JSStack::ArgumentCount), regT1);
         branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
 
         m_bytecodeOffset = 0;
 
-        if (maxFrameExtentForSlowPathCall)
-            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
         callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
-        if (maxFrameExtentForSlowPathCall)
-            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
-        branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
-        move(returnValueGPR, GPRInfo::argumentGPR0);
-        emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());
+        if (returnValueGPR != regT0)
+            move(returnValueGPR, regT0);
+        branchTest32(Zero, regT0).linkTo(beginLabel, this);
+        emitNakedCall(m_vm->getCTIStub(arityFixup).code());
 
 #if !ASSERT_DISABLED
-        m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
+        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
 #endif
 
         jump(beginLabel);
@@ -621,10 +580,8 @@
     if (m_disassembler)
         m_disassembler->setEndOfCode(label());
 
-    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
-
-    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
+    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort);
     if (patchBuffer.didFailToAllocate())
         return CompilationFailed;
@@ -671,69 +628,67 @@
     for (unsigned i = m_putByIds.size(); i--;)
         m_putByIds[i].finalize(patchBuffer);
 
-    for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
-        PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
-        CodeLocationJump notIndexJump = CodeLocationJump();
-        if (Jump(patchableNotIndexJump).isSet())
-            notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
-        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
-        CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
-        CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
-        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
-        CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);
-
-        *byValCompilationInfo.byValInfo = ByValInfo(
-            byValCompilationInfo.bytecodeIndex,
-            notIndexJump,
+    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
+    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
+        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
+        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
+        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
+        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);
+
+        m_codeBlock->byValInfo(i) = ByValInfo(
+            m_byValCompilationInfo[i].bytecodeIndex,
             badTypeJump,
-            byValCompilationInfo.arrayMode,
-            byValCompilationInfo.arrayProfile,
+            m_byValCompilationInfo[i].arrayMode,
             differenceBetweenCodePtr(badTypeJump, doneTarget),
-            differenceBetweenCodePtr(badTypeJump, nextHotPathTarget),
             differenceBetweenCodePtr(returnAddress, slowPathTarget));
     }
 
-    for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
-        CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
-        CallLinkInfo& info = *compilationInfo.callLinkInfo;
-        info.setCallLocations(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation),
-            patchBuffer.locationOf(compilationInfo.hotPathBegin),
-            patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
+    m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
+    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
+        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
+        info.callType = m_callStructureStubCompilationInfo[i].callType;
+        info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex);
+        info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
+        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
+        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
+        info.calleeGPR = regT0;
     }
 
-    CompactJITCodeMap::Encoder jitCodeMapEncoder;
-    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
-        if (m_labels[bytecodeOffset].isSet())
-            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
+#if ENABLE(DFG_JIT) || ENABLE(LLINT)
+    if (canBeOptimizedOrInlined()
+#if ENABLE(LLINT)
+        || true
+#endif
+        ) {
+        CompactJITCodeMap::Encoder jitCodeMapEncoder;
+        for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
+            if (m_labels[bytecodeOffset].isSet())
+                jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
+        }
+        m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
     }
-    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
+#endif
 
     MacroAssemblerCodePtr withArityCheck;
     if (m_codeBlock->codeType() == FunctionCode)
         withArityCheck = patchBuffer.locationOf(arityCheck);
 
-    if (Options::dumpDisassembly()) {
+    if (Options::showDisassembly())
         m_disassembler->dump(patchBuffer);
-        patchBuffer.didAlreadyDisassemble();
-    }
 
     if (m_compilation) {
         m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
         m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
     }
-
-    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
-        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer));
-
-    CodeRef result = FINALIZE_CODE(
-        patchBuffer,
-        ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()));
+
+    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();
 
     m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
         static_cast<double>(result.size()) / static_cast<double>(m_codeBlock->instructions().size()));
-
+
     m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
     m_codeBlock->setJITCode(
-        adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));
+        adoptRef(new DirectJITCode(result, JITCode::BaselineJIT)),
+        withArityCheck);
 
 #if ENABLE(JIT_VERBOSE)
     dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
@@ -742,57 +697,72 @@
     return CompilationSuccessful;
 }
 
-void JIT::privateCompileExceptionHandlers()
+void JIT::linkFor(ExecState* exec, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind)
 {
-    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
-        m_exceptionChecksWithCallFrameRollback.link(this);
+    RepatchBuffer repatchBuffer(callerCodeBlock);
+
+    ASSERT(!callLinkInfo->isLinked());
+    callLinkInfo->callee.set(*vm, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
+    callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee);
+    repatchBuffer.relink(callLinkInfo->hotPathOther, code);
+
+    if (calleeCodeBlock)
+        calleeCodeBlock->linkIncomingCall(exec, callLinkInfo);
+
+    // Patch the slow patch so we do not continue to try to link.
+    if (kind == CodeForCall) {
+        ASSERT(callLinkInfo->callType == CallLinkInfo::Call
+            || callLinkInfo->callType == CallLinkInfo::CallVarargs);
+        if (callLinkInfo->callType == CallLinkInfo::Call) {
+            repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code());
+            return;
+        }
 
-        copyCalleeSavesToVMCalleeSavesBuffer();
+        repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code());
+        return;
+    }
 
-        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
+    ASSERT(kind == CodeForConstruct);
+    repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code());
+}
 
-        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
-        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo)
+{
+    RepatchBuffer repatchBuffer(callerCodeBlock);
 
-#if CPU(X86)
-        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
-        poke(GPRInfo::argumentGPR0);
-        poke(GPRInfo::argumentGPR1, 1);
-#endif
-        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
-        jumpToExceptionHandler();
+    repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallThunkGenerator).code());
+}
+
+void JIT::privateCompileExceptionHandlers()
+{
+    if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
+        return;
+
+    Jump doLookup;
+
+    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
+        m_exceptionChecksWithCallFrameRollback.link(this);
+        emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR0);
+        doLookup = jump();
     }
 
-    if (!m_exceptionChecks.empty()) {
+    if (!m_exceptionChecks.empty())
         m_exceptionChecks.link(this);
+
+    // lookupExceptionHandler is passed one argument, the exec (the CallFrame*).
+    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
 
-        copyCalleeSavesToVMCalleeSavesBuffer();
-
-        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
-        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
-        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+    if (doLookup.isSet())
+        doLookup.link(this);
 
 #if CPU(X86)
-        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
-        poke(GPRInfo::argumentGPR0);
-        poke(GPRInfo::argumentGPR1, 1);
+    // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+    poke(GPRInfo::argumentGPR0);
 #endif
-        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
-        jumpToExceptionHandler();
-    }
-}
-
-unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
-{
-    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeLocals)));
-
-    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters);
+    m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
+    jumpToExceptionHandler();
 }
 
-int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
-{
-    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
-}
-
 } // namespace JSC