path: root/Source/JavaScriptCore/bytecode/CodeBlock.h
author    Lorry Tar Creator <lorry-tar-importer@lorry> 2015-05-20 09:56:07 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry> 2015-05-20 09:56:07 +0000
commit    41386e9cb918eed93b3f13648cbef387e371e451 (patch)
tree      a97f9d7bd1d9d091833286085f72da9d83fd0606 /Source/JavaScriptCore/bytecode/CodeBlock.h
parent    e15dd966d523731101f70ccf768bba12435a0208 (diff)
download  WebKitGtk-tarball-41386e9cb918eed93b3f13648cbef387e371e451.tar.gz
tag       webkitgtk-2.4.9
Diffstat (limited to 'Source/JavaScriptCore/bytecode/CodeBlock.h')
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlock.h  505
1 file changed, 273 insertions(+), 232 deletions(-)
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index 9c78eed13..0d9868079 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
*
* Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -45,6 +45,10 @@
#include "DFGCommon.h"
#include "DFGCommonData.h"
#include "DFGExitProfile.h"
+#include "DFGMinifiedGraph.h"
+#include "DFGOSREntry.h"
+#include "DFGOSRExit.h"
+#include "DFGVariableEventStream.h"
#include "DeferredCompilationCallback.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
@@ -52,6 +56,7 @@
#include "HandlerInfo.h"
#include "ObjectAllocationProfile.h"
#include "Options.h"
+#include "Operations.h"
#include "PutPropertySlot.h"
#include "Instruction.h"
#include "JITCode.h"
@@ -61,7 +66,6 @@
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "ProfilerCompilation.h"
-#include "ProfilerJettisonReason.h"
#include "RegExpObject.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
@@ -70,6 +74,7 @@
#include "Watchpoint.h"
#include <wtf/Bag.h>
#include <wtf/FastMalloc.h>
+#include <wtf/PassOwnPtr.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
@@ -81,7 +86,10 @@ namespace JSC {
class ExecState;
class LLIntOffsetsExtractor;
class RepatchBuffer;
-class TypeLocation;
+
+inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }
+
+static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };
@@ -109,7 +117,6 @@ public:
CodeBlockHash hash() const;
bool hasHash() const;
bool isSafeToComputeHash() const;
- CString hashAsStringIfPossible() const;
CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
@@ -124,23 +131,6 @@ public:
CodeBlock* alternative() { return m_alternative.get(); }
PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
-
- template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
- {
- Functor f(std::forward<Functor>(functor));
- Vector<CodeBlock*, 4> codeBlocks;
- codeBlocks.append(this);
-
- while (!codeBlocks.isEmpty()) {
- CodeBlock* currentCodeBlock = codeBlocks.takeLast();
- f(currentCodeBlock);
-
- if (CodeBlock* alternative = currentCodeBlock->alternative())
- codeBlocks.append(alternative);
- if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
- codeBlocks.append(osrEntryBlock);
- }
- }
CodeSpecializationKind specializationKind() const
{
@@ -155,14 +145,8 @@ public:
void visitAggregate(SlotVisitor&);
- void dumpSource();
- void dumpSource(PrintStream&);
-
- void dumpBytecode();
- void dumpBytecode(PrintStream&);
- void dumpBytecode(
- PrintStream&, unsigned bytecodeOffset,
- const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
+ void dumpBytecode(PrintStream& = WTF::dataFile());
+ void dumpBytecode(PrintStream&, unsigned bytecodeOffset);
void printStructures(PrintStream&, const Instruction*);
void printStructure(PrintStream&, const char* name, const Instruction*, int operand);
@@ -185,46 +169,36 @@ public:
return index >= m_numVars;
}
- enum class RequiredHandler {
- CatchHandler,
- AnyHandler
- };
- HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
+ HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
int& startOffset, int& endOffset, unsigned& line, unsigned& column);
- void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
- void getStubInfoMap(StubInfoMap& result);
-
- void getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result);
- void getCallLinkInfoMap(CallLinkInfoMap& result);
-
- void getByValInfoMap(const ConcurrentJITLocker&, ByValInfoMap& result);
- void getByValInfoMap(ByValInfoMap& result);
-
#if ENABLE(JIT)
StructureStubInfo* addStubInfo();
- Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); }
- Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); }
-
- // O(n) operation. Use getStubInfoMap() unless you really only intend to get one
- // stub info.
- StructureStubInfo* findStubInfo(CodeOrigin);
+ Bag<StructureStubInfo>::iterator begin() { return m_stubInfos.begin(); }
+ Bag<StructureStubInfo>::iterator end() { return m_stubInfos.end(); }
void resetStub(StructureStubInfo&);
+
+ void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
- ByValInfo* addByValInfo();
+ ByValInfo& getByValInfo(unsigned bytecodeIndex)
+ {
+ return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
+ }
- CallLinkInfo* addCallLinkInfo();
- Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); }
- Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); }
+ CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
+ {
+ return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation));
+ }
- // This is a slow function call used primarily for compiling OSR exits in the case
- // that there had been inlining. Chances are if you want to use this, you're really
- // looking for a CallLinkInfoMap to amortize the cost of calling this.
- CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
+ CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
+ {
+ ASSERT(!JITCode::isOptimizingJIT(jitType()));
+ return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex));
+ }
#endif // ENABLE(JIT)
void unlinkIncomingCalls();
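
The getByValInfo()/getCallLinkInfo() accessors restored above assume m_byValInfos and m_callLinkInfos stay sorted by their key (bytecode index or return address), so WTF::binarySearch can find an entry in O(log n). A minimal standalone sketch of that lookup, with hypothetical names:

#include <cassert>
#include <vector>

struct CallLinkInfoSketch { unsigned bytecodeIndex; /* ... */ };

// Lower-bound binary search over a vector kept sorted by bytecodeIndex,
// mirroring what WTF::binarySearch does for the accessors above.
CallLinkInfoSketch& findByBytecodeIndex(std::vector<CallLinkInfoSketch>& infos, unsigned key)
{
    size_t lo = 0, hi = infos.size();
    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        if (infos[mid].bytecodeIndex < key)
            lo = mid + 1;
        else
            hi = mid;
    }
    assert(lo < infos.size() && infos[lo].bytecodeIndex == key);
    return infos[lo];
}
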
@@ -233,14 +207,20 @@ public:
void unlinkCalls();
void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
- void linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode*);
+
+ bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
+ {
+ return m_incomingCalls.isOnList(incoming);
+ }
#endif // ENABLE(JIT)
+#if ENABLE(LLINT)
void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
+#endif // ENABLE(LLINT)
- void setJITCodeMap(std::unique_ptr<CompactJITCodeMap> jitCodeMap)
+ void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
{
- m_jitCodeMap = WTF::move(jitCodeMap);
+ m_jitCodeMap = jitCodeMap;
}
CompactJITCodeMap* jitCodeMap()
{
@@ -253,6 +233,8 @@ public:
return static_cast<Instruction*>(returnAddress) - instructions().begin();
}
+ bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }
+
unsigned numberOfInstructions() const { return m_instructions.size(); }
RefCountedArray<Instruction>& instructions() { return m_instructions; }
const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
@@ -263,21 +245,28 @@ public:
unsigned instructionCount() const { return m_instructions.size(); }
+ int argumentIndexAfterCapture(size_t argument);
+
+ bool hasSlowArguments();
+ const SlowArgument* machineSlowArguments();
+
// Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
void install();
// Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
PassRefPtr<CodeBlock> newReplacement();
- void setJITCode(PassRefPtr<JITCode> code)
+ void setJITCode(PassRefPtr<JITCode> code, MacroAssemblerCodePtr codeWithArityCheck)
{
ASSERT(m_heap->isDeferred());
- m_heap->reportExtraMemoryAllocated(code->size());
+ m_heap->reportExtraMemoryCost(code->size());
ConcurrentJITLocker locker(m_lock);
WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
m_jitCode = code;
+ m_jitCodeWithArityCheck = codeWithArityCheck;
}
PassRefPtr<JITCode> jitCode() { return m_jitCode; }
+ MacroAssemblerCodePtr jitCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
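
setJITCode() above pairs the lock with an explicit store-store fence so threads that read m_jitCode without taking the lock never observe the pointer before the code it points to. A hedged sketch of that publication pattern in portable C++, with std::atomic standing in for WTF's fences:

#include <atomic>

struct CompiledCodeSketch { /* fully initialized before publication */ };

std::atomic<CompiledCodeSketch*> g_publishedCode{nullptr};

void publishCode(CompiledCodeSketch* code)
{
    // Release ordering: all initializing writes to *code happen-before
    // any thread that acquires the pointer below.
    g_publishedCode.store(code, std::memory_order_release);
}

CompiledCodeSketch* currentCode()
{
    return g_publishedCode.load(std::memory_order_acquire); // may be null
}
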
JITCode::JITType jitType() const
{
JITCode* jitCode = m_jitCode.get();
@@ -296,14 +285,19 @@ public:
virtual CodeBlock* replacement() = 0;
virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
- DFG::CapabilityLevel capabilityLevel();
+ DFG::CapabilityLevel capabilityLevel()
+ {
+ DFG::CapabilityLevel result = capabilityLevelInternal();
+ m_capabilityLevelState = result;
+ return result;
+ }
DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }
bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif
- void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr);
+ void jettison(ReoptimizationMode = DontCountReoptimization);
ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
@@ -313,41 +307,77 @@ public:
void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
VirtualRegister thisRegister() const { return m_thisRegister; }
+ bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
bool usesEval() const { return m_unlinkedCode->usesEval(); }
- void setScopeRegister(VirtualRegister scopeRegister)
+ void setArgumentsRegister(VirtualRegister argumentsRegister)
{
- ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid());
- m_scopeRegister = scopeRegister;
+ ASSERT(argumentsRegister.isValid());
+ m_argumentsRegister = argumentsRegister;
+ ASSERT(usesArguments());
}
-
- VirtualRegister scopeRegister() const
+ VirtualRegister argumentsRegister() const
{
- return m_scopeRegister;
+ ASSERT(usesArguments());
+ return m_argumentsRegister;
+ }
+ VirtualRegister uncheckedArgumentsRegister()
+ {
+ if (!usesArguments())
+ return VirtualRegister();
+ return argumentsRegister();
}
-
void setActivationRegister(VirtualRegister activationRegister)
{
- m_lexicalEnvironmentRegister = activationRegister;
+ m_activationRegister = activationRegister;
}
VirtualRegister activationRegister() const
{
- ASSERT(m_lexicalEnvironmentRegister.isValid());
- return m_lexicalEnvironmentRegister;
+ ASSERT(needsFullScopeChain());
+ return m_activationRegister;
}
VirtualRegister uncheckedActivationRegister()
{
- return m_lexicalEnvironmentRegister;
+ if (!needsFullScopeChain())
+ return VirtualRegister();
+ return activationRegister();
}
+ bool usesArguments() const { return m_argumentsRegister.isValid(); }
+
bool needsActivation() const
{
- ASSERT(m_lexicalEnvironmentRegister.isValid() == m_needsActivation);
return m_needsActivation;
}
+ unsigned captureCount() const
+ {
+ if (!symbolTable())
+ return 0;
+ return symbolTable()->captureCount();
+ }
+
+ int captureStart() const
+ {
+ if (!symbolTable())
+ return 0;
+ return symbolTable()->captureStart();
+ }
+
+ int captureEnd() const
+ {
+ if (!symbolTable())
+ return 0;
+ return symbolTable()->captureEnd();
+ }
+
+ bool isCaptured(VirtualRegister operand, InlineCallFrame* = 0) const;
+
+ int framePointerOffsetToGetActivationRegisters(int machineCaptureStart);
+ int framePointerOffsetToGetActivationRegisters();
+
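
captureStart()/captureEnd() delimit the range of virtual registers whose values must live in the activation rather than on the stack. A hypothetical sketch of the containment test isCaptured() boils down to; the real check also special-cases the arguments object and inline call frames:

#include <algorithm>

// Hypothetical: captured locals form one contiguous operand range.
// JSC numbers locals downward, so captureStart() can exceed captureEnd();
// normalize before testing membership.
bool isCapturedSketch(int operand, int captureStart, int captureEnd)
{
    int lo = std::min(captureStart, captureEnd);
    int hi = std::max(captureStart, captureEnd);
    return operand >= lo && operand < hi;
}
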
CodeType codeType() const { return m_unlinkedCode->codeType(); }
PutPropertySlot::Context putByIdContext() const
{
@@ -363,8 +393,20 @@ public:
size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }
+ void clearEvalCache();
+
String nameForRegister(VirtualRegister);
+#if ENABLE(JIT)
+ void setNumberOfByValInfos(size_t size) { m_byValInfos.resizeToFit(size); }
+ size_t numberOfByValInfos() const { return m_byValInfos.size(); }
+ ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
+
+ void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.resizeToFit(size); }
+ size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
+ CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
+#endif
+
unsigned numberOfArgumentValueProfiles()
{
ASSERT(m_numParameters >= 0);
@@ -380,7 +422,17 @@ public:
unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
- ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset);
+ ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
+ {
+ ValueProfile* result = binarySearch<ValueProfile, int>(
+ m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
+ getValueProfileBytecodeOffset<ValueProfile>);
+ ASSERT(result->m_bytecodeOffset != -1);
+ ASSERT(instructions()[bytecodeOffset + opcodeLength(
+ m_vm->interpreter->getOpcodeID(
+ instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
+ return result;
+ }
SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
{
return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
@@ -404,7 +456,12 @@ public:
}
unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
- RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);
+ RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
+ {
+ return tryBinarySearch<RareCaseProfile, int>(
+ m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
+ getRareCaseProfileBytecodeOffset);
+ }
bool likelyToTakeSlowCase(int bytecodeOffset)
{
@@ -432,8 +489,8 @@ public:
RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
{
return tryBinarySearch<RareCaseProfile, int>(
- m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
- getRareCaseProfileBytecodeOffset);
+ m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
+ getRareCaseProfileBytecodeOffset);
}
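
These rare-case counters feed tier-up heuristics such as likelyToTakeSlowCase(): a bytecode site counts as slow once its counter crosses a tunable minimum. A sketch, with a made-up constant standing in for the JSC option that governs the threshold:

#include <cstdint>

struct RareCaseProfileSketch { int m_bytecodeOffset; uint32_t m_counter; };

constexpr uint32_t kSlowCaseMinimumCount = 100; // hypothetical stand-in

bool likelyToTakeSlowCaseSketch(const RareCaseProfileSketch* profile)
{
    // No profile recorded means the slow path never ran at this site.
    return profile && profile->m_counter >= kSlowCaseMinimumCount;
}
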
bool likelyToTakeSpecialFastCase(int bytecodeOffset)
@@ -519,15 +576,11 @@ public:
ConcurrentJITLocker locker(m_lock);
return m_exitProfile.add(locker, site);
}
-
- bool hasExitSite(const ConcurrentJITLocker& locker, const DFG::FrequentExitSite& site) const
- {
- return m_exitProfile.hasExitSite(locker, site);
- }
+
bool hasExitSite(const DFG::FrequentExitSite& site) const
{
ConcurrentJITLocker locker(m_lock);
- return hasExitSite(locker, site);
+ return m_exitProfile.hasExitSite(locker, site);
}
DFG::ExitProfile& exitProfile() { return m_exitProfile; }
@@ -536,6 +589,11 @@ public:
{
return m_lazyOperandValueProfiles;
}
+#else // ENABLE(DFG_JIT)
+ bool addFrequentExitSite(const DFG::FrequentExitSite&)
+ {
+ return false;
+ }
#endif // ENABLE(DFG_JIT)
// Constant Pool
@@ -563,13 +621,12 @@ public:
#endif
Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
- Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
+ size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
unsigned addConstant(JSValue v)
{
unsigned result = m_constantRegisters.size();
m_constantRegisters.append(WriteBarrier<Unknown>());
m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
- m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
return result;
}
@@ -577,19 +634,19 @@ public:
{
unsigned result = m_constantRegisters.size();
m_constantRegisters.append(WriteBarrier<Unknown>());
- m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
return result;
}
+ bool findConstant(JSValue, unsigned& result);
+ unsigned addOrFindConstant(JSValue);
WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
- ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(int index) const { return m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; }
FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
int numberOfFunctionDecls() { return m_functionDecls.size(); }
FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
-
+
RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }
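
findConstant()/addOrFindConstant(), declared a few lines up, deduplicate values in the constant pool before appending; constantRegister() then maps an operand back by subtracting FirstConstantRegisterIndex. A hedged sketch of the obvious implementation over raw value bits:

#include <cstdint>
#include <vector>

using EncodedValueSketch = uint64_t; // stand-in for an encoded JSValue

// Linear scan for an existing entry; append only when absent.
unsigned addOrFindConstantSketch(std::vector<EncodedValueSketch>& pool,
                                 EncodedValueSketch value)
{
    for (unsigned i = 0; i < pool.size(); ++i) {
        if (pool[i] == value)
            return i;
    }
    pool.push_back(value);
    return static_cast<unsigned>(pool.size()) - 1;
}
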
unsigned numberOfConstantBuffers() const
@@ -616,26 +673,15 @@ public:
return constantBufferAsVector(index).data();
}
- Heap* heap() const { return m_heap; }
JSGlobalObject* globalObject() { return m_globalObject.get(); }
JSGlobalObject* globalObjectFor(CodeOrigin);
BytecodeLivenessAnalysis& livenessAnalysis()
{
- {
- ConcurrentJITLocker locker(m_lock);
- if (!!m_livenessAnalysis)
- return *m_livenessAnalysis;
- }
- std::unique_ptr<BytecodeLivenessAnalysis> analysis =
- std::make_unique<BytecodeLivenessAnalysis>(this);
- {
- ConcurrentJITLocker locker(m_lock);
- if (!m_livenessAnalysis)
- m_livenessAnalysis = WTF::move(analysis);
- return *m_livenessAnalysis;
- }
+ if (!m_livenessAnalysis)
+ m_livenessAnalysis = std::make_unique<BytecodeLivenessAnalysis>(this);
+ return *m_livenessAnalysis;
}
void validate();
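
The removed (newer) body of livenessAnalysis() is the classic double-checked pattern: take the lock only to test and to install, and run the expensive construction unlocked so concurrent compiler threads are not serialized. The restored body assumes callers already exclude each other. A sketch of the removed pattern with placeholder types:

#include <memory>
#include <mutex>

struct LivenessSketch { /* expensive-to-compute analysis */ };

class LazyLivenessSketch {
public:
    LivenessSketch& get()
    {
        {
            std::lock_guard<std::mutex> guard(m_lock);
            if (m_analysis)
                return *m_analysis; // fast path: already built
        }
        auto fresh = std::make_unique<LivenessSketch>(); // built unlocked
        std::lock_guard<std::mutex> guard(m_lock);
        if (!m_analysis)
            m_analysis = std::move(fresh); // first finisher wins; losers discard
        return *m_analysis;
    }

private:
    std::mutex m_lock;
    std::unique_ptr<LivenessSketch> m_analysis;
};
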
@@ -656,6 +702,9 @@ public:
StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
+
+ SymbolTable* symbolTable() const { return m_symbolTable.get(); }
+
EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
enum ShrinkMode {
@@ -692,7 +741,7 @@ public:
m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
}
- const BaselineExecutionCounter& llintExecuteCounter() const
+ const ExecutionCounter& llintExecuteCounter() const
{
return m_llintExecuteCounter;
}
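
llintExecuteCounter drives the interpreter-to-JIT tier-up: the counter is seeded to the negated threshold and incremented on execution, so the hot-path check is a single sign test. A minimal sketch of that count-up-to-zero trick, with hypothetical names:

#include <cstdint>

struct TierUpCounterSketch {
    int32_t m_counter = 0;

    // Seed so that "threshold more executions" lands exactly on zero.
    void setNewThreshold(int32_t threshold) { m_counter = -threshold; }

    // Called from the interpreter loop; true means "compile me now".
    bool incrementAndCheck() { return ++m_counter >= 0; }
};
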
@@ -718,7 +767,7 @@ public:
// When we observe a lot of speculation failures, we trigger a
// reoptimization. But each time, we increase the optimization trigger
// to avoid thrashing.
- JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
+ unsigned reoptimizationRetryCounter() const;
void countReoptimization();
#if ENABLE(JIT)
unsigned numberOfDFGCompiles();
@@ -732,11 +781,11 @@ public:
return &m_jitExecuteCounter.m_counter;
}
- static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
- static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
- static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }
+ static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
+ static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
+ static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }
- const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
+ const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
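
The offsetOf* helpers exist because baseline JIT code tests and patches the counter fields directly in memory; the nested OBJECT_OFFSETOF sum is plain offsetof arithmetic. A hedged standalone equivalent:

#include <cstddef>
#include <cstdint>

struct ExecutionCounterSketch {
    int32_t m_counter;
    int32_t m_activeThreshold;
    int64_t m_totalCount;
};

struct CodeBlockSketch {
    ExecutionCounterSketch m_jitExecuteCounter;
    // ... other members ...
};

// Byte offset the JIT would bake into generated loads and stores.
constexpr std::ptrdiff_t kJITExecuteCounterOffset =
    offsetof(CodeBlockSketch, m_jitExecuteCounter)
    + offsetof(ExecutionCounterSketch, m_counter);
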
@@ -817,11 +866,10 @@ public:
void updateAllPredictions();
unsigned frameRegisterCount();
- int stackPointerOffset();
bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);
- bool hasDebuggerRequests() const { return m_debuggerRequests; }
+ int hasDebuggerRequests() const { return !!m_debuggerRequests; }
void* debuggerRequestsAddress() { return &m_debuggerRequests; }
void addBreakpoint(unsigned numBreakpoints);
@@ -837,17 +885,13 @@ public:
};
void setSteppingMode(SteppingMode);
- void clearDebuggerRequests()
- {
- m_steppingMode = SteppingModeDisabled;
- m_numBreakpoints = 0;
- }
-
+ void clearDebuggerRequests() { m_debuggerRequests = 0; }
+
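
clearDebuggerRequests() can drop every pending request with one store because m_debuggerRequests is a union over the bitfield struct declared further down; hasDebuggerRequests() likewise tests the whole word at once. A sketch of the layout trick, using the same anonymous-struct extension the real declaration relies on:

union DebuggerRequestsSketch {
    unsigned all; // whole-word view: test or clear everything at once
    struct {
        unsigned steppingMode : 1;
        unsigned numBreakpoints : 31;
    };
};

static_assert(sizeof(DebuggerRequestsSketch) == sizeof(unsigned),
              "bitfields must pack into the aliased word");

bool hasRequestsSketch(const DebuggerRequestsSketch& r) { return !!r.all; }
void clearRequestsSketch(DebuggerRequestsSketch& r) { r.all = 0; }
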
// FIXME: Make these remaining members private.
int m_numCalleeRegisters;
int m_numVars;
- bool m_isConstructor : 1;
+ bool m_isConstructor;
// This is intentionally public; it's the responsibility of anyone doing any
// of the following to hold the lock:
@@ -867,34 +911,16 @@ public:
// concurrent compilation threads finish what they're doing.
mutable ConcurrentJITLock m_lock;
- bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
- bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.
+ bool m_shouldAlwaysBeInlined;
+ bool m_allTransitionsHaveBeenMarked; // Initialized and used on every GC.
- bool m_didFailFTLCompilation : 1;
- bool m_hasBeenCompiledWithFTL : 1;
+ bool m_didFailFTLCompilation;
// Internal methods for use by validation code. It would be private if it wasn't
// for the fact that we use it from anonymous namespaces.
void beginValidationDidFail();
NO_RETURN_DUE_TO_CRASH void endValidationDidFail();
- bool isKnownToBeLiveDuringGC(); // Will only return valid results when called during GC. Assumes that you've already established that the owner executable is live.
-
- struct RareData {
- WTF_MAKE_FAST_ALLOCATED;
- public:
- Vector<HandlerInfo> m_exceptionHandlers;
-
- // Buffers used for large array literals
- Vector<Vector<JSValue>> m_constantBuffers;
-
- // Jump Tables
- Vector<SimpleJumpTable> m_switchJumpTables;
- Vector<StringJumpTable> m_stringSwitchJumpTables;
-
- EvalCodeCache m_evalCodeCache;
- };
-
protected:
virtual void visitWeakReferences(SlotVisitor&) override;
virtual void finalizeUnconditionally() override;
@@ -914,48 +940,70 @@ private:
double optimizationThresholdScalingFactor();
+#if ENABLE(JIT)
+ ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
+#endif
+
void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
- void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
+ void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants)
{
- ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
size_t count = constants.size();
- m_constantRegisters.resizeToFit(count);
+ m_constantRegisters.resize(count);
for (size_t i = 0; i < count; i++)
m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
- m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
- }
-
- void replaceConstant(int index, JSValue value)
- {
- ASSERT(isConstantRegisterIndex(index) && static_cast<size_t>(index - FirstConstantRegisterIndex) < m_constantRegisters.size());
- m_constantRegisters[index - FirstConstantRegisterIndex].set(m_globalObject->vm(), m_ownerExecutable.get(), value);
}
- void dumpBytecode(
- PrintStream&, ExecState*, const Instruction* begin, const Instruction*&,
- const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
+ void dumpBytecode(PrintStream&, ExecState*, const Instruction* begin, const Instruction*&, const StubInfoMap& = StubInfoMap());
CString registerName(int r) const;
- CString constantName(int index) const;
void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
enum CacheDumpMode { DumpCaches, DontDumpCaches };
- void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&);
+ void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling);
void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
- void printPutByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
- void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
- void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand);
+ void printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
+ {
+ out.printf("[%4d] %-17s ", location, op);
+ }
+
+ void printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
+ {
+ printLocationAndOp(out, exec, location, it, op);
+ out.printf("%s", registerName(operand).data());
+ }
void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
- bool shouldImmediatelyAssumeLivenessDuringScan();
+#if ENABLE(DFG_JIT)
+ bool shouldImmediatelyAssumeLivenessDuringScan()
+ {
+ // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
+ // their weak references go stale. So if a baseline JIT CodeBlock gets
+ // scanned, we can assume that this means that it's live.
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return true;
+
+ // For simplicity, we don't attempt to jettison code blocks during GC if
+ // they are executing. Instead we strongly mark their weak references to
+ // allow them to continue to execute soundly.
+ if (m_mayBeExecuting)
+ return true;
+
+ if (Options::forceDFGCodeBlockLiveness())
+ return true;
+
+ return false;
+ }
+#else
+ bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
+#endif
void propagateTransitions(SlotVisitor&);
void determineLiveness(SlotVisitor&);
@@ -966,11 +1014,9 @@ private:
void createRareDataIfNecessary()
{
if (!m_rareData)
- m_rareData = std::make_unique<RareData>();
+ m_rareData = adoptPtr(new RareData);
}
-
- void insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>&);
-
+
#if ENABLE(JIT)
void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
@@ -980,40 +1026,42 @@ private:
union {
unsigned m_debuggerRequests;
struct {
- unsigned m_hasDebuggerStatement : 1;
unsigned m_steppingMode : 1;
- unsigned m_numBreakpoints : 30;
+ unsigned m_numBreakpoints : 31;
};
};
WriteBarrier<ScriptExecutable> m_ownerExecutable;
VM* m_vm;
RefCountedArray<Instruction> m_instructions;
+ WriteBarrier<SymbolTable> m_symbolTable;
VirtualRegister m_thisRegister;
- VirtualRegister m_scopeRegister;
- VirtualRegister m_lexicalEnvironmentRegister;
+ VirtualRegister m_argumentsRegister;
+ VirtualRegister m_activationRegister;
bool m_isStrictMode;
bool m_needsActivation;
bool m_mayBeExecuting;
- Atomic<bool> m_visitAggregateHasBeenCalled;
+ uint8_t m_visitAggregateHasBeenCalled;
RefPtr<SourceProvider> m_source;
unsigned m_sourceOffset;
unsigned m_firstLineColumnOffset;
unsigned m_codeType;
+#if ENABLE(LLINT)
Vector<LLIntCallLinkInfo> m_llintCallLinkInfos;
SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
+#endif
RefPtr<JITCode> m_jitCode;
+ MacroAssemblerCodePtr m_jitCodeWithArityCheck;
#if ENABLE(JIT)
Bag<StructureStubInfo> m_stubInfos;
- Bag<ByValInfo> m_byValInfos;
- Bag<CallLinkInfo> m_callLinkInfos;
+ Vector<ByValInfo> m_byValInfos;
+ Vector<CallLinkInfo> m_callLinkInfos;
SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
- SentinelLinkedList<PolymorphicCallNode, BasicRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
#endif
- std::unique_ptr<CompactJITCodeMap> m_jitCodeMap;
+ OwnPtr<CompactJITCodeMap> m_jitCodeMap;
#if ENABLE(DFG_JIT)
// This is relevant to non-DFG code blocks that serve as the profiled code block
// for DFG code blocks.
@@ -1033,15 +1081,14 @@ private:
// TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
// it, so we're stuck with it for now.
Vector<WriteBarrier<Unknown>> m_constantRegisters;
- Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
Vector<WriteBarrier<FunctionExecutable>> m_functionDecls;
Vector<WriteBarrier<FunctionExecutable>> m_functionExprs;
RefPtr<CodeBlock> m_alternative;
- BaselineExecutionCounter m_llintExecuteCounter;
+ ExecutionCounter m_llintExecuteCounter;
- BaselineExecutionCounter m_jitExecuteCounter;
+ ExecutionCounter m_jitExecuteCounter;
int32_t m_totalJITExecutions;
uint32_t m_osrExitCounter;
uint16_t m_optimizationDelayCounter;
@@ -1051,7 +1098,24 @@ private:
std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
- std::unique_ptr<RareData> m_rareData;
+ struct RareData {
+ WTF_MAKE_FAST_ALLOCATED;
+ public:
+ Vector<HandlerInfo> m_exceptionHandlers;
+
+ // Buffers used for large array literals
+ Vector<Vector<JSValue>> m_constantBuffers;
+
+ // Jump Tables
+ Vector<SimpleJumpTable> m_switchJumpTables;
+ Vector<StringJumpTable> m_stringSwitchJumpTables;
+
+ EvalCodeCache m_evalCodeCache;
+ };
+#if COMPILER(MSVC)
+ friend void WTF::deleteOwnedPtr<RareData>(RareData*);
+#endif
+ OwnPtr<RareData> m_rareData;
#if ENABLE(JIT)
DFG::CapabilityLevel m_capabilityLevelState;
#endif
@@ -1141,7 +1205,7 @@ inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCal
RELEASE_ASSERT(inlineCallFrame);
ExecutableBase* executable = inlineCallFrame->executable.get();
RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info());
- return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->specializationKind());
+ return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
}
inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
@@ -1151,6 +1215,24 @@ inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigi
return baselineCodeBlock;
}
+inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
+{
+ if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
+ return CallFrame::argumentOffset(argument);
+
+ const SlowArgument* slowArguments = symbolTable()->slowArguments();
+ if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
+ return CallFrame::argumentOffset(argument);
+
+ ASSERT(slowArguments[argument].status == SlowArgument::Captured);
+ return slowArguments[argument].index;
+}
+
+inline bool CodeBlock::hasSlowArguments()
+{
+ return !!symbolTable()->slowArguments();
+}
+
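
argumentIndexAfterCapture() redirects a logical argument number to the slot that actually holds its value: normal arguments stay at their frame offset, captured ones are forwarded to the activation's slot. A hypothetical standalone sketch of that mapping:

#include <cstddef>

enum class SlowArgumentStatusSketch { Normal, Captured };
struct SlowArgumentSketch { SlowArgumentStatusSketch status; int index; };

int argumentIndexSketch(const SlowArgumentSketch* slowArguments,
                        size_t argument, size_t parameterCount,
                        int normalOffset)
{
    // Extra arguments beyond the declared parameters are never captured.
    if (argument >= parameterCount)
        return normalOffset;
    if (!slowArguments || slowArguments[argument].status == SlowArgumentStatusSketch::Normal)
        return normalOffset; // usual CallFrame::argumentOffset(argument)
    return slowArguments[argument].index; // redirected, captured slot
}
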
inline Register& ExecState::r(int index)
{
CodeBlock* codeBlock = this->codeBlock();
@@ -1159,20 +1241,21 @@ inline Register& ExecState::r(int index)
return this[index];
}
-inline Register& ExecState::r(VirtualRegister reg)
-{
- return r(reg.offset());
-}
-
inline Register& ExecState::uncheckedR(int index)
{
RELEASE_ASSERT(index < FirstConstantRegisterIndex);
return this[index];
}
-inline Register& ExecState::uncheckedR(VirtualRegister reg)
+inline JSValue ExecState::argumentAfterCapture(size_t argument)
{
- return uncheckedR(reg.offset());
+ if (argument >= argumentCount())
+ return jsUndefined();
+
+ if (!codeBlock())
+ return this[argumentOffset(argument)].jsValue();
+
+ return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
}
inline void CodeBlockSet::mark(void* candidateCodeBlock)
@@ -1185,59 +1268,17 @@ inline void CodeBlockSet::mark(void* candidateCodeBlock)
// -1 + 1 = 0
if (value + 1 <= 1)
return;
-
- CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
- if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock))
- return;
-
- mark(codeBlock);
-}
-
-inline void CodeBlockSet::mark(CodeBlock* codeBlock)
-{
- if (!codeBlock)
- return;
- if (codeBlock->m_mayBeExecuting)
+ HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
+ if (iter == m_set.end())
return;
- codeBlock->m_mayBeExecuting = true;
- // We might not have cleared the marks for this CodeBlock, but we need to visit it.
- codeBlock->m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);
+ (*iter)->m_mayBeExecuting = true;
#if ENABLE(GGC)
- m_currentlyExecuting.append(codeBlock);
+ m_currentlyExecuting.append(static_cast<CodeBlock*>(candidateCodeBlock));
#endif
}
-template <typename Functor> inline void ScriptExecutable::forEachCodeBlock(Functor&& functor)
-{
- switch (type()) {
- case ProgramExecutableType: {
- if (CodeBlock* codeBlock = jsCast<ProgramExecutable*>(this)->m_programCodeBlock.get())
- codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
- break;
- }
-
- case EvalExecutableType: {
- if (CodeBlock* codeBlock = jsCast<EvalExecutable*>(this)->m_evalCodeBlock.get())
- codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
- break;
- }
-
- case FunctionExecutableType: {
- Functor f(std::forward<Functor>(functor));
- FunctionExecutable* executable = jsCast<FunctionExecutable*>(this);
- if (CodeBlock* codeBlock = executable->m_codeBlockForCall.get())
- codeBlock->forEachRelatedCodeBlock(f);
- if (CodeBlock* codeBlock = executable->m_codeBlockForConstruct.get())
- codeBlock->forEachRelatedCodeBlock(f);
- break;
- }
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-}
-
} // namespace JSC
#endif // CodeBlock_h
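
One detail worth calling out from the CodeBlockSet::mark() hunk above: with unsigned arithmetic, value + 1 <= 1 holds exactly for value == 0 and value == (uintptr_t)-1, the two non-pointer words a conservative stack scan produces most often, so both are rejected with a single compare. A sketch:

#include <cstdint>

// true for 0 (0 + 1 == 1) and for (uintptr_t)-1 (wraps to 0);
// anything else might be a real, aligned CodeBlock pointer.
bool obviouslyNotAPointerSketch(uintptr_t value)
{
    return value + 1 <= 1;
}
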