Diffstat (limited to 'Source/JavaScriptCore/bytecode/CodeBlock.h')
-rw-r--r-- | Source/JavaScriptCore/bytecode/CodeBlock.h | 165
1 file changed, 108 insertions, 57 deletions
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index 4e4fee2b7..ed072f832 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -35,12 +35,15 @@
 #include "CallReturnOffsetToBytecodeOffset.h"
 #include "CodeOrigin.h"
 #include "CodeType.h"
+#include "Comment.h"
 #include "CompactJITCodeMap.h"
 #include "DFGCodeBlocks.h"
 #include "DFGCommon.h"
 #include "DFGExitProfile.h"
+#include "DFGMinifiedGraph.h"
 #include "DFGOSREntry.h"
 #include "DFGOSRExit.h"
+#include "DFGVariableEventStream.h"
 #include "EvalCodeCache.h"
 #include "ExecutionCounter.h"
 #include "ExpressionRangeInfo.h"
@@ -66,10 +69,35 @@
 #include <wtf/RefCountedArray.h>
 #include <wtf/FastAllocBase.h>
 #include <wtf/PassOwnPtr.h>
+#include <wtf/Platform.h>
 #include <wtf/RefPtr.h>
 #include <wtf/SegmentedVector.h>
 #include <wtf/Vector.h>
 
+// Set ENABLE_BYTECODE_COMMENTS to 1 to enable recording bytecode generator
+// comments for the bytecodes that it generates. This will allow
+// CodeBlock::dump() to provide some contextual info about the bytecodes.
+//
+// The way this comment system works is as follows:
+// 1. The BytecodeGenerator calls prependComment() with a constant comment
+//    string in .text. The string must not be a stack or heap allocated
+//    string.
+// 2. When the BytecodeGenerator's emitOpcode() is called, the last
+//    prepended comment will be recorded with the PC of the opcode being
+//    emitted. This comment is being recorded in the CodeBlock's
+//    m_bytecodeComments.
+// 3. When CodeBlock::dump() is called, it will pair up the comments with
+//    their corresponding bytecodes based on the bytecode and comment's
+//    PC. If a matching pair is found, the comment will be printed after
+//    the bytecode. If not, no comment is printed.
+//
+// NOTE: Enabling this will consume additional memory at runtime to store
+// the comments. Since these comments are only useful for VM debugging
+// (as opposed to app debugging), this feature is to be disabled by default,
+// and can be enabled as needed for VM development use only.
+
+#define ENABLE_BYTECODE_COMMENTS 0
+
 namespace JSC {
 
     class DFGCodeBlocks;
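The comment block added above describes a record-then-pair scheme: the generator stashes a single pending string-literal comment, the next emitted opcode claims it together with its PC, and dump() later matches comments to bytecodes by comparing PCs. The following self-contained sketch models that scheme for illustration only; the Comment fields and every helper here (s_pendingComment, emitOpcode, dumpBytecode, the opcode names) are simplified stand-ins, not JSC's actual BytecodeGenerator/CodeBlock code:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Simplified stand-in for the Comment record (PC + string literal).
    struct Comment {
        std::size_t pc;     // offset of the bytecode this comment annotates
        const char* string; // must point at a string literal (.text), never stack/heap
    };

    static const char* s_pendingComment = nullptr; // step 1: last prepended comment
    static std::vector<Comment> s_comments;        // models CodeBlock::m_bytecodeComments

    static void prependComment(const char* string) { s_pendingComment = string; }

    static void emitOpcode(std::size_t pc)
    {
        if (s_pendingComment) {
            s_comments.push_back({ pc, s_pendingComment }); // step 2: record with the opcode's PC
            s_pendingComment = nullptr;                     // consumed; the next opcode gets none
        }
    }

    // Step 3: while dumping, print a comment only when its PC matches the bytecode's.
    static void dumpBytecode(std::size_t pc, const char* opcodeName, std::size_t& commentIndex)
    {
        std::printf("[%3zu] %-8s", pc, opcodeName);
        if (commentIndex < s_comments.size() && s_comments[commentIndex].pc == pc)
            std::printf(" ; %s", s_comments[commentIndex++].string);
        std::printf("\n");
    }

    int main()
    {
        prependComment("lhs + rhs");
        emitOpcode(0); // "add" claims the pending comment
        emitOpcode(3); // "ret" emits with no comment pending
        std::size_t commentIndex = 0;
        dumpBytecode(0, "add", commentIndex);
        dumpBytecode(3, "ret", commentIndex);
        return 0;
    }

Because only pointers to string literals are recorded, taking a comment costs one pointer copy and no allocation; the overhead the NOTE above warns about is purely the growth of m_bytecodeComments.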
@@ -155,6 +183,12 @@
             return index >= m_numVars;
         }
 
+        void dumpBytecodeCommentAndNewLine(int location);
+#if ENABLE(BYTECODE_COMMENTS)
+        const char* commentForBytecodeOffset(unsigned bytecodeOffset);
+        void dumpBytecodeComments();
+#endif
+
         HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
         int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
         void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);
@@ -350,6 +384,18 @@
             m_dfgData->transitions.append(
                 WeakReferenceTransition(*globalData(), ownerExecutable(), codeOrigin, from, to));
         }
+
+        DFG::MinifiedGraph& minifiedDFG()
+        {
+            createDFGDataIfNecessary();
+            return m_dfgData->minifiedDFG;
+        }
+
+        DFG::VariableEventStream& variableEventStream()
+        {
+            createDFGDataIfNecessary();
+            return m_dfgData->variableEventStream;
+        }
 #endif
 
         unsigned bytecodeOffset(Instruction* returnAddress)
@@ -365,6 +411,10 @@
         RefCountedArray<Instruction>& instructions() { return m_instructions; }
         const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
 
+#if ENABLE(BYTECODE_COMMENTS)
+        Vector<Comment>& bytecodeComments() { return m_bytecodeComments; }
+#endif
+
         size_t predictedMachineCodeSize();
 
         bool usesOpcode(OpcodeID);
@@ -635,7 +685,7 @@
             if (!numberOfRareCaseProfiles())
                 return false;
             unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-            return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
+            return value >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold();
         }
 
         bool couldTakeSlowCase(int bytecodeOffset)
@@ -643,7 +693,7 @@
             if (!numberOfRareCaseProfiles())
                 return false;
             unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-            return value >= Options::couldTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold;
+            return value >= Options::couldTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold();
         }
 
         RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
@@ -663,7 +713,15 @@
             if (!numberOfRareCaseProfiles())
                 return false;
             unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-            return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
+            return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold();
+        }
+
+        bool couldTakeSpecialFastCase(int bytecodeOffset)
+        {
+            if (!numberOfRareCaseProfiles())
+                return false;
+            unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+            return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount() && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold();
         }
 
         bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
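The new couldTakeSpecialFastCase() above follows the same two-gate pattern as the other slow-case predicates: an absolute floor on the profile counter plus a minimum rate relative to m_executionEntryCount. A sketch with invented numbers (the standalone function below is a simplified signature; the real gates come from the Options::*MinimumCount() and Options::*Threshold() accessors):

    #include <cstdio>

    static bool couldTakeSlowCaseExample(unsigned slowCaseCount, unsigned executionEntryCount,
                                         unsigned minimumCount, double threshold)
    {
        // Gate 1: absolute floor, so a few slow-path hits in rarely-run code don't count.
        // Gate 2: relative rate, so hot code that slow-paths only occasionally doesn't either.
        return slowCaseCount >= minimumCount
            && static_cast<double>(slowCaseCount) / executionEntryCount >= threshold;
    }

    int main()
    {
        std::printf("%d\n", couldTakeSlowCaseExample(100, 500, 20, 0.15)); // 1: passes both gates
        std::printf("%d\n", couldTakeSlowCaseExample(10, 500, 20, 0.15));  // 0: under the floor
        std::printf("%d\n", couldTakeSlowCaseExample(30, 5000, 20, 0.15)); // 0: rate too low
        return 0;
    }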
@@ -673,7 +731,7 @@
             unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
             unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
             unsigned value = slowCaseCount - specialFastCaseCount;
-            return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
+            return value >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold();
         }
 
         bool likelyToTakeAnySlowCase(int bytecodeOffset)
@@ -683,7 +741,7 @@
             unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
             unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
             unsigned value = slowCaseCount + specialFastCaseCount;
-            return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
+            return value >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold();
         }
 
         unsigned executionEntryCount() const { return m_executionEntryCount; }
@@ -905,12 +963,12 @@
 
         void jitAfterWarmUp()
         {
-            m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp, this);
+            m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
         }
 
         void jitSoon()
         {
-            m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon, this);
+            m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
         }
 
         const ExecutionCounter& llintExecuteCounter() const
@@ -941,25 +999,25 @@
         // to avoid thrashing.
         unsigned reoptimizationRetryCounter() const
         {
-            ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax);
+            ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
             return m_reoptimizationRetryCounter;
         }
 
         void countReoptimization()
         {
             m_reoptimizationRetryCounter++;
-            if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax)
-                m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax;
+            if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
+                m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
         }
 
         int32_t counterValueForOptimizeAfterWarmUp()
         {
-            return Options::thresholdForOptimizeAfterWarmUp << reoptimizationRetryCounter();
+            return Options::thresholdForOptimizeAfterWarmUp() << reoptimizationRetryCounter();
         }
 
         int32_t counterValueForOptimizeAfterLongWarmUp()
         {
-            return Options::thresholdForOptimizeAfterLongWarmUp << reoptimizationRetryCounter();
+            return Options::thresholdForOptimizeAfterLongWarmUp() << reoptimizationRetryCounter();
         }
 
         int32_t* addressOfJITExecuteCounter()
@@ -1039,62 +1097,51 @@
         // in the baseline code.
         void optimizeSoon()
         {
-            m_jitExecuteCounter.setNewThreshold(Options::thresholdForOptimizeSoon << reoptimizationRetryCounter(), this);
+            m_jitExecuteCounter.setNewThreshold(Options::thresholdForOptimizeSoon() << reoptimizationRetryCounter(), this);
         }
 
-        // The speculative JIT tracks its success rate, so that we can
-        // decide when to reoptimize. It's interesting to note that these
-        // counters may overflow without any protection. The success
-        // counter will overflow before the fail one does, becuase the
-        // fail one is used as a trigger to reoptimize. So the worst case
-        // is that the success counter overflows and we reoptimize without
-        // needing to. But this is harmless. If a method really did
-        // execute 2^32 times then compiling it again probably won't hurt
-        // anyone.
+        uint32_t osrExitCounter() const { return m_osrExitCounter; }
 
-        void countSpeculationSuccess()
-        {
-            m_speculativeSuccessCounter++;
+        void countOSRExit() { m_osrExitCounter++; }
+
+        uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }
+
+        static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }
+
+#if ENABLE(JIT)
+        uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold)
+        {
+            ASSERT(getJITType() == JITCode::DFGJIT);
+            // Compute this the lame way so we don't saturate. This is called infrequently
+            // enough that this loop won't hurt us.
+            unsigned result = desiredThreshold;
+            for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
+                unsigned newResult = result << 1;
+                if (newResult < result)
+                    return std::numeric_limits<uint32_t>::max();
+                result = newResult;
+            }
+            return result;
         }
 
-        void countSpeculationFailure()
+        uint32_t exitCountThresholdForReoptimization()
         {
-            m_speculativeFailCounter++;
+            return adjustedExitCountThreshold(Options::osrExitCountForReoptimization());
         }
 
-        uint32_t speculativeSuccessCounter() const { return m_speculativeSuccessCounter; }
-        uint32_t speculativeFailCounter() const { return m_speculativeFailCounter; }
-        uint32_t forcedOSRExitCounter() const { return m_forcedOSRExitCounter; }
-
-        uint32_t* addressOfSpeculativeSuccessCounter() { return &m_speculativeSuccessCounter; }
-        uint32_t* addressOfSpeculativeFailCounter() { return &m_speculativeFailCounter; }
-        uint32_t* addressOfForcedOSRExitCounter() { return &m_forcedOSRExitCounter; }
-
-        static ptrdiff_t offsetOfSpeculativeSuccessCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeSuccessCounter); }
-        static ptrdiff_t offsetOfSpeculativeFailCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeFailCounter); }
-        static ptrdiff_t offsetOfForcedOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_forcedOSRExitCounter); }
-
-#if ENABLE(JIT)
-        // The number of failures that triggers the use of the ratio.
-        unsigned largeFailCountThreshold() { return Options::largeFailCountThresholdBase << baselineVersion()->reoptimizationRetryCounter(); }
-        unsigned largeFailCountThresholdForLoop() { return Options::largeFailCountThresholdBaseForLoop << baselineVersion()->reoptimizationRetryCounter(); }
+        uint32_t exitCountThresholdForReoptimizationFromLoop()
+        {
+            return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop());
+        }
 
         bool shouldReoptimizeNow()
         {
-            return (Options::desiredSpeculativeSuccessFailRatio *
-                        speculativeFailCounter() >= speculativeSuccessCounter()
-                    && speculativeFailCounter() >= largeFailCountThreshold())
-                || forcedOSRExitCounter() >=
-                       Options::forcedOSRExitCountForReoptimization;
+            return osrExitCounter() >= exitCountThresholdForReoptimization();
         }
-
+
         bool shouldReoptimizeFromLoopNow()
         {
-            return (Options::desiredSpeculativeSuccessFailRatio *
-                        speculativeFailCounter() >= speculativeSuccessCounter()
-                    && speculativeFailCounter() >= largeFailCountThresholdForLoop())
-                || forcedOSRExitCounter() >=
-                       Options::forcedOSRExitCountForReoptimization;
+            return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
         }
 #endif
 
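The adjustedExitCountThreshold() added above is the saturating counterpart of the threshold << reoptimizationRetryCounter() backoff used by counterValueForOptimizeAfterWarmUp(): each reoptimization retry doubles the OSR-exit budget, but the loop clamps at UINT32_MAX instead of wrapping. In isolation it behaves like this sketch (the function name is invented; this is not the JSC code itself):

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    static uint32_t saturatingShift(uint32_t desiredThreshold, unsigned retries)
    {
        uint32_t result = desiredThreshold;
        while (retries--) {
            uint32_t doubled = result << 1;
            if (doubled < result) // the shift wrapped around: clamp instead
                return std::numeric_limits<uint32_t>::max();
            result = doubled;
        }
        return result;
    }

    int main()
    {
        std::printf("%u\n", saturatingShift(100, 3));         // 800: plain doubling
        std::printf("%u\n", saturatingShift(0x80000000u, 1)); // 4294967295: clamped, not 0
        return 0;
    }

Clamping matters because shouldReoptimizeNow() tests osrExitCounter() >= threshold; a threshold that wrapped to 0 would force immediate reoptimization, the opposite of the intended backoff.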
@@ -1255,6 +1302,8 @@
             SegmentedVector<Watchpoint, 1, 0> watchpoints;
             Vector<WeakReferenceTransition> transitions;
             Vector<WriteBarrier<JSCell> > weakReferences;
+            DFG::VariableEventStream variableEventStream;
+            DFG::MinifiedGraph minifiedDFG;
             bool mayBeExecuting;
             bool isJettisoned;
             bool livenessHasBeenProved; // Initialized and used on every GC.
@@ -1295,13 +1344,15 @@
         ExecutionCounter m_jitExecuteCounter;
         int32_t m_totalJITExecutions;
-        uint32_t m_speculativeSuccessCounter;
-        uint32_t m_speculativeFailCounter;
-        uint32_t m_forcedOSRExitCounter;
+        uint32_t m_osrExitCounter;
         uint16_t m_optimizationDelayCounter;
         uint16_t m_reoptimizationRetryCounter;
 
         Vector<LineInfo> m_lineInfo;
+#if ENABLE(BYTECODE_COMMENTS)
+        Vector<Comment> m_bytecodeComments;
+        size_t m_bytecodeCommentIterator;
+#endif
 
         struct RareData {
             WTF_MAKE_FAST_ALLOCATED;