author     Simon Hausmann <simon.hausmann@nokia.com>  2012-03-12 14:11:15 +0100
committer  Simon Hausmann <simon.hausmann@nokia.com>  2012-03-12 14:11:15 +0100
commit     dd91e772430dc294e3bf478c119ef8d43c0a3358 (patch)
tree       6f33ce4d5872a5691e0291eb45bf6ab373a5f567 /Source/JavaScriptCore/bytecode
parent     ad0d549d4cc13433f77c1ac8f0ab379c83d93f28 (diff)
download   qtwebkit-dd91e772430dc294e3bf478c119ef8d43c0a3358.tar.gz
Imported WebKit commit 3db4eb1820ac8fb03065d7ea73a4d9db1e8fea1a (http://svn.webkit.org/repository/webkit/trunk@110422)
This includes build fixes for the latest qtbase/qtdeclarative as well as the final QML2 API.
Diffstat (limited to 'Source/JavaScriptCore/bytecode')
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlock.cpp        | 104
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlock.h          | 133
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeType.h           |  17
-rw-r--r--  Source/JavaScriptCore/bytecode/ExecutionCounter.cpp | 165
-rw-r--r--  Source/JavaScriptCore/bytecode/ExecutionCounter.h   |  83
-rw-r--r--  Source/JavaScriptCore/bytecode/Opcode.h             |   1
-rw-r--r--  Source/JavaScriptCore/bytecode/PredictedType.cpp    |  11
-rw-r--r--  Source/JavaScriptCore/bytecode/PredictedType.h      |  15
-rw-r--r--  Source/JavaScriptCore/bytecode/SamplingTool.cpp     |   4
-rw-r--r--  Source/JavaScriptCore/bytecode/SamplingTool.h       |   2
10 files changed, 417 insertions, 118 deletions
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index ab89ad965..20972cc63 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -226,7 +226,7 @@ static bool isPropertyAccess(OpcodeID opcodeID)
     }
 }
 
-static unsigned instructionOffsetForNth(ExecState* exec, const Vector<Instruction>& instructions, int nth, bool (*predicate)(OpcodeID))
+static unsigned instructionOffsetForNth(ExecState* exec, const RefCountedArray<Instruction>& instructions, int nth, bool (*predicate)(OpcodeID))
 {
     size_t i = 0;
     while (i < instructions.size()) {
@@ -347,18 +347,13 @@ void CodeBlock::printStructures(const Instruction* vPC) const
 
 void CodeBlock::dump(ExecState* exec) const
 {
-    if (!m_instructions) {
-        dataLog("No instructions available.\n");
-        return;
-    }
-    
     size_t instructionCount = 0;
 
     for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
         ++instructionCount;
 
     dataLog("%lu m_instructions; %lu bytes at %p; %d parameter(s); %d callee register(s); %d variable(s)\n\n",
-        static_cast<unsigned long>(instructionCount),
+        static_cast<unsigned long>(instructions().size()),
         static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
         this, m_numParameters, m_numCalleeRegisters, m_numVars);
@@ -689,10 +684,6 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
             ++it;
             break;
         }
-        case op_bitnot: {
-            printUnaryOp(exec, location, it, "bitnot");
-            break;
-        }
         case op_check_has_instance: {
             int base = (++it)->u.operand;
             dataLog("[%4d] check_has_instance\t\t %s\n", location, registerName(exec, base).data());
@@ -1424,11 +1415,9 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other, SymbolTable* symTab)
     , m_numVars(other.m_numVars)
     , m_numCapturedVars(other.m_numCapturedVars)
     , m_isConstructor(other.m_isConstructor)
-    , m_shouldDiscardBytecode(false)
     , m_ownerExecutable(*other.m_globalData, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
     , m_globalData(other.m_globalData)
     , m_instructions(other.m_instructions)
-    , m_instructionCount(other.m_instructionCount)
     , m_thisRegister(other.m_thisRegister)
     , m_argumentsRegister(other.m_argumentsRegister)
     , m_activationRegister(other.m_activationRegister)
@@ -1484,12 +1473,9 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlo
     , m_numCalleeRegisters(0)
     , m_numVars(0)
     , m_isConstructor(isConstructor)
-    , m_shouldDiscardBytecode(false)
     , m_numParameters(0)
     , m_ownerExecutable(globalObject->globalData(), ownerExecutable, ownerExecutable)
     , m_globalData(0)
-    , m_instructions(adoptRef(new Instructions))
-    , m_instructionCount(0)
     , m_argumentsRegister(-1)
     , m_needsFullScopeChain(ownerExecutable->needsActivation())
     , m_usesEval(ownerExecutable->usesEval())
@@ -1595,13 +1581,15 @@ void CodeBlock::visitStructures(SlotVisitor& visitor, Instruction* vPC) const
     }
     if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_chain) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_chain) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_custom_chain)) {
         visitor.append(&vPC[4].u.structure);
-        visitor.append(&vPC[5].u.structureChain);
+        if (vPC[5].u.structureChain)
+            visitor.append(&vPC[5].u.structureChain);
         return;
     }
     if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_transition)) {
         visitor.append(&vPC[4].u.structure);
         visitor.append(&vPC[5].u.structure);
-        visitor.append(&vPC[6].u.structureChain);
+        if (vPC[6].u.structureChain)
+            visitor.append(&vPC[6].u.structureChain);
         return;
     }
     if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id) && vPC[4].u.structure) {
@@ -1631,6 +1619,32 @@ void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
 
 void CodeBlock::visitAggregate(SlotVisitor& visitor)
 {
+#if ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT)
+    if (!!m_dfgData) {
+        // I may be asked to scan myself more than once, and it may even happen concurrently.
+        // To this end, use a CAS loop to check if I've been called already. Only one thread
+        // may proceed past this point - whichever one wins the CAS race.
+        unsigned oldValue;
+        do {
+            oldValue = m_dfgData->visitAggregateHasBeenCalled;
+            if (oldValue) {
+                // Looks like someone else won! Return immediately to ensure that we don't
+                // trace the same CodeBlock concurrently. Doing so is hazardous since we will
+                // be mutating the state of ValueProfiles, which contain JSValues, which can
+                // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
+                // that are nearly impossible to track down.
+                
+                // Also note that it must be safe to return early as soon as we see the
+                // value true (well, (unsigned)1), since once a GC thread is in this method
+                // and has won the CAS race (i.e. was responsible for setting the value true)
+                // it will definitely complete the rest of this method before declaring
+                // termination.
+                return;
+            }
+        } while (!WTF::weakCompareAndSwap(&m_dfgData->visitAggregateHasBeenCalled, 0, 1));
+    }
+#endif // ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT)
+    
     if (!!m_alternative)
         m_alternative->visitAggregate(visitor);
@@ -1903,12 +1917,6 @@ void CodeBlock::finalizeUnconditionally()
         }
     }
 #endif
-    
-    // Handle the bytecode discarding chore.
-    if (m_shouldDiscardBytecode) {
-        discardBytecode();
-        m_shouldDiscardBytecode = false;
-    }
 }
 
 void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
@@ -1980,7 +1988,7 @@ void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
 
 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
 {
-    ASSERT(bytecodeOffset < m_instructionCount);
+    ASSERT(bytecodeOffset < instructions().size());
 
     if (!m_rareData)
         return 0;
@@ -1998,7 +2006,7 @@ HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
 
 int CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
 {
-    ASSERT(bytecodeOffset < m_instructionCount);
+    ASSERT(bytecodeOffset < instructions().size());
 
     if (!m_rareData)
         return m_ownerExecutable->source().firstLine();
@@ -2022,7 +2030,7 @@ int CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
 
 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset)
 {
-    ASSERT(bytecodeOffset < m_instructionCount);
+    ASSERT(bytecodeOffset < instructions().size());
 
     if (!m_rareData) {
         startOffset = 0;
@@ -2102,8 +2110,6 @@ bool CodeBlock::hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset)
 
 void CodeBlock::shrinkToFit()
 {
-    instructions().shrinkToFit();
-
 #if ENABLE(CLASSIC_INTERPRETER)
     m_propertyAccessInstructions.shrinkToFit();
     m_globalResolveInstructions.shrinkToFit();
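The visitAggregate() hunk above relies on a claim-by-CAS guard so that exactly one GC thread traces a given CodeBlock per cycle. Here is a minimal standalone sketch of that pattern, using std::atomic as a stand-in for WTF::weakCompareAndSwap; the type and method names are illustrative, not from the patch:

    #include <atomic>
    #include <cassert>

    // Illustrative stand-in for an object that may be scanned by several GC threads.
    struct ScannableBlock {
        std::atomic<unsigned> visitHasBeenCalled{0};

        // Returns true for exactly one caller per cycle; every other caller,
        // including concurrent ones, sees the flag already set and backs off.
        bool tryClaimForScanning()
        {
            unsigned expected = 0;
            // compare_exchange_strong plays the role of WTF::weakCompareAndSwap:
            // only the thread that flips 0 -> 1 wins the race.
            return visitHasBeenCalled.compare_exchange_strong(expected, 1);
        }
    };

    int main()
    {
        ScannableBlock block;
        assert(block.tryClaimForScanning());   // first caller wins
        assert(!block.tryClaimForScanning());  // everyone else returns early
    }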
@@ -2345,21 +2351,21 @@ void FunctionCodeBlock::jettison()
     static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(*globalData(), m_isConstructor ? CodeForConstruct : CodeForCall);
 }
 
-void ProgramCodeBlock::jitCompileImpl(JSGlobalData& globalData)
+bool ProgramCodeBlock::jitCompileImpl(JSGlobalData& globalData)
 {
     ASSERT(getJITType() == JITCode::InterpreterThunk);
     ASSERT(this == replacement());
     return static_cast<ProgramExecutable*>(ownerExecutable())->jitCompile(globalData);
 }
 
-void EvalCodeBlock::jitCompileImpl(JSGlobalData& globalData)
+bool EvalCodeBlock::jitCompileImpl(JSGlobalData& globalData)
 {
     ASSERT(getJITType() == JITCode::InterpreterThunk);
     ASSERT(this == replacement());
     return static_cast<EvalExecutable*>(ownerExecutable())->jitCompile(globalData);
 }
 
-void FunctionCodeBlock::jitCompileImpl(JSGlobalData& globalData)
+bool FunctionCodeBlock::jitCompileImpl(JSGlobalData& globalData)
 {
     ASSERT(getJITType() == JITCode::InterpreterThunk);
     ASSERT(this == replacement());
@@ -2467,7 +2473,38 @@ void CodeBlock::dumpValueProfiles()
 }
 #endif
 
-#ifndef NDEBUG
+size_t CodeBlock::predictedMachineCodeSize()
+{
+    // This will be called from CodeBlock::CodeBlock before either m_globalData or the
+    // instructions have been initialized. It's OK to return 0 because what will really
+    // matter is the recomputation of this value when the slow path is triggered.
+    if (!m_globalData)
+        return 0;
+    
+    if (!m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT)
+        return 0; // It's as good of a prediction as we'll get.
+    
+    // Be conservative: return a size that will be an overestimation 84% of the time.
+    double multiplier = m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() +
+        m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation();
+    
+    // Be paranoid: silently reject bogus multipiers. Silently doing the "wrong" thing
+    // here is OK, since this whole method is just a heuristic.
+    if (multiplier < 0 || multiplier > 1000)
+        return 0;
+    
+    double doubleResult = multiplier * m_instructions.size();
+    
+    // Be even more paranoid: silently reject values that won't fit into a size_t. If
+    // the function is so huge that we can't even fit it into virtual memory then we
+    // should probably have some other guards in place to prevent us from even getting
+    // to this point.
+    if (doubleResult > std::numeric_limits<size_t>::max())
+        return 0;
+    
+    return static_cast<size_t>(doubleResult);
+}
+
 bool CodeBlock::usesOpcode(OpcodeID opcodeID)
 {
     Interpreter* interpreter = globalData()->interpreter;
@@ -2492,6 +2529,5 @@ bool CodeBlock::usesOpcode(OpcodeID opcodeID)
 
     return false;
 }
-#endif
 
 } // namespace JSC
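predictedMachineCodeSize() above is a statistical guess: the mean plus one standard deviation of observed machine-code bytes per bytecode word, which under a roughly normal distribution overestimates about 84% of the time (the one-sided one-sigma quantile). A self-contained sketch of the same arithmetic; the sample numbers are invented:

    #include <cstdio>

    // Toy version of the heuristic: predict machine-code size as
    // (mean + one standard deviation) of bytes-per-bytecode-word,
    // times the bytecode length in words.
    double predictedMachineCodeSize(double mean, double stddev, double bytecodeWords)
    {
        double multiplier = mean + stddev;
        if (multiplier < 0 || multiplier > 1000) // reject bogus statistics
            return 0;
        return multiplier * bytecodeWords;
    }

    int main()
    {
        // e.g. 9.5 bytes/word on average, sigma 2.5, over 1200 bytecode words
        std::printf("%.0f bytes\n", predictedMachineCodeSize(9.5, 2.5, 1200)); // 14400
    }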
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index 195aa62ca..469028097 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -41,6 +41,7 @@
 #include "DFGOSREntry.h"
 #include "DFGOSRExit.h"
 #include "EvalCodeCache.h"
+#include "ExecutionCounter.h"
 #include "ExpressionRangeInfo.h"
 #include "GlobalResolveInfo.h"
 #include "HandlerInfo.h"
@@ -61,6 +62,7 @@
 #include "UString.h"
 #include "UnconditionalFinalizer.h"
 #include "ValueProfile.h"
+#include <wtf/RefCountedArray.h>
 #include <wtf/FastAllocBase.h>
 #include <wtf/PassOwnPtr.h>
 #include <wtf/RefPtr.h>
@@ -127,8 +129,6 @@ namespace JSC {
         }
 #endif
 
-        bool canProduceCopyWithBytecode() { return hasInstructions(); }
-
         void visitAggregate(SlotVisitor&);
 
         static void dumpStatistics();
@@ -341,20 +341,15 @@ namespace JSC {
         void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
         bool isNumericCompareFunction() { return m_isNumericCompareFunction; }
 
-        bool hasInstructions() const { return !!m_instructions; }
-        unsigned numberOfInstructions() const { return !m_instructions ? 0 : m_instructions->m_instructions.size(); }
-        Vector<Instruction>& instructions() { return m_instructions->m_instructions; }
-        const Vector<Instruction>& instructions() const { return m_instructions->m_instructions; }
-        void discardBytecode() { m_instructions.clear(); }
-        void discardBytecodeLater()
-        {
-            m_shouldDiscardBytecode = true;
-        }
+        unsigned numberOfInstructions() const { return m_instructions.size(); }
+        RefCountedArray<Instruction>& instructions() { return m_instructions; }
+        const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
+        
+        size_t predictedMachineCodeSize();
+        
         bool usesOpcode(OpcodeID);
 
-        unsigned instructionCount() { return m_instructionCount; }
-        void setInstructionCount(unsigned instructionCount) { m_instructionCount = instructionCount; }
+        unsigned instructionCount() { return m_instructions.size(); }
 
 #if ENABLE(JIT)
         void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
@@ -374,18 +369,20 @@ namespace JSC {
         ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
         virtual void jettison() = 0;
-        bool jitCompile(JSGlobalData& globalData)
+        enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };
+        JITCompilationResult jitCompile(JSGlobalData& globalData)
         {
             if (getJITType() != JITCode::InterpreterThunk) {
                 ASSERT(getJITType() == JITCode::BaselineJIT);
-                return false;
+                return AlreadyCompiled;
             }
 #if ENABLE(JIT)
-            jitCompileImpl(globalData);
-            return true;
+            if (jitCompileImpl(globalData))
+                return CompiledSuccessfully;
+            return CouldNotCompile;
 #else
             UNUSED_PARAM(globalData);
-            return false;
+            return CouldNotCompile;
 #endif
         }
         virtual CodeBlock* replacement() = 0;
@@ -541,11 +538,10 @@ namespace JSC {
         {
             ValueProfile* result = WTF::genericBinarySearch<ValueProfile, int, getValueProfileBytecodeOffset>(m_valueProfiles, m_valueProfiles.size(), bytecodeOffset);
             ASSERT(result->m_bytecodeOffset != -1);
-            ASSERT(!hasInstructions()
-                   || instructions()[bytecodeOffset + opcodeLength(
-                          m_globalData->interpreter->getOpcodeID(
-                              instructions()[
-                                  bytecodeOffset].u.opcode)) - 1].u.profile == result);
+            ASSERT(instructions()[bytecodeOffset + opcodeLength(
+                       m_globalData->interpreter->getOpcodeID(
+                           instructions()[
+                               bytecodeOffset].u.opcode)) - 1].u.profile == result);
             return result;
         }
         PredictedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
@@ -839,24 +835,29 @@ namespace JSC {
         // Functions for controlling when JITting kicks in, in a mixed mode
         // execution world.
 
+        bool checkIfJITThresholdReached()
+        {
+            return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
+        }
+
         void dontJITAnytimeSoon()
         {
-            m_llintExecuteCounter = Options::executionCounterValueForDontJITAnytimeSoon;
+            m_llintExecuteCounter.deferIndefinitely();
         }
 
         void jitAfterWarmUp()
         {
-            m_llintExecuteCounter = Options::executionCounterValueForJITAfterWarmUp;
+            m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp, this);
         }
 
         void jitSoon()
         {
-            m_llintExecuteCounter = Options::executionCounterValueForJITSoon;
+            m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon, this);
         }
 
         int32_t llintExecuteCounter() const
         {
-            return m_llintExecuteCounter;
+            return m_llintExecuteCounter.m_counter;
         }
 
         // Functions for controlling when tiered compilation kicks in. This
@@ -895,31 +896,41 @@ namespace JSC {
 
         int32_t counterValueForOptimizeAfterWarmUp()
         {
-            return Options::executionCounterValueForOptimizeAfterWarmUp << reoptimizationRetryCounter();
+            return Options::thresholdForOptimizeAfterWarmUp << reoptimizationRetryCounter();
         }
 
         int32_t counterValueForOptimizeAfterLongWarmUp()
         {
-            return Options::executionCounterValueForOptimizeAfterLongWarmUp << reoptimizationRetryCounter();
+            return Options::thresholdForOptimizeAfterLongWarmUp << reoptimizationRetryCounter();
         }
 
         int32_t* addressOfJITExecuteCounter()
         {
-            return &m_jitExecuteCounter;
+            return &m_jitExecuteCounter.m_counter;
         }
 
-        static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter); }
+        static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
+        static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
+        static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }
 
-        int32_t jitExecuteCounter() const { return m_jitExecuteCounter; }
+        int32_t jitExecuteCounter() const { return m_jitExecuteCounter.m_counter; }
 
         unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
 
+        // Check if the optimization threshold has been reached, and if not,
+        // adjust the heuristics accordingly. Returns true if the threshold has
+        // been reached.
+        bool checkIfOptimizationThresholdReached()
+        {
+            return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
+        }
+
         // Call this to force the next optimization trigger to fire. This is
         // rarely wise, since optimization triggers are typically more
         // expensive than executing baseline code.
         void optimizeNextInvocation()
         {
-            m_jitExecuteCounter = Options::executionCounterValueForOptimizeNextInvocation;
+            m_jitExecuteCounter.setNewThreshold(0, this);
         }
 
         // Call this to prevent optimization from happening again. Note that
@@ -929,7 +940,7 @@ namespace JSC {
         // the future as well.
         void dontOptimizeAnytimeSoon()
         {
-            m_jitExecuteCounter = Options::executionCounterValueForDontOptimizeAnytimeSoon;
+            m_jitExecuteCounter.deferIndefinitely();
         }
 
         // Call this to reinitialize the counter to its starting state,
@@ -940,14 +951,14 @@ namespace JSC {
         // counter that this corresponds to is also available directly.
         void optimizeAfterWarmUp()
         {
-            m_jitExecuteCounter = counterValueForOptimizeAfterWarmUp();
+            m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterWarmUp(), this);
         }
 
         // Call this to force an optimization trigger to fire only after
         // a lot of warm-up.
         void optimizeAfterLongWarmUp()
         {
-            m_jitExecuteCounter = counterValueForOptimizeAfterLongWarmUp();
+            m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterLongWarmUp(), this);
         }
 
         // Call this to cause an optimization trigger to fire soon, but
@@ -970,7 +981,7 @@ namespace JSC {
         // in the baseline code.
         void optimizeSoon()
         {
-            m_jitExecuteCounter = Options::executionCounterValueForOptimizeSoon << reoptimizationRetryCounter();
+            m_jitExecuteCounter.setNewThreshold(Options::thresholdForOptimizeSoon << reoptimizationRetryCounter(), this);
         }
 
         // The speculative JIT tracks its success rate, so that we can
@@ -1047,12 +1058,9 @@ namespace JSC {
         int m_numCapturedVars;
         bool m_isConstructor;
 
-        // This is public because otherwise we would have many friends.
-        bool m_shouldDiscardBytecode;
-
     protected:
 #if ENABLE(JIT)
-        virtual void jitCompileImpl(JSGlobalData&) = 0;
+        virtual bool jitCompileImpl(JSGlobalData&) = 0;
 #endif
         virtual void visitWeakReferences(SlotVisitor&);
         virtual void finalizeUnconditionally();
@@ -1115,11 +1123,7 @@ namespace JSC {
         WriteBarrier<ScriptExecutable> m_ownerExecutable;
         JSGlobalData* m_globalData;
 
-        struct Instructions : public RefCounted<Instructions> {
-            Vector<Instruction> m_instructions;
-        };
-        RefPtr<Instructions> m_instructions;
-        unsigned m_instructionCount;
+        RefCountedArray<Instruction> m_instructions;
 
         int m_thisRegister;
         int m_argumentsRegister;
@@ -1186,6 +1190,7 @@ namespace JSC {
             bool isJettisoned;
             bool livenessHasBeenProved; // Initialized and used on every GC.
             bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
+            unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations.
         };
         
        OwnPtr<DFGData> m_dfgData;
@@ -1217,13 +1222,14 @@ namespace JSC {
 
         OwnPtr<CodeBlock> m_alternative;
         
-        int32_t m_llintExecuteCounter;
+        ExecutionCounter m_llintExecuteCounter;
 
-        int32_t m_jitExecuteCounter;
+        ExecutionCounter m_jitExecuteCounter;
+        int32_t m_totalJITExecutions;
         uint32_t m_speculativeSuccessCounter;
         uint32_t m_speculativeFailCounter;
-        uint8_t m_optimizationDelayCounter;
-        uint8_t m_reoptimizationRetryCounter;
+        uint16_t m_optimizationDelayCounter;
+        uint16_t m_reoptimizationRetryCounter;
 
         struct RareData {
             WTF_MAKE_FAST_ALLOCATED;
@@ -1300,7 +1306,7 @@ namespace JSC {
     protected:
         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
         virtual void jettison();
-        virtual void jitCompileImpl(JSGlobalData&);
+        virtual bool jitCompileImpl(JSGlobalData&);
         virtual CodeBlock* replacement();
         virtual bool canCompileWithDFGInternal();
 #endif
@@ -1335,7 +1341,7 @@ namespace JSC {
     protected:
         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
         virtual void jettison();
-        virtual void jitCompileImpl(JSGlobalData&);
+        virtual bool jitCompileImpl(JSGlobalData&);
         virtual CodeBlock* replacement();
         virtual bool canCompileWithDFGInternal();
 #endif
@@ -1373,33 +1379,12 @@ namespace JSC {
     protected:
         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
         virtual void jettison();
-        virtual void jitCompileImpl(JSGlobalData&);
+        virtual bool jitCompileImpl(JSGlobalData&);
         virtual CodeBlock* replacement();
         virtual bool canCompileWithDFGInternal();
 #endif
     };
 
-    // Use this if you want to copy a code block and you're paranoid about a GC
-    // happening.
-    class BytecodeDestructionBlocker {
-    public:
-        BytecodeDestructionBlocker(CodeBlock* codeBlock)
-            : m_codeBlock(codeBlock)
-            , m_oldValueOfShouldDiscardBytecode(codeBlock->m_shouldDiscardBytecode)
-        {
-            codeBlock->m_shouldDiscardBytecode = false;
-        }
-        
-        ~BytecodeDestructionBlocker()
-        {
-            m_codeBlock->m_shouldDiscardBytecode = m_oldValueOfShouldDiscardBytecode;
-        }
-        
-    private:
-        CodeBlock* m_codeBlock;
-        bool m_oldValueOfShouldDiscardBytecode;
-    };
-
     inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
     {
         if (codeOrigin.inlineCallFrame) {
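jitCompile() now reports three outcomes instead of a bool, because jitCompileImpl() can legitimately fail (for instance, when executable memory is exhausted) and that case must be distinguishable from "already compiled". A hedged sketch of how a caller might branch on the enum; the describe() helper is hypothetical, only the enum values come from the patch:

    // Restated locally so the snippet stands alone.
    enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };

    const char* describe(JITCompilationResult result)
    {
        switch (result) {
        case AlreadyCompiled:
            return "baseline code already exists; nothing to do";
        case CouldNotCompile:
            return "compilation failed or JIT disabled; stay in the interpreter";
        case CompiledSuccessfully:
            return "fresh baseline code; jump into it";
        }
        return "unreachable";
    }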
diff --git a/Source/JavaScriptCore/bytecode/CodeType.h b/Source/JavaScriptCore/bytecode/CodeType.h
index 03485e564..d33677ae7 100644
--- a/Source/JavaScriptCore/bytecode/CodeType.h
+++ b/Source/JavaScriptCore/bytecode/CodeType.h
@@ -26,11 +26,28 @@
 #ifndef CodeType_h
 #define CodeType_h
 
+#include <wtf/Platform.h>
+
 namespace JSC {
 
 enum CodeType { GlobalCode, EvalCode, FunctionCode };
 
+inline const char* codeTypeToString(CodeType codeType)
+{
+    switch (codeType) {
+    case GlobalCode:
+        return "GlobalCode";
+    case EvalCode:
+        return "EvalCode";
+    case FunctionCode:
+        return "FunctionCode";
+    default:
+        ASSERT_NOT_REACHED();
+        return 0;
+    }
 }
+
+} // namespace JSC
+
 #endif // CodeType_h
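A small usage sketch for the new codeTypeToString() helper; the enum and helper are restated locally (with a plain return instead of ASSERT_NOT_REACHED) so the snippet stands alone:

    #include <cstdio>

    enum CodeType { GlobalCode, EvalCode, FunctionCode };

    inline const char* codeTypeToString(CodeType codeType)
    {
        switch (codeType) {
        case GlobalCode: return "GlobalCode";
        case EvalCode: return "EvalCode";
        case FunctionCode: return "FunctionCode";
        }
        return "unknown";
    }

    int main()
    {
        // Handy when tagging dataLog() output with the kind of code block being dumped.
        std::printf("%s\n", codeTypeToString(FunctionCode)); // prints "FunctionCode"
    }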
diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
new file mode 100644
index 000000000..b3fd3ef26
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ExecutionCounter.h"
+
+#include "CodeBlock.h"
+#include "ExecutableAllocator.h"
+#include <wtf/DataLog.h>
+
+namespace JSC {
+
+ExecutionCounter::ExecutionCounter()
+{
+    reset();
+}
+
+bool ExecutionCounter::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock)
+{
+    if (hasCrossedThreshold(codeBlock))
+        return true;
+    
+    if (setThreshold(codeBlock))
+        return true;
+    
+    return false;
+}
+
+void ExecutionCounter::setNewThreshold(int32_t threshold, CodeBlock* codeBlock)
+{
+    reset();
+    m_activeThreshold = threshold;
+    setThreshold(codeBlock);
+}
+
+void ExecutionCounter::deferIndefinitely()
+{
+    m_totalCount = 0;
+    m_activeThreshold = std::numeric_limits<int32_t>::max();
+    m_counter = std::numeric_limits<int32_t>::min();
+}
+
+double ExecutionCounter::applyMemoryUsageHeuristics(int32_t value, CodeBlock* codeBlock)
+{
+#if ENABLE(JIT)
+    double multiplier =
+        ExecutableAllocator::memoryPressureMultiplier(
+            codeBlock->predictedMachineCodeSize());
+#else
+    // This code path will probably not be taken, but if it is, we fake it.
+    double multiplier = 1.0;
+    UNUSED_PARAM(codeBlock);
+#endif
+    ASSERT(multiplier >= 1.0);
+    return multiplier * value;
+}
+
+int32_t ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
+    int32_t value, CodeBlock* codeBlock)
+{
+    double doubleResult = applyMemoryUsageHeuristics(value, codeBlock);
+    
+    ASSERT(doubleResult >= 0);
+    
+    if (doubleResult > std::numeric_limits<int32_t>::max())
+        return std::numeric_limits<int32_t>::max();
+    
+    return static_cast<int32_t>(doubleResult);
+}
+
+bool ExecutionCounter::hasCrossedThreshold(CodeBlock* codeBlock) const
+{
+    // This checks if the current count rounded up to the threshold we were targeting.
+    // For example, if we are using half of available executable memory and have
+    // m_activeThreshold = 1000, applyMemoryUsageHeuristics(m_activeThreshold) will be
+    // 2000, but we will pretend as if the threshold was crossed if we reach 2000 -
+    // 1000 / 2, or 1500. The reasoning here is that we want to avoid thrashing. If
+    // this method returns false, then the JIT's threshold for when it will again call
+    // into the slow path (which will call this method a second time) will be set
+    // according to the difference between the current count and the target count
+    // according to *current* memory usage. But by the time we call into this again, we
+    // may have JIT'ed more code, and so the target count will increase slightly. This
+    // may lead to a repeating pattern where the target count is slightly incremented,
+    // the JIT immediately matches that increase, calls into the slow path again, and
+    // again the target count is slightly incremented. Instead of having this vicious
+    // cycle, we declare victory a bit early if the difference between the current
+    // total and our target according to memory heuristics is small. Our definition of
+    // small is arbitrarily picked to be half of the original threshold (i.e.
+    // m_activeThreshold).
+    
+    double modifiedThreshold = applyMemoryUsageHeuristics(m_activeThreshold, codeBlock);
+    
+    return static_cast<double>(m_totalCount) + m_counter >=
+        modifiedThreshold - static_cast<double>(m_activeThreshold) / 2;
+}
+
+bool ExecutionCounter::setThreshold(CodeBlock* codeBlock)
+{
+    if (m_activeThreshold == std::numeric_limits<int32_t>::max()) {
+        deferIndefinitely();
+        return false;
+    }
+    
+    ASSERT(!hasCrossedThreshold(codeBlock));
+    
+    // Compute the true total count.
+    double trueTotalCount = static_cast<double>(m_totalCount) + m_counter;
+    
+    // Correct the threshold for current memory usage.
+    double threshold = applyMemoryUsageHeuristics(m_activeThreshold, codeBlock);
+    
+    // Threshold must be non-negative and not NaN.
+    ASSERT(threshold >= 0);
+    
+    // Adjust the threshold according to the number of executions we have already
+    // seen. This shouldn't go negative, but it might, because of round-off errors.
+    threshold -= trueTotalCount;
+    
+    if (threshold <= 0) {
+        m_counter = 0;
+        m_totalCount = trueTotalCount;
+        return true;
+    }
+
+    if (threshold > std::numeric_limits<int32_t>::max())
+        threshold = std::numeric_limits<int32_t>::max();
+    
+    m_counter = static_cast<int32_t>(-threshold);
+    
+    m_totalCount = trueTotalCount + threshold;
+    
+    return false;
+}
+
+void ExecutionCounter::reset()
+{
+    m_counter = 0;
+    m_totalCount = 0;
+    m_activeThreshold = 0;
+}
+
+} // namespace JSC
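The counter scheme is easiest to see with a toy model: the counter starts at -threshold and counts up, so the hot path's "should we tier up?" check is just a sign test. The sketch below is deliberately simplified; it omits the memory-pressure multiplier and the half-threshold early exit implemented in hasCrossedThreshold() above:

    #include <cstdio>
    #include <cstdint>

    // Simplified model of ExecutionCounter: not the shipped class.
    struct ToyCounter {
        int32_t counter;
        float totalCount;

        void setThreshold(int32_t threshold)
        {
            counter = -threshold;
            totalCount += threshold; // totalCount + counter == executions seen so far
        }

        // What the JIT/LLInt hot path effectively does on each execution.
        bool tick() { return ++counter >= 0; }
    };

    int main()
    {
        ToyCounter c = { 0, 0 };
        c.setThreshold(5);
        int executions = 0;
        while (!c.tick())
            ++executions;
        // Fires on the 5th execution; the running total stays recoverable.
        std::printf("crossed after %d executions, total = %g\n",
                    executions + 1, c.totalCount + c.counter);
    }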
diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.h b/Source/JavaScriptCore/bytecode/ExecutionCounter.h
new file mode 100644
index 000000000..d2ffbb649
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ExecutionCounter_h
+#define ExecutionCounter_h
+
+#include <wtf/SimpleStats.h>
+
+namespace JSC {
+
+class CodeBlock;
+
+class ExecutionCounter {
+public:
+    ExecutionCounter();
+    bool checkIfThresholdCrossedAndSet(CodeBlock*);
+    void setNewThreshold(int32_t threshold, CodeBlock*);
+    void deferIndefinitely();
+    static double applyMemoryUsageHeuristics(int32_t value, CodeBlock*);
+    static int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock*);
+    
+    static int32_t formattedTotalCount(float value)
+    {
+        union {
+            int32_t i;
+            float f;
+        } u;
+        u.f = value;
+        return u.i;
+    }
+    
+private:
+    bool hasCrossedThreshold(CodeBlock*) const;
+    bool setThreshold(CodeBlock*);
+    void reset();
+    
+public:
+
+    // NB. These are intentionally public because it will be modified from machine code.
+    
+    // This counter is incremented by the JIT or LLInt. It starts out negative and is
+    // counted up until it becomes non-negative. At the start of a counting period,
+    // the threshold we wish to reach is m_totalCount + m_counter, in the sense that
+    // we will add X to m_totalCount and subtract X from m_counter.
+    int32_t m_counter;
+
+    // Counts the total number of executions we have seen plus the ones we've set a
+    // threshold for in m_counter. Because m_counter's threshold is negative, the
+    // total number of actual executions can always be computed as m_totalCount +
+    // m_counter.
+    float m_totalCount;
+
+    // This is the threshold we were originally targetting, without any correction for
+    // the memory usage heuristics.
+    int32_t m_activeThreshold;
+};
+
+} // namespace JSC
+
+#endif // ExecutionCounter_h
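formattedTotalCount() above hands m_totalCount (a float) to JIT-emitted code as a raw 32-bit word by reinterpreting its bit pattern. The same trick spelled with memcpy, which is the strictly portable alternative to the union in the header:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Reinterpret a float's bits as an int32_t without changing them.
    int32_t formattedTotalCount(float value)
    {
        int32_t bits;
        static_assert(sizeof(bits) == sizeof(value), "float must be 32 bits");
        std::memcpy(&bits, &value, sizeof(bits));
        return bits;
    }

    int main()
    {
        // 1.0f has the IEEE-754 bit pattern 0x3f800000.
        std::printf("0x%08x\n", formattedTotalCount(1.0f));
    }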
diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h
index a47fa5e9b..45598f899 100644
--- a/Source/JavaScriptCore/bytecode/Opcode.h
+++ b/Source/JavaScriptCore/bytecode/Opcode.h
@@ -82,7 +82,6 @@ namespace JSC {
         macro(op_bitand, 5) \
         macro(op_bitxor, 5) \
         macro(op_bitor, 5) \
-        macro(op_bitnot, 3) \
         \
         macro(op_check_has_instance, 2) \
         macro(op_instanceof, 5) \
diff --git a/Source/JavaScriptCore/bytecode/PredictedType.cpp b/Source/JavaScriptCore/bytecode/PredictedType.cpp
index 2b490c24e..b04ff1f57 100644
--- a/Source/JavaScriptCore/bytecode/PredictedType.cpp
+++ b/Source/JavaScriptCore/bytecode/PredictedType.cpp
@@ -153,8 +153,13 @@ const char* predictionToString(PredictedType value)
     else
         isTop = false;
     
-    if (isTop)
-        return "Top";
+    if (isTop) {
+        ptr = description;
+        ptr.strcat("Top");
+    }
+    
+    if (value & PredictEmpty)
+        ptr.strcat("Empty");
     
     *ptr++ = 0;
     
@@ -221,6 +226,8 @@ PredictedType predictionFromCell(JSCell* cell)
 
 PredictedType predictionFromValue(JSValue value)
 {
+    if (value.isEmpty())
+        return PredictEmpty;
     if (value.isInt32())
         return PredictInt32;
     if (value.isDouble()) {
diff --git a/Source/JavaScriptCore/bytecode/PredictedType.h b/Source/JavaScriptCore/bytecode/PredictedType.h
index efbe9b30d..0b7916610 100644
--- a/Source/JavaScriptCore/bytecode/PredictedType.h
+++ b/Source/JavaScriptCore/bytecode/PredictedType.h
@@ -61,8 +61,10 @@ static const PredictedType PredictDoubleNaN = 0x00040000; // It's defini
 static const PredictedType PredictDouble = 0x00060000; // It's either a non-NaN or a NaN double.
 static const PredictedType PredictNumber = 0x00070000; // It's either an Int32 or a Double.
 static const PredictedType PredictBoolean = 0x00080000; // It's definitely a Boolean.
-static const PredictedType PredictOther = 0x40000000; // It's definitely none of the above.
-static const PredictedType PredictTop = 0x7fffffff; // It can be any of the above.
+static const PredictedType PredictOther = 0x08000000; // It's definitely none of the above.
+static const PredictedType PredictTop = 0x0fffffff; // It can be any of the above.
+static const PredictedType PredictEmpty = 0x10000000; // It's definitely an empty value marker.
+static const PredictedType PredictEmptyOrTop = 0x1fffffff; // It can be any of the above.
 static const PredictedType FixedIndexedStorageMask = PredictByteArray | PredictInt8Array | PredictInt16Array | PredictInt32Array | PredictUint8Array | PredictUint8ClampedArray | PredictUint16Array | PredictUint32Array | PredictFloat32Array | PredictFloat64Array;
 
 typedef bool (*PredictionChecker)(PredictedType);
@@ -89,7 +91,7 @@ inline bool isFinalObjectOrOtherPrediction(PredictedType value)
 
 inline bool isFixedIndexedStorageObjectPrediction(PredictedType value)
 {
-    return (value & FixedIndexedStorageMask) == value;
+    return !!value && (value & FixedIndexedStorageMask) == value;
 }
 
 inline bool isStringPrediction(PredictedType value)
@@ -199,7 +201,7 @@ inline bool isDoubleRealPrediction(PredictedType value)
 
 inline bool isDoublePrediction(PredictedType value)
 {
-    return (value & PredictDouble) == value;
+    return !!value && (value & PredictDouble) == value;
 }
 
 inline bool isNumberPrediction(PredictedType value)
@@ -217,6 +219,11 @@ inline bool isOtherPrediction(PredictedType value)
     return value == PredictOther;
 }
 
+inline bool isEmptyPrediction(PredictedType value)
+{
+    return value == PredictEmpty;
+}
+
 const char* predictionToString(PredictedType value);
 
 // Merge two predictions. Note that currently this just does left | right. It may
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.cpp b/Source/JavaScriptCore/bytecode/SamplingTool.cpp
index 077f041f4..f07dc79fb 100644
--- a/Source/JavaScriptCore/bytecode/SamplingTool.cpp
+++ b/Source/JavaScriptCore/bytecode/SamplingTool.cpp
@@ -100,7 +100,7 @@ SamplingRegion::Locker::Locker()
 #endif
             continue;
         }
-        if (WTF::weakCompareAndSwap(&s_currentOrReserved, previous, previous | 1))
+        if (WTF::weakCompareAndSwapUIntPtr(&s_currentOrReserved, previous, previous | 1))
             break;
     }
 }
@@ -113,7 +113,7 @@ SamplingRegion::Locker::~Locker()
     uintptr_t previous;
     do {
         previous = s_currentOrReserved;
-    } while (!WTF::weakCompareAndSwap(&s_currentOrReserved, previous, previous & ~1));
+    } while (!WTF::weakCompareAndSwapUIntPtr(&s_currentOrReserved, previous, previous & ~1));
 }
 
 void SamplingRegion::sample()
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.h b/Source/JavaScriptCore/bytecode/SamplingTool.h
index 28fd528d0..fcb1986fd 100644
--- a/Source/JavaScriptCore/bytecode/SamplingTool.h
+++ b/Source/JavaScriptCore/bytecode/SamplingTool.h
@@ -148,7 +148,7 @@ namespace JSC {
             if (previousPtr)
                 *previousPtr = bitwise_cast<SamplingRegion*>(previous);
             
-            if (WTF::weakCompareAndSwap(&s_currentOrReserved, previous, bitwise_cast<uintptr_t>(current)))
+            if (WTF::weakCompareAndSwapUIntPtr(&s_currentOrReserved, previous, bitwise_cast<uintptr_t>(current)))
                 break;
         }
     }
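Both SamplingTool changes swap in weakCompareAndSwapUIntPtr for the same idiom: the low bit of a pointer-sized word doubles as a spinlock, since aligned pointers never use it. A sketch of that idiom with std::atomic standing in for the WTF primitive; the names are illustrative:

    #include <atomic>
    #include <cstdint>

    // Pointer-sized word whose low bit serves as the reservation/lock bit.
    static std::atomic<uintptr_t> currentOrReserved{0};

    void lockLowBit()
    {
        for (;;) {
            uintptr_t previous = currentOrReserved.load();
            if (previous & 1)
                continue; // someone else holds the bit; spin
            // Like a "weak" CAS, spurious failure just retries the loop.
            if (currentOrReserved.compare_exchange_weak(previous, previous | 1))
                break;
        }
    }

    void unlockLowBit()
    {
        uintptr_t previous;
        do {
            previous = currentOrReserved.load();
        } while (!currentOrReserved.compare_exchange_weak(previous, previous & ~uintptr_t(1)));
    }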