author    Simon Hausmann <simon.hausmann@nokia.com>  2012-08-21 10:57:44 +0200
committer Simon Hausmann <simon.hausmann@nokia.com>  2012-08-21 10:57:44 +0200
commit    5ef7c8a6a70875d4430752d146bdcb069605d71d (patch)
tree      f6256640b6c46d7da221435803cae65326817ba2 /Source/JavaScriptCore/bytecode/CodeBlock.cpp
parent    decad929f578d8db641febc8740649ca6c574638 (diff)
download  qtwebkit-5ef7c8a6a70875d4430752d146bdcb069605d71d.tar.gz
Imported WebKit commit 356d83016b090995d08ad568f2d2c243aa55e831 (http://svn.webkit.org/repository/webkit/trunk@126147)
New snapshot including various build fixes for newer Qt 5
Diffstat (limited to 'Source/JavaScriptCore/bytecode/CodeBlock.cpp')
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlock.cpp | 116
1 file changed, 88 insertions(+), 28 deletions(-)
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index 0e2a98bc5..2ea969fcf 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -1155,6 +1155,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
dataLog("[%4d] get_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
dumpBytecodeCommentAndNewLine(location);
it++;
+ it++;
break;
}
case op_get_argument_by_val: {
@@ -1164,6 +1165,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
dataLog("[%4d] get_argument_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
dumpBytecodeCommentAndNewLine(location);
++it;
+ ++it;
break;
}
case op_get_by_pname: {
@@ -1183,6 +1185,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int r2 = (++it)->u.operand;
dataLog("[%4d] put_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
dumpBytecodeCommentAndNewLine(location);
+ ++it;
break;
}
case op_del_by_val: {
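The extra iterator bumps in the three hunks above are needed because this patch appends a new trailing operand (an array-profile slot; see the ArrayProfile plumbing later in the patch) to get_by_val, get_argument_by_val, and put_by_val. A minimal sketch of the invariant the dumper must maintain, with a hypothetical profiled opcode layout (names here are illustrative, not from the patch):

    // Layout: [opcode][dst][base][property][arrayProfile]
    // The dumper reads the three printable operands, then must still
    // step over the profile slot so the outer loop's next increment
    // lands on the following opcode word.
    int dst      = (++it)->u.operand;
    int base     = (++it)->u.operand;
    int property = (++it)->u.operand;
    ++it; // skip the array-profile operand; omitting this desynchronizes the dump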
@@ -2577,6 +2580,60 @@ void CodeBlock::unlinkIncomingCalls()
m_incomingCalls.begin()->unlink(*m_globalData, repatchBuffer);
}
+#if ENABLE(LLINT)
+Instruction* CodeBlock::adjustPCIfAtCallSite(Instruction* potentialReturnPC)
+{
+ ASSERT(potentialReturnPC);
+
+ unsigned returnPCOffset = potentialReturnPC - instructions().begin();
+ Instruction* adjustedPC;
+ unsigned opcodeLength;
+
+ // If we are at a call site, the LLInt stores the PC after the call
+ // instruction rather than the PC of the call instruction itself. This
+ // requires some correcting. When that is the case, we can rely on the
+ // fact that the preceding instruction must be one of the call
+ // instructions: either a call_varargs, or a call, construct, or eval.
+ //
+ // If we are not at a call site, then we need to guard against the
+ // possibility of peeking past the start of the bytecode range for this
+ // codeBlock. Hence, we do a bounds check before we peek at the
+ // potential "preceding" instruction. The bounds check compares the
+ // offset of the potential return PC with the length of the call opcode
+ // we're looking for: a call instruction can only precede the return PC
+ // if the return PC's offset is at least that opcode's length.
+
+ // Determining which call instruction precedes the return PC (if we
+ // are at a call site) depends on the following assumptions, so assert
+ // that they still hold:
+ ASSERT(OPCODE_LENGTH(op_call_varargs) <= OPCODE_LENGTH(op_call));
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
+
+ // Check for the case of a preceding op_call_varargs:
+ opcodeLength = OPCODE_LENGTH(op_call_varargs);
+ adjustedPC = potentialReturnPC - opcodeLength;
+ if ((returnPCOffset >= opcodeLength)
+ && (adjustedPC->u.pointer == bitwise_cast<void*>(llint_op_call_varargs))) {
+ return adjustedPC;
+ }
+
+ // Check for the case of the other 3 call instructions:
+ opcodeLength = OPCODE_LENGTH(op_call);
+ adjustedPC = potentialReturnPC - opcodeLength;
+ if ((returnPCOffset >= opcodeLength)
+ && (adjustedPC->u.pointer == bitwise_cast<void*>(llint_op_call)
+ || adjustedPC->u.pointer == bitwise_cast<void*>(llint_op_construct)
+ || adjustedPC->u.pointer == bitwise_cast<void*>(llint_op_call_eval))) {
+ return adjustedPC;
+ }
+
+ // Not a call site. No need to adjust PC. Just return the original.
+ return potentialReturnPC;
+}
+#endif
+
unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddress)
{
#if ENABLE(LLINT)
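To see why the bounds check in adjustPCIfAtCallSite is sufficient, consider a hypothetical opcode length (actual lengths depend on the build): if OPCODE_LENGTH(op_call) were 6 and potentialReturnPC sat at offset 4 from instructions().begin(), then potentialReturnPC - 6 would point before the start of the bytecode stream, so (returnPCOffset >= opcodeLength) fails and the out-of-bounds peek never happens. Only when a full call instruction fits before the return PC does the helper compare the preceding opcode slot against the LLInt call opcodes.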
@@ -2587,28 +2644,8 @@ unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddre
ASSERT(JITCode::isBaselineCode(getJITType()));
Instruction* instruction = exec->currentVPC();
ASSERT(instruction);
-
- // The LLInt stores the PC after the call instruction rather than the PC of
- // the call instruction. This requires some correcting. We rely on the fact
- // that the preceding instruction must be one of the call instructions, so
- // either it's a call_varargs or it's a call, construct, or eval.
- ASSERT(OPCODE_LENGTH(op_call_varargs) <= OPCODE_LENGTH(op_call));
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
- if (instruction[-OPCODE_LENGTH(op_call_varargs)].u.pointer == bitwise_cast<void*>(llint_op_call_varargs)) {
- // We know that the preceding instruction must be op_call_varargs because there is no way that
- // the pointer to the call_varargs could be an operand to the call.
- instruction -= OPCODE_LENGTH(op_call_varargs);
- ASSERT(instruction[-OPCODE_LENGTH(op_call)].u.pointer != bitwise_cast<void*>(llint_op_call)
- && instruction[-OPCODE_LENGTH(op_call)].u.pointer != bitwise_cast<void*>(llint_op_construct)
- && instruction[-OPCODE_LENGTH(op_call)].u.pointer != bitwise_cast<void*>(llint_op_call_eval));
- } else {
- // Must be that the last instruction was some op_call.
- ASSERT(instruction[-OPCODE_LENGTH(op_call)].u.pointer == bitwise_cast<void*>(llint_op_call)
- || instruction[-OPCODE_LENGTH(op_call)].u.pointer == bitwise_cast<void*>(llint_op_construct)
- || instruction[-OPCODE_LENGTH(op_call)].u.pointer == bitwise_cast<void*>(llint_op_call_eval));
- instruction -= OPCODE_LENGTH(op_call);
- }
+
+ instruction = adjustPCIfAtCallSite(instruction);
return bytecodeOffset(instruction);
}
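With the helper factored out, bytecodeOffset no longer assumes the VPC is always a return PC; adjustPCIfAtCallSite returns its argument unchanged for non-call-site PCs instead of asserting. A minimal usage sketch under that assumption (variable names are illustrative):

    Instruction* vpc = exec->currentVPC();
    vpc = adjustPCIfAtCallSite(vpc); // no-op unless vpc follows a call
    unsigned offset = bytecodeOffset(vpc);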
@@ -2684,27 +2721,27 @@ CodeBlock* FunctionCodeBlock::replacement()
return &static_cast<FunctionExecutable*>(ownerExecutable())->generatedBytecodeFor(m_isConstructor ? CodeForConstruct : CodeForCall);
}
-JSObject* ProgramCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode)
+JSObject* ProgramCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode, unsigned bytecodeIndex)
{
if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
return 0;
- JSObject* error = static_cast<ProgramExecutable*>(ownerExecutable())->compileOptimized(exec, scopeChainNode);
+ JSObject* error = static_cast<ProgramExecutable*>(ownerExecutable())->compileOptimized(exec, scopeChainNode, bytecodeIndex);
return error;
}
-JSObject* EvalCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode)
+JSObject* EvalCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode, unsigned bytecodeIndex)
{
if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
return 0;
- JSObject* error = static_cast<EvalExecutable*>(ownerExecutable())->compileOptimized(exec, scopeChainNode);
+ JSObject* error = static_cast<EvalExecutable*>(ownerExecutable())->compileOptimized(exec, scopeChainNode, bytecodeIndex);
return error;
}
-JSObject* FunctionCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode)
+JSObject* FunctionCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode, unsigned bytecodeIndex)
{
if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
return 0;
- JSObject* error = static_cast<FunctionExecutable*>(ownerExecutable())->compileOptimizedFor(exec, scopeChainNode, m_isConstructor ? CodeForConstruct : CodeForCall);
+ JSObject* error = static_cast<FunctionExecutable*>(ownerExecutable())->compileOptimizedFor(exec, scopeChainNode, bytecodeIndex, m_isConstructor ? CodeForConstruct : CodeForCall);
return error;
}
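The new bytecodeIndex parameter threads a bytecode offset down to the executables' optimized-compile paths, presumably so the optimizing tier knows where an on-stack-replacement entry is wanted. A hedged sketch of a caller (the wrapper function is hypothetical; only the changed compileOptimized signatures come from this patch):

    // Hypothetical tier-up helper: request an optimized compile that
    // can enter at the bytecode offset of the loop that became hot.
    JSObject* tryTierUp(ExecState* exec, ScopeChainNode* scope,
                        FunctionCodeBlock* block, unsigned hotLoopOffset)
    {
        // Returns an error object on failure, 0 on success, per the
        // compileOptimized contract shown in the hunks above.
        return block->compileOptimized(exec, scope, hotLoopOffset);
    }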
@@ -2769,6 +2806,23 @@ bool FunctionCodeBlock::jitCompileImpl(ExecState* exec)
#endif
#if ENABLE(VALUE_PROFILER)
+ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
+{
+ for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
+ if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
+ return &m_arrayProfiles[i];
+ }
+ return 0;
+}
+
+ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
+{
+ ArrayProfile* result = getArrayProfile(bytecodeOffset);
+ if (result)
+ return result;
+ return addArrayProfile(bytecodeOffset);
+}
+
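getArrayProfile does a linear scan of m_arrayProfiles, so lookup cost grows with the number of profiled array-access sites in the block; that is presumably acceptable because these lookups happen off the interpreter's hot path. A minimal usage sketch (the surrounding call site is illustrative):

    // Fetch-or-create the profile for an array access at this offset;
    // the first call for a given offset allocates the profile.
    ArrayProfile* profile = codeBlock->getOrAddArrayProfile(bytecodeOffset);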
void CodeBlock::updateAllPredictionsAndCountLiveness(
OperationInProgress operation, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
{
@@ -2792,6 +2846,12 @@ void CodeBlock::updateAllPredictionsAndCountLiveness(
#if ENABLE(DFG_JIT)
m_lazyOperandValueProfiles.computeUpdatedPredictions(operation);
#endif
+
+ // Don't count the array profiles towards statistics, since each array
+ // profile site also has a value profile site, so we already know
+ // whether or not it's live.
+ for (unsigned i = m_arrayProfiles.size(); i--;)
+ m_arrayProfiles[i].computeUpdatedPrediction(operation);
}
void CodeBlock::updateAllPredictions(OperationInProgress operation)