author      Simon Hausmann <simon.hausmann@nokia.com>    2012-08-21 10:57:44 +0200
committer   Simon Hausmann <simon.hausmann@nokia.com>    2012-08-21 10:57:44 +0200
commit      5ef7c8a6a70875d4430752d146bdcb069605d71d (patch)
tree        f6256640b6c46d7da221435803cae65326817ba2 /Source/JavaScriptCore/bytecode
parent      decad929f578d8db641febc8740649ca6c574638 (diff)
download    qtwebkit-5ef7c8a6a70875d4430752d146bdcb069605d71d.tar.gz
Imported WebKit commit 356d83016b090995d08ad568f2d2c243aa55e831 (http://svn.webkit.org/repository/webkit/trunk@126147)
New snapshot including various build fixes for newer Qt 5
Diffstat (limited to 'Source/JavaScriptCore/bytecode')
-rw-r--r--    Source/JavaScriptCore/bytecode/ArrayProfile.cpp     55
-rw-r--r--    Source/JavaScriptCore/bytecode/ArrayProfile.h      104
-rw-r--r--    Source/JavaScriptCore/bytecode/CodeBlock.cpp       116
-rw-r--r--    Source/JavaScriptCore/bytecode/CodeBlock.h          25
-rw-r--r--    Source/JavaScriptCore/bytecode/Instruction.h         3
-rw-r--r--    Source/JavaScriptCore/bytecode/Opcode.h              6
6 files changed, 273 insertions(+), 36 deletions(-)
diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
new file mode 100644
index 000000000..6b97f7806
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ArrayProfile.h"
+
+namespace JSC {
+
+void ArrayProfile::computeUpdatedPrediction(OperationInProgress operation)
+{
+    if (m_lastSeenStructure) {
+        m_observedArrayModes |= arrayModeFromStructure(m_lastSeenStructure);
+        if (!m_structureIsPolymorphic) {
+            if (!m_expectedStructure)
+                m_expectedStructure = m_lastSeenStructure;
+            else if (m_expectedStructure != m_lastSeenStructure) {
+                m_expectedStructure = 0;
+                m_structureIsPolymorphic = true;
+            }
+        }
+        m_lastSeenStructure = 0;
+    }
+
+    if (operation == Collection
+        && m_expectedStructure
+        && !Heap::isMarked(m_expectedStructure)) {
+        m_expectedStructure = 0;
+        m_structureIsPolymorphic = true;
+    }
+}
+
+} // namespace JSC
+
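computeUpdatedPrediction() is a small one-way state machine: the first structure a site observes becomes the expected structure, any second distinct structure permanently flips the profile to polymorphic, and an expected structure whose cell is not marked during a collection is dropped the same way. Below is a minimal standalone model of that state machine, with a plain pointer standing in for JSC::Structure and the GC-marking case omitted; Profile and update() are illustrative names, not JSC API.

    #include <cstdio>

    struct Profile {
        const void* lastSeen = nullptr;  // written on every profiled access
        const void* expected = nullptr;  // the single structure seen so far, if any
        bool polymorphic = false;

        void update()
        {
            if (!lastSeen)
                return;
            if (!polymorphic) {
                if (!expected)
                    expected = lastSeen;         // first structure: monomorphic
                else if (expected != lastSeen) { // a second, different structure
                    expected = nullptr;
                    polymorphic = true;          // give up on a single prediction
                }
            }
            lastSeen = nullptr;
        }
    };

    int main()
    {
        int a, b;
        Profile p;
        p.lastSeen = &a; p.update(); // expected == &a, monomorphic
        p.lastSeen = &a; p.update(); // same structure, still monomorphic
        p.lastSeen = &b; p.update(); // second structure: polymorphic
        std::printf("polymorphic: %d\n", p.polymorphic); // prints 1
        return 0;
    }

Nothing ever clears m_structureIsPolymorphic again, which matches the one-way transition in the real code.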
diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.h b/Source/JavaScriptCore/bytecode/ArrayProfile.h
new file mode 100644
index 000000000..5a656e2dd
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ArrayProfile.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ArrayProfile_h
+#define ArrayProfile_h
+
+#include "JSArray.h"
+#include "Structure.h"
+#include <wtf/HashMap.h>
+#include <wtf/SegmentedVector.h>
+
+namespace JSC {
+
+class LLIntOffsetsExtractor;
+
+typedef unsigned ArrayModes;
+
+static const unsigned IsNotArray = 1;
+static const unsigned IsJSArray = 2;
+
+inline ArrayModes arrayModeFromStructure(Structure* structure)
+{
+    if (structure->classInfo() == &JSArray::s_info)
+        return IsJSArray;
+    return IsNotArray;
+}
+
+class ArrayProfile {
+public:
+    ArrayProfile()
+        : m_bytecodeOffset(std::numeric_limits<unsigned>::max())
+        , m_lastSeenStructure(0)
+        , m_expectedStructure(0)
+        , m_structureIsPolymorphic(false)
+        , m_observedArrayModes(0)
+    {
+    }
+
+    ArrayProfile(unsigned bytecodeOffset)
+        : m_bytecodeOffset(bytecodeOffset)
+        , m_lastSeenStructure(0)
+        , m_expectedStructure(0)
+        , m_structureIsPolymorphic(false)
+        , m_observedArrayModes(0)
+    {
+    }
+
+    unsigned bytecodeOffset() const { return m_bytecodeOffset; }
+
+    Structure** addressOfLastSeenStructure() { return &m_lastSeenStructure; }
+
+    void observeStructure(Structure* structure)
+    {
+        m_lastSeenStructure = structure;
+    }
+
+    void computeUpdatedPrediction(OperationInProgress operation = NoOperation);
+
+    Structure* expectedStructure() const { return m_expectedStructure; }
+    bool structureIsPolymorphic() const { return m_structureIsPolymorphic; }
+    bool hasDefiniteStructure() const
+    {
+        return !structureIsPolymorphic() && m_expectedStructure;
+    }
+    ArrayModes observedArrayModes() const { return m_observedArrayModes; }
+
+private:
+    friend class LLIntOffsetsExtractor;
+
+    unsigned m_bytecodeOffset;
+    Structure* m_lastSeenStructure;
+    Structure* m_expectedStructure;
+    bool m_structureIsPolymorphic;
+    ArrayModes m_observedArrayModes;
+};
+
+typedef SegmentedVector<ArrayProfile, 4, 0> ArrayProfileVector;
+
+} // namespace JSC
+
+#endif // ArrayProfile_h
+
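The header keeps two kinds of profile state: the expected Structure, which is precise but collapses to nothing under polymorphism, and m_observedArrayModes, a bitmask that only ever accumulates. A short standalone sketch of the bitmask side, with the two mode constants copied from the header and everything else illustrative:

    #include <cstdio>

    typedef unsigned ArrayModes;
    static const ArrayModes IsNotArray = 1;
    static const ArrayModes IsJSArray = 2;

    int main()
    {
        ArrayModes observed = 0;
        observed |= IsJSArray;  // a real JSArray flowed through the site
        observed |= IsNotArray; // later, a non-array did too
        bool sawOnlyArrays = (observed == IsJSArray);
        std::printf("modes=%u, arrays only: %d\n", observed, sawOnlyArrays); // 3, 0
        return 0;
    }

Because the modes survive even after the structure prediction has gone polymorphic, a consumer can still answer the coarser question "did this site only ever see JSArrays?" with a single equality test.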
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index 0e2a98bc5..2ea969fcf 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -1155,6 +1155,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
dataLog("[%4d] get_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
dumpBytecodeCommentAndNewLine(location);
it++;
+ it++;
break;
}
case op_get_argument_by_val: {
@@ -1164,6 +1165,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
dataLog("[%4d] get_argument_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
dumpBytecodeCommentAndNewLine(location);
++it;
+ ++it;
break;
}
case op_get_by_pname: {
@@ -1183,6 +1185,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int r2 = (++it)->u.operand;
dataLog("[%4d] put_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
dumpBytecodeCommentAndNewLine(location);
+ ++it;
break;
}
case op_del_by_val: {
@@ -2577,6 +2580,60 @@ void CodeBlock::unlinkIncomingCalls()
m_incomingCalls.begin()->unlink(*m_globalData, repatchBuffer);
}
+#if ENABLE(LLINT)
+Instruction* CodeBlock::adjustPCIfAtCallSite(Instruction* potentialReturnPC)
+{
+    ASSERT(potentialReturnPC);
+
+    unsigned returnPCOffset = potentialReturnPC - instructions().begin();
+    Instruction* adjustedPC;
+    unsigned opcodeLength;
+
+    // If we are at a call site, the LLInt stores the PC after the call
+    // instruction rather than the PC of the call instruction itself. This
+    // requires some correcting. In that case we can rely on the fact that
+    // the preceding instruction must be one of the call instructions:
+    // either a call_varargs, or a call, construct, or eval.
+    //
+    // If we are not at a call site, then we need to guard against the
+    // possibility of peeking past the start of the bytecode range for this
+    // codeBlock. Hence, we do a bounds check before we peek at the
+    // potential "preceding" instruction.
+    // The bounds check compares the offset of the potential returnPC with
+    // the length of the opcode. If there is room for a call instruction
+    // before the returnPC, then the offset of the returnPC must be at least
+    // the size of the call opcode we're looking for.
+
+    // The determination of which call instruction is present (if we are at
+    // a call site) depends on the following assumptions. So, assert that
+    // they still hold:
+    ASSERT(OPCODE_LENGTH(op_call_varargs) <= OPCODE_LENGTH(op_call));
+    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
+    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
+
+    // Check for the case of a preceding op_call_varargs:
+    opcodeLength = OPCODE_LENGTH(op_call_varargs);
+    adjustedPC = potentialReturnPC - opcodeLength;
+    if ((returnPCOffset >= opcodeLength)
+        && (adjustedPC->u.pointer == bitwise_cast<void*>(llint_op_call_varargs))) {
+        return adjustedPC;
+    }
+
+    // Check for the case of the other 3 call instructions:
+    opcodeLength = OPCODE_LENGTH(op_call);
+    adjustedPC = potentialReturnPC - opcodeLength;
+    if ((returnPCOffset >= opcodeLength)
+        && (adjustedPC->u.pointer == bitwise_cast<void*>(llint_op_call)
+            || adjustedPC->u.pointer == bitwise_cast<void*>(llint_op_construct)
+            || adjustedPC->u.pointer == bitwise_cast<void*>(llint_op_call_eval))) {
+        return adjustedPC;
+    }
+
+    // Not a call site. No need to adjust the PC. Just return the original.
+    return potentialReturnPC;
+}
+#endif
+
unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddress)
{
#if ENABLE(LLINT)
@@ -2587,28 +2644,8 @@ unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddre
ASSERT(JITCode::isBaselineCode(getJITType()));
Instruction* instruction = exec->currentVPC();
ASSERT(instruction);
-
-    // The LLInt stores the PC after the call instruction rather than the PC of
-    // the call instruction. This requires some correcting. We rely on the fact
-    // that the preceding instruction must be one of the call instructions, so
-    // either it's a call_varargs or it's a call, construct, or eval.
-    ASSERT(OPCODE_LENGTH(op_call_varargs) <= OPCODE_LENGTH(op_call));
-    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
-    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
-    if (instruction[-OPCODE_LENGTH(op_call_varargs)].u.pointer == bitwise_cast<void*>(llint_op_call_varargs)) {
-        // We know that the preceding instruction must be op_call_varargs because there is no way that
-        // the pointer to the call_varargs could be an operand to the call.
-        instruction -= OPCODE_LENGTH(op_call_varargs);
-        ASSERT(instruction[-OPCODE_LENGTH(op_call)].u.pointer != bitwise_cast<void*>(llint_op_call)
-            && instruction[-OPCODE_LENGTH(op_call)].u.pointer != bitwise_cast<void*>(llint_op_construct)
-            && instruction[-OPCODE_LENGTH(op_call)].u.pointer != bitwise_cast<void*>(llint_op_call_eval));
-    } else {
-        // Must be that the last instruction was some op_call.
-        ASSERT(instruction[-OPCODE_LENGTH(op_call)].u.pointer == bitwise_cast<void*>(llint_op_call)
-            || instruction[-OPCODE_LENGTH(op_call)].u.pointer == bitwise_cast<void*>(llint_op_construct)
-            || instruction[-OPCODE_LENGTH(op_call)].u.pointer == bitwise_cast<void*>(llint_op_call_eval));
-        instruction -= OPCODE_LENGTH(op_call);
-    }
+
+    instruction = adjustPCIfAtCallSite(instruction);
return bytecodeOffset(instruction);
}
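The rewritten bytecodeOffset() now delegates the return-PC correction to adjustPCIfAtCallSite(), and unlike the code it replaces, the helper bounds-checks before peeking backwards, so a PC near the start of the instruction stream can no longer read before instructions().begin(). The pattern is easier to see in isolation; the following self-contained model uses a toy opcode enum in place of the LLInt opcode-pointer comparison (all names are illustrative):

    #include <cstdio>

    enum Op { OpCall, OpAdd };
    struct Insn { Op op; };

    static const unsigned callLength = 3; // opcode word plus two operands, say

    // Step back over a call instruction if one immediately precedes the
    // potential return PC; otherwise leave the PC alone. The offset check
    // guards against peeking before the start of the stream.
    const Insn* adjustPC(const Insn* begin, const Insn* potentialReturnPC)
    {
        unsigned offset = static_cast<unsigned>(potentialReturnPC - begin);
        if (offset >= callLength && potentialReturnPC[-static_cast<int>(callLength)].op == OpCall)
            return potentialReturnPC - callLength;
        return potentialReturnPC;
    }

    int main()
    {
        Insn stream[] = { { OpAdd }, { OpCall }, { OpAdd }, { OpAdd }, { OpAdd } };
        const Insn* returnPC = stream + 4; // the PC after the 3-word call at index 1
        const Insn* adjusted = adjustPC(stream, returnPC);
        std::printf("adjusted index: %td\n", adjusted - stream); // prints 1
        return 0;
    }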
@@ -2684,27 +2721,27 @@ CodeBlock* FunctionCodeBlock::replacement()
return &static_cast<FunctionExecutable*>(ownerExecutable())->generatedBytecodeFor(m_isConstructor ? CodeForConstruct : CodeForCall);
}
-JSObject* ProgramCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode)
+JSObject* ProgramCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode, unsigned bytecodeIndex)
{
if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
return 0;
- JSObject* error = static_cast<ProgramExecutable*>(ownerExecutable())->compileOptimized(exec, scopeChainNode);
+ JSObject* error = static_cast<ProgramExecutable*>(ownerExecutable())->compileOptimized(exec, scopeChainNode, bytecodeIndex);
return error;
}
-JSObject* EvalCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode)
+JSObject* EvalCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode, unsigned bytecodeIndex)
{
if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
return 0;
- JSObject* error = static_cast<EvalExecutable*>(ownerExecutable())->compileOptimized(exec, scopeChainNode);
+ JSObject* error = static_cast<EvalExecutable*>(ownerExecutable())->compileOptimized(exec, scopeChainNode, bytecodeIndex);
return error;
}
-JSObject* FunctionCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode)
+JSObject* FunctionCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode, unsigned bytecodeIndex)
{
if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
return 0;
- JSObject* error = static_cast<FunctionExecutable*>(ownerExecutable())->compileOptimizedFor(exec, scopeChainNode, m_isConstructor ? CodeForConstruct : CodeForCall);
+ JSObject* error = static_cast<FunctionExecutable*>(ownerExecutable())->compileOptimizedFor(exec, scopeChainNode, bytecodeIndex, m_isConstructor ? CodeForConstruct : CodeForCall);
return error;
}
@@ -2769,6 +2806,23 @@ bool FunctionCodeBlock::jitCompileImpl(ExecState* exec)
#endif
#if ENABLE(VALUE_PROFILER)
+ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
+{
+    for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
+        if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
+            return &m_arrayProfiles[i];
+    }
+    return 0;
+}
+
+ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
+{
+    ArrayProfile* result = getArrayProfile(bytecodeOffset);
+    if (result)
+        return result;
+    return addArrayProfile(bytecodeOffset);
+}
+
void CodeBlock::updateAllPredictionsAndCountLiveness(
OperationInProgress operation, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
{
@@ -2792,6 +2846,12 @@ void CodeBlock::updateAllPredictionsAndCountLiveness(
#if ENABLE(DFG_JIT)
m_lazyOperandValueProfiles.computeUpdatedPredictions(operation);
#endif
+
+    // Don't count the array profiles towards statistics, since each array profile
+    // site also has a value profile site - so we already know whether or not it's
+    // live.
+    for (unsigned i = m_arrayProfiles.size(); i--;)
+        m_arrayProfiles[i].computeUpdatedPrediction(operation);
}
void CodeBlock::updateAllPredictions(OperationInProgress operation)
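getArrayProfile() is a linear scan over every profile in the block, so getOrAddArrayProfile() costs O(n) per lookup; that is presumably acceptable because it runs during compilation and profiling bookkeeping rather than on the interpreter's hot path. Handing out raw ArrayProfile* also requires a container whose elements never move on append, which is the property SegmentedVector provides. A minimal model of the same get-or-add pattern, using std::deque, whose push_back likewise never moves existing elements (all names here are illustrative):

    #include <cstdio>
    #include <deque>

    struct ArrayProfileModel { unsigned bytecodeOffset; };

    std::deque<ArrayProfileModel> profiles;

    ArrayProfileModel* getOrAdd(unsigned bytecodeOffset)
    {
        for (ArrayProfileModel& p : profiles) {
            if (p.bytecodeOffset == bytecodeOffset)
                return &p; // existing profile for this bytecode offset
        }
        profiles.push_back(ArrayProfileModel{ bytecodeOffset });
        return &profiles.back(); // stable address: push_back leaves old elements in place
    }

    int main()
    {
        ArrayProfileModel* a = getOrAdd(42);
        ArrayProfileModel* b = getOrAdd(42); // second lookup finds the same slot
        std::printf("same slot: %d\n", a == b); // prints 1
        return 0;
    }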
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index 2a7d2120a..a8b2a5871 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -30,6 +30,7 @@
#ifndef CodeBlock_h
#define CodeBlock_h
+#include "ArrayProfile.h"
#include "BytecodeConventions.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
@@ -229,6 +230,9 @@ namespace JSC {
return *(binarySearch<MethodCallLinkInfo, unsigned, getMethodCallLinkInfoBytecodeIndex>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), bytecodeIndex));
}
+#if ENABLE(LLINT)
+ Instruction* adjustPCIfAtCallSite(Instruction*);
+#endif
unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);
unsigned bytecodeOffsetForCallAtIndex(unsigned index)
@@ -441,7 +445,7 @@ namespace JSC {
MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
JITCode::JITType getJITType() { return m_jitCode.jitType(); }
ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
- virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
+ virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*, unsigned bytecodeIndex) = 0;
virtual void jettison() = 0;
enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };
JITCompilationResult jitCompile(ExecState* exec)
@@ -751,8 +755,18 @@ namespace JSC {
}
unsigned executionEntryCount() const { return m_executionEntryCount; }
-#endif
+        unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
+        const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
+        ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
+        {
+            m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
+            return &m_arrayProfiles.last();
+        }
+        ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
+        ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
+#endif
+
unsigned globalResolveInfoCount() const
{
#if ENABLE(JIT)
@@ -1333,6 +1347,7 @@ namespace JSC {
SegmentedVector<ValueProfile, 8> m_valueProfiles;
SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
+ ArrayProfileVector m_arrayProfiles;
unsigned m_executionEntryCount;
#endif
@@ -1436,7 +1451,7 @@ namespace JSC {
#if ENABLE(JIT)
protected:
- virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+ virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*, unsigned bytecodeIndex);
virtual void jettison();
virtual bool jitCompileImpl(ExecState*);
virtual CodeBlock* replacement();
@@ -1471,7 +1486,7 @@ namespace JSC {
#if ENABLE(JIT)
protected:
- virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+ virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*, unsigned bytecodeIndex);
virtual void jettison();
virtual bool jitCompileImpl(ExecState*);
virtual CodeBlock* replacement();
@@ -1509,7 +1524,7 @@ namespace JSC {
#if ENABLE(JIT)
protected:
- virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+ virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*, unsigned bytecodeIndex);
virtual void jettison();
virtual bool jitCompileImpl(ExecState*);
virtual CodeBlock* replacement();
diff --git a/Source/JavaScriptCore/bytecode/Instruction.h b/Source/JavaScriptCore/bytecode/Instruction.h
index 2e94b452c..6c1260abc 100644
--- a/Source/JavaScriptCore/bytecode/Instruction.h
+++ b/Source/JavaScriptCore/bytecode/Instruction.h
@@ -46,6 +46,7 @@ namespace JSC {
// currently actually use PolymorphicAccessStructureLists, which we should). Anyway, this seems like the best
// solution for now - will need to do something smarter if/when we actually want mixed-mode operation.
+ class ArrayProfile;
class JSCell;
class Structure;
class StructureChain;
@@ -190,6 +191,7 @@ namespace JSC {
Instruction(LLIntCallLinkInfo* callLinkInfo) { u.callLinkInfo = callLinkInfo; }
Instruction(ValueProfile* profile) { u.profile = profile; }
+ Instruction(ArrayProfile* profile) { u.arrayProfile = profile; }
Instruction(WriteBarrier<Unknown>* registerPointer) { u.registerPointer = registerPointer; }
@@ -205,6 +207,7 @@ namespace JSC {
PropertySlot::GetValueFunc getterFunc;
LLIntCallLinkInfo* callLinkInfo;
ValueProfile* profile;
+ ArrayProfile* arrayProfile;
void* pointer;
bool* predicatePointer;
} u;
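The bytecode stream is a flat array of word-sized Instruction slots, and this union is how a raw ArrayProfile* rides inline in that stream: the new constructor lets the bytecode generator drop the pointer straight into the extra operand slot that Opcode.h (below) makes room for. A toy version of such a slot, with illustrative names:

    #include <cstdio>

    struct ArrayProfileStub { unsigned bytecodeOffset; };

    // One machine word that can hold an operand, a profile pointer, or an
    // opaque pointer, depending on the opcode's operand layout.
    struct Slot {
        union {
            int operand;
            ArrayProfileStub* arrayProfile;
            void* pointer;
        } u;
    };

    int main()
    {
        ArrayProfileStub profile = { 7 };
        Slot slot;
        slot.u.arrayProfile = &profile; // stored inline in the instruction stream
        std::printf("offset via slot: %u\n", slot.u.arrayProfile->bytecodeOffset); // prints 7
        return 0;
    }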
diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h
index 4308e79df..777b4876f 100644
--- a/Source/JavaScriptCore/bytecode/Opcode.h
+++ b/Source/JavaScriptCore/bytecode/Opcode.h
@@ -132,10 +132,10 @@ namespace JSC {
macro(op_put_by_id_replace, 9) \
macro(op_put_by_id_generic, 9) \
macro(op_del_by_id, 4) \
- macro(op_get_by_val, 5) /* has value profiling */ \
- macro(op_get_argument_by_val, 5) /* must be the same size as op_get_by_val */ \
+ macro(op_get_by_val, 6) /* has value profiling */ \
+ macro(op_get_argument_by_val, 6) /* must be the same size as op_get_by_val */ \
macro(op_get_by_pname, 7) \
- macro(op_put_by_val, 4) \
+ macro(op_put_by_val, 5) \
macro(op_del_by_val, 4) \
macro(op_put_by_index, 4) \
macro(op_put_getter_setter, 5) \
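Each length in this table counts the opcode word plus its operand words, so bumping op_get_by_val and op_get_argument_by_val from 5 to 6 and op_put_by_val from 4 to 5 makes room for exactly one extra word per instruction: the ArrayProfile* slot. That is also why CodeBlock::dump() above gained one extra it++ per affected opcode. A sketch of the size invariant the comment insists on, with the lengths copied from this table:

    #include <cassert>

    enum {
        op_get_by_val_length = 6,
        op_get_argument_by_val_length = 6, // "must be the same size as op_get_by_val"
        op_put_by_val_length = 5,
    };

    int main()
    {
        // The comment in the table requires these two opcodes to stay the same
        // width in the instruction stream; this assert would catch a divergence.
        assert(op_get_by_val_length == op_get_argument_by_val_length);
        return 0;
    }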