summaryrefslogtreecommitdiff
path: root/Source/JavaScriptCore/bytecode
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@digia.com>2013-09-13 12:51:20 +0200
committerThe Qt Project <gerrit-noreply@qt-project.org>2013-09-19 20:50:05 +0200
commitd441d6f39bb846989d95bcf5caf387b42414718d (patch)
treee367e64a75991c554930278175d403c072de6bb8 /Source/JavaScriptCore/bytecode
parent0060b2994c07842f4c59de64b5e3e430525c4b90 (diff)
downloadqtwebkit-d441d6f39bb846989d95bcf5caf387b42414718d.tar.gz
Import Qt5x2 branch of QtWebkit for Qt 5.2
Importing a new snapshot of webkit. Change-Id: I2d01ad12cdc8af8cb015387641120a9d7ea5f10c Reviewed-by: Allan Sandfeld Jensen <allan.jensen@digia.com>
Diffstat (limited to 'Source/JavaScriptCore/bytecode')
-rw-r--r--Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp2
-rw-r--r--Source/JavaScriptCore/bytecode/ArrayProfile.cpp150
-rw-r--r--Source/JavaScriptCore/bytecode/ArrayProfile.h34
-rw-r--r--Source/JavaScriptCore/bytecode/CallLinkInfo.cpp8
-rw-r--r--Source/JavaScriptCore/bytecode/CallLinkInfo.h6
-rw-r--r--Source/JavaScriptCore/bytecode/CallLinkStatus.cpp96
-rw-r--r--Source/JavaScriptCore/bytecode/CallLinkStatus.h84
-rw-r--r--Source/JavaScriptCore/bytecode/CodeBlock.cpp1434
-rw-r--r--Source/JavaScriptCore/bytecode/CodeBlock.h2342
-rw-r--r--Source/JavaScriptCore/bytecode/CodeBlockHash.cpp3
-rw-r--r--Source/JavaScriptCore/bytecode/CodeOrigin.cpp58
-rw-r--r--Source/JavaScriptCore/bytecode/CodeOrigin.h35
-rw-r--r--Source/JavaScriptCore/bytecode/Comment.h37
-rw-r--r--Source/JavaScriptCore/bytecode/DFGExitProfile.cpp27
-rw-r--r--Source/JavaScriptCore/bytecode/DFGExitProfile.h82
-rw-r--r--Source/JavaScriptCore/bytecode/DataFormat.h11
-rw-r--r--Source/JavaScriptCore/bytecode/EvalCodeCache.h13
-rw-r--r--Source/JavaScriptCore/bytecode/ExecutionCounter.cpp3
-rw-r--r--Source/JavaScriptCore/bytecode/ExitKind.cpp107
-rw-r--r--Source/JavaScriptCore/bytecode/ExitKind.h66
-rw-r--r--Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h71
-rw-r--r--Source/JavaScriptCore/bytecode/GetByIdStatus.cpp18
-rw-r--r--Source/JavaScriptCore/bytecode/GetByIdStatus.h2
-rw-r--r--Source/JavaScriptCore/bytecode/Instruction.h256
-rw-r--r--Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp2
-rw-r--r--Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp2
-rw-r--r--Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h2
-rw-r--r--Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h142
-rw-r--r--Source/JavaScriptCore/bytecode/Opcode.h445
-rw-r--r--Source/JavaScriptCore/bytecode/Operands.h30
-rw-r--r--Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h139
-rw-r--r--Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp4
-rw-r--r--Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h12
-rw-r--r--Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp128
-rw-r--r--Source/JavaScriptCore/bytecode/PreciseJumpTargets.h38
-rw-r--r--Source/JavaScriptCore/bytecode/PutByIdStatus.cpp29
-rw-r--r--Source/JavaScriptCore/bytecode/PutByIdStatus.h4
-rw-r--r--Source/JavaScriptCore/bytecode/ReduceWhitespace.cpp52
-rw-r--r--Source/JavaScriptCore/bytecode/ReduceWhitespace.h38
-rw-r--r--Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp5
-rw-r--r--Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h2
-rw-r--r--Source/JavaScriptCore/bytecode/SamplingTool.cpp6
-rw-r--r--Source/JavaScriptCore/bytecode/SamplingTool.h9
-rw-r--r--Source/JavaScriptCore/bytecode/SpeculatedType.cpp28
-rw-r--r--Source/JavaScriptCore/bytecode/SpeculatedType.h34
-rw-r--r--Source/JavaScriptCore/bytecode/StructureStubInfo.cpp2
-rw-r--r--Source/JavaScriptCore/bytecode/StructureStubInfo.h485
-rw-r--r--Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp192
-rw-r--r--Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h165
-rw-r--r--Source/JavaScriptCore/bytecode/ValueProfile.h19
-rw-r--r--Source/JavaScriptCore/bytecode/ValueRecovery.h4
51 files changed, 4108 insertions, 2855 deletions
diff --git a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp
index aa682da86..6d9afda28 100644
--- a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp
@@ -26,6 +26,8 @@
#include "config.h"
#include "ArrayAllocationProfile.h"
+#include "Operations.h"
+
namespace JSC {
void ArrayAllocationProfile::updateIndexingType()
diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
index 51baf332f..ae3c8f94a 100644
--- a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
@@ -28,41 +28,50 @@
#include "CodeBlock.h"
#include <wtf/StringExtras.h>
+#include <wtf/StringPrintStream.h>
namespace JSC {
-const char* arrayModesToString(ArrayModes arrayModes)
+void dumpArrayModes(PrintStream& out, ArrayModes arrayModes)
{
- if (!arrayModes)
- return "0:<empty>";
+ if (!arrayModes) {
+ out.print("0:<empty>");
+ return;
+ }
- if (arrayModes == ALL_ARRAY_MODES)
- return "TOP";
-
- bool isNonArray = !!(arrayModes & asArrayModes(NonArray));
- bool isNonArrayWithContiguous = !!(arrayModes & asArrayModes(NonArrayWithContiguous));
- bool isNonArrayWithArrayStorage = !!(arrayModes & asArrayModes(NonArrayWithArrayStorage));
- bool isNonArrayWithSlowPutArrayStorage = !!(arrayModes & asArrayModes(NonArrayWithSlowPutArrayStorage));
- bool isArray = !!(arrayModes & asArrayModes(ArrayClass));
- bool isArrayWithContiguous = !!(arrayModes & asArrayModes(ArrayWithContiguous));
- bool isArrayWithArrayStorage = !!(arrayModes & asArrayModes(ArrayWithArrayStorage));
- bool isArrayWithSlowPutArrayStorage = !!(arrayModes & asArrayModes(ArrayWithSlowPutArrayStorage));
-
- static char result[256];
- snprintf(
- result, sizeof(result),
- "%u:%s%s%s%s%s%s%s%s",
- arrayModes,
- isNonArray ? "NonArray" : "",
- isNonArrayWithContiguous ? "NonArrayWithContiguous" : "",
- isNonArrayWithArrayStorage ? " NonArrayWithArrayStorage" : "",
- isNonArrayWithSlowPutArrayStorage ? "NonArrayWithSlowPutArrayStorage" : "",
- isArray ? "ArrayClass" : "",
- isArrayWithContiguous ? "ArrayWithContiguous" : "",
- isArrayWithArrayStorage ? " ArrayWithArrayStorage" : "",
- isArrayWithSlowPutArrayStorage ? "ArrayWithSlowPutArrayStorage" : "");
-
- return result;
+ if (arrayModes == ALL_ARRAY_MODES) {
+ out.print("TOP");
+ return;
+ }
+
+ out.print(arrayModes, ":");
+
+ if (arrayModes & asArrayModes(NonArray))
+ out.print("NonArray");
+ if (arrayModes & asArrayModes(NonArrayWithInt32))
+ out.print("NonArrayWithInt32");
+ if (arrayModes & asArrayModes(NonArrayWithDouble))
+ out.print("NonArrayWithDouble");
+ if (arrayModes & asArrayModes(NonArrayWithContiguous))
+ out.print("NonArrayWithContiguous");
+ if (arrayModes & asArrayModes(NonArrayWithArrayStorage))
+ out.print("NonArrayWithArrayStorage");
+ if (arrayModes & asArrayModes(NonArrayWithSlowPutArrayStorage))
+ out.print("NonArrayWithSlowPutArrayStorage");
+ if (arrayModes & asArrayModes(ArrayClass))
+ out.print("ArrayClass");
+ if (arrayModes & asArrayModes(ArrayWithUndecided))
+ out.print("ArrayWithUndecided");
+ if (arrayModes & asArrayModes(ArrayWithInt32))
+ out.print("ArrayWithInt32");
+ if (arrayModes & asArrayModes(ArrayWithDouble))
+ out.print("ArrayWithDouble");
+ if (arrayModes & asArrayModes(ArrayWithContiguous))
+ out.print("ArrayWithContiguous");
+ if (arrayModes & asArrayModes(ArrayWithArrayStorage))
+ out.print("ArrayWithArrayStorage");
+ if (arrayModes & asArrayModes(ArrayWithSlowPutArrayStorage))
+ out.print("ArrayWithSlowPutArrayStorage");
}
ArrayModes ArrayProfile::updatedObservedArrayModes() const
@@ -74,34 +83,99 @@ ArrayModes ArrayProfile::updatedObservedArrayModes() const
void ArrayProfile::computeUpdatedPrediction(CodeBlock* codeBlock, OperationInProgress operation)
{
+ const bool verbose = false;
+
if (m_lastSeenStructure) {
m_observedArrayModes |= arrayModeFromStructure(m_lastSeenStructure);
m_mayInterceptIndexedAccesses |=
m_lastSeenStructure->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero();
if (!codeBlock->globalObject()->isOriginalArrayStructure(m_lastSeenStructure))
m_usesOriginalArrayStructures = false;
- if (!m_structureIsPolymorphic) {
+ if (!structureIsPolymorphic()) {
if (!m_expectedStructure)
m_expectedStructure = m_lastSeenStructure;
else if (m_expectedStructure != m_lastSeenStructure) {
- m_expectedStructure = 0;
- m_structureIsPolymorphic = true;
+ if (verbose)
+ dataLog(*codeBlock, " bc#", m_bytecodeOffset, ": making structure polymorphic because ", RawPointer(m_expectedStructure), " (", m_expectedStructure->classInfo()->className, ") != ", RawPointer(m_lastSeenStructure), " (", m_lastSeenStructure->classInfo()->className, ")\n");
+ m_expectedStructure = polymorphicStructure();
}
}
m_lastSeenStructure = 0;
}
if (hasTwoOrMoreBitsSet(m_observedArrayModes)) {
- m_structureIsPolymorphic = true;
- m_expectedStructure = 0;
+ if (verbose)
+ dataLog(*codeBlock, " bc#", m_bytecodeOffset, ": making structure polymorphic because two or more bits are set in m_observedArrayModes\n");
+ m_expectedStructure = polymorphicStructure();
}
if (operation == Collection
- && m_expectedStructure
+ && expectedStructure()
&& !Heap::isMarked(m_expectedStructure)) {
- m_expectedStructure = 0;
- m_structureIsPolymorphic = true;
+ if (verbose)
+ dataLog(*codeBlock, " bc#", m_bytecodeOffset, ": making structure during GC\n");
+ m_expectedStructure = polymorphicStructure();
+ }
+}
+
+CString ArrayProfile::briefDescription(CodeBlock* codeBlock)
+{
+ computeUpdatedPrediction(codeBlock);
+
+ StringPrintStream out;
+
+ bool hasPrinted = false;
+
+ if (m_observedArrayModes) {
+ if (hasPrinted)
+ out.print(", ");
+ out.print(ArrayModesDump(m_observedArrayModes));
+ hasPrinted = true;
}
+
+ if (structureIsPolymorphic()) {
+ if (hasPrinted)
+ out.print(", ");
+ out.print("struct = TOP");
+ hasPrinted = true;
+ } else if (m_expectedStructure) {
+ if (hasPrinted)
+ out.print(", ");
+ out.print("struct = ", RawPointer(m_expectedStructure));
+ hasPrinted = true;
+ }
+
+ if (m_mayStoreToHole) {
+ if (hasPrinted)
+ out.print(", ");
+ out.print("Hole");
+ hasPrinted = true;
+ }
+
+ if (m_outOfBounds) {
+ if (hasPrinted)
+ out.print(", ");
+ out.print("OutOfBounds");
+ hasPrinted = true;
+ }
+
+ if (m_mayInterceptIndexedAccesses) {
+ if (hasPrinted)
+ out.print(", ");
+ out.print("Intercept");
+ hasPrinted = true;
+ }
+
+ if (m_usesOriginalArrayStructures) {
+ if (hasPrinted)
+ out.print(", ");
+ out.print("Original");
+ hasPrinted = true;
+ }
+
+ UNUSED_PARAM(hasPrinted);
+
+ return out.toCString();
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.h b/Source/JavaScriptCore/bytecode/ArrayProfile.h
index 5116cd36f..384275689 100644
--- a/Source/JavaScriptCore/bytecode/ArrayProfile.h
+++ b/Source/JavaScriptCore/bytecode/ArrayProfile.h
@@ -67,7 +67,8 @@ inline ArrayModes arrayModeFromStructure(Structure* structure)
return asArrayModes(structure->indexingType());
}
-const char* arrayModesToString(ArrayModes);
+void dumpArrayModes(PrintStream&, ArrayModes);
+MAKE_PRINT_ADAPTOR(ArrayModesDump, ArrayModes, dumpArrayModes);
inline bool mergeArrayModes(ArrayModes& left, ArrayModes right)
{
@@ -114,14 +115,24 @@ inline bool shouldUseInt32(ArrayModes arrayModes)
return arrayModesInclude(arrayModes, Int32Shape);
}
+inline bool hasSeenArray(ArrayModes arrayModes)
+{
+ return arrayModes & ALL_ARRAY_ARRAY_MODES;
+}
+
+inline bool hasSeenNonArray(ArrayModes arrayModes)
+{
+ return arrayModes & ALL_NON_ARRAY_ARRAY_MODES;
+}
+
class ArrayProfile {
public:
ArrayProfile()
: m_bytecodeOffset(std::numeric_limits<unsigned>::max())
, m_lastSeenStructure(0)
, m_expectedStructure(0)
- , m_structureIsPolymorphic(false)
, m_mayStoreToHole(false)
+ , m_outOfBounds(false)
, m_mayInterceptIndexedAccesses(false)
, m_usesOriginalArrayStructures(true)
, m_observedArrayModes(0)
@@ -132,8 +143,8 @@ public:
: m_bytecodeOffset(bytecodeOffset)
, m_lastSeenStructure(0)
, m_expectedStructure(0)
- , m_structureIsPolymorphic(false)
, m_mayStoreToHole(false)
+ , m_outOfBounds(false)
, m_mayInterceptIndexedAccesses(false)
, m_usesOriginalArrayStructures(true)
, m_observedArrayModes(0)
@@ -145,6 +156,7 @@ public:
Structure** addressOfLastSeenStructure() { return &m_lastSeenStructure; }
ArrayModes* addressOfArrayModes() { return &m_observedArrayModes; }
bool* addressOfMayStoreToHole() { return &m_mayStoreToHole; }
+ bool* addressOfOutOfBounds() { return &m_outOfBounds; }
void observeStructure(Structure* structure)
{
@@ -153,10 +165,15 @@ public:
void computeUpdatedPrediction(CodeBlock*, OperationInProgress = NoOperation);
- Structure* expectedStructure() const { return m_expectedStructure; }
+ Structure* expectedStructure() const
+ {
+ if (structureIsPolymorphic())
+ return 0;
+ return m_expectedStructure;
+ }
bool structureIsPolymorphic() const
{
- return m_structureIsPolymorphic;
+ return m_expectedStructure == polymorphicStructure();
}
bool hasDefiniteStructure() const
{
@@ -167,17 +184,22 @@ public:
bool mayInterceptIndexedAccesses() const { return m_mayInterceptIndexedAccesses; }
bool mayStoreToHole() const { return m_mayStoreToHole; }
+ bool outOfBounds() const { return m_outOfBounds; }
bool usesOriginalArrayStructures() const { return m_usesOriginalArrayStructures; }
+ CString briefDescription(CodeBlock*);
+
private:
friend class LLIntOffsetsExtractor;
+ static Structure* polymorphicStructure() { return static_cast<Structure*>(reinterpret_cast<void*>(1)); }
+
unsigned m_bytecodeOffset;
Structure* m_lastSeenStructure;
Structure* m_expectedStructure;
- bool m_structureIsPolymorphic;
bool m_mayStoreToHole; // This flag may become overloaded to indicate other special cases that were encountered during array access, as it depends on indexing type. Since we currently have basically just one indexing type (two variants of ArrayStorage), this flag for now just means exactly what its name implies.
+ bool m_outOfBounds;
bool m_mayInterceptIndexedAccesses;
bool m_usesOriginalArrayStructures;
ArrayModes m_observedArrayModes;
diff --git a/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
index 762dca12a..5b2661f06 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
+++ b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
@@ -33,19 +33,19 @@
#if ENABLE(JIT)
namespace JSC {
-void CallLinkInfo::unlink(JSGlobalData& globalData, RepatchBuffer& repatchBuffer)
+void CallLinkInfo::unlink(VM& vm, RepatchBuffer& repatchBuffer)
{
ASSERT(isLinked());
repatchBuffer.revertJumpReplacementToBranchPtrWithPatch(RepatchBuffer::startOfBranchPtrWithPatchOnRegister(hotPathBegin), static_cast<MacroAssembler::RegisterID>(calleeGPR), 0);
if (isDFG) {
#if ENABLE(DFG_JIT)
- repatchBuffer.relink(callReturnLocation, (callType == Construct ? globalData.getCTIStub(DFG::linkConstructThunkGenerator) : globalData.getCTIStub(DFG::linkCallThunkGenerator)).code());
+ repatchBuffer.relink(callReturnLocation, (callType == Construct ? vm.getCTIStub(DFG::linkConstructThunkGenerator) : vm.getCTIStub(DFG::linkCallThunkGenerator)).code());
#else
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
#endif
} else
- repatchBuffer.relink(callReturnLocation, callType == Construct ? globalData.jitStubs->ctiVirtualConstructLink() : globalData.jitStubs->ctiVirtualCallLink());
+ repatchBuffer.relink(callReturnLocation, callType == Construct ? vm.getCTIStub(linkConstructGenerator).code() : vm.getCTIStub(linkCallGenerator).code());
hasSeenShouldRepatch = false;
callee.clear();
stub.clear();
diff --git a/Source/JavaScriptCore/bytecode/CallLinkInfo.h b/Source/JavaScriptCore/bytecode/CallLinkInfo.h
index 57608435c..36eb84bee 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkInfo.h
+++ b/Source/JavaScriptCore/bytecode/CallLinkInfo.h
@@ -57,6 +57,7 @@ struct CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> {
CallLinkInfo()
: hasSeenShouldRepatch(false)
, isDFG(false)
+ , hasSeenClosure(false)
, callType(None)
{
}
@@ -80,12 +81,13 @@ struct CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> {
RefPtr<ClosureCallStubRoutine> stub;
bool hasSeenShouldRepatch : 1;
bool isDFG : 1;
- CallType callType : 6;
+ bool hasSeenClosure : 1;
+ CallType callType : 5;
unsigned calleeGPR : 8;
CodeOrigin codeOrigin;
bool isLinked() { return stub || callee; }
- void unlink(JSGlobalData&, RepatchBuffer&);
+ void unlink(VM&, RepatchBuffer&);
bool seenOnce()
{
diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
index 7f9e9ee8a..509b15aaf 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,9 +28,59 @@
#include "CodeBlock.h"
#include "LLIntCallLinkInfo.h"
+#include "Operations.h"
+#include <wtf/CommaPrinter.h>
namespace JSC {
+CallLinkStatus::CallLinkStatus(JSValue value)
+ : m_callTarget(value)
+ , m_executable(0)
+ , m_structure(0)
+ , m_couldTakeSlowPath(false)
+ , m_isProved(false)
+{
+ if (!value || !value.isCell())
+ return;
+
+ m_structure = value.asCell()->structure();
+
+ if (!value.asCell()->inherits(&JSFunction::s_info))
+ return;
+
+ m_executable = jsCast<JSFunction*>(value.asCell())->executable();
+}
+
+JSFunction* CallLinkStatus::function() const
+{
+ if (!m_callTarget || !m_callTarget.isCell())
+ return 0;
+
+ if (!m_callTarget.asCell()->inherits(&JSFunction::s_info))
+ return 0;
+
+ return jsCast<JSFunction*>(m_callTarget.asCell());
+}
+
+InternalFunction* CallLinkStatus::internalFunction() const
+{
+ if (!m_callTarget || !m_callTarget.isCell())
+ return 0;
+
+ if (!m_callTarget.asCell()->inherits(&InternalFunction::s_info))
+ return 0;
+
+ return jsCast<InternalFunction*>(m_callTarget.asCell());
+}
+
+Intrinsic CallLinkStatus::intrinsicFor(CodeSpecializationKind kind) const
+{
+ if (!m_executable)
+ return NoIntrinsic;
+
+ return m_executable->intrinsicFor(kind);
+}
+
CallLinkStatus CallLinkStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
UNUSED_PARAM(profiledBlock);
@@ -39,9 +89,9 @@ CallLinkStatus CallLinkStatus::computeFromLLInt(CodeBlock* profiledBlock, unsign
Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
LLIntCallLinkInfo* callLinkInfo = instruction[4].u.callLinkInfo;
- return CallLinkStatus(callLinkInfo->lastSeenCallee.get(), false);
+ return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
#else
- return CallLinkStatus(0, false);
+ return CallLinkStatus();
#endif
}
@@ -54,17 +104,49 @@ CallLinkStatus CallLinkStatus::computeFor(CodeBlock* profiledBlock, unsigned byt
return computeFromLLInt(profiledBlock, bytecodeIndex);
if (profiledBlock->couldTakeSlowCase(bytecodeIndex))
- return CallLinkStatus(0, true);
+ return CallLinkStatus::takesSlowPath();
- JSFunction* target = profiledBlock->getCallLinkInfo(bytecodeIndex).lastSeenCallee.get();
+ CallLinkInfo& callLinkInfo = profiledBlock->getCallLinkInfo(bytecodeIndex);
+ if (callLinkInfo.stub)
+ return CallLinkStatus(callLinkInfo.stub->executable(), callLinkInfo.stub->structure());
+
+ JSFunction* target = callLinkInfo.lastSeenCallee.get();
if (!target)
return computeFromLLInt(profiledBlock, bytecodeIndex);
- return CallLinkStatus(target, false);
+ if (callLinkInfo.hasSeenClosure)
+ return CallLinkStatus(target->executable(), target->structure());
+
+ return CallLinkStatus(target);
#else
- return CallLinkStatus(0, false);
+ return CallLinkStatus();
#endif
}
+void CallLinkStatus::dump(PrintStream& out) const
+{
+ if (!isSet()) {
+ out.print("Not Set");
+ return;
+ }
+
+ CommaPrinter comma;
+
+ if (m_isProved)
+ out.print(comma, "Statically Proved");
+
+ if (m_couldTakeSlowPath)
+ out.print(comma, "Could Take Slow Path");
+
+ if (m_callTarget)
+ out.print(comma, "Known target: ", m_callTarget);
+
+ if (m_executable)
+ out.print(comma, "Executable/CallHash: ", RawPointer(m_executable), "/", m_executable->hashFor(CodeForCall));
+
+ if (m_structure)
+ out.print(comma, "Structure: ", RawPointer(m_structure));
+}
+
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.h b/Source/JavaScriptCore/bytecode/CallLinkStatus.h
index 5f7201905..51965fe4a 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.h
+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,40 +26,106 @@
#ifndef CallLinkStatus_h
#define CallLinkStatus_h
+#include "CodeSpecializationKind.h"
+#include "Intrinsic.h"
+#include "JSCJSValue.h"
+
namespace JSC {
-class JSFunction;
class CodeBlock;
+class ExecutableBase;
+class InternalFunction;
+class JSFunction;
+class Structure;
class CallLinkStatus {
public:
CallLinkStatus()
- : m_callTarget(0)
+ : m_executable(0)
+ , m_structure(0)
+ , m_couldTakeSlowPath(false)
+ , m_isProved(false)
+ {
+ }
+
+ static CallLinkStatus takesSlowPath()
+ {
+ CallLinkStatus result;
+ result.m_couldTakeSlowPath = true;
+ return result;
+ }
+
+ explicit CallLinkStatus(JSValue);
+
+ CallLinkStatus(ExecutableBase* executable, Structure* structure)
+ : m_executable(executable)
+ , m_structure(structure)
, m_couldTakeSlowPath(false)
+ , m_isProved(false)
{
+ ASSERT(!!executable == !!structure);
}
- CallLinkStatus(JSFunction* callTarget, bool couldTakeSlowPath)
- : m_callTarget(callTarget)
- , m_couldTakeSlowPath(couldTakeSlowPath)
+ CallLinkStatus& setIsProved(bool isProved)
{
+ m_isProved = isProved;
+ return *this;
}
static CallLinkStatus computeFor(CodeBlock*, unsigned bytecodeIndex);
- bool isSet() const { return !!m_callTarget || m_couldTakeSlowPath; }
+ CallLinkStatus& setHasBadFunctionExitSite(bool didHaveExitSite)
+ {
+ ASSERT(!m_isProved);
+ if (didHaveExitSite) {
+ // Turn this into a closure call.
+ m_callTarget = JSValue();
+ }
+ return *this;
+ }
+
+ CallLinkStatus& setHasBadCacheExitSite(bool didHaveExitSite)
+ {
+ ASSERT(!m_isProved);
+ if (didHaveExitSite)
+ *this = takesSlowPath();
+ return *this;
+ }
+
+ CallLinkStatus& setHasBadExecutableExitSite(bool didHaveExitSite)
+ {
+ ASSERT(!m_isProved);
+ if (didHaveExitSite)
+ *this = takesSlowPath();
+ return *this;
+ }
+
+ bool isSet() const { return m_callTarget || m_executable || m_couldTakeSlowPath; }
bool operator!() const { return !isSet(); }
bool couldTakeSlowPath() const { return m_couldTakeSlowPath; }
+ bool isClosureCall() const { return m_executable && !m_callTarget; }
+
+ JSValue callTarget() const { return m_callTarget; }
+ JSFunction* function() const;
+ InternalFunction* internalFunction() const;
+ Intrinsic intrinsicFor(CodeSpecializationKind) const;
+ ExecutableBase* executable() const { return m_executable; }
+ Structure* structure() const { return m_structure; }
+ bool isProved() const { return m_isProved; }
+ bool canOptimize() const { return (m_callTarget || m_executable) && !m_couldTakeSlowPath; }
- JSFunction* callTarget() const { return m_callTarget; }
+ void dump(PrintStream&) const;
private:
static CallLinkStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex);
- JSFunction* m_callTarget;
+ JSValue m_callTarget;
+ ExecutableBase* m_executable;
+ Structure* m_structure;
bool m_couldTakeSlowPath;
+ bool m_isProved;
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index fe9f6ac7c..904e40a4c 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
*
* Redistribution and use in source and binary forms, with or without
@@ -31,6 +31,7 @@
#include "CodeBlock.h"
#include "BytecodeGenerator.h"
+#include "CallLinkStatus.h"
#include "DFGCapabilities.h"
#include "DFGCommon.h"
#include "DFGNode.h"
@@ -40,15 +41,18 @@
#include "JIT.h"
#include "JITStubs.h"
#include "JSActivation.h"
+#include "JSCJSValue.h"
#include "JSFunction.h"
#include "JSNameScope.h"
-#include "JSValue.h"
#include "LowLevelInterpreter.h"
+#include "Operations.h"
+#include "ReduceWhitespace.h"
#include "RepatchBuffer.h"
#include "SlotVisitorInlines.h"
#include <stdio.h>
+#include <wtf/CommaPrinter.h>
#include <wtf/StringExtras.h>
-#include <wtf/UnusedParam.h>
+#include <wtf/StringPrintStream.h>
#if ENABLE(DFG_JIT)
#include "DFGOperations.h"
@@ -62,16 +66,57 @@ namespace JSC {
using namespace DFG;
#endif
+String CodeBlock::inferredName() const
+{
+ switch (codeType()) {
+ case GlobalCode:
+ return "<global>";
+ case EvalCode:
+ return "<eval>";
+ case FunctionCode:
+ return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().string();
+ default:
+ CRASH();
+ return String();
+ }
+}
+
CodeBlockHash CodeBlock::hash() const
{
return CodeBlockHash(ownerExecutable()->source(), specializationKind());
}
+String CodeBlock::sourceCodeForTools() const
+{
+ if (codeType() != FunctionCode)
+ return ownerExecutable()->source().toString();
+
+ SourceProvider* provider = source();
+ FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
+ UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
+ unsigned unlinkedStartOffset = unlinked->startOffset();
+ unsigned linkedStartOffset = executable->source().startOffset();
+ int delta = linkedStartOffset - unlinkedStartOffset;
+ StringBuilder builder;
+ builder.append("function ");
+ builder.append(provider->getRange(
+ delta + unlinked->functionStartOffset(),
+ delta + unlinked->startOffset() + unlinked->sourceLength()));
+ return builder.toString();
+}
+
+String CodeBlock::sourceCodeOnOneLine() const
+{
+ return reduceWhitespace(sourceCodeForTools());
+}
+
void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
{
- out.print("#", hash(), ":[", RawPointer(this), ", ", jitType, codeType());
+ out.print(inferredName(), "#", hash(), ":[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
if (codeType() == FunctionCode)
out.print(specializationKind());
+ if (ownerExecutable()->neverInline())
+ out.print(" (NeverInline)");
out.print("]");
}
@@ -99,7 +144,7 @@ static String valueToSourceString(ExecState* exec, JSValue val)
if (val.isString())
return makeString("\"", escapeQuotes(val.toString(exec)->value(exec)), "\"");
- return val.description();
+ return toString(val);
}
static CString constantName(ExecState* exec, int k, JSValue value)
@@ -112,18 +157,6 @@ static CString idName(int id0, const Identifier& ident)
return makeString(ident.string(), "(@id", String::number(id0), ")").utf8();
}
-void CodeBlock::dumpBytecodeCommentAndNewLine(int location)
-{
-#if ENABLE(BYTECODE_COMMENTS)
- const char* comment = commentForBytecodeOffset(location);
- if (comment)
- dataLogF("\t\t ; %s", comment);
-#else
- UNUSED_PARAM(location);
-#endif
- dataLogF("\n");
-}
-
CString CodeBlock::registerName(ExecState* exec, int r) const
{
if (r == missingThisObjectMarker())
@@ -178,37 +211,34 @@ NEVER_INLINE static const char* debugHookName(int debugHookID)
return "didReachBreakpoint";
}
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
return "";
}
-void CodeBlock::printUnaryOp(ExecState* exec, int location, const Instruction*& it, const char* op)
+void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
{
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- dataLogF("[%4d] %s\t\t %s, %s", location, op, registerName(exec, r0).data(), registerName(exec, r1).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] %s\t\t %s, %s", location, op, registerName(exec, r0).data(), registerName(exec, r1).data());
}
-void CodeBlock::printBinaryOp(ExecState* exec, int location, const Instruction*& it, const char* op)
+void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
{
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- dataLogF("[%4d] %s\t\t %s, %s, %s", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] %s\t\t %s, %s, %s", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
}
-void CodeBlock::printConditionalJump(ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
+void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
{
int r0 = (++it)->u.operand;
int offset = (++it)->u.operand;
- dataLogF("[%4d] %s\t\t %s, %d(->%d)", location, op, registerName(exec, r0).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] %s\t\t %s, %d(->%d)", location, op, registerName(exec, r0).data(), offset, location + offset);
}
-void CodeBlock::printGetByIdOp(ExecState* exec, int location, const Instruction*& it)
+void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
{
const char* op;
switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
@@ -255,34 +285,34 @@ void CodeBlock::printGetByIdOp(ExecState* exec, int location, const Instruction*
op = "string_length";
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
op = 0;
}
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- dataLogF("[%4d] %s\t %s, %s, %s", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
- it += 5;
+ out.printf("[%4d] %s\t %s, %s, %s", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
+ it += 4; // Increment up to the value profiler.
}
#if ENABLE(JIT) || ENABLE(LLINT) // unused in some configurations
-static void dumpStructure(const char* name, ExecState* exec, Structure* structure, Identifier& ident)
+static void dumpStructure(PrintStream& out, const char* name, ExecState* exec, Structure* structure, Identifier& ident)
{
if (!structure)
return;
- dataLogF("%s = %p", name, structure);
+ out.printf("%s = %p", name, structure);
- PropertyOffset offset = structure->get(exec->globalData(), ident);
+ PropertyOffset offset = structure->get(exec->vm(), ident);
if (offset != invalidOffset)
- dataLogF(" (offset = %d)", offset);
+ out.printf(" (offset = %d)", offset);
}
#endif
#if ENABLE(JIT) // unused when not ENABLE(JIT), leading to silly warnings
-static void dumpChain(ExecState* exec, StructureChain* chain, Identifier& ident)
+static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain, Identifier& ident)
{
- dataLogF("chain = %p: [", chain);
+ out.printf("chain = %p: [", chain);
bool first = true;
for (WriteBarrier<Structure>* currentStructure = chain->head();
*currentStructure;
@@ -290,14 +320,14 @@ static void dumpChain(ExecState* exec, StructureChain* chain, Identifier& ident)
if (first)
first = false;
else
- dataLogF(", ");
- dumpStructure("struct", exec, currentStructure->get(), ident);
+ out.printf(", ");
+ dumpStructure(out, "struct", exec, currentStructure->get(), ident);
}
- dataLogF("]");
+ out.printf("]");
}
#endif
-void CodeBlock::printGetByIdCacheStatus(ExecState* exec, int location)
+void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location)
{
Instruction* instruction = instructions().begin() + location;
@@ -307,22 +337,20 @@ void CodeBlock::printGetByIdCacheStatus(ExecState* exec, int location)
#if ENABLE(LLINT)
if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
- dataLogF(" llint(array_length)");
- else {
- Structure* structure = instruction[4].u.structure.get();
- dataLogF(" llint(");
- dumpStructure("struct", exec, structure, ident);
- dataLogF(")");
+ out.printf(" llint(array_length)");
+ else if (Structure* structure = instruction[4].u.structure.get()) {
+ out.printf(" llint(");
+ dumpStructure(out, "struct", exec, structure, ident);
+ out.printf(")");
}
#endif
#if ENABLE(JIT)
if (numberOfStructureStubInfos()) {
- dataLogF(" jit(");
StructureStubInfo& stubInfo = getStubInfo(location);
- if (!stubInfo.seen)
- dataLogF("not seen");
- else {
+ if (stubInfo.seen) {
+ out.printf(" jit(");
+
Structure* baseStructure = 0;
Structure* prototypeStructure = 0;
StructureChain* chain = 0;
@@ -331,166 +359,163 @@ void CodeBlock::printGetByIdCacheStatus(ExecState* exec, int location)
switch (stubInfo.accessType) {
case access_get_by_id_self:
- dataLogF("self");
+ out.printf("self");
baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
break;
case access_get_by_id_proto:
- dataLogF("proto");
+ out.printf("proto");
baseStructure = stubInfo.u.getByIdProto.baseObjectStructure.get();
prototypeStructure = stubInfo.u.getByIdProto.prototypeStructure.get();
break;
case access_get_by_id_chain:
- dataLogF("chain");
+ out.printf("chain");
baseStructure = stubInfo.u.getByIdChain.baseObjectStructure.get();
chain = stubInfo.u.getByIdChain.chain.get();
break;
case access_get_by_id_self_list:
- dataLogF("self_list");
+ out.printf("self_list");
structureList = stubInfo.u.getByIdSelfList.structureList;
listSize = stubInfo.u.getByIdSelfList.listSize;
break;
case access_get_by_id_proto_list:
- dataLogF("proto_list");
+ out.printf("proto_list");
structureList = stubInfo.u.getByIdProtoList.structureList;
listSize = stubInfo.u.getByIdProtoList.listSize;
break;
case access_unset:
- dataLogF("unset");
+ out.printf("unset");
break;
case access_get_by_id_generic:
- dataLogF("generic");
+ out.printf("generic");
break;
case access_get_array_length:
- dataLogF("array_length");
+ out.printf("array_length");
break;
case access_get_string_length:
- dataLogF("string_length");
+ out.printf("string_length");
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
if (baseStructure) {
- dataLogF(", ");
- dumpStructure("struct", exec, baseStructure, ident);
+ out.printf(", ");
+ dumpStructure(out, "struct", exec, baseStructure, ident);
}
if (prototypeStructure) {
- dataLogF(", ");
- dumpStructure("prototypeStruct", exec, baseStructure, ident);
+ out.printf(", ");
+ dumpStructure(out, "prototypeStruct", exec, baseStructure, ident);
}
if (chain) {
- dataLogF(", ");
- dumpChain(exec, chain, ident);
+ out.printf(", ");
+ dumpChain(out, exec, chain, ident);
}
if (structureList) {
- dataLogF(", list = %p: [", structureList);
+ out.printf(", list = %p: [", structureList);
for (int i = 0; i < listSize; ++i) {
if (i)
- dataLogF(", ");
- dataLogF("(");
- dumpStructure("base", exec, structureList->list[i].base.get(), ident);
+ out.printf(", ");
+ out.printf("(");
+ dumpStructure(out, "base", exec, structureList->list[i].base.get(), ident);
if (structureList->list[i].isChain) {
if (structureList->list[i].u.chain.get()) {
- dataLogF(", ");
- dumpChain(exec, structureList->list[i].u.chain.get(), ident);
+ out.printf(", ");
+ dumpChain(out, exec, structureList->list[i].u.chain.get(), ident);
}
} else {
if (structureList->list[i].u.proto.get()) {
- dataLogF(", ");
- dumpStructure("proto", exec, structureList->list[i].u.proto.get(), ident);
+ out.printf(", ");
+ dumpStructure(out, "proto", exec, structureList->list[i].u.proto.get(), ident);
}
}
- dataLogF(")");
+ out.printf(")");
}
- dataLogF("]");
+ out.printf("]");
}
+ out.printf(")");
}
- dataLogF(")");
}
#endif
}
-void CodeBlock::printCallOp(ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode)
+void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode)
{
int func = (++it)->u.operand;
int argCount = (++it)->u.operand;
int registerOffset = (++it)->u.operand;
- dataLogF("[%4d] %s\t %s, %d, %d", location, op, registerName(exec, func).data(), argCount, registerOffset);
+ out.printf("[%4d] %s\t %s, %d, %d", location, op, registerName(exec, func).data(), argCount, registerOffset);
if (cacheDumpMode == DumpCaches) {
#if ENABLE(LLINT)
LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
if (callLinkInfo->lastSeenCallee) {
- dataLogF(" llint(%p, exec %p)",
- callLinkInfo->lastSeenCallee.get(),
- callLinkInfo->lastSeenCallee->executable());
- } else
- dataLogF(" llint(not set)");
+ out.printf(
+ " llint(%p, exec %p)",
+ callLinkInfo->lastSeenCallee.get(),
+ callLinkInfo->lastSeenCallee->executable());
+ }
#endif
#if ENABLE(JIT)
if (numberOfCallLinkInfos()) {
JSFunction* target = getCallLinkInfo(location).lastSeenCallee.get();
if (target)
- dataLogF(" jit(%p, exec %p)", target, target->executable());
- else
- dataLogF(" jit(not set)");
+ out.printf(" jit(%p, exec %p)", target, target->executable());
}
#endif
+ out.print(" status(", CallLinkStatus::computeFor(this, location), ")");
}
- dumpBytecodeCommentAndNewLine(location);
it += 2;
}
-void CodeBlock::printPutByIdOp(ExecState* exec, int location, const Instruction*& it, const char* op)
+void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
{
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- dataLogF("[%4d] %s\t %s, %s, %s", location, op, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] %s\t %s, %s, %s", location, op, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data());
it += 5;
}
-void CodeBlock::printStructure(const char* name, const Instruction* vPC, int operand)
+void CodeBlock::printStructure(PrintStream& out, const char* name, const Instruction* vPC, int operand)
{
unsigned instructionOffset = vPC - instructions().begin();
- dataLogF(" [%4d] %s: %s\n", instructionOffset, name, pointerToSourceString(vPC[operand].u.structure).utf8().data());
+ out.printf(" [%4d] %s: %s\n", instructionOffset, name, pointerToSourceString(vPC[operand].u.structure).utf8().data());
}
-void CodeBlock::printStructures(const Instruction* vPC)
+void CodeBlock::printStructures(PrintStream& out, const Instruction* vPC)
{
- Interpreter* interpreter = m_globalData->interpreter;
+ Interpreter* interpreter = m_vm->interpreter;
unsigned instructionOffset = vPC - instructions().begin();
if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id)) {
- printStructure("get_by_id", vPC, 4);
+ printStructure(out, "get_by_id", vPC, 4);
return;
}
if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_self)) {
- printStructure("get_by_id_self", vPC, 4);
+ printStructure(out, "get_by_id_self", vPC, 4);
return;
}
if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto)) {
- dataLogF(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_proto", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data());
+ out.printf(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_proto", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data());
return;
}
if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_transition)) {
- dataLogF(" [%4d] %s: %s, %s, %s\n", instructionOffset, "put_by_id_transition", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data(), pointerToSourceString(vPC[6].u.structureChain).utf8().data());
+ out.printf(" [%4d] %s: %s, %s, %s\n", instructionOffset, "put_by_id_transition", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data(), pointerToSourceString(vPC[6].u.structureChain).utf8().data());
return;
}
if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_chain)) {
- dataLogF(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_chain", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structureChain).utf8().data());
+ out.printf(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_chain", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structureChain).utf8().data());
return;
}
if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id)) {
- printStructure("put_by_id", vPC, 4);
+ printStructure(out, "put_by_id", vPC, 4);
return;
}
if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_replace)) {
- printStructure("put_by_id_replace", vPC, 4);
+ printStructure(out, "put_by_id_replace", vPC, 4);
return;
}
@@ -498,7 +523,7 @@ void CodeBlock::printStructures(const Instruction* vPC)
ASSERT(vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_call) || vPC[0].u.opcode == interpreter->getOpcode(op_call_eval) || vPC[0].u.opcode == interpreter->getOpcode(op_construct));
}
-void CodeBlock::dumpBytecode()
+void CodeBlock::dumpBytecode(PrintStream& out)
{
// We only use the ExecState* for things that don't actually lead to JS execution,
// like converting a JSString to a String. Hence the globalExec is appropriate.
@@ -509,92 +534,95 @@ void CodeBlock::dumpBytecode()
for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
++instructionCount;
- dataLog(*this);
- dataLogF(
+ out.print(*this);
+ out.printf(
": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
static_cast<unsigned long>(instructions().size()),
static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
m_numParameters, m_numCalleeRegisters, m_numVars);
- if (symbolTable() && symbolTable()->captureCount())
- dataLogF("; %d captured var(s)", symbolTable()->captureCount());
+ if (symbolTable() && symbolTable()->captureCount()) {
+ out.printf(
+ "; %d captured var(s) (from r%d to r%d, inclusive)",
+ symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() - 1);
+ }
if (usesArguments()) {
- dataLogF(
+ out.printf(
"; uses arguments, in r%d, r%d",
argumentsRegister(),
unmodifiedArgumentsRegister(argumentsRegister()));
}
if (needsFullScopeChain() && codeType() == FunctionCode)
- dataLogF("; activation in r%d", activationRegister());
- dataLogF("\n\n");
+ out.printf("; activation in r%d", activationRegister());
+ out.print("\n\nSource: ", sourceCodeOnOneLine(), "\n\n");
const Instruction* begin = instructions().begin();
const Instruction* end = instructions().end();
for (const Instruction* it = begin; it != end; ++it)
- dumpBytecode(exec, begin, it);
+ dumpBytecode(out, exec, begin, it);
if (!m_identifiers.isEmpty()) {
- dataLogF("\nIdentifiers:\n");
+ out.printf("\nIdentifiers:\n");
size_t i = 0;
do {
- dataLogF(" id%u = %s\n", static_cast<unsigned>(i), m_identifiers[i].string().utf8().data());
+ out.printf(" id%u = %s\n", static_cast<unsigned>(i), m_identifiers[i].string().utf8().data());
++i;
} while (i != m_identifiers.size());
}
if (!m_constantRegisters.isEmpty()) {
- dataLogF("\nConstants:\n");
+ out.printf("\nConstants:\n");
size_t i = 0;
do {
- dataLogF(" k%u = %s\n", static_cast<unsigned>(i), valueToSourceString(exec, m_constantRegisters[i].get()).utf8().data());
+ out.printf(" k%u = %s\n", static_cast<unsigned>(i), valueToSourceString(exec, m_constantRegisters[i].get()).utf8().data());
++i;
} while (i < m_constantRegisters.size());
}
if (size_t count = m_unlinkedCode->numberOfRegExps()) {
- dataLogF("\nm_regexps:\n");
+ out.printf("\nm_regexps:\n");
size_t i = 0;
do {
- dataLogF(" re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).utf8().data());
+ out.printf(" re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).utf8().data());
++i;
} while (i < count);
}
#if ENABLE(JIT)
if (!m_structureStubInfos.isEmpty())
- dataLogF("\nStructures:\n");
+ out.printf("\nStructures:\n");
#endif
if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
- dataLogF("\nException Handlers:\n");
+ out.printf("\nException Handlers:\n");
unsigned i = 0;
do {
- dataLogF("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target, m_rareData->m_exceptionHandlers[i].scopeDepth);
+ out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target, m_rareData->m_exceptionHandlers[i].scopeDepth);
++i;
} while (i < m_rareData->m_exceptionHandlers.size());
}
if (m_rareData && !m_rareData->m_immediateSwitchJumpTables.isEmpty()) {
- dataLogF("Immediate Switch Jump Tables:\n");
+ out.printf("Immediate Switch Jump Tables:\n");
unsigned i = 0;
do {
- dataLogF(" %1d = {\n", i);
+ out.printf(" %1d = {\n", i);
int entry = 0;
Vector<int32_t>::const_iterator end = m_rareData->m_immediateSwitchJumpTables[i].branchOffsets.end();
for (Vector<int32_t>::const_iterator iter = m_rareData->m_immediateSwitchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
if (!*iter)
continue;
- dataLogF("\t\t%4d => %04d\n", entry + m_rareData->m_immediateSwitchJumpTables[i].min, *iter);
+ out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_immediateSwitchJumpTables[i].min, *iter);
}
- dataLogF(" }\n");
+ out.printf(" }\n");
++i;
} while (i < m_rareData->m_immediateSwitchJumpTables.size());
}
if (m_rareData && !m_rareData->m_characterSwitchJumpTables.isEmpty()) {
- dataLogF("\nCharacter Switch Jump Tables:\n");
+ out.printf("\nCharacter Switch Jump Tables:\n");
unsigned i = 0;
do {
- dataLogF(" %1d = {\n", i);
+ out.printf(" %1d = {\n", i);
int entry = 0;
Vector<int32_t>::const_iterator end = m_rareData->m_characterSwitchJumpTables[i].branchOffsets.end();
for (Vector<int32_t>::const_iterator iter = m_rareData->m_characterSwitchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
@@ -602,96 +630,143 @@ void CodeBlock::dumpBytecode()
continue;
ASSERT(!((i + m_rareData->m_characterSwitchJumpTables[i].min) & ~0xFFFF));
UChar ch = static_cast<UChar>(entry + m_rareData->m_characterSwitchJumpTables[i].min);
- dataLogF("\t\t\"%s\" => %04d\n", String(&ch, 1).utf8().data(), *iter);
- }
- dataLogF(" }\n");
+ out.printf("\t\t\"%s\" => %04d\n", String(&ch, 1).utf8().data(), *iter);
+ }
+ out.printf(" }\n");
++i;
} while (i < m_rareData->m_characterSwitchJumpTables.size());
}
if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
- dataLogF("\nString Switch Jump Tables:\n");
+ out.printf("\nString Switch Jump Tables:\n");
unsigned i = 0;
do {
- dataLogF(" %1d = {\n", i);
+ out.printf(" %1d = {\n", i);
StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
- dataLogF("\t\t\"%s\" => %04d\n", String(iter->key).utf8().data(), iter->value.branchOffset);
- dataLogF(" }\n");
+ out.printf("\t\t\"%s\" => %04d\n", String(iter->key).utf8().data(), iter->value.branchOffset);
+ out.printf(" }\n");
++i;
} while (i < m_rareData->m_stringSwitchJumpTables.size());
}
- dataLogF("\n");
+ out.printf("\n");
+}
+
+void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
+{
+ if (hasPrintedProfiling) {
+ out.print("; ");
+ return;
+ }
+
+ out.print(" ");
+ hasPrintedProfiling = true;
}
-void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const Instruction*& it)
+void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
+{
+ ++it;
+#if ENABLE(VALUE_PROFILER)
+ CString description = it->u.profile->briefDescription();
+ if (!description.length())
+ return;
+ beginDumpProfiling(out, hasPrintedProfiling);
+ out.print(description);
+#else
+ UNUSED_PARAM(out);
+ UNUSED_PARAM(hasPrintedProfiling);
+#endif
+}
+
+void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
+{
+ ++it;
+#if ENABLE(VALUE_PROFILER)
+ CString description = it->u.arrayProfile->briefDescription(this);
+ if (!description.length())
+ return;
+ beginDumpProfiling(out, hasPrintedProfiling);
+ out.print(description);
+#else
+ UNUSED_PARAM(out);
+ UNUSED_PARAM(hasPrintedProfiling);
+#endif
+}
+
+#if ENABLE(VALUE_PROFILER)
+void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
+{
+ if (!profile || !profile->m_counter)
+ return;
+
+ beginDumpProfiling(out, hasPrintedProfiling);
+ out.print(name, profile->m_counter);
+}
+#endif
+
+void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it)
{
int location = it - begin;
+ bool hasPrintedProfiling = false;
switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
case op_enter: {
- dataLogF("[%4d] enter", location);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] enter", location);
break;
}
case op_create_activation: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] create_activation %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] create_activation %s", location, registerName(exec, r0).data());
break;
}
case op_create_arguments: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] create_arguments\t %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] create_arguments\t %s", location, registerName(exec, r0).data());
break;
}
case op_init_lazy_reg: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] init_lazy_reg\t %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] init_lazy_reg\t %s", location, registerName(exec, r0).data());
break;
}
case op_get_callee: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] op_get_callee %s\n", location, registerName(exec, r0).data());
+ out.printf("[%4d] op_get_callee %s\n", location, registerName(exec, r0).data());
++it;
break;
}
case op_create_this: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- dataLogF("[%4d] create_this %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
- dumpBytecodeCommentAndNewLine(location);
+ unsigned inferredInlineCapacity = (++it)->u.operand;
+ out.printf("[%4d] create_this %s, %s, %u", location, registerName(exec, r0).data(), registerName(exec, r1).data(), inferredInlineCapacity);
break;
}
case op_convert_this: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] convert_this\t %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] convert_this\t %s", location, registerName(exec, r0).data());
++it; // Skip value profile.
break;
}
case op_new_object: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] new_object\t %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ unsigned inferredInlineCapacity = (++it)->u.operand;
+ out.printf("[%4d] new_object\t %s, %u", location, registerName(exec, r0).data(), inferredInlineCapacity);
+ ++it; // Skip object allocation profile.
break;
}
case op_new_array: {
int dst = (++it)->u.operand;
int argv = (++it)->u.operand;
int argc = (++it)->u.operand;
- dataLogF("[%4d] new_array\t %s, %s, %d", location, registerName(exec, dst).data(), registerName(exec, argv).data(), argc);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] new_array\t %s, %s, %d", location, registerName(exec, dst).data(), registerName(exec, argv).data(), argc);
++it; // Skip array allocation profile.
break;
}
case op_new_array_with_size: {
int dst = (++it)->u.operand;
int length = (++it)->u.operand;
- dataLogF("[%4d] new_array_with_size\t %s, %s", location, registerName(exec, dst).data(), registerName(exec, length).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] new_array_with_size\t %s, %s", location, registerName(exec, dst).data(), registerName(exec, length).data());
++it; // Skip array allocation profile.
break;
}
@@ -699,149 +774,136 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
int dst = (++it)->u.operand;
int argv = (++it)->u.operand;
int argc = (++it)->u.operand;
- dataLogF("[%4d] new_array_buffer\t %s, %d, %d", location, registerName(exec, dst).data(), argv, argc);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] new_array_buffer\t %s, %d, %d", location, registerName(exec, dst).data(), argv, argc);
++it; // Skip array allocation profile.
break;
}
case op_new_regexp: {
int r0 = (++it)->u.operand;
int re0 = (++it)->u.operand;
- dataLogF("[%4d] new_regexp\t %s, ", location, registerName(exec, r0).data());
+ out.printf("[%4d] new_regexp\t %s, ", location, registerName(exec, r0).data());
if (r0 >=0 && r0 < (int)m_unlinkedCode->numberOfRegExps())
- dataLogF("%s", regexpName(re0, regexp(re0)).data());
+ out.printf("%s", regexpName(re0, regexp(re0)).data());
else
- dataLogF("bad_regexp(%d)", re0);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("bad_regexp(%d)", re0);
break;
}
case op_mov: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- dataLogF("[%4d] mov\t\t %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] mov\t\t %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
break;
}
case op_not: {
- printUnaryOp(exec, location, it, "not");
+ printUnaryOp(out, exec, location, it, "not");
break;
}
case op_eq: {
- printBinaryOp(exec, location, it, "eq");
+ printBinaryOp(out, exec, location, it, "eq");
break;
}
case op_eq_null: {
- printUnaryOp(exec, location, it, "eq_null");
+ printUnaryOp(out, exec, location, it, "eq_null");
break;
}
case op_neq: {
- printBinaryOp(exec, location, it, "neq");
+ printBinaryOp(out, exec, location, it, "neq");
break;
}
case op_neq_null: {
- printUnaryOp(exec, location, it, "neq_null");
+ printUnaryOp(out, exec, location, it, "neq_null");
break;
}
case op_stricteq: {
- printBinaryOp(exec, location, it, "stricteq");
+ printBinaryOp(out, exec, location, it, "stricteq");
break;
}
case op_nstricteq: {
- printBinaryOp(exec, location, it, "nstricteq");
+ printBinaryOp(out, exec, location, it, "nstricteq");
break;
}
case op_less: {
- printBinaryOp(exec, location, it, "less");
+ printBinaryOp(out, exec, location, it, "less");
break;
}
case op_lesseq: {
- printBinaryOp(exec, location, it, "lesseq");
+ printBinaryOp(out, exec, location, it, "lesseq");
break;
}
case op_greater: {
- printBinaryOp(exec, location, it, "greater");
+ printBinaryOp(out, exec, location, it, "greater");
break;
}
case op_greatereq: {
- printBinaryOp(exec, location, it, "greatereq");
+ printBinaryOp(out, exec, location, it, "greatereq");
break;
}
- case op_pre_inc: {
+ case op_inc: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] pre_inc\t\t %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] pre_inc\t\t %s", location, registerName(exec, r0).data());
break;
}
- case op_pre_dec: {
+ case op_dec: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] pre_dec\t\t %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] pre_dec\t\t %s", location, registerName(exec, r0).data());
break;
}
- case op_post_inc: {
- printUnaryOp(exec, location, it, "post_inc");
- break;
- }
- case op_post_dec: {
- printUnaryOp(exec, location, it, "post_dec");
- break;
- }
- case op_to_jsnumber: {
- printUnaryOp(exec, location, it, "to_jsnumber");
+ case op_to_number: {
+ printUnaryOp(out, exec, location, it, "to_number");
break;
}
case op_negate: {
- printUnaryOp(exec, location, it, "negate");
+ printUnaryOp(out, exec, location, it, "negate");
break;
}
case op_add: {
- printBinaryOp(exec, location, it, "add");
+ printBinaryOp(out, exec, location, it, "add");
++it;
break;
}
case op_mul: {
- printBinaryOp(exec, location, it, "mul");
+ printBinaryOp(out, exec, location, it, "mul");
++it;
break;
}
case op_div: {
- printBinaryOp(exec, location, it, "div");
+ printBinaryOp(out, exec, location, it, "div");
++it;
break;
}
case op_mod: {
- printBinaryOp(exec, location, it, "mod");
+ printBinaryOp(out, exec, location, it, "mod");
break;
}
case op_sub: {
- printBinaryOp(exec, location, it, "sub");
+ printBinaryOp(out, exec, location, it, "sub");
++it;
break;
}
case op_lshift: {
- printBinaryOp(exec, location, it, "lshift");
+ printBinaryOp(out, exec, location, it, "lshift");
break;
}
case op_rshift: {
- printBinaryOp(exec, location, it, "rshift");
+ printBinaryOp(out, exec, location, it, "rshift");
break;
}
case op_urshift: {
- printBinaryOp(exec, location, it, "urshift");
+ printBinaryOp(out, exec, location, it, "urshift");
break;
}
case op_bitand: {
- printBinaryOp(exec, location, it, "bitand");
+ printBinaryOp(out, exec, location, it, "bitand");
++it;
break;
}
case op_bitxor: {
- printBinaryOp(exec, location, it, "bitxor");
+ printBinaryOp(out, exec, location, it, "bitxor");
++it;
break;
}
case op_bitor: {
- printBinaryOp(exec, location, it, "bitor");
+ printBinaryOp(out, exec, location, it, "bitor");
++it;
break;
}
@@ -850,48 +912,46 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
int offset = (++it)->u.operand;
- dataLogF("[%4d] check_has_instance\t\t %s, %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] check_has_instance\t\t %s, %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), offset, location + offset);
break;
}
case op_instanceof: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- dataLogF("[%4d] instanceof\t\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] instanceof\t\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
break;
}
case op_typeof: {
- printUnaryOp(exec, location, it, "typeof");
+ printUnaryOp(out, exec, location, it, "typeof");
break;
}
case op_is_undefined: {
- printUnaryOp(exec, location, it, "is_undefined");
+ printUnaryOp(out, exec, location, it, "is_undefined");
break;
}
case op_is_boolean: {
- printUnaryOp(exec, location, it, "is_boolean");
+ printUnaryOp(out, exec, location, it, "is_boolean");
break;
}
case op_is_number: {
- printUnaryOp(exec, location, it, "is_number");
+ printUnaryOp(out, exec, location, it, "is_number");
break;
}
case op_is_string: {
- printUnaryOp(exec, location, it, "is_string");
+ printUnaryOp(out, exec, location, it, "is_string");
break;
}
case op_is_object: {
- printUnaryOp(exec, location, it, "is_object");
+ printUnaryOp(out, exec, location, it, "is_object");
break;
}
case op_is_function: {
- printUnaryOp(exec, location, it, "is_function");
+ printUnaryOp(out, exec, location, it, "is_function");
break;
}
case op_in: {
- printBinaryOp(exec, location, it, "in");
+ printBinaryOp(out, exec, location, it, "in");
break;
}
case op_put_to_base_variable:
@@ -900,8 +960,7 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
int id0 = (++it)->u.operand;
int value = (++it)->u.operand;
int resolveInfo = (++it)->u.operand;
- dataLogF("[%4d] put_to_base\t %s, %s, %s, %d", location, registerName(exec, base).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, value).data(), resolveInfo);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] put_to_base\t %s, %s, %s, %d", location, registerName(exec, base).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, value).data(), resolveInfo);
break;
}
case op_resolve:
@@ -913,14 +972,27 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
int resolveInfo = (++it)->u.operand;
- dataLogF("[%4d] resolve\t\t %s, %s, %d", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo);
- dumpBytecodeCommentAndNewLine(location);
- it++;
+ out.printf("[%4d] resolve\t\t %s, %s, %d", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
+ break;
+ }
+ case op_get_scoped_var: {
+ int r0 = (++it)->u.operand;
+ int index = (++it)->u.operand;
+ int skipLevels = (++it)->u.operand;
+ out.printf("[%4d] get_scoped_var\t %s, %d, %d", location, registerName(exec, r0).data(), index, skipLevels);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
+ break;
+ }
+ case op_put_scoped_var: {
+ int index = (++it)->u.operand;
+ int skipLevels = (++it)->u.operand;
+ int r0 = (++it)->u.operand;
+ out.printf("[%4d] put_scoped_var\t %d, %d, %s", location, index, skipLevels, registerName(exec, r0).data());
break;
}
case op_init_global_const_nop: {
- dataLogF("[%4d] init_global_const_nop\t", location);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] init_global_const_nop\t", location);
it++;
it++;
it++;
@@ -930,8 +1002,7 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
case op_init_global_const: {
WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
int r0 = (++it)->u.operand;
- dataLogF("[%4d] init_global_const\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] init_global_const\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data());
it++;
it++;
break;
@@ -939,8 +1010,7 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
case op_init_global_const_check: {
WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
int r0 = (++it)->u.operand;
- dataLogF("[%4d] init_global_const_check\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] init_global_const_check\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data());
it++;
it++;
break;
@@ -955,16 +1025,8 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
int isStrict = (++it)->u.operand;
int resolveInfo = (++it)->u.operand;
int putToBaseInfo = (++it)->u.operand;
- dataLogF("[%4d] resolve_base%s\t %s, %s, %d, %d", location, isStrict ? "_strict" : "", registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo, putToBaseInfo);
- dumpBytecodeCommentAndNewLine(location);
- it++;
- break;
- }
- case op_ensure_property_exists: {
- int r0 = (++it)->u.operand;
- int id0 = (++it)->u.operand;
- dataLogF("[%4d] ensure_property_exists\t %s, %s", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] resolve_base%s\t %s, %s, %d, %d", location, isStrict ? "_strict" : "", registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo, putToBaseInfo);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
case op_resolve_with_base: {
@@ -973,9 +1035,8 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
int id0 = (++it)->u.operand;
int resolveInfo = (++it)->u.operand;
int putToBaseInfo = (++it)->u.operand;
- dataLogF("[%4d] resolve_with_base %s, %s, %s, %d, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo, putToBaseInfo);
- dumpBytecodeCommentAndNewLine(location);
- it++;
+ out.printf("[%4d] resolve_with_base %s, %s, %s, %d, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo, putToBaseInfo);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
case op_resolve_with_this: {
@@ -983,9 +1044,8 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
int r1 = (++it)->u.operand;
int id0 = (++it)->u.operand;
int resolveInfo = (++it)->u.operand;
- dataLogF("[%4d] resolve_with_this %s, %s, %s, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo);
- dumpBytecodeCommentAndNewLine(location);
- it++;
+ out.printf("[%4d] resolve_with_this %s, %s, %s, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
case op_get_by_id:
@@ -1002,50 +1062,50 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
case op_get_by_id_generic:
case op_get_array_length:
case op_get_string_length: {
- printGetByIdOp(exec, location, it);
- printGetByIdCacheStatus(exec, location);
- dumpBytecodeCommentAndNewLine(location);
+ printGetByIdOp(out, exec, location, it);
+ printGetByIdCacheStatus(out, exec, location);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
case op_get_arguments_length: {
- printUnaryOp(exec, location, it, "get_arguments_length");
+ printUnaryOp(out, exec, location, it, "get_arguments_length");
it++;
break;
}
case op_put_by_id: {
- printPutByIdOp(exec, location, it, "put_by_id");
+ printPutByIdOp(out, exec, location, it, "put_by_id");
break;
}
case op_put_by_id_out_of_line: {
- printPutByIdOp(exec, location, it, "put_by_id_out_of_line");
+ printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
break;
}
case op_put_by_id_replace: {
- printPutByIdOp(exec, location, it, "put_by_id_replace");
+ printPutByIdOp(out, exec, location, it, "put_by_id_replace");
break;
}
case op_put_by_id_transition: {
- printPutByIdOp(exec, location, it, "put_by_id_transition");
+ printPutByIdOp(out, exec, location, it, "put_by_id_transition");
break;
}
case op_put_by_id_transition_direct: {
- printPutByIdOp(exec, location, it, "put_by_id_transition_direct");
+ printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
break;
}
case op_put_by_id_transition_direct_out_of_line: {
- printPutByIdOp(exec, location, it, "put_by_id_transition_direct_out_of_line");
+ printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
break;
}
case op_put_by_id_transition_normal: {
- printPutByIdOp(exec, location, it, "put_by_id_transition_normal");
+ printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
break;
}
case op_put_by_id_transition_normal_out_of_line: {
- printPutByIdOp(exec, location, it, "put_by_id_transition_normal_out_of_line");
+ printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
break;
}
case op_put_by_id_generic: {
- printPutByIdOp(exec, location, it, "put_by_id_generic");
+ printPutByIdOp(out, exec, location, it, "put_by_id_generic");
break;
}
case op_put_getter_setter: {
@@ -1053,36 +1113,32 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
int id0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- dataLogF("[%4d] put_getter_setter\t %s, %s, %s, %s", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] put_getter_setter\t %s, %s, %s, %s", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
break;
}
case op_del_by_id: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- dataLogF("[%4d] del_by_id\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] del_by_id\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
break;
}
case op_get_by_val: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- dataLogF("[%4d] get_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
- dumpBytecodeCommentAndNewLine(location);
- it++;
- it++;
+ out.printf("[%4d] get_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
+ dumpArrayProfiling(out, it, hasPrintedProfiling);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
case op_get_argument_by_val: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- dataLogF("[%4d] get_argument_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
- dumpBytecodeCommentAndNewLine(location);
- ++it;
+ out.printf("[%4d] get_argument_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
++it;
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
case op_get_by_pname: {
@@ -1092,225 +1148,159 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
int r3 = (++it)->u.operand;
int r4 = (++it)->u.operand;
int r5 = (++it)->u.operand;
- dataLogF("[%4d] get_by_pname\t %s, %s, %s, %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), registerName(exec, r4).data(), registerName(exec, r5).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] get_by_pname\t %s, %s, %s, %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), registerName(exec, r4).data(), registerName(exec, r5).data());
break;
}
case op_put_by_val: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- dataLogF("[%4d] put_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
- dumpBytecodeCommentAndNewLine(location);
- ++it;
+ out.printf("[%4d] put_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
+ dumpArrayProfiling(out, it, hasPrintedProfiling);
break;
}
case op_del_by_val: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- dataLogF("[%4d] del_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] del_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
break;
}
case op_put_by_index: {
int r0 = (++it)->u.operand;
unsigned n0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- dataLogF("[%4d] put_by_index\t %s, %u, %s", location, registerName(exec, r0).data(), n0, registerName(exec, r1).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] put_by_index\t %s, %u, %s", location, registerName(exec, r0).data(), n0, registerName(exec, r1).data());
break;
}
case op_jmp: {
int offset = (++it)->u.operand;
- dataLogF("[%4d] jmp\t\t %d(->%d)", location, offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
- break;
- }
- case op_loop: {
- int offset = (++it)->u.operand;
- dataLogF("[%4d] loop\t\t %d(->%d)", location, offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] jmp\t\t %d(->%d)", location, offset, location + offset);
break;
}
case op_jtrue: {
- printConditionalJump(exec, begin, it, location, "jtrue");
- break;
- }
- case op_loop_if_true: {
- printConditionalJump(exec, begin, it, location, "loop_if_true");
- break;
- }
- case op_loop_if_false: {
- printConditionalJump(exec, begin, it, location, "loop_if_false");
+ printConditionalJump(out, exec, begin, it, location, "jtrue");
break;
}
case op_jfalse: {
- printConditionalJump(exec, begin, it, location, "jfalse");
+ printConditionalJump(out, exec, begin, it, location, "jfalse");
break;
}
case op_jeq_null: {
- printConditionalJump(exec, begin, it, location, "jeq_null");
+ printConditionalJump(out, exec, begin, it, location, "jeq_null");
break;
}
case op_jneq_null: {
- printConditionalJump(exec, begin, it, location, "jneq_null");
+ printConditionalJump(out, exec, begin, it, location, "jneq_null");
break;
}
case op_jneq_ptr: {
int r0 = (++it)->u.operand;
Special::Pointer pointer = (++it)->u.specialPointer;
int offset = (++it)->u.operand;
- dataLogF("[%4d] jneq_ptr\t\t %s, %d (%p), %d(->%d)", location, registerName(exec, r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] jneq_ptr\t\t %s, %d (%p), %d(->%d)", location, registerName(exec, r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
break;
}
case op_jless: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- dataLogF("[%4d] jless\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] jless\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jlesseq: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- dataLogF("[%4d] jlesseq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] jlesseq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jgreater: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- dataLogF("[%4d] jgreater\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] jgreater\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jgreatereq: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- dataLogF("[%4d] jgreatereq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] jgreatereq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jnless: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- dataLogF("[%4d] jnless\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] jnless\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jnlesseq: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- dataLogF("[%4d] jnlesseq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] jnlesseq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jngreater: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- dataLogF("[%4d] jngreater\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] jngreater\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jngreatereq: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- dataLogF("[%4d] jngreatereq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
- break;
- }
- case op_loop_if_less: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- int offset = (++it)->u.operand;
- dataLogF("[%4d] loop_if_less\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
- break;
- }
- case op_loop_if_lesseq: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- int offset = (++it)->u.operand;
- dataLogF("[%4d] loop_if_lesseq\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
- break;
- }
- case op_loop_if_greater: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- int offset = (++it)->u.operand;
- dataLogF("[%4d] loop_if_greater\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
- break;
- }
- case op_loop_if_greatereq: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- int offset = (++it)->u.operand;
- dataLogF("[%4d] loop_if_greatereq\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] jngreatereq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_loop_hint: {
- dataLogF("[%4d] loop_hint", location);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] loop_hint", location);
break;
}
case op_switch_imm: {
int tableIndex = (++it)->u.operand;
int defaultTarget = (++it)->u.operand;
int scrutineeRegister = (++it)->u.operand;
- dataLogF("[%4d] switch_imm\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] switch_imm\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
break;
}
case op_switch_char: {
int tableIndex = (++it)->u.operand;
int defaultTarget = (++it)->u.operand;
int scrutineeRegister = (++it)->u.operand;
- dataLogF("[%4d] switch_char\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] switch_char\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
break;
}
case op_switch_string: {
int tableIndex = (++it)->u.operand;
int defaultTarget = (++it)->u.operand;
int scrutineeRegister = (++it)->u.operand;
- dataLogF("[%4d] switch_string\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] switch_string\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
break;
}
case op_new_func: {
int r0 = (++it)->u.operand;
int f0 = (++it)->u.operand;
int shouldCheck = (++it)->u.operand;
- dataLogF("[%4d] new_func\t\t %s, f%d, %s", location, registerName(exec, r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] new_func\t\t %s, f%d, %s", location, registerName(exec, r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
break;
}
case op_new_func_exp: {
int r0 = (++it)->u.operand;
int f0 = (++it)->u.operand;
- dataLogF("[%4d] new_func_exp\t %s, f%d", location, registerName(exec, r0).data(), f0);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] new_func_exp\t %s, f%d", location, registerName(exec, r0).data(), f0);
break;
}
case op_call: {
- printCallOp(exec, location, it, "call", DumpCaches);
+ printCallOp(out, exec, location, it, "call", DumpCaches);
break;
}
case op_call_eval: {
- printCallOp(exec, location, it, "call_eval", DontDumpCaches);
+ printCallOp(out, exec, location, it, "call_eval", DontDumpCaches);
break;
}
case op_call_varargs: {
@@ -1318,60 +1308,52 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
int thisValue = (++it)->u.operand;
int arguments = (++it)->u.operand;
int firstFreeRegister = (++it)->u.operand;
- dataLogF("[%4d] call_varargs\t %s, %s, %s, %d", location, registerName(exec, callee).data(), registerName(exec, thisValue).data(), registerName(exec, arguments).data(), firstFreeRegister);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] call_varargs\t %s, %s, %s, %d", location, registerName(exec, callee).data(), registerName(exec, thisValue).data(), registerName(exec, arguments).data(), firstFreeRegister);
break;
}
case op_tear_off_activation: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] tear_off_activation\t %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] tear_off_activation\t %s", location, registerName(exec, r0).data());
break;
}
case op_tear_off_arguments: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- dataLogF("[%4d] tear_off_arguments %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] tear_off_arguments %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
break;
}
case op_ret: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] ret\t\t %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] ret\t\t %s", location, registerName(exec, r0).data());
break;
}
case op_call_put_result: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] call_put_result\t\t %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
- it++;
+ out.printf("[%4d] call_put_result\t\t %s", location, registerName(exec, r0).data());
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
case op_ret_object_or_this: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- dataLogF("[%4d] constructor_ret\t\t %s %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] constructor_ret\t\t %s %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
break;
}
case op_construct: {
- printCallOp(exec, location, it, "construct", DumpCaches);
+ printCallOp(out, exec, location, it, "construct", DumpCaches);
break;
}
case op_strcat: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int count = (++it)->u.operand;
- dataLogF("[%4d] strcat\t\t %s, %s, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), count);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] strcat\t\t %s, %s, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), count);
break;
}
case op_to_primitive: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- dataLogF("[%4d] to_primitive\t %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] to_primitive\t %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
break;
}
case op_get_pnames: {
@@ -1380,8 +1362,7 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
int r2 = it[3].u.operand;
int r3 = it[4].u.operand;
int offset = it[5].u.operand;
- dataLogF("[%4d] get_pnames\t %s, %s, %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] get_pnames\t %s, %s, %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), offset, location + offset);
it += OPCODE_LENGTH(op_get_pnames) - 1;
break;
}
@@ -1392,54 +1373,40 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
int size = it[4].u.operand;
int iter = it[5].u.operand;
int offset = it[6].u.operand;
- dataLogF("[%4d] next_pname\t %s, %s, %s, %s, %s, %d(->%d)", location, registerName(exec, dest).data(), registerName(exec, base).data(), registerName(exec, i).data(), registerName(exec, size).data(), registerName(exec, iter).data(), offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] next_pname\t %s, %s, %s, %s, %s, %d(->%d)", location, registerName(exec, dest).data(), registerName(exec, base).data(), registerName(exec, i).data(), registerName(exec, size).data(), registerName(exec, iter).data(), offset, location + offset);
it += OPCODE_LENGTH(op_next_pname) - 1;
break;
}
case op_push_with_scope: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] push_with_scope\t %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] push_with_scope\t %s", location, registerName(exec, r0).data());
break;
}
case op_pop_scope: {
- dataLogF("[%4d] pop_scope", location);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] pop_scope", location);
break;
}
case op_push_name_scope: {
int id0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
unsigned attributes = (++it)->u.operand;
- dataLogF("[%4d] push_name_scope \t%s, %s, %u", location, idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data(), attributes);
- dumpBytecodeCommentAndNewLine(location);
- break;
- }
- case op_jmp_scopes: {
- int scopeDelta = (++it)->u.operand;
- int offset = (++it)->u.operand;
- dataLogF("[%4d] jmp_scopes\t^%d, %d(->%d)", location, scopeDelta, offset, location + offset);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] push_name_scope \t%s, %s, %u", location, idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data(), attributes);
break;
}
case op_catch: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] catch\t\t %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] catch\t\t %s", location, registerName(exec, r0).data());
break;
}
case op_throw: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] throw\t\t %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] throw\t\t %s", location, registerName(exec, r0).data());
break;
}
case op_throw_static_error: {
int k0 = (++it)->u.operand;
int k1 = (++it)->u.operand;
- dataLogF("[%4d] throw_static_error\t %s, %s", location, constantName(exec, k0, getConstant(k0)).data(), k1 ? "true" : "false");
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] throw_static_error\t %s, %s", location, constantName(exec, k0, getConstant(k0)).data(), k1 ? "true" : "false");
break;
}
case op_debug: {
@@ -1447,40 +1414,54 @@ void CodeBlock::dumpBytecode(ExecState* exec, const Instruction* begin, const In
int firstLine = (++it)->u.operand;
int lastLine = (++it)->u.operand;
int column = (++it)->u.operand;
- dataLogF("[%4d] debug\t\t %s, %d, %d, %d", location, debugHookName(debugHookID), firstLine, lastLine, column);
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] debug\t\t %s, %d, %d, %d", location, debugHookName(debugHookID), firstLine, lastLine, column);
break;
}
case op_profile_will_call: {
int function = (++it)->u.operand;
- dataLogF("[%4d] profile_will_call %s", location, registerName(exec, function).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] profile_will_call %s", location, registerName(exec, function).data());
break;
}
case op_profile_did_call: {
int function = (++it)->u.operand;
- dataLogF("[%4d] profile_did_call\t %s", location, registerName(exec, function).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] profile_did_call\t %s", location, registerName(exec, function).data());
break;
}
case op_end: {
int r0 = (++it)->u.operand;
- dataLogF("[%4d] end\t\t %s", location, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
+ out.printf("[%4d] end\t\t %s", location, registerName(exec, r0).data());
break;
}
#if ENABLE(LLINT_C_LOOP)
default:
- ASSERT(false); // We should never get here.
+ RELEASE_ASSERT_NOT_REACHED();
+#endif
+ }
+
+#if ENABLE(VALUE_PROFILER)
+ dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
+ dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
#endif
+
+#if ENABLE(DFG_JIT)
+ Vector<FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
+ if (!exitSites.isEmpty()) {
+ out.print(" !! frequent exits: ");
+ CommaPrinter comma;
+ for (unsigned i = 0; i < exitSites.size(); ++i)
+ out.print(comma, exitSites[i].kind());
}
+#else // ENABLE(DFG_JIT)
+ UNUSED_PARAM(location);
+#endif // ENABLE(DFG_JIT)
+ out.print("\n");
}
-void CodeBlock::dumpBytecode(unsigned bytecodeOffset)
+void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset)
{
ExecState* exec = m_globalObject->globalExec();
const Instruction* it = instructions().begin() + bytecodeOffset;
- dumpBytecode(exec, instructions().begin(), it);
+ dumpBytecode(out, exec, instructions().begin(), it);
}
#if DUMP_CODE_BLOCK_STATISTICS
@@ -1613,19 +1594,19 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
, m_numCalleeRegisters(other.m_numCalleeRegisters)
, m_numVars(other.m_numVars)
, m_isConstructor(other.m_isConstructor)
- , m_unlinkedCode(*other.m_globalData, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
- , m_ownerExecutable(*other.m_globalData, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
- , m_globalData(other.m_globalData)
+ , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
+ , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
+ , m_vm(other.m_vm)
, m_instructions(other.m_instructions)
, m_thisRegister(other.m_thisRegister)
, m_argumentsRegister(other.m_argumentsRegister)
, m_activationRegister(other.m_activationRegister)
, m_isStrictMode(other.m_isStrictMode)
+ , m_needsActivation(other.m_needsActivation)
, m_source(other.m_source)
, m_sourceOffset(other.m_sourceOffset)
-#if ENABLE(VALUE_PROFILER)
- , m_executionEntryCount(0)
-#endif
+ , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
+ , m_codeType(other.m_codeType)
, m_identifiers(other.m_identifiers)
, m_constantRegisters(other.m_constantRegisters)
, m_functionDecls(other.m_functionDecls)
@@ -1635,9 +1616,6 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
, m_reoptimizationRetryCounter(0)
, m_resolveOperations(other.m_resolveOperations)
, m_putToBaseOperations(other.m_putToBaseOperations)
-#if ENABLE(BYTECODE_COMMENTS)
- , m_bytecodeCommentIterator(0)
-#endif
#if ENABLE(JIT)
, m_canCompileWithDFGState(DFG::CapabilityLevelNotSet)
#endif
@@ -1657,56 +1635,53 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
}
}
-CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, unsigned baseScopeDepth, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative)
- : m_globalObject(globalObject->globalData(), ownerExecutable, globalObject)
- , m_heap(&m_globalObject->globalData().heap)
+CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, unsigned baseScopeDepth, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative)
+ : m_globalObject(globalObject->vm(), ownerExecutable, globalObject)
+ , m_heap(&m_globalObject->vm().heap)
, m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
, m_numVars(unlinkedCodeBlock->m_numVars)
, m_isConstructor(unlinkedCodeBlock->isConstructor())
- , m_unlinkedCode(globalObject->globalData(), ownerExecutable, unlinkedCodeBlock)
- , m_ownerExecutable(globalObject->globalData(), ownerExecutable, ownerExecutable)
- , m_globalData(unlinkedCodeBlock->globalData())
+ , m_unlinkedCode(globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
+ , m_ownerExecutable(globalObject->vm(), ownerExecutable, ownerExecutable)
+ , m_vm(unlinkedCodeBlock->vm())
, m_thisRegister(unlinkedCodeBlock->thisRegister())
, m_argumentsRegister(unlinkedCodeBlock->argumentsRegister())
, m_activationRegister(unlinkedCodeBlock->activationRegister())
, m_isStrictMode(unlinkedCodeBlock->isStrictMode())
+ , m_needsActivation(unlinkedCodeBlock->needsFullScopeChain())
, m_source(sourceProvider)
, m_sourceOffset(sourceOffset)
-#if ENABLE(VALUE_PROFILER)
- , m_executionEntryCount(0)
-#endif
+ , m_firstLineColumnOffset(firstLineColumnOffset)
+ , m_codeType(unlinkedCodeBlock->codeType())
, m_alternative(alternative)
, m_osrExitCounter(0)
, m_optimizationDelayCounter(0)
, m_reoptimizationRetryCounter(0)
-#if ENABLE(BYTECODE_COMMENTS)
- , m_bytecodeCommentIterator(0)
-#endif
{
- m_globalData->startedCompiling(this);
+ m_vm->startedCompiling(this);
ASSERT(m_source);
setNumParameters(unlinkedCodeBlock->numParameters());
- optimizeAfterWarmUp();
- jitAfterWarmUp();
-
#if DUMP_CODE_BLOCK_STATISTICS
liveCodeBlockSet.add(this);
#endif
setIdentifiers(unlinkedCodeBlock->identifiers());
setConstantRegisters(unlinkedCodeBlock->constantRegisters());
-
+ if (unlinkedCodeBlock->usesGlobalObject())
+ m_constantRegisters[unlinkedCodeBlock->globalObjectRegister()].set(*m_vm, ownerExecutable, globalObject);
m_functionDecls.grow(unlinkedCodeBlock->numberOfFunctionDecls());
for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
unsigned lineCount = unlinkedExecutable->lineCount();
unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
+ unsigned startColumn = unlinkedExecutable->functionStartColumn();
+ startColumn += (unlinkedExecutable->firstLineOffset() ? 1 : ownerExecutable->startColumn());
unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
unsigned sourceLength = unlinkedExecutable->sourceLength();
- SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine);
- FunctionExecutable* executable = FunctionExecutable::create(*m_globalData, code, unlinkedExecutable, firstLine, firstLine + lineCount);
- m_functionDecls[i].set(*m_globalData, ownerExecutable, executable);
+ SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
+ FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn);
+ m_functionDecls[i].set(*m_vm, ownerExecutable, executable);
}
m_functionExprs.grow(unlinkedCodeBlock->numberOfFunctionExprs());
@@ -1714,11 +1689,13 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
unsigned lineCount = unlinkedExecutable->lineCount();
unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
+ unsigned startColumn = unlinkedExecutable->functionStartColumn();
+ startColumn += (unlinkedExecutable->firstLineOffset() ? 1 : ownerExecutable->startColumn());
unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
unsigned sourceLength = unlinkedExecutable->sourceLength();
- SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine);
- FunctionExecutable* executable = FunctionExecutable::create(*m_globalData, code, unlinkedExecutable, firstLine, firstLine + lineCount);
- m_functionExprs[i].set(*m_globalData, ownerExecutable, executable);
+ SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
+ FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn);
+ m_functionExprs[i].set(*m_vm, ownerExecutable, executable);
}
if (unlinkedCodeBlock->hasRareData()) {
@@ -1791,22 +1768,23 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
m_valueProfiles.grow(size);
#endif
+ if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
+ m_objectAllocationProfiles.grow(size);
if (size_t size = unlinkedCodeBlock->numberOfResolveOperations())
m_resolveOperations.grow(size);
- size_t putToBaseCount = unlinkedCodeBlock->numberOfPutToBaseOperations();
- m_putToBaseOperations.reserveCapacity(putToBaseCount);
- for (size_t i = 0; i < putToBaseCount; ++i)
- m_putToBaseOperations.append(PutToBaseOperation(isStrictMode()));
-
- ASSERT(m_putToBaseOperations.capacity() == putToBaseCount);
+ if (size_t putToBaseCount = unlinkedCodeBlock->numberOfPutToBaseOperations()) {
+ m_putToBaseOperations.reserveInitialCapacity(putToBaseCount);
+ for (size_t i = 0; i < putToBaseCount; ++i)
+ m_putToBaseOperations.uncheckedAppend(PutToBaseOperation(isStrictMode()));
+ }
// Copy and translate the UnlinkedInstructions
size_t instructionCount = unlinkedCodeBlock->instructions().size();
UnlinkedInstruction* pc = unlinkedCodeBlock->instructions().data();
- Vector<Instruction> instructions(instructionCount);
+ Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
for (size_t i = 0; i < unlinkedCodeBlock->instructions().size(); ) {
unsigned opLength = opcodeLength(pc[i].u.opcode);
- instructions[i] = globalData()->interpreter->getOpcode(pc[i].u.opcode);
+ instructions[i] = vm()->interpreter->getOpcode(pc[i].u.opcode);
for (size_t j = 1; j < opLength; ++j) {
if (sizeof(int32_t) != sizeof(intptr_t))
instructions[i + j].u.pointer = 0;
@@ -1823,10 +1801,6 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
// fallthrough
}
case op_convert_this:
- case op_resolve:
- case op_resolve_base:
- case op_resolve_with_base:
- case op_resolve_with_this:
case op_get_by_id:
case op_call_put_result:
case op_get_callee: {
@@ -1851,6 +1825,79 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
break;
}
#endif
+ case op_resolve_base:
+ case op_resolve_base_to_global:
+ case op_resolve_base_to_global_dynamic:
+ case op_resolve_base_to_scope:
+ case op_resolve_base_to_scope_with_top_scope_check: {
+ instructions[i + 4].u.resolveOperations = &m_resolveOperations[pc[i + 4].u.operand];
+ instructions[i + 5].u.putToBaseOperation = &m_putToBaseOperations[pc[i + 5].u.operand];
+#if ENABLE(DFG_JIT)
+ ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
+ ASSERT(profile->m_bytecodeOffset == -1);
+ profile->m_bytecodeOffset = i;
+ ASSERT((opLength - 1) > 5);
+ instructions[i + opLength - 1] = profile;
+#endif
+ break;
+ }
+ case op_resolve_global_property:
+ case op_resolve_global_var:
+ case op_resolve_scoped_var:
+ case op_resolve_scoped_var_on_top_scope:
+ case op_resolve_scoped_var_with_top_scope_check: {
+ instructions[i + 3].u.resolveOperations = &m_resolveOperations[pc[i + 3].u.operand];
+ break;
+ }
+ case op_put_to_base:
+ case op_put_to_base_variable: {
+ instructions[i + 4].u.putToBaseOperation = &m_putToBaseOperations[pc[i + 4].u.operand];
+ break;
+ }
+ case op_resolve: {
+#if ENABLE(DFG_JIT)
+ ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
+ ASSERT(profile->m_bytecodeOffset == -1);
+ profile->m_bytecodeOffset = i;
+ ASSERT((opLength - 1) > 3);
+ instructions[i + opLength - 1] = profile;
+#endif
+ instructions[i + 3].u.resolveOperations = &m_resolveOperations[pc[i + 3].u.operand];
+ break;
+ }
+ case op_resolve_with_base:
+ case op_resolve_with_this: {
+ instructions[i + 4].u.resolveOperations = &m_resolveOperations[pc[i + 4].u.operand];
+ if (pc[i].u.opcode != op_resolve_with_this)
+ instructions[i + 5].u.putToBaseOperation = &m_putToBaseOperations[pc[i + 5].u.operand];
+#if ENABLE(DFG_JIT)
+ ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
+ ASSERT(profile->m_bytecodeOffset == -1);
+ profile->m_bytecodeOffset = i;
+ instructions[i + opLength - 1] = profile;
+#endif
+ break;
+ }
+ case op_new_object: {
+ int objectAllocationProfileIndex = pc[i + opLength - 1].u.operand;
+ ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
+ int inferredInlineCapacity = pc[i + opLength - 2].u.operand;
+
+ instructions[i + opLength - 1] = objectAllocationProfile;
+ objectAllocationProfile->initialize(*vm(),
+ m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
+ break;
+ }
+
+ case op_get_scoped_var: {
+#if ENABLE(DFG_JIT)
+ ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
+ ASSERT(profile->m_bytecodeOffset == -1);
+ profile->m_bytecodeOffset = i;
+ instructions[i + opLength - 1] = profile;
+#endif
+ break;
+ }
case op_call:
case op_call_eval: {
@@ -1892,16 +1939,22 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
break;
if (entry.couldBeWatched()) {
- instructions[i + 0] = globalData()->interpreter->getOpcode(op_init_global_const_check);
+ instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const_check);
instructions[i + 1] = &globalObject->registerAt(entry.getIndex());
instructions[i + 3] = entry.addressOfIsWatched();
break;
}
- instructions[i + 0] = globalData()->interpreter->getOpcode(op_init_global_const);
+ instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
instructions[i + 1] = &globalObject->registerAt(entry.getIndex());
break;
}
+
+ case op_debug: {
+ instructions[i + 4] = columnNumberForBytecodeOffset(i);
+ break;
+ }
+
default:
break;
}
@@ -1909,17 +1962,26 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
}
m_instructions = WTF::RefCountedArray<Instruction>(instructions);
+ // Set optimization thresholds only after m_instructions is initialized, since these
+ // rely on the instruction count (and are in theory permitted to also inspect the
+ // instruction stream to more accurately assess the cost of tier-up).
+ optimizeAfterWarmUp();
+ jitAfterWarmUp();
+
if (Options::dumpGeneratedBytecodes())
dumpBytecode();
- m_globalData->finishedCompiling(this);
+ m_vm->finishedCompiling(this);
}
CodeBlock::~CodeBlock()
{
+ if (m_vm->m_perBytecodeProfiler)
+ m_vm->m_perBytecodeProfiler->notifyDestruction(this);
+
#if ENABLE(DFG_JIT)
// Remove myself from the set of DFG code blocks. Note that I may not be in this set
// (because I'm not a DFG code block), in which case this is a no-op anyway.
- m_globalData->heap.m_dfgCodeBlocks.m_set.remove(this);
+ m_vm->heap.m_dfgCodeBlocks.m_set.remove(this);
#endif
#if ENABLE(VERBOSE_VALUE_PROFILE)
@@ -1958,13 +2020,13 @@ void CodeBlock::setNumParameters(int newValue)
m_numParameters = newValue;
#if ENABLE(VALUE_PROFILER)
- m_argumentValueProfiles.resize(newValue);
+ m_argumentValueProfiles.resizeToFit(newValue);
#endif
}
void CodeBlock::visitStructures(SlotVisitor& visitor, Instruction* vPC)
{
- Interpreter* interpreter = m_globalData->interpreter;
+ Interpreter* interpreter = m_vm->interpreter;
if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id) && vPC[4].u.structure) {
visitor.append(&vPC[4].u.structure);
@@ -2081,7 +2143,7 @@ void CodeBlock::visitAggregate(SlotVisitor& visitor)
visitor.addWeakReferenceHarvester(this);
#else // ENABLE(DFG_JIT)
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
#endif // ENABLE(DFG_JIT)
}
@@ -2158,7 +2220,7 @@ static const bool verboseUnlinking = false;
void CodeBlock::finalizeUnconditionally()
{
#if ENABLE(LLINT)
- Interpreter* interpreter = m_globalData->interpreter;
+ Interpreter* interpreter = m_vm->interpreter;
if (!!numberOfInstructions()) {
const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
@@ -2197,7 +2259,7 @@ void CodeBlock::finalizeUnconditionally()
case op_get_array_length:
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
}
@@ -2219,12 +2281,8 @@ void CodeBlock::finalizeUnconditionally()
if (verboseUnlinking)
dataLog(*this, " has dead weak references, jettisoning during GC.\n");
- // Make sure that the baseline JIT knows that it should re-warm-up before
- // optimizing.
- alternative()->optimizeAfterWarmUp();
-
if (DFG::shouldShowDisassembly()) {
- dataLog(*this, "will be jettisoned because of the following dead references:\n");
+ dataLog(*this, " will be jettisoned because of the following dead references:\n");
for (unsigned i = 0; i < m_dfgData->transitions.size(); ++i) {
WeakReferenceTransition& transition = m_dfgData->transitions[i];
JSCell* origin = transition.m_codeOrigin.get();
@@ -2232,15 +2290,13 @@ void CodeBlock::finalizeUnconditionally()
JSCell* to = transition.m_to.get();
if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
continue;
- dataLogF(" Transition under %s, ", JSValue(origin).description());
- dataLogF("%s -> ", JSValue(from).description());
- dataLogF("%s.\n", JSValue(to).description());
+ dataLog(" Transition under ", JSValue(origin), ", ", JSValue(from), " -> ", JSValue(to), ".\n");
}
for (unsigned i = 0; i < m_dfgData->weakReferences.size(); ++i) {
JSCell* weak = m_dfgData->weakReferences[i].get();
if (Heap::isMarked(weak))
continue;
- dataLogF(" Weak reference %s.\n", JSValue(weak).description());
+ dataLog(" Weak reference ", JSValue(weak), ".\n");
}
}
@@ -2286,7 +2342,7 @@ void CodeBlock::finalizeUnconditionally()
stub->executable()->hashFor(callLinkInfo(i).specializationKind()),
", stub routine ", RawPointer(stub), ".\n");
}
- callLinkInfo(i).unlink(*m_globalData, repatchBuffer);
+ callLinkInfo(i).unlink(*m_vm, repatchBuffer);
}
} else if (!Heap::isMarked(callLinkInfo(i).callee.get())) {
if (verboseUnlinking) {
@@ -2297,7 +2353,7 @@ void CodeBlock::finalizeUnconditionally()
callLinkInfo(i).specializationKind()),
").\n");
}
- callLinkInfo(i).unlink(*m_globalData, repatchBuffer);
+ callLinkInfo(i).unlink(*m_vm, repatchBuffer);
}
}
if (!!callLinkInfo(i).lastSeenCallee
@@ -2310,7 +2366,7 @@ void CodeBlock::finalizeUnconditionally()
if (stubInfo.visitWeakReferences())
continue;
- resetStubInternal(repatchBuffer, stubInfo);
+ resetStubDuringGCInternal(repatchBuffer, stubInfo);
}
}
#endif
@@ -2348,6 +2404,12 @@ void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInf
stubInfo.reset();
}
+
+void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+{
+ resetStubInternal(repatchBuffer, stubInfo);
+ stubInfo.resetByGC = true;
+}
#endif
void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
@@ -2362,6 +2424,8 @@ void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
visitor.append(&m_functionExprs[i]);
for (size_t i = 0; i < m_functionDecls.size(); ++i)
visitor.append(&m_functionDecls[i]);
+ for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
+ m_objectAllocationProfiles[i].visitAggregate(visitor);
updateAllPredictions(Collection);
}
@@ -2386,85 +2450,9 @@ void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
#endif
}
-#if ENABLE(BYTECODE_COMMENTS)
-// Finds the comment string for the specified bytecode offset/PC is available.
-const char* CodeBlock::commentForBytecodeOffset(unsigned bytecodeOffset)
-{
- ASSERT(bytecodeOffset < instructions().size());
-
- Vector<Comment>& comments = m_bytecodeComments;
- size_t numberOfComments = comments.size();
- const char* result = 0;
-
- if (!numberOfComments)
- return 0; // No comments to match with.
-
- // The next match is most likely the next comment in the list.
- // Do a quick check to see if that is a match first.
- // m_bytecodeCommentIterator should already be pointing to the
- // next comment we should check.
-
- ASSERT(m_bytecodeCommentIterator < comments.size());
-
- size_t i = m_bytecodeCommentIterator;
- size_t commentPC = comments[i].pc;
- if (commentPC == bytecodeOffset) {
- // We've got a match. All done!
- m_bytecodeCommentIterator = i;
- result = comments[i].string;
- } else if (commentPC > bytecodeOffset) {
- // The current comment is already greater than the requested PC.
- // Start searching from the first comment.
- i = 0;
- } else {
- // Otherwise, the current comment's PC is less than the requested PC.
- // Hence, we can just start searching from the next comment in the
- // list.
- i++;
- }
-
- // If the result is still not found, do a linear search in the range
- // that we've determined above.
- if (!result) {
- for (; i < comments.size(); ++i) {
- commentPC = comments[i].pc;
- if (commentPC == bytecodeOffset) {
- result = comments[i].string;
- break;
- }
- if (comments[i].pc > bytecodeOffset) {
- // The current comment PC is already past the requested
- // bytecodeOffset. Hence, there are no more possible
- // matches. Just fail.
- break;
- }
- }
- }
-
- // Update the iterator to point to the next comment.
- if (++i >= numberOfComments) {
- // At most point to the last comment entry. This ensures that the
- // next time we call this function, the quick checks will at least
- // have one entry to check and can fail fast if appropriate.
- i = numberOfComments - 1;
- }
- m_bytecodeCommentIterator = i;
- return result;
-}
-
-void CodeBlock::dumpBytecodeComments()
-{
- Vector<Comment>& comments = m_bytecodeComments;
- printf("Comments for codeblock %p: size %lu\n", this, comments.size());
- for (size_t i = 0; i < comments.size(); ++i)
- printf(" pc %lu : '%s'\n", comments[i].pc, comments[i].string);
- printf("End of comments for codeblock %p\n", this);
-}
-#endif // ENABLE_BYTECODE_COMMENTS
-
HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
{
- ASSERT(bytecodeOffset < instructions().size());
+ RELEASE_ASSERT(bytecodeOffset < instructions().size());
if (!m_rareData)
return 0;
@@ -2480,16 +2468,29 @@ HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
return 0;
}
-int CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
+unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
{
- ASSERT(bytecodeOffset < instructions().size());
+ RELEASE_ASSERT(bytecodeOffset < instructions().size());
return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
}
-void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset)
+unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
{
- m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset);
+ int divot;
+ int startOffset;
+ int endOffset;
+ unsigned line;
+ unsigned column;
+ expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
+ return column;
+}
+
+void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
+{
+ m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
divot += m_sourceOffset;
+ column += line ? 1 : firstLineColumnOffset();
+ line += m_ownerExecutable->lineNo();
}
void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
@@ -2502,8 +2503,6 @@ void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
m_callLinkInfos.shrinkToFit();
#endif
#if ENABLE(VALUE_PROFILER)
- if (shrinkMode == EarlyShrink)
- m_argumentValueProfiles.shrinkToFit();
m_rareCaseProfiles.shrinkToFit();
m_specialFastCaseProfiles.shrinkToFit();
#endif
@@ -2547,7 +2546,7 @@ void CodeBlock::createActivation(CallFrame* callFrame)
ASSERT(codeType() == FunctionCode);
ASSERT(needsFullScopeChain());
ASSERT(!callFrame->uncheckedR(activationRegister()).jsValue());
- JSActivation* activation = JSActivation::create(callFrame->globalData(), callFrame, this);
+ JSActivation* activation = JSActivation::create(callFrame->vm(), callFrame, this);
callFrame->uncheckedR(activationRegister()) = JSValue(activation);
callFrame->setScope(activation);
}
@@ -2575,13 +2574,13 @@ void CodeBlock::unlinkCalls()
#endif
if (!m_callLinkInfos.size())
return;
- if (!m_globalData->canUseJIT())
+ if (!m_vm->canUseJIT())
return;
RepatchBuffer repatchBuffer(this);
for (size_t i = 0; i < m_callLinkInfos.size(); i++) {
if (!m_callLinkInfos[i].isLinked())
continue;
- m_callLinkInfos[i].unlink(*m_globalData, repatchBuffer);
+ m_callLinkInfos[i].unlink(*m_vm, repatchBuffer);
}
}
@@ -2595,7 +2594,7 @@ void CodeBlock::unlinkIncomingCalls()
return;
RepatchBuffer repatchBuffer(this);
while (m_incomingCalls.begin() != m_incomingCalls.end())
- m_incomingCalls.begin()->unlink(*m_globalData, repatchBuffer);
+ m_incomingCalls.begin()->unlink(*m_vm, repatchBuffer);
}
#endif // ENABLE(JIT)
@@ -2662,12 +2661,13 @@ ClosureCallStubRoutine* CodeBlock::findClosureCallForReturnPC(ReturnAddressPtr r
continue;
if (!info.stub->code().executableMemory()->contains(returnAddress.value()))
continue;
-
+
+ RELEASE_ASSERT(info.stub->codeOrigin().bytecodeIndex < CodeOrigin::maximumBytecodeIndex);
return info.stub.get();
}
// The stub routine may have been jettisoned. This is rare, but we have to handle it.
- const JITStubRoutineSet& set = m_globalData->heap.jitStubRoutines();
+ const JITStubRoutineSet& set = m_vm->heap.jitStubRoutines();
for (unsigned i = set.size(); i--;) {
GCAwareJITStubRoutine* genericStub = set.at(i);
if (!genericStub->isClosureCall())
@@ -2675,6 +2675,7 @@ ClosureCallStubRoutine* CodeBlock::findClosureCallForReturnPC(ReturnAddressPtr r
ClosureCallStubRoutine* stub = static_cast<ClosureCallStubRoutine*>(genericStub);
if (!stub->code().executableMemory()->contains(returnAddress.value()))
continue;
+ RELEASE_ASSERT(stub->codeOrigin().bytecodeIndex < CodeOrigin::maximumBytecodeIndex);
return stub;
}
@@ -2702,11 +2703,11 @@ unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddre
&& returnAddress.value() <= LLInt::getCodePtr(llint_end))
#endif
{
- ASSERT(exec->codeBlock());
- ASSERT(exec->codeBlock() == this);
- ASSERT(JITCode::isBaselineCode(getJITType()));
+ RELEASE_ASSERT(exec->codeBlock());
+ RELEASE_ASSERT(exec->codeBlock() == this);
+ RELEASE_ASSERT(JITCode::isBaselineCode(getJITType()));
Instruction* instruction = exec->currentVPC();
- ASSERT(instruction);
+ RELEASE_ASSERT(instruction);
instruction = adjustPCIfAtCallSite(instruction);
return bytecodeOffset(instruction);
@@ -2716,19 +2717,31 @@ unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddre
#if ENABLE(JIT)
if (!m_rareData)
return 1;
- Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
+ Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callIndices = m_rareData->m_callReturnIndexVector;
if (!callIndices.size())
return 1;
if (getJITCode().getExecutableMemory()->contains(returnAddress.value())) {
unsigned callReturnOffset = getJITCode().offsetOf(returnAddress.value());
CallReturnOffsetToBytecodeOffset* result =
- binarySearch<CallReturnOffsetToBytecodeOffset, unsigned, getCallReturnOffset>(callIndices.begin(), callIndices.size(), callReturnOffset);
- ASSERT(result->callReturnOffset == callReturnOffset);
+ binarySearch<CallReturnOffsetToBytecodeOffset, unsigned>(
+ callIndices, callIndices.size(), callReturnOffset, getCallReturnOffset);
+ RELEASE_ASSERT(result->callReturnOffset == callReturnOffset);
+ RELEASE_ASSERT(result->bytecodeOffset < instructionCount());
return result->bytecodeOffset;
}
-
- return findClosureCallForReturnPC(returnAddress)->codeOrigin().bytecodeIndex;
+ ClosureCallStubRoutine* closureInfo = findClosureCallForReturnPC(returnAddress);
+ CodeOrigin origin = closureInfo->codeOrigin();
+ while (InlineCallFrame* inlineCallFrame = origin.inlineCallFrame) {
+ if (inlineCallFrame->baselineCodeBlock() == this)
+ break;
+ origin = inlineCallFrame->caller;
+ RELEASE_ASSERT(origin.bytecodeIndex < CodeOrigin::maximumBytecodeIndex);
+ }
+ RELEASE_ASSERT(origin.bytecodeIndex < CodeOrigin::maximumBytecodeIndex);
+ unsigned bytecodeIndex = origin.bytecodeIndex;
+ RELEASE_ASSERT(bytecodeIndex < instructionCount());
+ return bytecodeIndex;
#endif // ENABLE(JIT)
#if !ENABLE(LLINT) && !ENABLE(JIT)
@@ -2743,13 +2756,20 @@ bool CodeBlock::codeOriginForReturn(ReturnAddressPtr returnAddress, CodeOrigin&
return false;
if (!getJITCode().getExecutableMemory()->contains(returnAddress.value())) {
- codeOrigin = findClosureCallForReturnPC(returnAddress)->codeOrigin();
+ ClosureCallStubRoutine* stub = findClosureCallForReturnPC(returnAddress);
+ ASSERT(stub);
+ if (!stub)
+ return false;
+ codeOrigin = stub->codeOrigin();
return true;
}
unsigned offset = getJITCode().offsetOf(returnAddress.value());
- CodeOriginAtCallReturnOffset* entry = binarySearch<CodeOriginAtCallReturnOffset, unsigned, getCallReturnOffsetForCodeOrigin>(codeOrigins().begin(), codeOrigins().size(), offset, WTF::KeyMustNotBePresentInArray);
- if (entry->callReturnOffset != offset)
+ CodeOriginAtCallReturnOffset* entry =
+ tryBinarySearch<CodeOriginAtCallReturnOffset, unsigned>(
+ codeOrigins(), codeOrigins().size(), offset,
+ getCallReturnOffsetForCodeOrigin);
+ if (!entry)
return false;
codeOrigin = entry->codeOrigin;
return true;
@@ -2765,8 +2785,8 @@ void CodeBlock::clearEvalCache()
m_rareData->m_evalCodeCache.clear();
}
-template<typename T>
-inline void replaceExistingEntries(Vector<T>& target, Vector<T>& source)
+template<typename T, size_t inlineCapacity, typename U, typename V>
+inline void replaceExistingEntries(Vector<T, inlineCapacity, U>& target, Vector<T, inlineCapacity, V>& source)
{
ASSERT(target.size() <= source.size());
for (size_t i = 0; i < target.size(); ++i)
@@ -2795,12 +2815,10 @@ void CodeBlock::reoptimize()
{
ASSERT(replacement() != this);
ASSERT(replacement()->alternative() == this);
- replacement()->tallyFrequentExitSites();
if (DFG::shouldShowDisassembly())
dataLog(*replacement(), " will be jettisoned due to reoptimization of ", *this, ".\n");
replacement()->jettison();
countReoptimization();
- optimizeAfterWarmUp();
}
CodeBlock* ProgramCodeBlock::replacement()
@@ -2859,31 +2877,30 @@ DFG::CapabilityLevel FunctionCodeBlock::canCompileWithDFGInternal()
return DFG::canCompileFunctionForCall(this);
}
-void ProgramCodeBlock::jettison()
+void CodeBlock::jettison()
{
ASSERT(JITCode::isOptimizingJIT(getJITType()));
ASSERT(this == replacement());
+ alternative()->optimizeAfterWarmUp();
+ tallyFrequentExitSites();
if (DFG::shouldShowDisassembly())
dataLog("Jettisoning ", *this, ".\n");
- static_cast<ProgramExecutable*>(ownerExecutable())->jettisonOptimizedCode(*globalData());
+ jettisonImpl();
}
-void EvalCodeBlock::jettison()
+void ProgramCodeBlock::jettisonImpl()
{
- ASSERT(JITCode::isOptimizingJIT(getJITType()));
- ASSERT(this == replacement());
- if (DFG::shouldShowDisassembly())
- dataLog("Jettisoning ", *this, ".\n");
- static_cast<EvalExecutable*>(ownerExecutable())->jettisonOptimizedCode(*globalData());
+ static_cast<ProgramExecutable*>(ownerExecutable())->jettisonOptimizedCode(*vm());
}
-void FunctionCodeBlock::jettison()
+void EvalCodeBlock::jettisonImpl()
{
- ASSERT(JITCode::isOptimizingJIT(getJITType()));
- ASSERT(this == replacement());
- if (DFG::shouldShowDisassembly())
- dataLog("Jettisoning ", *this, ".\n");
- static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(*globalData(), m_isConstructor ? CodeForConstruct : CodeForCall);
+ static_cast<EvalExecutable*>(ownerExecutable())->jettisonOptimizedCode(*vm());
+}
+
+void FunctionCodeBlock::jettisonImpl()
+{
+ static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(*vm(), m_isConstructor ? CodeForConstruct : CodeForCall);
}
bool ProgramCodeBlock::jitCompileImpl(ExecState* exec)
@@ -2908,6 +2925,217 @@ bool FunctionCodeBlock::jitCompileImpl(ExecState* exec)
}
#endif
+JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
+{
+ if (!codeOrigin.inlineCallFrame)
+ return globalObject();
+ return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->generatedBytecode().globalObject();
+}
+
+unsigned CodeBlock::reoptimizationRetryCounter() const
+{
+ ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
+ return m_reoptimizationRetryCounter;
+}
+
+void CodeBlock::countReoptimization()
+{
+ m_reoptimizationRetryCounter++;
+ if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
+ m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
+}
+
+unsigned CodeBlock::numberOfDFGCompiles()
+{
+ ASSERT(JITCode::isBaselineCode(getJITType()));
+ return (JITCode::isOptimizingJIT(replacement()->getJITType()) ? 1 : 0) + m_reoptimizationRetryCounter;
+}
+
+int32_t CodeBlock::codeTypeThresholdMultiplier() const
+{
+ if (codeType() == EvalCode)
+ return Options::evalThresholdMultiplier();
+
+ return 1;
+}
+
+double CodeBlock::optimizationThresholdScalingFactor()
+{
+ // This expression arises from doing a least-squares fit of
+ //
+ // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
+ //
+ // against the data points:
+ //
+ // x F[x_]
+ // 10 0.9 (smallest reasonable code block)
+ // 200 1.0 (typical small-ish code block)
+ // 320 1.2 (something I saw in 3d-cube that I wanted to optimize)
+ // 1268 5.0 (something I saw in 3d-cube that I didn't want to optimize)
+ // 4000 5.5 (random large size, used to cause the function to converge to a shallow curve of some sort)
+ // 10000 6.0 (similar to above)
+ //
+ // I achieve the minimization using the following Mathematica code:
+ //
+ // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
+ //
+ // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
+ //
+ // solution =
+ // Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
+ // {a, b, c, d}][[2]]
+ //
+ // And the code below (to initialize a, b, c, d) is generated by:
+ //
+ // Print["const double " <> ToString[#[[1]]] <> " = " <>
+ // If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
+ //
+ // We've long known the following to be true:
+ // - Small code blocks are cheap to optimize and so we should do it sooner rather
+ // than later.
+ // - Large code blocks are expensive to optimize and so we should postpone doing so,
+ // and sometimes have a large enough threshold that we never optimize them.
+ // - The difference in cost is not totally linear because (a) just invoking the
+ // DFG incurs some base cost and (b) for large code blocks there is enough slop
+ // in the correlation between instruction count and the actual compilation cost
+ // that for those large blocks, the instruction count should not have a strong
+ // influence on our threshold.
+ //
+ // I knew the goals but I didn't know how to achieve them; so I picked an interesting
+ // example where the heuristics were right (code block in 3d-cube with instruction
+ // count 320, which got compiled early as it should have been) and one where they were
+ // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
+ // to compile and didn't run often enough to warrant compilation in my opinion), and
+ // then threw in additional data points that represented my own guess of what our
+ // heuristics should do for some round-numbered examples.
+ //
+ // The expression to which I decided to fit the data arose because I started with an
+ // affine function, and then did two things: put the linear part in an Abs to ensure
+ // that the fit didn't end up choosing a negative value of c (which would result in
+ // the function turning over and going negative for large x) and I threw in a Sqrt
+ // term because Sqrt represents my intuition that the function should be more sensitive
+ // to small changes in small values of x, but less sensitive when x gets large.
+
+ // Note that the current fit essentially eliminates the linear portion of the
+ // expression (c == 0.0).
+ const double a = 0.061504;
+ const double b = 1.02406;
+ const double c = 0.0;
+ const double d = 0.825914;
+
+ double instructionCount = this->instructionCount();
+
+ ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
+
+ double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
+#if ENABLE(JIT_VERBOSE_OSR)
+ dataLog(*this, ": instruction count is ", instructionCount, ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(), "\n");
+#endif
+ return result * codeTypeThresholdMultiplier();
+}
+
+static int32_t clipThreshold(double threshold)
+{
+ if (threshold < 1.0)
+ return 1;
+
+ if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
+ return std::numeric_limits<int32_t>::max();
+
+ return static_cast<int32_t>(threshold);
+}
+
+int32_t CodeBlock::counterValueForOptimizeAfterWarmUp()
+{
+ return clipThreshold(
+ Options::thresholdForOptimizeAfterWarmUp() *
+ optimizationThresholdScalingFactor() *
+ (1 << reoptimizationRetryCounter()));
+}
+
+int32_t CodeBlock::counterValueForOptimizeAfterLongWarmUp()
+{
+ return clipThreshold(
+ Options::thresholdForOptimizeAfterLongWarmUp() *
+ optimizationThresholdScalingFactor() *
+ (1 << reoptimizationRetryCounter()));
+}
+
+int32_t CodeBlock::counterValueForOptimizeSoon()
+{
+ return clipThreshold(
+ Options::thresholdForOptimizeSoon() *
+ optimizationThresholdScalingFactor() *
+ (1 << reoptimizationRetryCounter()));
+}
+
+bool CodeBlock::checkIfOptimizationThresholdReached()
+{
+ return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
+}
+
+void CodeBlock::optimizeNextInvocation()
+{
+ m_jitExecuteCounter.setNewThreshold(0, this);
+}
+
+void CodeBlock::dontOptimizeAnytimeSoon()
+{
+ m_jitExecuteCounter.deferIndefinitely();
+}
+
+void CodeBlock::optimizeAfterWarmUp()
+{
+ m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterWarmUp(), this);
+}
+
+void CodeBlock::optimizeAfterLongWarmUp()
+{
+ m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterLongWarmUp(), this);
+}
+
+void CodeBlock::optimizeSoon()
+{
+ m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeSoon(), this);
+}
+
+#if ENABLE(JIT)
+uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
+{
+ ASSERT(getJITType() == JITCode::DFGJIT);
+ // Compute this the lame way so we don't saturate. This is called infrequently
+ // enough that this loop won't hurt us.
+ unsigned result = desiredThreshold;
+ for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
+ unsigned newResult = result << 1;
+ if (newResult < result)
+ return std::numeric_limits<uint32_t>::max();
+ result = newResult;
+ }
+ return result;
+}
+
+uint32_t CodeBlock::exitCountThresholdForReoptimization()
+{
+ return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
+}
+
+uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
+{
+ return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
+}
+
+bool CodeBlock::shouldReoptimizeNow()
+{
+ return osrExitCounter() >= exitCountThresholdForReoptimization();
+}
+
+bool CodeBlock::shouldReoptimizeFromLoopNow()
+{
+ return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
+}
+#endif
+
#if ENABLE(VALUE_PROFILER)
ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
{
@@ -3020,11 +3248,11 @@ void CodeBlock::tallyFrequentExitSites()
for (unsigned i = 0; i < m_dfgData->osrExit.size(); ++i) {
DFG::OSRExit& exit = m_dfgData->osrExit[i];
- if (!exit.considerAddingAsFrequentExitSite(this, profiledBlock))
+ if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
continue;
#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("OSR exit #", i, " (bc#", exit.m_codeOrigin.bytecodeIndex, ", @", exit.m_nodeIndex, ", ", DFG::exitKindToString(exit.m_kind), ") for ", *this, " occurred frequently: counting as frequent exit site.\n");
+ dataLog("OSR exit #", i, " (bc#", exit.m_codeOrigin.bytecodeIndex, ", ", exit.m_kind, ") for ", *this, " occurred frequently: counting as frequent exit site.\n");
#endif
}
}
@@ -3063,18 +3291,18 @@ void CodeBlock::dumpValueProfiles()
size_t CodeBlock::predictedMachineCodeSize()
{
- // This will be called from CodeBlock::CodeBlock before either m_globalData or the
+ // This will be called from CodeBlock::CodeBlock before either m_vm or the
// instructions have been initialized. It's OK to return 0 because what will really
// matter is the recomputation of this value when the slow path is triggered.
- if (!m_globalData)
+ if (!m_vm)
return 0;
- if (!m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT)
+ if (!m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
return 0; // It's as good of a prediction as we'll get.
// Be conservative: return a size that will be an overestimation 84% of the time.
- double multiplier = m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() +
- m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation();
+ double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() +
+ m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation();
// Be paranoid: silently reject bogus multipiers. Silently doing the "wrong" thing
// here is OK, since this whole method is just a heuristic.
@@ -3095,7 +3323,7 @@ size_t CodeBlock::predictedMachineCodeSize()
bool CodeBlock::usesOpcode(OpcodeID opcodeID)
{
- Interpreter* interpreter = globalData()->interpreter;
+ Interpreter* interpreter = vm()->interpreter;
Instruction* instructionsBegin = instructions().begin();
unsigned instructionCount = instructions().size();
@@ -3110,7 +3338,7 @@ bool CodeBlock::usesOpcode(OpcodeID opcodeID)
FOR_EACH_OPCODE_ID(DEFINE_OP)
#undef DEFINE_OP
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
}
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index eec95cac1..0b3d18e17 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2010, 2011, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
*
* Redistribution and use in source and binary forms, with or without
@@ -38,7 +38,6 @@
#include "CodeBlockHash.h"
#include "CodeOrigin.h"
#include "CodeType.h"
-#include "Comment.h"
#include "CompactJITCodeMap.h"
#include "DFGCodeBlocks.h"
#include "DFGCommon.h"
@@ -51,6 +50,7 @@
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "HandlerInfo.h"
+#include "ObjectAllocationProfile.h"
#include "Options.h"
#include "Instruction.h"
#include "JITCode.h"
@@ -61,7 +61,7 @@
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "LineInfo.h"
-#include "Nodes.h"
+#include "ProfilerCompilation.h"
#include "RegExpObject.h"
#include "ResolveOperation.h"
#include "StructureStubInfo.h"
@@ -77,1511 +77,1407 @@
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>
-// Set ENABLE_BYTECODE_COMMENTS to 1 to enable recording bytecode generator
-// comments for the bytecodes that it generates. This will allow
-// CodeBlock::dump() to provide some contextual info about the bytecodes.
-//
-// The way this comment system works is as follows:
-// 1. The BytecodeGenerator calls prependComment() with a constant comment
-// string in .text. The string must not be a stack or heap allocated
-// string.
-// 2. When the BytecodeGenerator's emitOpcode() is called, the last
-// prepended comment will be recorded with the PC of the opcode being
-// emitted. This comment is being recorded in the CodeBlock's
-// m_bytecodeComments.
-// 3. When CodeBlock::dump() is called, it will pair up the comments with
-// their corresponding bytecodes based on the bytecode and comment's
-// PC. If a matching pair is found, the comment will be printed after
-// the bytecode. If not, no comment is printed.
-//
-// NOTE: Enabling this will consume additional memory at runtime to store
-// the comments. Since these comments are only useful for VM debugging
-// (as opposed to app debugging), this feature is to be disabled by default,
-// and can be enabled as needed for VM development use only.
-
-#define ENABLE_BYTECODE_COMMENTS 0
-
namespace JSC {
- class DFGCodeBlocks;
- class ExecState;
- class LLIntOffsetsExtractor;
- class RepatchBuffer;
+class DFGCodeBlocks;
+class ExecState;
+class LLIntOffsetsExtractor;
+class RepatchBuffer;
- inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }
+inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }
- static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
+static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
- class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
- WTF_MAKE_FAST_ALLOCATED;
- friend class JIT;
- friend class LLIntOffsetsExtractor;
- public:
- enum CopyParsedBlockTag { CopyParsedBlock };
- protected:
- CodeBlock(CopyParsedBlockTag, CodeBlock& other);
+class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
+ WTF_MAKE_FAST_ALLOCATED;
+ friend class JIT;
+ friend class LLIntOffsetsExtractor;
+public:
+ enum CopyParsedBlockTag { CopyParsedBlock };
+protected:
+ CodeBlock(CopyParsedBlockTag, CodeBlock& other);
- CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSGlobalObject*, unsigned baseScopeDepth, PassRefPtr<SourceProvider>, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative);
+ CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSGlobalObject*, unsigned baseScopeDepth, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative);
- WriteBarrier<JSGlobalObject> m_globalObject;
- Heap* m_heap;
+ WriteBarrier<JSGlobalObject> m_globalObject;
+ Heap* m_heap;
- public:
- JS_EXPORT_PRIVATE virtual ~CodeBlock();
+public:
+ JS_EXPORT_PRIVATE virtual ~CodeBlock();
+
+ UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }
- CodeBlockHash hash() const;
- void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
- void dump(PrintStream&) const;
+ String inferredName() const;
+ CodeBlockHash hash() const;
+ String sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
+ String sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
+ void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
+ void dump(PrintStream&) const;
- int numParameters() const { return m_numParameters; }
- void setNumParameters(int newValue);
+ int numParameters() const { return m_numParameters; }
+ void setNumParameters(int newValue);
- int* addressOfNumParameters() { return &m_numParameters; }
- static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
+ int* addressOfNumParameters() { return &m_numParameters; }
+ static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
- CodeBlock* alternative() { return m_alternative.get(); }
- PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
- void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }
+ CodeBlock* alternative() { return m_alternative.get(); }
+ PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
+ void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }
- CodeSpecializationKind specializationKind() const
- {
- return specializationFromIsConstruct(m_isConstructor);
- }
+ CodeSpecializationKind specializationKind() const
+ {
+ return specializationFromIsConstruct(m_isConstructor);
+ }
#if ENABLE(JIT)
- CodeBlock* baselineVersion()
- {
- CodeBlock* result = replacement();
- if (!result)
- return 0; // This can happen if we're in the process of creating the baseline version.
- while (result->alternative())
- result = result->alternative();
- ASSERT(result);
- ASSERT(JITCode::isBaselineCode(result->getJITType()));
- return result;
- }
+ CodeBlock* baselineVersion()
+ {
+ CodeBlock* result = replacement();
+ if (!result)
+ return 0; // This can happen if we're in the process of creating the baseline version.
+ while (result->alternative())
+ result = result->alternative();
+ ASSERT(result);
+ ASSERT(JITCode::isBaselineCode(result->getJITType()));
+ return result;
+ }
+#else
+ CodeBlock* baselineVersion()
+ {
+ return this;
+ }
#endif
- void visitAggregate(SlotVisitor&);
+ void visitAggregate(SlotVisitor&);
- static void dumpStatistics();
+ static void dumpStatistics();
- void dumpBytecode();
- void dumpBytecode(unsigned bytecodeOffset);
- void printStructures(const Instruction*);
- void printStructure(const char* name, const Instruction*, int operand);
+ void dumpBytecode(PrintStream& = WTF::dataFile());
+ void dumpBytecode(PrintStream&, unsigned bytecodeOffset);
+ void printStructures(PrintStream&, const Instruction*);
+ void printStructure(PrintStream&, const char* name, const Instruction*, int operand);
- bool isStrictMode() const { return m_isStrictMode; }
-
- inline bool isKnownNotImmediate(int index)
- {
- if (index == m_thisRegister && !m_isStrictMode)
- return true;
+ bool isStrictMode() const { return m_isStrictMode; }
- if (isConstantRegisterIndex(index))
- return getConstant(index).isCell();
-
- return false;
- }
-
- ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
- {
- return index >= m_numVars;
- }
-
- void dumpBytecodeCommentAndNewLine(int location);
-#if ENABLE(BYTECODE_COMMENTS)
- const char* commentForBytecodeOffset(unsigned bytecodeOffset);
- void dumpBytecodeComments();
-#endif
-
- HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
- int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
- void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);
+ inline bool isKnownNotImmediate(int index)
+ {
+ if (index == m_thisRegister && !m_isStrictMode)
+ return true;
- uint32_t addResolve()
- {
- m_resolveOperations.grow(m_resolveOperations.size() + 1);
- return m_resolveOperations.size() - 1;
- }
- uint32_t addPutToBase()
- {
- m_putToBaseOperations.append(PutToBaseOperation(isStrictMode()));
- return m_putToBaseOperations.size() - 1;
- }
+ if (isConstantRegisterIndex(index))
+ return getConstant(index).isCell();
- ResolveOperations* resolveOperations(uint32_t i)
- {
- return &m_resolveOperations[i];
- }
+ return false;
+ }
- PutToBaseOperation* putToBaseOperation(uint32_t i)
- {
- return &m_putToBaseOperations[i];
- }
+ ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
+ {
+ return index >= m_numVars;
+ }
- size_t numberOfResolveOperations() const { return m_resolveOperations.size(); }
- size_t numberOfPutToBaseOperations() const { return m_putToBaseOperations.size(); }
+ HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
+ unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
+ unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
+ void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
+ int& startOffset, int& endOffset, unsigned& line, unsigned& column);
#if ENABLE(JIT)
- StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
- {
- return *(binarySearch<StructureStubInfo, void*, getStructureStubInfoReturnLocation>(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value()));
- }
+ StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
+ {
+ return *(binarySearch<StructureStubInfo, void*>(m_structureStubInfos, m_structureStubInfos.size(), returnAddress.value(), getStructureStubInfoReturnLocation));
+ }
- StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
- {
- return *(binarySearch<StructureStubInfo, unsigned, getStructureStubInfoBytecodeIndex>(m_structureStubInfos.begin(), m_structureStubInfos.size(), bytecodeIndex));
- }
+ StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
+ {
+ return *(binarySearch<StructureStubInfo, unsigned>(m_structureStubInfos, m_structureStubInfos.size(), bytecodeIndex, getStructureStubInfoBytecodeIndex));
+ }
- void resetStub(StructureStubInfo&);
+ void resetStub(StructureStubInfo&);
- ByValInfo& getByValInfo(unsigned bytecodeIndex)
- {
- return *(binarySearch<ByValInfo, unsigned, getByValInfoBytecodeIndex>(m_byValInfos.begin(), m_byValInfos.size(), bytecodeIndex));
- }
+ ByValInfo& getByValInfo(unsigned bytecodeIndex)
+ {
+ return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
+ }
- CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
- {
- return *(binarySearch<CallLinkInfo, void*, getCallLinkInfoReturnLocation>(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value()));
- }
+ CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
+ {
+ return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation));
+ }
- CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
- {
- ASSERT(JITCode::isBaselineCode(getJITType()));
- return *(binarySearch<CallLinkInfo, unsigned, getCallLinkInfoBytecodeIndex>(m_callLinkInfos.begin(), m_callLinkInfos.size(), bytecodeIndex));
- }
+ CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
+ {
+ ASSERT(JITCode::isBaselineCode(getJITType()));
+ return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex));
+ }
#endif // ENABLE(JIT)
#if ENABLE(LLINT)
- Instruction* adjustPCIfAtCallSite(Instruction*);
+ Instruction* adjustPCIfAtCallSite(Instruction*);
#endif
- unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);
+ unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);
#if ENABLE(JIT)
- unsigned bytecodeOffsetForCallAtIndex(unsigned index)
- {
- if (!m_rareData)
- return 1;
- Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
- if (!callIndices.size())
- return 1;
- ASSERT(index < m_rareData->m_callReturnIndexVector.size());
- return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
- }
+ unsigned bytecodeOffsetForCallAtIndex(unsigned index)
+ {
+ if (!m_rareData)
+ return 1;
+ Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callIndices = m_rareData->m_callReturnIndexVector;
+ if (!callIndices.size())
+ return 1;
+ // FIXME: Fix places in DFG that call out to C that don't set the CodeOrigin. https://bugs.webkit.org/show_bug.cgi?id=118315
+ ASSERT(index < m_rareData->m_callReturnIndexVector.size());
+ if (index >= m_rareData->m_callReturnIndexVector.size())
+ return 1;
+ return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
+ }
- void unlinkCalls();
+ void unlinkCalls();
- bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }
+ bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }
- void linkIncomingCall(CallLinkInfo* incoming)
- {
- m_incomingCalls.push(incoming);
- }
+ void linkIncomingCall(CallLinkInfo* incoming)
+ {
+ m_incomingCalls.push(incoming);
+ }
- bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
- {
- return m_incomingCalls.isOnList(incoming);
- }
+ bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
+ {
+ return m_incomingCalls.isOnList(incoming);
+ }
#endif // ENABLE(JIT)
#if ENABLE(LLINT)
- void linkIncomingCall(LLIntCallLinkInfo* incoming)
- {
- m_incomingLLIntCalls.push(incoming);
- }
+ void linkIncomingCall(LLIntCallLinkInfo* incoming)
+ {
+ m_incomingLLIntCalls.push(incoming);
+ }
#endif // ENABLE(LLINT)
- void unlinkIncomingCalls();
+ void unlinkIncomingCalls();
#if ENABLE(DFG_JIT) || ENABLE(LLINT)
- void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
- {
- m_jitCodeMap = jitCodeMap;
- }
- CompactJITCodeMap* jitCodeMap()
- {
- return m_jitCodeMap.get();
- }
+ void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
+ {
+ m_jitCodeMap = jitCodeMap;
+ }
+ CompactJITCodeMap* jitCodeMap()
+ {
+ return m_jitCodeMap.get();
+ }
#endif
#if ENABLE(DFG_JIT)
- void createDFGDataIfNecessary()
- {
- if (!!m_dfgData)
- return;
+ void createDFGDataIfNecessary()
+ {
+ if (!!m_dfgData)
+ return;
- m_dfgData = adoptPtr(new DFGData);
- }
+ m_dfgData = adoptPtr(new DFGData);
+ }
- DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
- {
- createDFGDataIfNecessary();
- DFG::OSREntryData entry;
- entry.m_bytecodeIndex = bytecodeIndex;
- entry.m_machineCodeOffset = machineCodeOffset;
- m_dfgData->osrEntry.append(entry);
- return &m_dfgData->osrEntry.last();
- }
- unsigned numberOfDFGOSREntries() const
- {
- if (!m_dfgData)
- return 0;
- return m_dfgData->osrEntry.size();
- }
- DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
- DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
- {
- if (!m_dfgData)
- return 0;
- if (m_dfgData->osrEntry.isEmpty())
- return 0;
- DFG::OSREntryData* result = binarySearch<
- DFG::OSREntryData, unsigned, DFG::getOSREntryDataBytecodeIndex>(
- m_dfgData->osrEntry.begin(), m_dfgData->osrEntry.size(),
- bytecodeIndex, WTF::KeyMustNotBePresentInArray);
- if (result->m_bytecodeIndex != bytecodeIndex)
- return 0;
- return result;
- }
+ void saveCompilation(PassRefPtr<Profiler::Compilation> compilation)
+ {
+ createDFGDataIfNecessary();
+ m_dfgData->compilation = compilation;
+ }
- unsigned appendOSRExit(const DFG::OSRExit& osrExit)
- {
- createDFGDataIfNecessary();
- unsigned result = m_dfgData->osrExit.size();
- m_dfgData->osrExit.append(osrExit);
- return result;
- }
+ Profiler::Compilation* compilation()
+ {
+ if (!m_dfgData)
+ return 0;
+ return m_dfgData->compilation.get();
+ }
- DFG::OSRExit& lastOSRExit()
- {
- return m_dfgData->osrExit.last();
- }
+ DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
+ {
+ createDFGDataIfNecessary();
+ DFG::OSREntryData entry;
+ entry.m_bytecodeIndex = bytecodeIndex;
+ entry.m_machineCodeOffset = machineCodeOffset;
+ m_dfgData->osrEntry.append(entry);
+ return &m_dfgData->osrEntry.last();
+ }
+ unsigned numberOfDFGOSREntries() const
+ {
+ if (!m_dfgData)
+ return 0;
+ return m_dfgData->osrEntry.size();
+ }
+ DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
+ DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
+ {
+ if (!m_dfgData)
+ return 0;
+ return tryBinarySearch<DFG::OSREntryData, unsigned>(
+ m_dfgData->osrEntry, m_dfgData->osrEntry.size(), bytecodeIndex,
+ DFG::getOSREntryDataBytecodeIndex);
+ }
- unsigned appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
- {
- createDFGDataIfNecessary();
- unsigned result = m_dfgData->speculationRecovery.size();
- m_dfgData->speculationRecovery.append(recovery);
- return result;
- }
+ unsigned appendOSRExit(const DFG::OSRExit& osrExit)
+ {
+ createDFGDataIfNecessary();
+ unsigned result = m_dfgData->osrExit.size();
+ m_dfgData->osrExit.append(osrExit);
+ return result;
+ }
- unsigned appendWatchpoint(const JumpReplacementWatchpoint& watchpoint)
- {
- createDFGDataIfNecessary();
- unsigned result = m_dfgData->watchpoints.size();
- m_dfgData->watchpoints.append(watchpoint);
- return result;
- }
+ DFG::OSRExit& lastOSRExit()
+ {
+ return m_dfgData->osrExit.last();
+ }
- unsigned numberOfOSRExits()
- {
- if (!m_dfgData)
- return 0;
- return m_dfgData->osrExit.size();
- }
+ unsigned appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
+ {
+ createDFGDataIfNecessary();
+ unsigned result = m_dfgData->speculationRecovery.size();
+ m_dfgData->speculationRecovery.append(recovery);
+ return result;
+ }
- unsigned numberOfSpeculationRecoveries()
- {
- if (!m_dfgData)
- return 0;
- return m_dfgData->speculationRecovery.size();
- }
+ unsigned appendWatchpoint(const JumpReplacementWatchpoint& watchpoint)
+ {
+ createDFGDataIfNecessary();
+ unsigned result = m_dfgData->watchpoints.size();
+ m_dfgData->watchpoints.append(watchpoint);
+ return result;
+ }
- unsigned numberOfWatchpoints()
- {
- if (!m_dfgData)
- return 0;
- return m_dfgData->watchpoints.size();
- }
+ unsigned numberOfOSRExits()
+ {
+ if (!m_dfgData)
+ return 0;
+ return m_dfgData->osrExit.size();
+ }
- DFG::OSRExit& osrExit(unsigned index)
- {
- return m_dfgData->osrExit[index];
- }
+ unsigned numberOfSpeculationRecoveries()
+ {
+ if (!m_dfgData)
+ return 0;
+ return m_dfgData->speculationRecovery.size();
+ }
- DFG::SpeculationRecovery& speculationRecovery(unsigned index)
- {
- return m_dfgData->speculationRecovery[index];
- }
+ unsigned numberOfWatchpoints()
+ {
+ if (!m_dfgData)
+ return 0;
+ return m_dfgData->watchpoints.size();
+ }
- JumpReplacementWatchpoint& watchpoint(unsigned index)
- {
- return m_dfgData->watchpoints[index];
- }
+ DFG::OSRExit& osrExit(unsigned index)
+ {
+ return m_dfgData->osrExit[index];
+ }
- void appendWeakReference(JSCell* target)
- {
- createDFGDataIfNecessary();
- m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*globalData(), ownerExecutable(), target));
- }
+ DFG::SpeculationRecovery& speculationRecovery(unsigned index)
+ {
+ return m_dfgData->speculationRecovery[index];
+ }
- void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
- {
- createDFGDataIfNecessary();
- m_dfgData->transitions.append(
- WeakReferenceTransition(*globalData(), ownerExecutable(), codeOrigin, from, to));
- }
+ JumpReplacementWatchpoint& watchpoint(unsigned index)
+ {
+ return m_dfgData->watchpoints[index];
+ }
- DFG::MinifiedGraph& minifiedDFG()
- {
- createDFGDataIfNecessary();
- return m_dfgData->minifiedDFG;
- }
+ void appendWeakReference(JSCell* target)
+ {
+ createDFGDataIfNecessary();
+ m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*vm(), ownerExecutable(), target));
+ }
- DFG::VariableEventStream& variableEventStream()
- {
- createDFGDataIfNecessary();
- return m_dfgData->variableEventStream;
- }
+ void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
+ {
+ createDFGDataIfNecessary();
+ m_dfgData->transitions.append(
+ WeakReferenceTransition(*vm(), ownerExecutable(), codeOrigin, from, to));
+ }
+
+ DFG::MinifiedGraph& minifiedDFG()
+ {
+ createDFGDataIfNecessary();
+ return m_dfgData->minifiedDFG;
+ }
+
+ DFG::VariableEventStream& variableEventStream()
+ {
+ createDFGDataIfNecessary();
+ return m_dfgData->variableEventStream;
+ }
#endif
- unsigned bytecodeOffset(Instruction* returnAddress)
- {
- ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
- return static_cast<Instruction*>(returnAddress) - instructions().begin();
- }
+ unsigned bytecodeOffset(Instruction* returnAddress)
+ {
+ RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
+ return static_cast<Instruction*>(returnAddress) - instructions().begin();
+ }
- bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }
+ bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }
- unsigned numberOfInstructions() const { return m_instructions.size(); }
- RefCountedArray<Instruction>& instructions() { return m_instructions; }
- const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
+ unsigned numberOfInstructions() const { return m_instructions.size(); }
+ RefCountedArray<Instruction>& instructions() { return m_instructions; }
+ const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
-#if ENABLE(BYTECODE_COMMENTS)
- Vector<Comment>& bytecodeComments() { return m_bytecodeComments; }
-#endif
-
- size_t predictedMachineCodeSize();
+ size_t predictedMachineCodeSize();
- bool usesOpcode(OpcodeID);
+ bool usesOpcode(OpcodeID);
- unsigned instructionCount() { return m_instructions.size(); }
+ unsigned instructionCount() { return m_instructions.size(); }
- int argumentIndexAfterCapture(size_t argument);
+ int argumentIndexAfterCapture(size_t argument);
#if ENABLE(JIT)
- void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
- {
- m_jitCode = code;
- m_jitCodeWithArityCheck = codeWithArityCheck;
+ void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
+ {
+ m_jitCode = code;
+ m_jitCodeWithArityCheck = codeWithArityCheck;
#if ENABLE(DFG_JIT)
- if (m_jitCode.jitType() == JITCode::DFGJIT) {
- createDFGDataIfNecessary();
- m_globalData->heap.m_dfgCodeBlocks.m_set.add(this);
- }
+ if (m_jitCode.jitType() == JITCode::DFGJIT) {
+ createDFGDataIfNecessary();
+ m_vm->heap.m_dfgCodeBlocks.m_set.add(this);
+ }
#endif
+ }
+ JITCode& getJITCode() { return m_jitCode; }
+ MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
+ JITCode::JITType getJITType() const { return m_jitCode.jitType(); }
+ ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
+ virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex) = 0;
+ void jettison();
+ enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };
+ JITCompilationResult jitCompile(ExecState* exec)
+ {
+ if (getJITType() != JITCode::InterpreterThunk) {
+ ASSERT(getJITType() == JITCode::BaselineJIT);
+ return AlreadyCompiled;
}
- JITCode& getJITCode() { return m_jitCode; }
- MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
- JITCode::JITType getJITType() const { return m_jitCode.jitType(); }
- ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
- virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex) = 0;
- virtual void jettison() = 0;
- enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };
- JITCompilationResult jitCompile(ExecState* exec)
- {
- if (getJITType() != JITCode::InterpreterThunk) {
- ASSERT(getJITType() == JITCode::BaselineJIT);
- return AlreadyCompiled;
- }
#if ENABLE(JIT)
- if (jitCompileImpl(exec))
- return CompiledSuccessfully;
- return CouldNotCompile;
+ if (jitCompileImpl(exec))
+ return CompiledSuccessfully;
+ return CouldNotCompile;
#else
- UNUSED_PARAM(exec);
- return CouldNotCompile;
+ UNUSED_PARAM(exec);
+ return CouldNotCompile;
#endif
- }
- virtual CodeBlock* replacement() = 0;
+ }
+ virtual CodeBlock* replacement() = 0;
- virtual DFG::CapabilityLevel canCompileWithDFGInternal() = 0;
- DFG::CapabilityLevel canCompileWithDFG()
- {
- DFG::CapabilityLevel result = canCompileWithDFGInternal();
- m_canCompileWithDFGState = result;
- return result;
- }
- DFG::CapabilityLevel canCompileWithDFGState() { return m_canCompileWithDFGState; }
+ virtual DFG::CapabilityLevel canCompileWithDFGInternal() = 0;
+ DFG::CapabilityLevel canCompileWithDFG()
+ {
+ DFG::CapabilityLevel result = canCompileWithDFGInternal();
+ m_canCompileWithDFGState = result;
+ return result;
+ }
+ DFG::CapabilityLevel canCompileWithDFGState() { return m_canCompileWithDFGState; }
- bool hasOptimizedReplacement()
- {
- ASSERT(JITCode::isBaselineCode(getJITType()));
- bool result = replacement()->getJITType() > getJITType();
+ bool hasOptimizedReplacement()
+ {
+ ASSERT(JITCode::isBaselineCode(getJITType()));
+ bool result = replacement()->getJITType() > getJITType();
#if !ASSERT_DISABLED
- if (result)
- ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
- else {
- ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
- ASSERT(replacement() == this);
- }
-#endif
- return result;
+ if (result)
+ ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
+ else {
+ ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
+ ASSERT(replacement() == this);
}
+#endif
+ return result;
+ }
#else
- JITCode::JITType getJITType() const { return JITCode::BaselineJIT; }
+ JITCode::JITType getJITType() const { return JITCode::BaselineJIT; }
#endif
- ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
+ ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
- void setGlobalData(JSGlobalData* globalData) { m_globalData = globalData; }
- JSGlobalData* globalData() { return m_globalData; }
+ void setVM(VM* vm) { m_vm = vm; }
+ VM* vm() { return m_vm; }
- void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
- int thisRegister() const { return m_thisRegister; }
+ void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
+ int thisRegister() const { return m_thisRegister; }
- bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
- bool usesEval() const { return m_unlinkedCode->usesEval(); }
+ bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
+ bool usesEval() const { return m_unlinkedCode->usesEval(); }
- void setArgumentsRegister(int argumentsRegister)
- {
- ASSERT(argumentsRegister != -1);
- m_argumentsRegister = argumentsRegister;
- ASSERT(usesArguments());
- }
- int argumentsRegister() const
- {
- ASSERT(usesArguments());
- return m_argumentsRegister;
- }
- int uncheckedArgumentsRegister()
- {
- if (!usesArguments())
- return InvalidVirtualRegister;
- return argumentsRegister();
- }
- void setActivationRegister(int activationRegister)
- {
- m_activationRegister = activationRegister;
- }
- int activationRegister() const
- {
- ASSERT(needsFullScopeChain());
- return m_activationRegister;
- }
- int uncheckedActivationRegister()
- {
- if (!needsFullScopeChain())
- return InvalidVirtualRegister;
- return activationRegister();
- }
- bool usesArguments() const { return m_argumentsRegister != -1; }
-
- bool needsActivation() const
- {
- return needsFullScopeChain() && codeType() != GlobalCode;
- }
+ void setArgumentsRegister(int argumentsRegister)
+ {
+ ASSERT(argumentsRegister != -1);
+ m_argumentsRegister = argumentsRegister;
+ ASSERT(usesArguments());
+ }
+ int argumentsRegister() const
+ {
+ ASSERT(usesArguments());
+ return m_argumentsRegister;
+ }
+ int uncheckedArgumentsRegister()
+ {
+ if (!usesArguments())
+ return InvalidVirtualRegister;
+ return argumentsRegister();
+ }
+ void setActivationRegister(int activationRegister)
+ {
+ m_activationRegister = activationRegister;
+ }
+ int activationRegister() const
+ {
+ ASSERT(needsFullScopeChain());
+ return m_activationRegister;
+ }
+ int uncheckedActivationRegister()
+ {
+ if (!needsFullScopeChain())
+ return InvalidVirtualRegister;
+ return activationRegister();
+ }
+ bool usesArguments() const { return m_argumentsRegister != -1; }
- bool isCaptured(int operand, InlineCallFrame* inlineCallFrame = 0) const
- {
- if (inlineCallFrame && !operandIsArgument(operand))
- return inlineCallFrame->capturedVars.get(operand);
+ bool needsActivation() const
+ {
+ return needsFullScopeChain() && codeType() != GlobalCode;
+ }
- if (operandIsArgument(operand))
- return usesArguments();
+ bool isCaptured(int operand, InlineCallFrame* inlineCallFrame = 0) const
+ {
+ if (operandIsArgument(operand))
+ return operandToArgument(operand) && usesArguments();
- // The activation object isn't in the captured region, but it's "captured"
- // in the sense that stores to its location can be observed indirectly.
- if (needsActivation() && operand == activationRegister())
- return true;
+ if (inlineCallFrame)
+ return inlineCallFrame->capturedVars.get(operand);
- // Ditto for the arguments object.
- if (usesArguments() && operand == argumentsRegister())
- return true;
+ // The activation object isn't in the captured region, but it's "captured"
+ // in the sense that stores to its location can be observed indirectly.
+ if (needsActivation() && operand == activationRegister())
+ return true;
- // Ditto for the arguments object.
- if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
- return true;
+ // Ditto for the arguments object.
+ if (usesArguments() && operand == argumentsRegister())
+ return true;
- // We're in global code so there are no locals to capture
- if (!symbolTable())
- return false;
+ // Ditto for the arguments object.
+ if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
+ return true;
- return operand >= symbolTable()->captureStart()
- && operand < symbolTable()->captureEnd();
- }
+ // We're in global code so there are no locals to capture
+ if (!symbolTable())
+ return false;
- CodeType codeType() const { return m_unlinkedCode->codeType(); }
+ return operand >= symbolTable()->captureStart()
+ && operand < symbolTable()->captureEnd();
+ }
- SourceProvider* source() const { return m_source.get(); }
- unsigned sourceOffset() const { return m_sourceOffset; }
+ CodeType codeType() const { return m_unlinkedCode->codeType(); }
- size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
- unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }
+ SourceProvider* source() const { return m_source.get(); }
+ unsigned sourceOffset() const { return m_sourceOffset; }
+ unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }
- void createActivation(CallFrame*);
+ size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
+ unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }
- void clearEvalCache();
+ void createActivation(CallFrame*);
+
+ void clearEvalCache();
- String nameForRegister(int registerNumber);
+ String nameForRegister(int registerNumber);
#if ENABLE(JIT)
- void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
- size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
- StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }
+ void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
+ size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
+ StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }
- void setNumberOfByValInfos(size_t size) { m_byValInfos.grow(size); }
- size_t numberOfByValInfos() const { return m_byValInfos.size(); }
- ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
+ void setNumberOfByValInfos(size_t size) { m_byValInfos.grow(size); }
+ size_t numberOfByValInfos() const { return m_byValInfos.size(); }
+ ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
- void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
- size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
- CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
+ void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
+ size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
+ CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
#endif
#if ENABLE(VALUE_PROFILER)
- unsigned numberOfArgumentValueProfiles()
- {
- ASSERT(m_numParameters >= 0);
- ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
- return m_argumentValueProfiles.size();
- }
- ValueProfile* valueProfileForArgument(unsigned argumentIndex)
- {
- ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
- ASSERT(result->m_bytecodeOffset == -1);
- return result;
- }
+ unsigned numberOfArgumentValueProfiles()
+ {
+ ASSERT(m_numParameters >= 0);
+ ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
+ return m_argumentValueProfiles.size();
+ }
+ ValueProfile* valueProfileForArgument(unsigned argumentIndex)
+ {
+ ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
+ ASSERT(result->m_bytecodeOffset == -1);
+ return result;
+ }
- unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
- ValueProfile* valueProfile(int index)
- {
- ValueProfile* result = &m_valueProfiles[index];
- ASSERT(result->m_bytecodeOffset != -1);
- return result;
- }
- ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
- {
- ValueProfile* result = WTF::genericBinarySearch<ValueProfile, int, getValueProfileBytecodeOffset>(m_valueProfiles, m_valueProfiles.size(), bytecodeOffset);
- ASSERT(result->m_bytecodeOffset != -1);
- ASSERT(instructions()[bytecodeOffset + opcodeLength(
- m_globalData->interpreter->getOpcodeID(
- instructions()[
- bytecodeOffset].u.opcode)) - 1].u.profile == result);
- return result;
- }
- SpeculatedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
- {
- return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction();
- }
-
- unsigned totalNumberOfValueProfiles()
- {
- return numberOfArgumentValueProfiles() + numberOfValueProfiles();
- }
- ValueProfile* getFromAllValueProfiles(unsigned index)
- {
- if (index < numberOfArgumentValueProfiles())
- return valueProfileForArgument(index);
- return valueProfile(index - numberOfArgumentValueProfiles());
- }
+ unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
+ ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
+ ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
+ {
+ ValueProfile* result = binarySearch<ValueProfile, int>(
+ m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
+ getValueProfileBytecodeOffset<ValueProfile>);
+ ASSERT(result->m_bytecodeOffset != -1);
+ ASSERT(instructions()[bytecodeOffset + opcodeLength(
+ m_vm->interpreter->getOpcodeID(
+ instructions()[
+ bytecodeOffset].u.opcode)) - 1].u.profile == result);
+ return result;
+ }
+ SpeculatedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
+ {
+ return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction();
+ }
- RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
- {
- m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
- return &m_rareCaseProfiles.last();
- }
- unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
- RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
- RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
- {
- return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset);
- }
+ unsigned totalNumberOfValueProfiles()
+ {
+ return numberOfArgumentValueProfiles() + numberOfValueProfiles();
+ }
+ ValueProfile* getFromAllValueProfiles(unsigned index)
+ {
+ if (index < numberOfArgumentValueProfiles())
+ return valueProfileForArgument(index);
+ return valueProfile(index - numberOfArgumentValueProfiles());
+ }
- bool likelyToTakeSlowCase(int bytecodeOffset)
- {
- if (!numberOfRareCaseProfiles())
- return false;
- unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- return value >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold();
- }
+ RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
+ {
+ m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+ return &m_rareCaseProfiles.last();
+ }
+ unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
+ RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
+ RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
+ {
+ return tryBinarySearch<RareCaseProfile, int>(
+ m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
+ getRareCaseProfileBytecodeOffset);
+ }
- bool couldTakeSlowCase(int bytecodeOffset)
- {
- if (!numberOfRareCaseProfiles())
- return false;
- unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- return value >= Options::couldTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold();
- }
+ bool likelyToTakeSlowCase(int bytecodeOffset)
+ {
+ if (!numberOfRareCaseProfiles())
+ return false;
+ unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ return value >= Options::likelyToTakeSlowCaseMinimumCount();
+ }
- RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
- {
- m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
- return &m_specialFastCaseProfiles.last();
- }
- unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
- RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
- RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
- {
- return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset);
- }
+ bool couldTakeSlowCase(int bytecodeOffset)
+ {
+ if (!numberOfRareCaseProfiles())
+ return false;
+ unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ return value >= Options::couldTakeSlowCaseMinimumCount();
+ }
- bool likelyToTakeSpecialFastCase(int bytecodeOffset)
- {
- if (!numberOfRareCaseProfiles())
- return false;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold();
- }
+ RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
+ {
+ m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+ return &m_specialFastCaseProfiles.last();
+ }
+ unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
+ RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
+ RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
+ {
+ return tryBinarySearch<RareCaseProfile, int>(
+ m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
+ getRareCaseProfileBytecodeOffset);
+ }
- bool couldTakeSpecialFastCase(int bytecodeOffset)
- {
- if (!numberOfRareCaseProfiles())
- return false;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount() && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold();
- }
+ bool likelyToTakeSpecialFastCase(int bytecodeOffset)
+ {
+ if (!numberOfRareCaseProfiles())
+ return false;
+ unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
+ }
- bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
- {
- if (!numberOfRareCaseProfiles())
- return false;
- unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned value = slowCaseCount - specialFastCaseCount;
- return value >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold();
- }
+ bool couldTakeSpecialFastCase(int bytecodeOffset)
+ {
+ if (!numberOfRareCaseProfiles())
+ return false;
+ unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
+ }
- bool likelyToTakeAnySlowCase(int bytecodeOffset)
- {
- if (!numberOfRareCaseProfiles())
- return false;
- unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned value = slowCaseCount + specialFastCaseCount;
- return value >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold();
- }
+ bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
+ {
+ if (!numberOfRareCaseProfiles())
+ return false;
+ unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned value = slowCaseCount - specialFastCaseCount;
+ return value >= Options::likelyToTakeSlowCaseMinimumCount();
+ }
- unsigned executionEntryCount() const { return m_executionEntryCount; }
-
- unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
- const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
- ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
- {
- m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
- return &m_arrayProfiles.last();
- }
- ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
- ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
+ bool likelyToTakeAnySlowCase(int bytecodeOffset)
+ {
+ if (!numberOfRareCaseProfiles())
+ return false;
+ unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned value = slowCaseCount + specialFastCaseCount;
+ return value >= Options::likelyToTakeSlowCaseMinimumCount();
+ }
- unsigned numberOfArrayAllocationProfiles() const { return m_arrayAllocationProfiles.size(); }
- ArrayAllocationProfile* addArrayAllocationProfile()
- {
- m_arrayAllocationProfiles.append(ArrayAllocationProfile());
- return &m_arrayAllocationProfiles.last();
- }
+ unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
+ const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
+ ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
+ {
+ m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
+ return &m_arrayProfiles.last();
+ }
+ ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
+ ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
#endif
- // Exception handling support
-
- size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
- void allocateHandlers(const Vector<UnlinkedHandlerInfo>& unlinkedHandlers)
- {
- size_t count = unlinkedHandlers.size();
- if (!count)
- return;
- createRareDataIfNecessary();
- m_rareData->m_exceptionHandlers.resize(count);
- for (size_t i = 0; i < count; ++i) {
- m_rareData->m_exceptionHandlers[i].start = unlinkedHandlers[i].start;
- m_rareData->m_exceptionHandlers[i].end = unlinkedHandlers[i].end;
- m_rareData->m_exceptionHandlers[i].target = unlinkedHandlers[i].target;
- m_rareData->m_exceptionHandlers[i].scopeDepth = unlinkedHandlers[i].scopeDepth;
- }
+ // Exception handling support
+ size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
+ void allocateHandlers(const Vector<UnlinkedHandlerInfo>& unlinkedHandlers)
+ {
+ size_t count = unlinkedHandlers.size();
+ if (!count)
+ return;
+ createRareDataIfNecessary();
+ m_rareData->m_exceptionHandlers.resize(count);
+ for (size_t i = 0; i < count; ++i) {
+ m_rareData->m_exceptionHandlers[i].start = unlinkedHandlers[i].start;
+ m_rareData->m_exceptionHandlers[i].end = unlinkedHandlers[i].end;
+ m_rareData->m_exceptionHandlers[i].target = unlinkedHandlers[i].target;
+ m_rareData->m_exceptionHandlers[i].scopeDepth = unlinkedHandlers[i].scopeDepth;
}
- HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
- bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
+ }
+ HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
+
+ bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
#if ENABLE(JIT)
- Vector<CallReturnOffsetToBytecodeOffset>& callReturnIndexVector()
- {
- createRareDataIfNecessary();
- return m_rareData->m_callReturnIndexVector;
- }
+ Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callReturnIndexVector()
+ {
+ createRareDataIfNecessary();
+ return m_rareData->m_callReturnIndexVector;
+ }
#endif
#if ENABLE(DFG_JIT)
- SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
- {
- createRareDataIfNecessary();
- return m_rareData->m_inlineCallFrames;
- }
+ SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
+ {
+ createRareDataIfNecessary();
+ return m_rareData->m_inlineCallFrames;
+ }
- Vector<CodeOriginAtCallReturnOffset>& codeOrigins()
- {
- createRareDataIfNecessary();
- return m_rareData->m_codeOrigins;
- }
+ Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow>& codeOrigins()
+ {
+ createRareDataIfNecessary();
+ return m_rareData->m_codeOrigins;
+ }
- // Having code origins implies that there has been some inlining.
- bool hasCodeOrigins()
- {
- return m_rareData && !!m_rareData->m_codeOrigins.size();
- }
+ // Having code origins implies that there has been some inlining.
+ bool hasCodeOrigins()
+ {
+ return m_rareData && !!m_rareData->m_codeOrigins.size();
+ }
- bool codeOriginForReturn(ReturnAddressPtr, CodeOrigin&);
+ bool codeOriginForReturn(ReturnAddressPtr, CodeOrigin&);
- CodeOrigin codeOrigin(unsigned index)
- {
- ASSERT(m_rareData);
- return m_rareData->m_codeOrigins[index].codeOrigin;
- }
+ bool canGetCodeOrigin(unsigned index)
+ {
+ if (!m_rareData)
+ return false;
+ return m_rareData->m_codeOrigins.size() > index;
+ }
- bool addFrequentExitSite(const DFG::FrequentExitSite& site)
- {
- ASSERT(JITCode::isBaselineCode(getJITType()));
- return m_exitProfile.add(site);
- }
+ CodeOrigin codeOrigin(unsigned index)
+ {
+ RELEASE_ASSERT(m_rareData);
+ return m_rareData->m_codeOrigins[index].codeOrigin;
+ }
+
+ bool addFrequentExitSite(const DFG::FrequentExitSite& site)
+ {
+ ASSERT(JITCode::isBaselineCode(getJITType()));
+ return m_exitProfile.add(site);
+ }
+
+ bool hasExitSite(const DFG::FrequentExitSite& site) const { return m_exitProfile.hasExitSite(site); }
- DFG::ExitProfile& exitProfile() { return m_exitProfile; }
+ DFG::ExitProfile& exitProfile() { return m_exitProfile; }
- CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
- {
- return m_lazyOperandValueProfiles;
- }
+ CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
+ {
+ return m_lazyOperandValueProfiles;
+ }
#endif
- // Constant Pool
+ // Constant Pool
- size_t numberOfIdentifiers() const { return m_identifiers.size(); }
- void addIdentifier(const Identifier& i) { return m_identifiers.append(i); }
- Identifier& identifier(int index) { return m_identifiers[index]; }
+ size_t numberOfIdentifiers() const { return m_identifiers.size(); }
+ void addIdentifier(const Identifier& i) { return m_identifiers.append(i); }
+ Identifier& identifier(int index) { return m_identifiers[index]; }
- size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
- unsigned addConstant(JSValue v)
- {
- unsigned result = m_constantRegisters.size();
- m_constantRegisters.append(WriteBarrier<Unknown>());
- m_constantRegisters.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), v);
- return result;
- }
+ size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
+ unsigned addConstant(JSValue v)
+ {
+ unsigned result = m_constantRegisters.size();
+ m_constantRegisters.append(WriteBarrier<Unknown>());
+ m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
+ return result;
+ }
- unsigned addOrFindConstant(JSValue);
- WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
- ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
- ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
+ unsigned addOrFindConstant(JSValue);
+ WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
+ ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
+ ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
- unsigned addFunctionDecl(FunctionExecutable* n)
- {
- unsigned size = m_functionDecls.size();
- m_functionDecls.append(WriteBarrier<FunctionExecutable>());
- m_functionDecls.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
- return size;
- }
- FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
- int numberOfFunctionDecls() { return m_functionDecls.size(); }
- unsigned addFunctionExpr(FunctionExecutable* n)
- {
- unsigned size = m_functionExprs.size();
- m_functionExprs.append(WriteBarrier<FunctionExecutable>());
- m_functionExprs.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
- return size;
- }
- FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
+ FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
+ int numberOfFunctionDecls() { return m_functionDecls.size(); }
+ FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
- RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }
+ RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }
- unsigned numberOfConstantBuffers() const
- {
- if (!m_rareData)
- return 0;
- return m_rareData->m_constantBuffers.size();
- }
- unsigned addConstantBuffer(const Vector<JSValue>& buffer)
- {
- createRareDataIfNecessary();
- unsigned size = m_rareData->m_constantBuffers.size();
- m_rareData->m_constantBuffers.append(buffer);
- return size;
- }
+ unsigned numberOfConstantBuffers() const
+ {
+ if (!m_rareData)
+ return 0;
+ return m_rareData->m_constantBuffers.size();
+ }
+ unsigned addConstantBuffer(const Vector<JSValue>& buffer)
+ {
+ createRareDataIfNecessary();
+ unsigned size = m_rareData->m_constantBuffers.size();
+ m_rareData->m_constantBuffers.append(buffer);
+ return size;
+ }
- Vector<JSValue>& constantBufferAsVector(unsigned index)
- {
- ASSERT(m_rareData);
- return m_rareData->m_constantBuffers[index];
- }
- JSValue* constantBuffer(unsigned index)
- {
- return constantBufferAsVector(index).data();
- }
+ Vector<JSValue>& constantBufferAsVector(unsigned index)
+ {
+ ASSERT(m_rareData);
+ return m_rareData->m_constantBuffers[index];
+ }
+ JSValue* constantBuffer(unsigned index)
+ {
+ return constantBufferAsVector(index).data();
+ }
- JSGlobalObject* globalObject() { return m_globalObject.get(); }
+ JSGlobalObject* globalObject() { return m_globalObject.get(); }
- JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
- {
- if (!codeOrigin.inlineCallFrame)
- return globalObject();
- // FIXME: if we ever inline based on executable not function, this code will need to change.
- return codeOrigin.inlineCallFrame->callee->scope()->globalObject();
- }
+ JSGlobalObject* globalObjectFor(CodeOrigin);
- // Jump Tables
+ // Jump Tables
- size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
- SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
- SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }
+ size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
+ SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
+ SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }
- size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
- SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
- SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }
+ size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
+ SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
+ SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }
- size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
- StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
- StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
+ size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
+ StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
+ StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
- SharedSymbolTable* symbolTable() const { return m_unlinkedCode->symbolTable(); }
+ SharedSymbolTable* symbolTable() const { return m_unlinkedCode->symbolTable(); }
- EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
+ EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
- enum ShrinkMode {
- // Shrink prior to generating machine code that may point directly into vectors.
- EarlyShrink,
+ enum ShrinkMode {
+ // Shrink prior to generating machine code that may point directly into vectors.
+ EarlyShrink,
- // Shrink after generating machine code, and after possibly creating new vectors
- // and appending to others. At this time it is not safe to shrink certain vectors
- // because we would have generated machine code that references them directly.
- LateShrink
- };
- void shrinkToFit(ShrinkMode);
-
- void copyPostParseDataFrom(CodeBlock* alternative);
- void copyPostParseDataFromAlternative();
-
- // Functions for controlling when JITting kicks in, in a mixed mode
- // execution world.
-
- bool checkIfJITThresholdReached()
- {
- return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
- }
-
- void dontJITAnytimeSoon()
- {
- m_llintExecuteCounter.deferIndefinitely();
- }
-
- void jitAfterWarmUp()
- {
- m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
- }
+ // Shrink after generating machine code, and after possibly creating new vectors
+ // and appending to others. At this time it is not safe to shrink certain vectors
+ // because we would have generated machine code that references them directly.
+ LateShrink
+ };
+ void shrinkToFit(ShrinkMode);
- void jitSoon()
- {
- m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
- }
+ void copyPostParseDataFrom(CodeBlock* alternative);
+ void copyPostParseDataFromAlternative();
- const ExecutionCounter& llintExecuteCounter() const
- {
- return m_llintExecuteCounter;
- }
+ // Functions for controlling when JITting kicks in, in a mixed mode
+ // execution world.
- // Functions for controlling when tiered compilation kicks in. This
- // controls both when the optimizing compiler is invoked and when OSR
- // entry happens. Two triggers exist: the loop trigger and the return
- // trigger. In either case, when an addition to m_jitExecuteCounter
- // causes it to become non-negative, the optimizing compiler is
- // invoked. This includes a fast check to see if this CodeBlock has
- // already been optimized (i.e. replacement() returns a CodeBlock
- // that was optimized with a higher tier JIT than this one). In the
- // case of the loop trigger, if the optimized compilation succeeds
- // (or has already succeeded in the past) then OSR is attempted to
- // redirect program flow into the optimized code.
-
- // These functions are called from within the optimization triggers,
- // and are used as a single point at which we define the heuristics
- // for how much warm-up is mandated before the next optimization
- // trigger files. All CodeBlocks start out with optimizeAfterWarmUp(),
- // as this is called from the CodeBlock constructor.
-
- // When we observe a lot of speculation failures, we trigger a
- // reoptimization. But each time, we increase the optimization trigger
- // to avoid thrashing.
- unsigned reoptimizationRetryCounter() const
- {
- ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
- return m_reoptimizationRetryCounter;
- }
+ bool checkIfJITThresholdReached()
+ {
+ return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
+ }
- void countReoptimization()
- {
- m_reoptimizationRetryCounter++;
- if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
- m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
- }
+ void dontJITAnytimeSoon()
+ {
+ m_llintExecuteCounter.deferIndefinitely();
+ }
- int32_t counterValueForOptimizeAfterWarmUp()
- {
- return Options::thresholdForOptimizeAfterWarmUp() << reoptimizationRetryCounter();
- }
+ void jitAfterWarmUp()
+ {
+ m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
+ }
- int32_t counterValueForOptimizeAfterLongWarmUp()
- {
- return Options::thresholdForOptimizeAfterLongWarmUp() << reoptimizationRetryCounter();
- }
+ void jitSoon()
+ {
+ m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
+ }
- int32_t* addressOfJITExecuteCounter()
- {
- return &m_jitExecuteCounter.m_counter;
- }
+ const ExecutionCounter& llintExecuteCounter() const
+ {
+ return m_llintExecuteCounter;
+ }
- static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
- static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
- static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }
+ // Functions for controlling when tiered compilation kicks in. This
+ // controls both when the optimizing compiler is invoked and when OSR
+ // entry happens. Two triggers exist: the loop trigger and the return
+ // trigger. In either case, when an addition to m_jitExecuteCounter
+ // causes it to become non-negative, the optimizing compiler is
+ // invoked. This includes a fast check to see if this CodeBlock has
+ // already been optimized (i.e. replacement() returns a CodeBlock
+ // that was optimized with a higher tier JIT than this one). In the
+ // case of the loop trigger, if the optimized compilation succeeds
+ // (or has already succeeded in the past) then OSR is attempted to
+ // redirect program flow into the optimized code.
+
+ // These functions are called from within the optimization triggers,
+ // and are used as a single point at which we define the heuristics
+ // for how much warm-up is mandated before the next optimization
+ // trigger files. All CodeBlocks start out with optimizeAfterWarmUp(),
+ // as this is called from the CodeBlock constructor.
+
+ // When we observe a lot of speculation failures, we trigger a
+ // reoptimization. But each time, we increase the optimization trigger
+ // to avoid thrashing.
+ unsigned reoptimizationRetryCounter() const;
+ void countReoptimization();
+
+ unsigned numberOfDFGCompiles();
- const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
+ int32_t codeTypeThresholdMultiplier() const;
- unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
+ int32_t counterValueForOptimizeAfterWarmUp();
+ int32_t counterValueForOptimizeAfterLongWarmUp();
+ int32_t counterValueForOptimizeSoon();
- // Check if the optimization threshold has been reached, and if not,
- // adjust the heuristics accordingly. Returns true if the threshold has
- // been reached.
- bool checkIfOptimizationThresholdReached()
- {
- return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
- }
-
- // Call this to force the next optimization trigger to fire. This is
- // rarely wise, since optimization triggers are typically more
- // expensive than executing baseline code.
- void optimizeNextInvocation()
- {
- m_jitExecuteCounter.setNewThreshold(0, this);
- }
-
- // Call this to prevent optimization from happening again. Note that
- // optimization will still happen after roughly 2^29 invocations,
- // so this is really meant to delay that as much as possible. This
- // is called if optimization failed, and we expect it to fail in
- // the future as well.
- void dontOptimizeAnytimeSoon()
- {
- m_jitExecuteCounter.deferIndefinitely();
- }
-
- // Call this to reinitialize the counter to its starting state,
- // forcing a warm-up to happen before the next optimization trigger
- // fires. This is called in the CodeBlock constructor. It also
- // makes sense to call this if an OSR exit occurred. Note that
- // OSR exit code is code generated, so the value of the execute
- // counter that this corresponds to is also available directly.
- void optimizeAfterWarmUp()
- {
- m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterWarmUp(), this);
- }
-
- // Call this to force an optimization trigger to fire only after
- // a lot of warm-up.
- void optimizeAfterLongWarmUp()
- {
- m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterLongWarmUp(), this);
- }
-
- // Call this to cause an optimization trigger to fire soon, but
- // not necessarily the next one. This makes sense if optimization
- // succeeds. Successfuly optimization means that all calls are
- // relinked to the optimized code, so this only affects call
- // frames that are still executing this CodeBlock. The value here
- // is tuned to strike a balance between the cost of OSR entry
- // (which is too high to warrant making every loop back edge to
- // trigger OSR immediately) and the cost of executing baseline
- // code (which is high enough that we don't necessarily want to
- // have a full warm-up). The intuition for calling this instead of
- // optimizeNextInvocation() is for the case of recursive functions
- // with loops. Consider that there may be N call frames of some
- // recursive function, for a reasonably large value of N. The top
- // one triggers optimization, and then returns, and then all of
- // the others return. We don't want optimization to be triggered on
- // each return, as that would be superfluous. It only makes sense
- // to trigger optimization if one of those functions becomes hot
- // in the baseline code.
- void optimizeSoon()
- {
- m_jitExecuteCounter.setNewThreshold(Options::thresholdForOptimizeSoon() << reoptimizationRetryCounter(), this);
- }
-
- uint32_t osrExitCounter() const { return m_osrExitCounter; }
-
- void countOSRExit() { m_osrExitCounter++; }
-
- uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }
+ int32_t* addressOfJITExecuteCounter()
+ {
+ return &m_jitExecuteCounter.m_counter;
+ }
- static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }
+ static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
+ static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
+ static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }
+
+ const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
+
+ unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
+
+ // Check if the optimization threshold has been reached, and if not,
+ // adjust the heuristics accordingly. Returns true if the threshold has
+ // been reached.
+ bool checkIfOptimizationThresholdReached();
+
+ // Call this to force the next optimization trigger to fire. This is
+ // rarely wise, since optimization triggers are typically more
+ // expensive than executing baseline code.
+ void optimizeNextInvocation();
+
+ // Call this to prevent optimization from happening again. Note that
+ // optimization will still happen after roughly 2^29 invocations,
+ // so this is really meant to delay that as much as possible. This
+ // is called if optimization failed, and we expect it to fail in
+ // the future as well.
+ void dontOptimizeAnytimeSoon();
+
+ // Call this to reinitialize the counter to its starting state,
+ // forcing a warm-up to happen before the next optimization trigger
+ // fires. This is called in the CodeBlock constructor. It also
+ // makes sense to call this if an OSR exit occurred. Note that
+ // OSR exit code is code generated, so the value of the execute
+ // counter that this corresponds to is also available directly.
+ void optimizeAfterWarmUp();
+
+ // Call this to force an optimization trigger to fire only after
+ // a lot of warm-up.
+ void optimizeAfterLongWarmUp();
+
+ // Call this to cause an optimization trigger to fire soon, but
+ // not necessarily the next one. This makes sense if optimization
+ // succeeds. Successfuly optimization means that all calls are
+ // relinked to the optimized code, so this only affects call
+ // frames that are still executing this CodeBlock. The value here
+ // is tuned to strike a balance between the cost of OSR entry
+ // (which is too high to warrant making every loop back edge to
+ // trigger OSR immediately) and the cost of executing baseline
+ // code (which is high enough that we don't necessarily want to
+ // have a full warm-up). The intuition for calling this instead of
+ // optimizeNextInvocation() is for the case of recursive functions
+ // with loops. Consider that there may be N call frames of some
+ // recursive function, for a reasonably large value of N. The top
+ // one triggers optimization, and then returns, and then all of
+ // the others return. We don't want optimization to be triggered on
+ // each return, as that would be superfluous. It only makes sense
+ // to trigger optimization if one of those functions becomes hot
+ // in the baseline code.
+ void optimizeSoon();
+
+ uint32_t osrExitCounter() const { return m_osrExitCounter; }
+
+ void countOSRExit() { m_osrExitCounter++; }
+
+ uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }
+
+ static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }
#if ENABLE(JIT)
- uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold)
- {
- ASSERT(getJITType() == JITCode::DFGJIT);
- // Compute this the lame way so we don't saturate. This is called infrequently
- // enough that this loop won't hurt us.
- unsigned result = desiredThreshold;
- for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
- unsigned newResult = result << 1;
- if (newResult < result)
- return std::numeric_limits<uint32_t>::max();
- result = newResult;
- }
- return result;
- }
-
- uint32_t exitCountThresholdForReoptimization()
- {
- return adjustedExitCountThreshold(Options::osrExitCountForReoptimization());
- }
-
- uint32_t exitCountThresholdForReoptimizationFromLoop()
- {
- return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop());
- }
-
- bool shouldReoptimizeNow()
- {
- return osrExitCounter() >= exitCountThresholdForReoptimization();
- }
-
- bool shouldReoptimizeFromLoopNow()
- {
- return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
- }
+ uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
+ uint32_t exitCountThresholdForReoptimization();
+ uint32_t exitCountThresholdForReoptimizationFromLoop();
+ bool shouldReoptimizeNow();
+ bool shouldReoptimizeFromLoopNow();
#endif
#if ENABLE(VALUE_PROFILER)
- bool shouldOptimizeNow();
- void updateAllValueProfilePredictions(OperationInProgress = NoOperation);
- void updateAllArrayPredictions(OperationInProgress = NoOperation);
- void updateAllPredictions(OperationInProgress = NoOperation);
+ bool shouldOptimizeNow();
+ void updateAllValueProfilePredictions(OperationInProgress = NoOperation);
+ void updateAllArrayPredictions(OperationInProgress = NoOperation);
+ void updateAllPredictions(OperationInProgress = NoOperation);
#else
- bool shouldOptimizeNow() { return false; }
- void updateAllValueProfilePredictions(OperationInProgress = NoOperation) { }
- void updateAllArrayPredictions(OperationInProgress = NoOperation) { }
- void updateAllPredictions(OperationInProgress = NoOperation) { }
+ bool shouldOptimizeNow() { return false; }
+ void updateAllValueProfilePredictions(OperationInProgress = NoOperation) { }
+ void updateAllArrayPredictions(OperationInProgress = NoOperation) { }
+ void updateAllPredictions(OperationInProgress = NoOperation) { }
#endif
#if ENABLE(JIT)
- void reoptimize();
+ void reoptimize();
#endif
#if ENABLE(VERBOSE_VALUE_PROFILE)
- void dumpValueProfiles();
+ void dumpValueProfiles();
#endif
- // FIXME: Make these remaining members private.
+ // FIXME: Make these remaining members private.
- int m_numCalleeRegisters;
- int m_numVars;
- bool m_isConstructor;
+ int m_numCalleeRegisters;
+ int m_numVars;
+ bool m_isConstructor;
- protected:
+protected:
#if ENABLE(JIT)
- virtual bool jitCompileImpl(ExecState*) = 0;
+ virtual bool jitCompileImpl(ExecState*) = 0;
+ virtual void jettisonImpl() = 0;
#endif
- virtual void visitWeakReferences(SlotVisitor&);
- virtual void finalizeUnconditionally();
+ virtual void visitWeakReferences(SlotVisitor&);
+ virtual void finalizeUnconditionally();
- UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }
+#if ENABLE(DFG_JIT)
+ void tallyFrequentExitSites();
+#else
+ void tallyFrequentExitSites() { }
+#endif
- private:
- friend class DFGCodeBlocks;
+private:
+ friend class DFGCodeBlocks;
+
+ double optimizationThresholdScalingFactor();
#if ENABLE(JIT)
- ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
+ ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
#endif
-#if ENABLE(DFG_JIT)
- void tallyFrequentExitSites();
-#else
- void tallyFrequentExitSites() { }
-#endif
#if ENABLE(VALUE_PROFILER)
- void updateAllPredictionsAndCountLiveness(OperationInProgress, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
+ void updateAllPredictionsAndCountLiveness(OperationInProgress, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
#endif
- void setIdentifiers(const Vector<Identifier>& identifiers)
- {
- ASSERT(m_identifiers.isEmpty());
- m_identifiers.appendVector(identifiers);
- }
+ void setIdentifiers(const Vector<Identifier>& identifiers)
+ {
+ RELEASE_ASSERT(m_identifiers.isEmpty());
+ m_identifiers.appendVector(identifiers);
+ }
- void setConstantRegisters(const Vector<WriteBarrier<Unknown> >& constants)
- {
- size_t count = constants.size();
- m_constantRegisters.resize(count);
- for (size_t i = 0; i < count; i++)
- m_constantRegisters[i].set(*m_globalData, ownerExecutable(), constants[i].get());
- }
+ void setConstantRegisters(const Vector<WriteBarrier<Unknown> >& constants)
+ {
+ size_t count = constants.size();
+ m_constantRegisters.resize(count);
+ for (size_t i = 0; i < count; i++)
+ m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
+ }
- void dumpBytecode(ExecState*, const Instruction* begin, const Instruction*&);
+ void dumpBytecode(PrintStream&, ExecState*, const Instruction* begin, const Instruction*&);
+
+ CString registerName(ExecState*, int r) const;
+ void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+ void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+ void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
+ void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
+ void printGetByIdCacheStatus(PrintStream&, ExecState*, int location);
+ enum CacheDumpMode { DumpCaches, DontDumpCaches };
+ void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode);
+ void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+ void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
+ void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
+ void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
+#if ENABLE(VALUE_PROFILER)
+ void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
+#endif
- CString registerName(ExecState*, int r) const;
- void printUnaryOp(ExecState*, int location, const Instruction*&, const char* op);
- void printBinaryOp(ExecState*, int location, const Instruction*&, const char* op);
- void printConditionalJump(ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
- void printGetByIdOp(ExecState*, int location, const Instruction*&);
- void printGetByIdCacheStatus(ExecState*, int location);
- enum CacheDumpMode { DumpCaches, DontDumpCaches };
- void printCallOp(ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode);
- void printPutByIdOp(ExecState*, int location, const Instruction*&, const char* op);
- void visitStructures(SlotVisitor&, Instruction* vPC);
+ void visitStructures(SlotVisitor&, Instruction* vPC);
#if ENABLE(DFG_JIT)
- bool shouldImmediatelyAssumeLivenessDuringScan()
- {
- // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
- // CodeBlocks don't need to be jettisoned when their weak references go
- // stale. So if a basline JIT CodeBlock gets scanned, we can assume that
- // this means that it's live.
- if (!m_dfgData)
- return true;
+ bool shouldImmediatelyAssumeLivenessDuringScan()
+ {
+ // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
+ // CodeBlocks don't need to be jettisoned when their weak references go
+ // stale. So if a basline JIT CodeBlock gets scanned, we can assume that
+ // this means that it's live.
+ if (!m_dfgData)
+ return true;
- // For simplicity, we don't attempt to jettison code blocks during GC if
- // they are executing. Instead we strongly mark their weak references to
- // allow them to continue to execute soundly.
- if (m_dfgData->mayBeExecuting)
- return true;
+ // For simplicity, we don't attempt to jettison code blocks during GC if
+ // they are executing. Instead we strongly mark their weak references to
+ // allow them to continue to execute soundly.
+ if (m_dfgData->mayBeExecuting)
+ return true;
- if (Options::forceDFGCodeBlockLiveness())
- return true;
+ if (Options::forceDFGCodeBlockLiveness())
+ return true;
- return false;
- }
+ return false;
+ }
#else
- bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
+ bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
#endif
- void performTracingFixpointIteration(SlotVisitor&);
+ void performTracingFixpointIteration(SlotVisitor&);
- void stronglyVisitStrongReferences(SlotVisitor&);
- void stronglyVisitWeakReferences(SlotVisitor&);
+ void stronglyVisitStrongReferences(SlotVisitor&);
+ void stronglyVisitWeakReferences(SlotVisitor&);
- void createRareDataIfNecessary()
- {
- if (!m_rareData)
- m_rareData = adoptPtr(new RareData);
- }
+ void createRareDataIfNecessary()
+ {
+ if (!m_rareData)
+ m_rareData = adoptPtr(new RareData);
+ }
#if ENABLE(JIT)
- void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
+ void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
+ void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
#endif
- WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
- int m_numParameters;
- WriteBarrier<ScriptExecutable> m_ownerExecutable;
- JSGlobalData* m_globalData;
+ WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
+ int m_numParameters;
+ WriteBarrier<ScriptExecutable> m_ownerExecutable;
+ VM* m_vm;
- RefCountedArray<Instruction> m_instructions;
- int m_thisRegister;
- int m_argumentsRegister;
- int m_activationRegister;
+ RefCountedArray<Instruction> m_instructions;
+ int m_thisRegister;
+ int m_argumentsRegister;
+ int m_activationRegister;
- bool m_isStrictMode;
+ bool m_isStrictMode;
+ bool m_needsActivation;
- RefPtr<SourceProvider> m_source;
- unsigned m_sourceOffset;
+ RefPtr<SourceProvider> m_source;
+ unsigned m_sourceOffset;
+ unsigned m_firstLineColumnOffset;
+ unsigned m_codeType;
#if ENABLE(LLINT)
- SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
- SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
+ SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
+ SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
#endif
#if ENABLE(JIT)
- Vector<StructureStubInfo> m_structureStubInfos;
- Vector<ByValInfo> m_byValInfos;
- Vector<CallLinkInfo> m_callLinkInfos;
- JITCode m_jitCode;
- MacroAssemblerCodePtr m_jitCodeWithArityCheck;
- SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
+ Vector<StructureStubInfo> m_structureStubInfos;
+ Vector<ByValInfo> m_byValInfos;
+ Vector<CallLinkInfo> m_callLinkInfos;
+ JITCode m_jitCode;
+ MacroAssemblerCodePtr m_jitCodeWithArityCheck;
+ SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
#endif
#if ENABLE(DFG_JIT) || ENABLE(LLINT)
- OwnPtr<CompactJITCodeMap> m_jitCodeMap;
+ OwnPtr<CompactJITCodeMap> m_jitCodeMap;
#endif
#if ENABLE(DFG_JIT)
- struct WeakReferenceTransition {
- WeakReferenceTransition() { }
+ struct WeakReferenceTransition {
+ WeakReferenceTransition() { }
- WeakReferenceTransition(JSGlobalData& globalData, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
- : m_from(globalData, owner, from)
- , m_to(globalData, owner, to)
- {
- if (!!codeOrigin)
- m_codeOrigin.set(globalData, owner, codeOrigin);
- }
-
- WriteBarrier<JSCell> m_codeOrigin;
- WriteBarrier<JSCell> m_from;
- WriteBarrier<JSCell> m_to;
- };
-
- struct DFGData {
- DFGData()
- : mayBeExecuting(false)
- , isJettisoned(false)
- {
- }
+ WeakReferenceTransition(VM& vm, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
+ : m_from(vm, owner, from)
+ , m_to(vm, owner, to)
+ {
+ if (!!codeOrigin)
+ m_codeOrigin.set(vm, owner, codeOrigin);
+ }
+
+ WriteBarrier<JSCell> m_codeOrigin;
+ WriteBarrier<JSCell> m_from;
+ WriteBarrier<JSCell> m_to;
+ };
+
+ struct DFGData {
+ DFGData()
+ : mayBeExecuting(false)
+ , isJettisoned(false)
+ {
+ }
- Vector<DFG::OSREntryData> osrEntry;
- SegmentedVector<DFG::OSRExit, 8> osrExit;
- Vector<DFG::SpeculationRecovery> speculationRecovery;
- SegmentedVector<JumpReplacementWatchpoint, 1, 0> watchpoints;
- Vector<WeakReferenceTransition> transitions;
- Vector<WriteBarrier<JSCell> > weakReferences;
- DFG::VariableEventStream variableEventStream;
- DFG::MinifiedGraph minifiedDFG;
- bool mayBeExecuting;
- bool isJettisoned;
- bool livenessHasBeenProved; // Initialized and used on every GC.
- bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
- unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations.
- };
-
- OwnPtr<DFGData> m_dfgData;
-
- // This is relevant to non-DFG code blocks that serve as the profiled code block
- // for DFG code blocks.
- DFG::ExitProfile m_exitProfile;
- CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
+ Vector<DFG::OSREntryData> osrEntry;
+ SegmentedVector<DFG::OSRExit, 8> osrExit;
+ Vector<DFG::SpeculationRecovery> speculationRecovery;
+ SegmentedVector<JumpReplacementWatchpoint, 1, 0> watchpoints;
+ Vector<WeakReferenceTransition> transitions;
+ Vector<WriteBarrier<JSCell> > weakReferences;
+ DFG::VariableEventStream variableEventStream;
+ DFG::MinifiedGraph minifiedDFG;
+ RefPtr<Profiler::Compilation> compilation;
+ bool mayBeExecuting;
+ bool isJettisoned;
+ bool livenessHasBeenProved; // Initialized and used on every GC.
+ bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
+ unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations.
+ };
+
+ OwnPtr<DFGData> m_dfgData;
+
+ // This is relevant to non-DFG code blocks that serve as the profiled code block
+ // for DFG code blocks.
+ DFG::ExitProfile m_exitProfile;
+ CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
#if ENABLE(VALUE_PROFILER)
- Vector<ValueProfile> m_argumentValueProfiles;
- SegmentedVector<ValueProfile, 8> m_valueProfiles;
- SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
- SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
- SegmentedVector<ArrayAllocationProfile, 8> m_arrayAllocationProfiles;
- ArrayProfileVector m_arrayProfiles;
- unsigned m_executionEntryCount;
+ Vector<ValueProfile> m_argumentValueProfiles;
+ SegmentedVector<ValueProfile, 8> m_valueProfiles;
+ SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
+ SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
+ SegmentedVector<ArrayAllocationProfile, 8> m_arrayAllocationProfiles;
+ ArrayProfileVector m_arrayProfiles;
#endif
+ SegmentedVector<ObjectAllocationProfile, 8> m_objectAllocationProfiles;
- // Constant Pool
- Vector<Identifier> m_identifiers;
- COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
- // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
- // it, so we're stuck with it for now.
- Vector<WriteBarrier<Unknown> > m_constantRegisters;
- Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
- Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;
+ // Constant Pool
+ Vector<Identifier> m_identifiers;
+ COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
+ // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
+ // it, so we're stuck with it for now.
+ Vector<WriteBarrier<Unknown> > m_constantRegisters;
+ Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
+ Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;
- OwnPtr<CodeBlock> m_alternative;
+ OwnPtr<CodeBlock> m_alternative;
- ExecutionCounter m_llintExecuteCounter;
+ ExecutionCounter m_llintExecuteCounter;
- ExecutionCounter m_jitExecuteCounter;
- int32_t m_totalJITExecutions;
- uint32_t m_osrExitCounter;
- uint16_t m_optimizationDelayCounter;
- uint16_t m_reoptimizationRetryCounter;
+ ExecutionCounter m_jitExecuteCounter;
+ int32_t m_totalJITExecutions;
+ uint32_t m_osrExitCounter;
+ uint16_t m_optimizationDelayCounter;
+ uint16_t m_reoptimizationRetryCounter;
- Vector<ResolveOperations> m_resolveOperations;
- Vector<PutToBaseOperation, 1> m_putToBaseOperations;
+ Vector<ResolveOperations> m_resolveOperations;
+ Vector<PutToBaseOperation, 1> m_putToBaseOperations;
- struct RareData {
- WTF_MAKE_FAST_ALLOCATED;
- public:
- Vector<HandlerInfo> m_exceptionHandlers;
+ struct RareData {
+ WTF_MAKE_FAST_ALLOCATED;
+ public:
+ Vector<HandlerInfo> m_exceptionHandlers;
- // Buffers used for large array literals
- Vector<Vector<JSValue> > m_constantBuffers;
+ // Buffers used for large array literals
+ Vector<Vector<JSValue> > m_constantBuffers;
- // Jump Tables
- Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
- Vector<SimpleJumpTable> m_characterSwitchJumpTables;
- Vector<StringJumpTable> m_stringSwitchJumpTables;
+ // Jump Tables
+ Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
+ Vector<SimpleJumpTable> m_characterSwitchJumpTables;
+ Vector<StringJumpTable> m_stringSwitchJumpTables;
- EvalCodeCache m_evalCodeCache;
+ EvalCodeCache m_evalCodeCache;
#if ENABLE(JIT)
- Vector<CallReturnOffsetToBytecodeOffset> m_callReturnIndexVector;
+ Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow> m_callReturnIndexVector;
#endif
#if ENABLE(DFG_JIT)
- SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
- Vector<CodeOriginAtCallReturnOffset> m_codeOrigins;
+ SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
+ Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow> m_codeOrigins;
#endif
- };
+ };
#if COMPILER(MSVC)
- friend void WTF::deleteOwnedPtr<RareData>(RareData*);
+ friend void WTF::deleteOwnedPtr<RareData>(RareData*);
#endif
- OwnPtr<RareData> m_rareData;
+ OwnPtr<RareData> m_rareData;
#if ENABLE(JIT)
- DFG::CapabilityLevel m_canCompileWithDFGState;
+ DFG::CapabilityLevel m_canCompileWithDFGState;
#endif
- };
+};
- // Program code is not marked by any function, so we make the global object
- // responsible for marking it.
+// Program code is not marked by any function, so we make the global object
+// responsible for marking it.
- class GlobalCodeBlock : public CodeBlock {
- protected:
- GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
- : CodeBlock(CopyParsedBlock, other)
- {
- }
-
- GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, unsigned baseScopeDepth, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative)
- : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, sourceOffset, alternative)
- {
- }
- };
-
- class ProgramCodeBlock : public GlobalCodeBlock {
- public:
- ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
- : GlobalCodeBlock(CopyParsedBlock, other)
- {
- }
-
- ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, PassOwnPtr<CodeBlock> alternative)
- : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, 0, alternative)
- {
- }
-
-#if ENABLE(JIT)
- protected:
- virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
- virtual void jettison();
- virtual bool jitCompileImpl(ExecState*);
- virtual CodeBlock* replacement();
- virtual DFG::CapabilityLevel canCompileWithDFGInternal();
-#endif
- };
-
- class EvalCodeBlock : public GlobalCodeBlock {
- public:
- EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
- : GlobalCodeBlock(CopyParsedBlock, other)
- {
- }
-
- EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
- : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, 0, alternative)
- {
- }
-
- const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
- unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }
+class GlobalCodeBlock : public CodeBlock {
+protected:
+ GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
+ : CodeBlock(CopyParsedBlock, other)
+ {
+ }
-#if ENABLE(JIT)
- protected:
- virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
- virtual void jettison();
- virtual bool jitCompileImpl(ExecState*);
- virtual CodeBlock* replacement();
- virtual DFG::CapabilityLevel canCompileWithDFGInternal();
-#endif
+ GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, unsigned baseScopeDepth, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative)
+ : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, sourceOffset, firstLineColumnOffset, alternative)
+ {
+ }
+};
- private:
- UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
- };
+class ProgramCodeBlock : public GlobalCodeBlock {
+public:
+ ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
+ : GlobalCodeBlock(CopyParsedBlock, other)
+ {
+ }
- class FunctionCodeBlock : public CodeBlock {
- public:
- FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
- : CodeBlock(CopyParsedBlock, other)
- {
- }
+ ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative)
+ : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, 0, firstLineColumnOffset, alternative)
+ {
+ }
- FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative = nullptr)
- : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, sourceOffset, alternative)
- {
- }
-
#if ENABLE(JIT)
- protected:
- virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
- virtual void jettison();
- virtual bool jitCompileImpl(ExecState*);
- virtual CodeBlock* replacement();
- virtual DFG::CapabilityLevel canCompileWithDFGInternal();
+protected:
+ virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
+ virtual void jettisonImpl();
+ virtual bool jitCompileImpl(ExecState*);
+ virtual CodeBlock* replacement();
+ virtual DFG::CapabilityLevel canCompileWithDFGInternal();
#endif
- };
+};
- inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
+class EvalCodeBlock : public GlobalCodeBlock {
+public:
+ EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
+ : GlobalCodeBlock(CopyParsedBlock, other)
{
- ASSERT(inlineCallFrame);
- ExecutableBase* executable = inlineCallFrame->executable.get();
- ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
- return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
}
-
- inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
+
+ EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
+ : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, 0, 1, alternative)
{
- if (codeOrigin.inlineCallFrame)
- return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
- return baselineCodeBlock;
}
-
- inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
- {
- if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
- return CallFrame::argumentOffset(argument);
- const SlowArgument* slowArguments = symbolTable()->slowArguments();
- if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
- return CallFrame::argumentOffset(argument);
+ const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
+ unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }
+
+#if ENABLE(JIT)
+protected:
+ virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
+ virtual void jettisonImpl();
+ virtual bool jitCompileImpl(ExecState*);
+ virtual CodeBlock* replacement();
+ virtual DFG::CapabilityLevel canCompileWithDFGInternal();
+#endif
- ASSERT(slowArguments[argument].status == SlowArgument::Captured);
- return slowArguments[argument].index;
- }
+private:
+ UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
+};
- inline Register& ExecState::r(int index)
+class FunctionCodeBlock : public CodeBlock {
+public:
+ FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
+ : CodeBlock(CopyParsedBlock, other)
{
- CodeBlock* codeBlock = this->codeBlock();
- if (codeBlock->isConstantRegisterIndex(index))
- return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
- return this[index];
}
- inline Register& ExecState::uncheckedR(int index)
+ FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative = nullptr)
+ : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, sourceOffset, firstLineColumnOffset, alternative)
{
- ASSERT(index < FirstConstantRegisterIndex);
- return this[index];
}
+
+#if ENABLE(JIT)
+protected:
+ virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
+ virtual void jettisonImpl();
+ virtual bool jitCompileImpl(ExecState*);
+ virtual CodeBlock* replacement();
+ virtual DFG::CapabilityLevel canCompileWithDFGInternal();
+#endif
+};
+
+inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
+{
+ RELEASE_ASSERT(inlineCallFrame);
+ ExecutableBase* executable = inlineCallFrame->executable.get();
+ RELEASE_ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
+ return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
+}
+
+inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
+{
+ if (codeOrigin.inlineCallFrame)
+ return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
+ return baselineCodeBlock;
+}
+
+inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
+{
+ if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
+ return CallFrame::argumentOffset(argument);
+
+ const SlowArgument* slowArguments = symbolTable()->slowArguments();
+ if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
+ return CallFrame::argumentOffset(argument);
+
+ ASSERT(slowArguments[argument].status == SlowArgument::Captured);
+ return slowArguments[argument].index;
+}
+
+inline Register& ExecState::r(int index)
+{
+ CodeBlock* codeBlock = this->codeBlock();
+ if (codeBlock->isConstantRegisterIndex(index))
+ return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
+ return this[index];
+}
+
+inline Register& ExecState::uncheckedR(int index)
+{
+ RELEASE_ASSERT(index < FirstConstantRegisterIndex);
+ return this[index];
+}
#if ENABLE(DFG_JIT)
- inline bool ExecState::isInlineCallFrame()
- {
- if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
- return false;
- return isInlineCallFrameSlow();
- }
+inline bool ExecState::isInlineCallFrame()
+{
+ if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
+ return false;
+ return isInlineCallFrameSlow();
+}
#endif
- inline JSValue ExecState::argumentAfterCapture(size_t argument)
- {
- if (argument >= argumentCount())
- return jsUndefined();
+inline JSValue ExecState::argumentAfterCapture(size_t argument)
+{
+ if (argument >= argumentCount())
+ return jsUndefined();
- if (!codeBlock())
- return this[argumentOffset(argument)].jsValue();
+ if (!codeBlock())
+ return this[argumentOffset(argument)].jsValue();
- return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
- }
+ return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
+}
#if ENABLE(DFG_JIT)
- inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
- {
- // We have to check for 0 and -1 because those are used by the HashMap as markers.
- uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
-
- // This checks for both of those nasty cases in one go.
- // 0 + 1 = 1
- // -1 + 1 = 0
- if (value + 1 <= 1)
- return;
-
- HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
- if (iter == m_set.end())
- return;
-
- (*iter)->m_dfgData->mayBeExecuting = true;
- }
+inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
+{
+ // We have to check for 0 and -1 because those are used by the HashMap as markers.
+ uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
+
+ // This checks for both of those nasty cases in one go.
+ // 0 + 1 = 1
+ // -1 + 1 = 0
+ if (value + 1 <= 1)
+ return;
+
+ HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
+ if (iter == m_set.end())
+ return;
+
+ (*iter)->m_dfgData->mayBeExecuting = true;
+}
#endif
- inline JSValue Structure::prototypeForLookup(CodeBlock* codeBlock) const
- {
- return prototypeForLookup(codeBlock->globalObject());
- }
-
} // namespace JSC
#endif // CodeBlock_h
diff --git a/Source/JavaScriptCore/bytecode/CodeBlockHash.cpp b/Source/JavaScriptCore/bytecode/CodeBlockHash.cpp
index 79fe9ccb5..7c890cc88 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlockHash.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlockHash.cpp
@@ -36,8 +36,7 @@ namespace JSC {
CodeBlockHash::CodeBlockHash(const char* string)
: m_hash(0)
{
- if (strlen(string) != 6)
- CRASH();
+ RELEASE_ASSERT(strlen(string) == 6);
for (unsigned i = 0; i < 6; ++i) {
m_hash *= 62;
diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.cpp b/Source/JavaScriptCore/bytecode/CodeOrigin.cpp
index 92e2b0fc9..52bc2bf7f 100644
--- a/Source/JavaScriptCore/bytecode/CodeOrigin.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeOrigin.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,7 +26,10 @@
#include "config.h"
#include "CodeOrigin.h"
+#include "CallFrame.h"
+#include "CodeBlock.h"
#include "Executable.h"
+#include "Operations.h"
namespace JSC {
@@ -50,13 +53,66 @@ Vector<CodeOrigin> CodeOrigin::inlineStack() const
unsigned index = result.size() - 2;
for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame)
result[index--] = current->caller;
+ RELEASE_ASSERT(!result[0].inlineCallFrame);
return result;
}
+void CodeOrigin::dump(PrintStream& out) const
+{
+ Vector<CodeOrigin> stack = inlineStack();
+ for (unsigned i = 0; i < stack.size(); ++i) {
+ if (i)
+ out.print(" --> ");
+
+ if (InlineCallFrame* frame = stack[i].inlineCallFrame) {
+ out.print(frame->briefFunctionInformation(), ":<", RawPointer(frame->executable.get()), "> ");
+ if (frame->isClosureCall())
+ out.print("(closure) ");
+ }
+
+ out.print("bc#", stack[i].bytecodeIndex);
+ }
+}
+
+JSFunction* InlineCallFrame::calleeForCallFrame(ExecState* exec) const
+{
+ if (!isClosureCall())
+ return callee.get();
+
+ return jsCast<JSFunction*>((exec + stackOffset)->callee());
+}
+
CodeBlockHash InlineCallFrame::hash() const
{
return executable->hashFor(specializationKind());
}
+String InlineCallFrame::inferredName() const
+{
+ return jsCast<FunctionExecutable*>(executable.get())->inferredName().string();
+}
+
+CodeBlock* InlineCallFrame::baselineCodeBlock() const
+{
+ return jsCast<FunctionExecutable*>(executable.get())->baselineCodeBlockFor(specializationKind());
+}
+
+void InlineCallFrame::dumpBriefFunctionInformation(PrintStream& out) const
+{
+ out.print(inferredName(), "#", hash());
+}
+
+void InlineCallFrame::dump(PrintStream& out) const
+{
+ out.print(briefFunctionInformation(), ":<", RawPointer(executable.get()), ", bc#", caller.bytecodeIndex, ", ", specializationKind());
+ if (callee)
+ out.print(", known callee: ", JSValue(callee.get()));
+ else
+ out.print(", closure call");
+ out.print(", numArgs+this = ", arguments.size());
+ out.print(", stack >= r", stackOffset);
+ out.print(">");
+}
+
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.h b/Source/JavaScriptCore/bytecode/CodeOrigin.h
index d8fbf7328..5d9eaa041 100644
--- a/Source/JavaScriptCore/bytecode/CodeOrigin.h
+++ b/Source/JavaScriptCore/bytecode/CodeOrigin.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,12 +31,14 @@
#include "ValueRecovery.h"
#include "WriteBarrier.h"
#include <wtf/BitVector.h>
+#include <wtf/PrintStream.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>
namespace JSC {
struct InlineCallFrame;
+class ExecState;
class ExecutableBase;
class JSFunction;
@@ -63,8 +65,8 @@ struct CodeOrigin {
, valueProfileOffset(valueProfileOffset)
, inlineCallFrame(inlineCallFrame)
{
- ASSERT(bytecodeIndex <= maximumBytecodeIndex);
- ASSERT(valueProfileOffset < (1u << 3));
+ RELEASE_ASSERT(bytecodeIndex <= maximumBytecodeIndex);
+ RELEASE_ASSERT(valueProfileOffset < (1u << 3));
}
bool isSet() const { return bytecodeIndex != maximumBytecodeIndex; }
@@ -82,6 +84,8 @@ struct CodeOrigin {
// would have owned the code if it had not been inlined. Otherwise returns 0.
ExecutableBase* codeOriginOwner() const;
+ unsigned stackOffset() const;
+
static unsigned inlineDepthForCallFrame(InlineCallFrame*);
bool operator==(const CodeOrigin& other) const;
@@ -90,12 +94,14 @@ struct CodeOrigin {
// Get the inline stack. This is slow, and is intended for debugging only.
Vector<CodeOrigin> inlineStack() const;
+
+ void dump(PrintStream&) const;
};
struct InlineCallFrame {
Vector<ValueRecovery> arguments;
WriteBarrier<ExecutableBase> executable;
- WriteBarrier<JSFunction> callee;
+ WriteBarrier<JSFunction> callee; // This may be null, indicating that this is a closure call and that the JSFunction and JSScope are already on the stack.
CodeOrigin caller;
BitVector capturedVars; // Indexed by the machine call frame's variable numbering.
unsigned stackOffset : 31;
@@ -103,7 +109,20 @@ struct InlineCallFrame {
CodeSpecializationKind specializationKind() const { return specializationFromIsCall(isCall); }
+ bool isClosureCall() const { return !callee; }
+
+ // Get the callee given a machine call frame to which this InlineCallFrame belongs.
+ JSFunction* calleeForCallFrame(ExecState*) const;
+
+ String inferredName() const;
CodeBlockHash hash() const;
+
+ CodeBlock* baselineCodeBlock() const;
+
+ void dumpBriefFunctionInformation(PrintStream&) const;
+ void dump(PrintStream&) const;
+
+ MAKE_PRINT_METHOD(InlineCallFrame, dumpBriefFunctionInformation, briefFunctionInformation);
};
struct CodeOriginAtCallReturnOffset {
@@ -111,6 +130,14 @@ struct CodeOriginAtCallReturnOffset {
unsigned callReturnOffset;
};
+inline unsigned CodeOrigin::stackOffset() const
+{
+ if (!inlineCallFrame)
+ return 0;
+
+ return inlineCallFrame->stackOffset;
+}
+
inline bool CodeOrigin::operator==(const CodeOrigin& other) const
{
return bytecodeIndex == other.bytecodeIndex
diff --git a/Source/JavaScriptCore/bytecode/Comment.h b/Source/JavaScriptCore/bytecode/Comment.h
deleted file mode 100644
index c28f3a068..000000000
--- a/Source/JavaScriptCore/bytecode/Comment.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef Comment_h
-#define Comment_h
-
-namespace JSC {
-
-struct Comment {
- size_t pc;
- const char* string;
-};
-
-} // namespace JSC
-
-#endif // Comment_h
diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
index 69fdc3737..d36878fc9 100644
--- a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
@@ -55,6 +55,33 @@ bool ExitProfile::add(const FrequentExitSite& site)
return true;
}
+Vector<FrequentExitSite> ExitProfile::exitSitesFor(unsigned bytecodeIndex)
+{
+ Vector<FrequentExitSite> result;
+
+ if (!m_frequentExitSites)
+ return result;
+
+ for (unsigned i = 0; i < m_frequentExitSites->size(); ++i) {
+ if (m_frequentExitSites->at(i).bytecodeOffset() == bytecodeIndex)
+ result.append(m_frequentExitSites->at(i));
+ }
+
+ return result;
+}
+
+bool ExitProfile::hasExitSite(const FrequentExitSite& site) const
+{
+ if (!m_frequentExitSites)
+ return false;
+
+ for (unsigned i = m_frequentExitSites->size(); i--;) {
+ if (m_frequentExitSites->at(i) == site)
+ return true;
+ }
+ return false;
+}
+
QueryableExitProfile::QueryableExitProfile(const ExitProfile& profile)
{
if (!profile.m_frequentExitSites)
diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.h b/Source/JavaScriptCore/bytecode/DFGExitProfile.h
index 7132adfd4..fe7b2f921 100644
--- a/Source/JavaScriptCore/bytecode/DFGExitProfile.h
+++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,73 +26,13 @@
#ifndef DFGExitProfile_h
#define DFGExitProfile_h
+#include "ExitKind.h"
#include <wtf/HashSet.h>
#include <wtf/OwnPtr.h>
#include <wtf/Vector.h>
namespace JSC { namespace DFG {
-enum ExitKind {
- ExitKindUnset,
- BadType, // We exited because a type prediction was wrong.
- BadCache, // We exited because an inline cache was wrong.
- BadWeakConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong.
- BadIndexingType, // We exited because an indexing type was wrong.
- Overflow, // We exited because of overflow.
- NegativeZero, // We exited because we encountered negative zero.
- OutOfBounds, // We had an out-of-bounds access to an array.
- InadequateCoverage, // We exited because we ended up in code that didn't have profiling coverage.
- ArgumentsEscaped, // We exited because arguments escaped but we didn't expect them to.
- Uncountable, // We exited for none of the above reasons, and we should not count it. Most uses of this should be viewed as a FIXME.
- UncountableWatchpoint // We exited because of a watchpoint, which isn't counted because watchpoints do tracking themselves.
-};
-
-inline const char* exitKindToString(ExitKind kind)
-{
- switch (kind) {
- case ExitKindUnset:
- return "Unset";
- case BadType:
- return "BadType";
- case BadCache:
- return "BadCache";
- case BadWeakConstantCache:
- return "BadWeakConstantCache";
- case BadIndexingType:
- return "BadIndexingType";
- case Overflow:
- return "Overflow";
- case NegativeZero:
- return "NegativeZero";
- case OutOfBounds:
- return "OutOfBounds";
- case InadequateCoverage:
- return "InadequateCoverage";
- case ArgumentsEscaped:
- return "ArgumentsEscaped";
- case Uncountable:
- return "Uncountable";
- case UncountableWatchpoint:
- return "UncountableWatchpoint";
- default:
- return "Unknown";
- }
-}
-
-inline bool exitKindIsCountable(ExitKind kind)
-{
- switch (kind) {
- case ExitKindUnset:
- ASSERT_NOT_REACHED();
- case BadType:
- case Uncountable:
- case UncountableWatchpoint:
- return false;
- default:
- return true;
- }
-}
-
class FrequentExitSite {
public:
FrequentExitSite()
@@ -189,6 +129,24 @@ public:
// anyway.
bool add(const FrequentExitSite&);
+ // Get the frequent exit sites for a bytecode index. This is O(n), and is
+ // meant to only be used from debugging/profiling code.
+ Vector<FrequentExitSite> exitSitesFor(unsigned bytecodeIndex);
+
+ // This is O(n) and should be called on less-frequently executed code paths
+ // in the compiler. It should be strictly cheaper than building a
+ // QueryableExitProfile, if you really expect this to be called infrequently
+ // and you believe that there are few exit sites.
+ bool hasExitSite(const FrequentExitSite&) const;
+ bool hasExitSite(ExitKind kind) const
+ {
+ return hasExitSite(FrequentExitSite(kind));
+ }
+ bool hasExitSite(unsigned bytecodeIndex, ExitKind kind) const
+ {
+ return hasExitSite(FrequentExitSite(bytecodeIndex, kind));
+ }
+
private:
friend class QueryableExitProfile;
diff --git a/Source/JavaScriptCore/bytecode/DataFormat.h b/Source/JavaScriptCore/bytecode/DataFormat.h
index 51c8afbf6..da8dacf49 100644
--- a/Source/JavaScriptCore/bytecode/DataFormat.h
+++ b/Source/JavaScriptCore/bytecode/DataFormat.h
@@ -82,7 +82,12 @@ inline const char* dataFormatToString(DataFormat dataFormat)
return "JSCell";
case DataFormatJSBoolean:
return "JSBoolean";
+ case DataFormatDead:
+ return "Dead";
+ case DataFormatArguments:
+ return "Arguments";
default:
+ RELEASE_ASSERT_NOT_REACHED();
return "Unknown";
}
}
@@ -115,14 +120,14 @@ inline bool needDataFormatConversion(DataFormat from, DataFormat to)
return false;
default:
// This captures DataFormatBoolean, which is currently unused.
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
case DataFormatStorage:
ASSERT(to == DataFormatStorage);
return false;
default:
// This captures DataFormatBoolean, which is currently unused.
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
return true;
}
@@ -149,7 +154,7 @@ inline bool needDataFormatConversion(DataFormat from, DataFormat to)
ASSERT(to == DataFormatStorage);
return false;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
return true;
}
diff --git a/Source/JavaScriptCore/bytecode/EvalCodeCache.h b/Source/JavaScriptCore/bytecode/EvalCodeCache.h
index 29b17dd82..5d04637f4 100644
--- a/Source/JavaScriptCore/bytecode/EvalCodeCache.h
+++ b/Source/JavaScriptCore/bytecode/EvalCodeCache.h
@@ -31,8 +31,6 @@
#include "Executable.h"
#include "JSGlobalObject.h"
-#include "Nodes.h"
-#include "Parser.h"
#include "SourceCode.h"
#include <wtf/HashMap.h>
#include <wtf/RefPtr.h>
@@ -40,6 +38,7 @@
namespace JSC {
+ class CodeCache;
class SlotVisitor;
class EvalCodeCache {
@@ -51,25 +50,25 @@ namespace JSC {
return 0;
}
- EvalExecutable* getSlow(ExecState* exec, ScriptExecutable* owner, bool inStrictContext, const String& evalSource, JSScope* scope, JSValue& exceptionValue)
+ EvalExecutable* getSlow(ExecState* exec, CodeCache* codeCache, ScriptExecutable* owner, bool inStrictContext, const String& evalSource, JSScope* scope, JSValue& exceptionValue)
{
- EvalExecutable* evalExecutable = EvalExecutable::create(exec, makeSource(evalSource), inStrictContext);
+ EvalExecutable* evalExecutable = EvalExecutable::create(exec, codeCache, makeSource(evalSource), inStrictContext);
exceptionValue = evalExecutable->compile(exec, scope);
if (exceptionValue)
return 0;
if (!inStrictContext && evalSource.length() < maxCacheableSourceLength && scope->begin()->isVariableObject() && m_cacheMap.size() < maxCacheEntries)
- m_cacheMap.set(evalSource.impl(), WriteBarrier<EvalExecutable>(exec->globalData(), owner, evalExecutable));
+ m_cacheMap.set(evalSource.impl(), WriteBarrier<EvalExecutable>(exec->vm(), owner, evalExecutable));
return evalExecutable;
}
- EvalExecutable* get(ExecState* exec, ScriptExecutable* owner, bool inStrictContext, const String& evalSource, JSScope* scope, JSValue& exceptionValue)
+ EvalExecutable* get(ExecState* exec, CodeCache* codeCache, ScriptExecutable* owner, bool inStrictContext, const String& evalSource, JSScope* scope, JSValue& exceptionValue)
{
EvalExecutable* evalExecutable = tryGet(inStrictContext, evalSource, scope);
if (!evalExecutable)
- evalExecutable = getSlow(exec, owner, inStrictContext, evalSource, scope, exceptionValue);
+ evalExecutable = getSlow(exec, codeCache, owner, inStrictContext, evalSource, scope, exceptionValue);
return evalExecutable;
}
diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
index e619a0376..dca9c5126 100644
--- a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
+++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
@@ -113,7 +113,8 @@ bool ExecutionCounter::hasCrossedThreshold(CodeBlock* codeBlock) const
double modifiedThreshold = applyMemoryUsageHeuristics(m_activeThreshold, codeBlock);
return static_cast<double>(m_totalCount) + m_counter >=
- modifiedThreshold - static_cast<double>(m_activeThreshold) / 2;
+ modifiedThreshold - static_cast<double>(
+ std::min(m_activeThreshold, Options::maximumExecutionCountsBetweenCheckpoints())) / 2;
}
bool ExecutionCounter::setThreshold(CodeBlock* codeBlock)
diff --git a/Source/JavaScriptCore/bytecode/ExitKind.cpp b/Source/JavaScriptCore/bytecode/ExitKind.cpp
new file mode 100644
index 000000000..a8d904585
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ExitKind.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ExitKind.h"
+
+#include <wtf/Assertions.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+const char* exitKindToString(ExitKind kind)
+{
+ switch (kind) {
+ case ExitKindUnset:
+ return "Unset";
+ case BadType:
+ return "BadType";
+ case BadFunction:
+ return "BadFunction";
+ case BadExecutable:
+ return "BadExecutable";
+ case BadCache:
+ return "BadCache";
+ case BadWeakConstantCache:
+ return "BadWeakConstantCache";
+ case BadIndexingType:
+ return "BadIndexingType";
+ case Overflow:
+ return "Overflow";
+ case NegativeZero:
+ return "NegativeZero";
+ case StoreToHole:
+ return "StoreToHole";
+ case LoadFromHole:
+ return "LoadFromHole";
+ case OutOfBounds:
+ return "OutOfBounds";
+ case StoreToHoleOrOutOfBounds:
+ return "StoreToHoleOrOutOfBounds";
+ case InadequateCoverage:
+ return "InadequateCoverage";
+ case ArgumentsEscaped:
+ return "ArgumentsEscaped";
+ case NotStringObject:
+ return "NotStringObject";
+ case Uncountable:
+ return "Uncountable";
+ case UncountableWatchpoint:
+ return "UncountableWatchpoint";
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return "Unknown";
+ }
+}
+
+bool exitKindIsCountable(ExitKind kind)
+{
+ switch (kind) {
+ case ExitKindUnset:
+ RELEASE_ASSERT_NOT_REACHED();
+ case BadType:
+ case Uncountable:
+ case UncountableWatchpoint:
+ case LoadFromHole: // Already counted directly by the baseline JIT.
+ case StoreToHole: // Already counted directly by the baseline JIT.
+ case OutOfBounds: // Already counted directly by the baseline JIT.
+ case StoreToHoleOrOutOfBounds: // Already counted directly by the baseline JIT.
+ return false;
+ default:
+ return true;
+ }
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::ExitKind kind)
+{
+ out.print(exitKindToString(kind));
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/bytecode/ExitKind.h b/Source/JavaScriptCore/bytecode/ExitKind.h
new file mode 100644
index 000000000..af918ace3
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ExitKind.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ExitKind_h
+#define ExitKind_h
+
+namespace JSC {
+
+enum ExitKind {
+ ExitKindUnset,
+ BadType, // We exited because a type prediction was wrong.
+ BadFunction, // We exited because we made an incorrect assumption about what function we would see.
+ BadExecutable, // We exited because we made an incorrect assumption about what executable we would see.
+ BadCache, // We exited because an inline cache was wrong.
+ BadWeakConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong.
+ BadIndexingType, // We exited because an indexing type was wrong.
+ Overflow, // We exited because of overflow.
+ NegativeZero, // We exited because we encountered negative zero.
+ StoreToHole, // We had a store to a hole.
+ LoadFromHole, // We had a load from a hole.
+ OutOfBounds, // We had an out-of-bounds access to an array.
+ StoreToHoleOrOutOfBounds, // We're simultaneously speculating that we're in bounds and not accessing a hole, and one of those things didn't pan out.
+ InadequateCoverage, // We exited because we ended up in code that didn't have profiling coverage.
+ ArgumentsEscaped, // We exited because arguments escaped but we didn't expect them to.
+ NotStringObject, // We exited because we shouldn't have attempted to optimize string object access.
+ Uncountable, // We exited for none of the above reasons, and we should not count it. Most uses of this should be viewed as a FIXME.
+ UncountableWatchpoint, // We exited because of a watchpoint, which isn't counted because watchpoints do tracking themselves.
+ WatchdogTimerFired // We exited because we need to service the watchdog timer.
+};
+
+const char* exitKindToString(ExitKind);
+bool exitKindIsCountable(ExitKind);
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::ExitKind);
+
+} // namespace WTF
+
+#endif // ExitKind_h
+
diff --git a/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h b/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h
index f4949a686..855738aec 100644
--- a/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h
+++ b/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,14 +31,79 @@
namespace JSC {
struct ExpressionRangeInfo {
+ // Line and column values are encoded in 1 of 3 modes depending on the size
+ // of their values. These modes are:
+ //
+ // 1. FatLine: 22-bit line, 8-bit column.
+ // 2. FatColumn: 8-bit line, 22-bit column.
+ // 3. FatLineAndColumn: 32-bit line, 32-bit column.
+ //
+ // For the first 2 modes, the line and column will be encoded in the 30-bit
+ // position field in the ExpressionRangeInfo. For the FatLineAndColumn mode,
+ // the position field will hold an index into a FatPosition vector which
+ // holds the FatPosition records with the full 32-bit line and column values.
+
+ enum {
+ FatLineMode,
+ FatColumnMode,
+ FatLineAndColumnMode
+ };
+
+ struct FatPosition {
+ uint32_t line;
+ uint32_t column;
+ };
+
+ enum {
+ FatLineModeLineShift = 8,
+ FatLineModeLineMask = (1 << 22) - 1,
+ FatLineModeColumnMask = (1 << 8) - 1,
+ FatColumnModeLineShift = 22,
+ FatColumnModeLineMask = (1 << 8) - 1,
+ FatColumnModeColumnMask = (1 << 22) - 1
+ };
+
enum {
MaxOffset = (1 << 7) - 1,
- MaxDivot = (1 << 25) - 1
+ MaxDivot = (1 << 25) - 1,
+ MaxFatLineModeLine = (1 << 22) - 1,
+ MaxFatLineModeColumn = (1 << 8) - 1,
+ MaxFatColumnModeLine = (1 << 8) - 1,
+ MaxFatColumnModeColumn = (1 << 22) - 1
};
+
+ void encodeFatLineMode(unsigned line, unsigned column)
+ {
+ ASSERT(line <= MaxFatLineModeLine);
+ ASSERT(column <= MaxFatLineModeColumn);
+ position = ((line & FatLineModeLineMask) << FatLineModeLineShift | (column & FatLineModeColumnMask));
+ }
+
+ void encodeFatColumnMode(unsigned line, unsigned column)
+ {
+ ASSERT(line <= MaxFatColumnModeLine);
+ ASSERT(column <= MaxFatColumnModeColumn);
+ position = ((line & FatColumnModeLineMask) << FatColumnModeLineShift | (column & FatColumnModeColumnMask));
+ }
+
+ void decodeFatLineMode(unsigned& line, unsigned& column)
+ {
+ line = (position >> FatLineModeLineShift) & FatLineModeLineMask;
+ column = position & FatLineModeColumnMask;
+ }
+
+ void decodeFatColumnMode(unsigned& line, unsigned& column)
+ {
+ line = (position >> FatColumnModeLineShift) & FatColumnModeLineMask;
+ column = position & FatColumnModeColumnMask;
+ }
+
uint32_t instructionOffset : 25;
- uint32_t divotPoint : 25;
uint32_t startOffset : 7;
+ uint32_t divotPoint : 25;
uint32_t endOffset : 7;
+ uint32_t mode : 2;
+ uint32_t position : 30;
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
index d17c17325..db4aa9b99 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
@@ -30,6 +30,7 @@
#include "JSScope.h"
#include "LLIntData.h"
#include "LowLevelInterpreter.h"
+#include "Operations.h"
namespace JSC {
@@ -51,7 +52,7 @@ GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned
unsigned attributesIgnored;
JSCell* specificValue;
PropertyOffset offset = structure->get(
- *profiledBlock->globalData(), ident, attributesIgnored, specificValue);
+ *profiledBlock->vm(), ident, attributesIgnored, specificValue);
if (structure->isDictionary())
specificValue = 0;
if (!isValidOffset(offset))
@@ -92,7 +93,7 @@ void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBl
JSCell* specificValue;
result.m_offset = currentStructure->get(
- *profiledBlock->globalData(), ident, attributesIgnored, specificValue);
+ *profiledBlock->vm(), ident, attributesIgnored, specificValue);
if (currentStructure->isDictionary())
specificValue = 0;
if (!isValidOffset(result.m_offset))
@@ -124,6 +125,9 @@ GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec
if (!stubInfo.seen)
return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
+ if (stubInfo.resetByGC)
+ return GetByIdStatus(TakesSlowPath, true);
+
PolymorphicAccessStructureList* list;
int listSize;
switch (stubInfo.accessType) {
@@ -161,7 +165,7 @@ GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec
unsigned attributesIgnored;
JSCell* specificValue;
result.m_offset = structure->get(
- *profiledBlock->globalData(), ident, attributesIgnored, specificValue);
+ *profiledBlock->vm(), ident, attributesIgnored, specificValue);
if (structure->isDictionary())
specificValue = 0;
@@ -186,7 +190,7 @@ GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec
unsigned attributesIgnored;
JSCell* specificValue;
PropertyOffset myOffset = structure->get(
- *profiledBlock->globalData(), ident, attributesIgnored, specificValue);
+ *profiledBlock->vm(), ident, attributesIgnored, specificValue);
if (structure->isDictionary())
specificValue = 0;
@@ -252,7 +256,7 @@ GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec
#endif // ENABLE(JIT)
}
-GetByIdStatus GetByIdStatus::computeFor(JSGlobalData& globalData, Structure* structure, Identifier& ident)
+GetByIdStatus GetByIdStatus::computeFor(VM& vm, Structure* structure, Identifier& ident)
{
// For now we only handle the super simple self access case. We could handle the
// prototype case in the future.
@@ -267,10 +271,10 @@ GetByIdStatus GetByIdStatus::computeFor(JSGlobalData& globalData, Structure* str
return GetByIdStatus(TakesSlowPath);
GetByIdStatus result;
- result.m_wasSeenInJIT = false; // To my knowledge nobody that uses computeFor(JSGlobalData&, Structure*, Identifier&) reads this field, but I might as well be honest: no, it wasn't seen in the JIT, since I computed it statically.
+ result.m_wasSeenInJIT = false; // To my knowledge nobody that uses computeFor(VM&, Structure*, Identifier&) reads this field, but I might as well be honest: no, it wasn't seen in the JIT, since I computed it statically.
unsigned attributes;
JSCell* specificValue;
- result.m_offset = structure->get(globalData, ident, attributes, specificValue);
+ result.m_offset = structure->get(vm, ident, attributes, specificValue);
if (!isValidOffset(result.m_offset))
return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
if (attributes & Accessor)
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.h b/Source/JavaScriptCore/bytecode/GetByIdStatus.h
index 45d8c0b1f..117766646 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.h
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.h
@@ -72,7 +72,7 @@ public:
}
static GetByIdStatus computeFor(CodeBlock*, unsigned bytecodeIndex, Identifier&);
- static GetByIdStatus computeFor(JSGlobalData&, Structure*, Identifier&);
+ static GetByIdStatus computeFor(VM&, Structure*, Identifier&);
State state() const { return m_state; }
diff --git a/Source/JavaScriptCore/bytecode/Instruction.h b/Source/JavaScriptCore/bytecode/Instruction.h
index 50b80e03c..988b1ddf1 100644
--- a/Source/JavaScriptCore/bytecode/Instruction.h
+++ b/Source/JavaScriptCore/bytecode/Instruction.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,206 +29,108 @@
#ifndef Instruction_h
#define Instruction_h
-#include "JITStubRoutine.h"
#include "MacroAssembler.h"
#include "Opcode.h"
#include "PropertySlot.h"
+#include "ResolveOperation.h"
#include "SpecialPointer.h"
#include "Structure.h"
#include "StructureChain.h"
#include <wtf/VectorTraits.h>
-#define POLYMORPHIC_LIST_CACHE_SIZE 8
-
namespace JSC {
- // *Sigh*, If the JIT is enabled we need to track the stubRountine (of type CodeLocationLabel),
- // If the JIT is not in use we don't actually need the variable (that said, if the JIT is not in use we don't
- // curently actually use PolymorphicAccessStructureLists, which we should). Anyway, this seems like the best
- // solution for now - will need to something smarter if/when we actually want mixed-mode operation.
-
- class ArrayAllocationProfile;
- class ArrayProfile;
- class JSCell;
- class Structure;
- class StructureChain;
- struct LLIntCallLinkInfo;
- struct ValueProfile;
-
-#if ENABLE(JIT)
- // Structure used by op_get_by_id_self_list and op_get_by_id_proto_list instruction to hold data off the main opcode stream.
- struct PolymorphicAccessStructureList {
- WTF_MAKE_FAST_ALLOCATED;
- public:
- struct PolymorphicStubInfo {
- bool isChain;
- bool isDirect;
- RefPtr<JITStubRoutine> stubRoutine;
- WriteBarrier<Structure> base;
- union {
- WriteBarrierBase<Structure> proto;
- WriteBarrierBase<StructureChain> chain;
- } u;
-
- PolymorphicStubInfo()
- {
- u.proto.clear();
- }
-
- void set(JSGlobalData& globalData, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, bool isDirect)
- {
- stubRoutine = _stubRoutine;
- base.set(globalData, owner, _base);
- u.proto.clear();
- isChain = false;
- this->isDirect = isDirect;
- }
-
- void set(JSGlobalData& globalData, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, Structure* _proto, bool isDirect)
- {
- stubRoutine = _stubRoutine;
- base.set(globalData, owner, _base);
- u.proto.set(globalData, owner, _proto);
- isChain = false;
- this->isDirect = isDirect;
- }
-
- void set(JSGlobalData& globalData, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, StructureChain* _chain, bool isDirect)
- {
- stubRoutine = _stubRoutine;
- base.set(globalData, owner, _base);
- u.chain.set(globalData, owner, _chain);
- isChain = true;
- this->isDirect = isDirect;
- }
- } list[POLYMORPHIC_LIST_CACHE_SIZE];
+class ArrayAllocationProfile;
+class ArrayProfile;
+class ObjectAllocationProfile;
+struct LLIntCallLinkInfo;
+struct ValueProfile;
+
+struct Instruction {
+ Instruction()
+ {
+ u.jsCell.clear();
+ }
- PolymorphicAccessStructureList()
- {
- }
-
- PolymorphicAccessStructureList(JSGlobalData& globalData, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, bool isDirect)
- {
- list[0].set(globalData, owner, stubRoutine, firstBase, isDirect);
- }
-
- PolymorphicAccessStructureList(JSGlobalData& globalData, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, Structure* firstProto, bool isDirect)
- {
- list[0].set(globalData, owner, stubRoutine, firstBase, firstProto, isDirect);
- }
-
- PolymorphicAccessStructureList(JSGlobalData& globalData, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, StructureChain* firstChain, bool isDirect)
- {
- list[0].set(globalData, owner, stubRoutine, firstBase, firstChain, isDirect);
- }
-
- bool visitWeak(int count)
- {
- for (int i = 0; i < count; ++i) {
- PolymorphicStubInfo& info = list[i];
- if (!info.base) {
- // We're being marked during initialisation of an entry
- ASSERT(!info.u.proto);
- continue;
- }
-
- if (!Heap::isMarked(info.base.get()))
- return false;
- if (info.u.proto && !info.isChain
- && !Heap::isMarked(info.u.proto.get()))
- return false;
- if (info.u.chain && info.isChain
- && !Heap::isMarked(info.u.chain.get()))
- return false;
- }
-
- return true;
- }
- };
-
-#endif
-
- struct Instruction {
- Instruction()
- {
- u.jsCell.clear();
- }
-
- Instruction(Opcode opcode)
- {
+ Instruction(Opcode opcode)
+ {
#if !ENABLE(COMPUTED_GOTO_OPCODES)
- // We have to initialize one of the pointer members to ensure that
- // the entire struct is initialized, when opcode is not a pointer.
- u.jsCell.clear();
+ // We have to initialize one of the pointer members to ensure that
+ // the entire struct is initialized, when opcode is not a pointer.
+ u.jsCell.clear();
#endif
- u.opcode = opcode;
- }
-
- Instruction(int operand)
- {
- // We have to initialize one of the pointer members to ensure that
- // the entire struct is initialized in 64-bit.
- u.jsCell.clear();
- u.operand = operand;
- }
-
- Instruction(JSGlobalData& globalData, JSCell* owner, Structure* structure)
- {
- u.structure.clear();
- u.structure.set(globalData, owner, structure);
- }
- Instruction(JSGlobalData& globalData, JSCell* owner, StructureChain* structureChain)
- {
- u.structureChain.clear();
- u.structureChain.set(globalData, owner, structureChain);
- }
- Instruction(JSGlobalData& globalData, JSCell* owner, JSCell* jsCell)
- {
- u.jsCell.clear();
- u.jsCell.set(globalData, owner, jsCell);
- }
-
- Instruction(PropertySlot::GetValueFunc getterFunc) { u.getterFunc = getterFunc; }
+ u.opcode = opcode;
+ }
+
+ Instruction(int operand)
+ {
+ // We have to initialize one of the pointer members to ensure that
+ // the entire struct is initialized in 64-bit.
+ u.jsCell.clear();
+ u.operand = operand;
+ }
+
+ Instruction(VM& vm, JSCell* owner, Structure* structure)
+ {
+ u.structure.clear();
+ u.structure.set(vm, owner, structure);
+ }
+ Instruction(VM& vm, JSCell* owner, StructureChain* structureChain)
+ {
+ u.structureChain.clear();
+ u.structureChain.set(vm, owner, structureChain);
+ }
+ Instruction(VM& vm, JSCell* owner, JSCell* jsCell)
+ {
+ u.jsCell.clear();
+ u.jsCell.set(vm, owner, jsCell);
+ }
+
+ Instruction(PropertySlot::GetValueFunc getterFunc) { u.getterFunc = getterFunc; }
- Instruction(LLIntCallLinkInfo* callLinkInfo) { u.callLinkInfo = callLinkInfo; }
+ Instruction(LLIntCallLinkInfo* callLinkInfo) { u.callLinkInfo = callLinkInfo; }
- Instruction(ValueProfile* profile) { u.profile = profile; }
- Instruction(ArrayProfile* profile) { u.arrayProfile = profile; }
- Instruction(ArrayAllocationProfile* profile) { u.arrayAllocationProfile = profile; }
+ Instruction(ValueProfile* profile) { u.profile = profile; }
+ Instruction(ArrayProfile* profile) { u.arrayProfile = profile; }
+ Instruction(ArrayAllocationProfile* profile) { u.arrayAllocationProfile = profile; }
+ Instruction(ObjectAllocationProfile* profile) { u.objectAllocationProfile = profile; }
- Instruction(WriteBarrier<Unknown>* registerPointer) { u.registerPointer = registerPointer; }
+ Instruction(WriteBarrier<Unknown>* registerPointer) { u.registerPointer = registerPointer; }
- Instruction(Special::Pointer pointer) { u.specialPointer = pointer; }
+ Instruction(Special::Pointer pointer) { u.specialPointer = pointer; }
- Instruction(bool* predicatePointer) { u.predicatePointer = predicatePointer; }
-
- union {
- Opcode opcode;
- int operand;
- WriteBarrierBase<Structure> structure;
- WriteBarrierBase<StructureChain> structureChain;
- WriteBarrierBase<JSCell> jsCell;
- WriteBarrier<Unknown>* registerPointer;
- Special::Pointer specialPointer;
- PropertySlot::GetValueFunc getterFunc;
- LLIntCallLinkInfo* callLinkInfo;
- ValueProfile* profile;
- ArrayProfile* arrayProfile;
- ArrayAllocationProfile* arrayAllocationProfile;
- void* pointer;
- bool* predicatePointer;
- } u;
+ Instruction(bool* predicatePointer) { u.predicatePointer = predicatePointer; }
+
+ union {
+ Opcode opcode;
+ int operand;
+ WriteBarrierBase<Structure> structure;
+ WriteBarrierBase<StructureChain> structureChain;
+ WriteBarrierBase<JSCell> jsCell;
+ WriteBarrier<Unknown>* registerPointer;
+ Special::Pointer specialPointer;
+ PropertySlot::GetValueFunc getterFunc;
+ LLIntCallLinkInfo* callLinkInfo;
+ ValueProfile* profile;
+ ArrayProfile* arrayProfile;
+ ArrayAllocationProfile* arrayAllocationProfile;
+ ObjectAllocationProfile* objectAllocationProfile;
+ void* pointer;
+ bool* predicatePointer;
+ ResolveOperations* resolveOperations;
+ PutToBaseOperation* putToBaseOperation;
+ } u;
- private:
- Instruction(StructureChain*);
- Instruction(Structure*);
- };
+private:
+ Instruction(StructureChain*);
+ Instruction(Structure*);
+};
} // namespace JSC
namespace WTF {
- template<> struct VectorTraits<JSC::Instruction> : VectorTraitsBase<true, JSC::Instruction> { };
+template<> struct VectorTraits<JSC::Instruction> : VectorTraitsBase<true, JSC::Instruction> { };
} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
index f923e4a28..97b8f3bcd 100644
--- a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
@@ -28,6 +28,8 @@
#if ENABLE(VALUE_PROFILER)
+#include "Operations.h"
+
namespace JSC {
CompressedLazyOperandValueProfileHolder::CompressedLazyOperandValueProfileHolder() { }
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
index 857ed9c87..a0f301a0c 100644
--- a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
@@ -58,7 +58,7 @@ EncodedJSValue* MethodOfGettingAValueProfile::getSpecFailBucket(unsigned index)
u.lazyOperand.bytecodeOffset, u.lazyOperand.operand))->specFailBucket(index);
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
return 0;
}
}
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
index 0f5c2be7b..c6fe6c5f0 100644
--- a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
@@ -34,7 +34,7 @@
// these #if's will disappear...
#if ENABLE(DFG_JIT)
-#include "JSValue.h"
+#include "JSCJSValue.h"
namespace JSC {
diff --git a/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h b/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h
new file mode 100644
index 000000000..9a9db0bc7
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ObjectAllocationProfile_h
+#define ObjectAllocationProfile_h
+
+#include "VM.h"
+#include "JSGlobalObject.h"
+#include "ObjectPrototype.h"
+#include "SlotVisitor.h"
+#include "WriteBarrier.h"
+
+namespace JSC {
+
+class ObjectAllocationProfile {
+ friend class LLIntOffsetsExtractor;
+public:
+ static ptrdiff_t offsetOfAllocator() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_allocator); }
+ static ptrdiff_t offsetOfStructure() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_structure); }
+
+ ObjectAllocationProfile()
+ : m_allocator(0)
+ {
+ }
+
+ bool isNull() { return !m_allocator; }
+
+ void initialize(VM& vm, JSCell* owner, JSObject* prototype, unsigned inferredInlineCapacity)
+ {
+ ASSERT(!m_allocator);
+ ASSERT(!m_structure);
+
+ unsigned inlineCapacity = 0;
+ if (inferredInlineCapacity < JSFinalObject::defaultInlineCapacity()) {
+ // Try to shrink the object based on static analysis.
+ inferredInlineCapacity += possibleDefaultPropertyCount(vm, prototype);
+
+ if (!inferredInlineCapacity) {
+ // Empty objects are rare, so most likely the static analyzer just didn't
+ // see the real initializer function. This can happen with helper functions.
+ inferredInlineCapacity = JSFinalObject::defaultInlineCapacity();
+ } else if (inferredInlineCapacity > JSFinalObject::defaultInlineCapacity()) {
+ // Default properties are weak guesses, so don't allow them to turn a small
+ // object into a large object.
+ inferredInlineCapacity = JSFinalObject::defaultInlineCapacity();
+ }
+
+ inlineCapacity = inferredInlineCapacity;
+ ASSERT(inlineCapacity < JSFinalObject::maxInlineCapacity());
+ } else {
+ // Normal or large object.
+ inlineCapacity = inferredInlineCapacity;
+ if (inlineCapacity > JSFinalObject::maxInlineCapacity())
+ inlineCapacity = JSFinalObject::maxInlineCapacity();
+ }
+
+ ASSERT(inlineCapacity > 0);
+ ASSERT(inlineCapacity <= JSFinalObject::maxInlineCapacity());
+
+ size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
+ MarkedAllocator* allocator = &vm.heap.allocatorForObjectWithoutDestructor(allocationSize);
+ ASSERT(allocator->cellSize());
+
+ // Take advantage of extra inline capacity available in the size class.
+ size_t slop = (allocator->cellSize() - allocationSize) / sizeof(WriteBarrier<Unknown>);
+ inlineCapacity += slop;
+ if (inlineCapacity > JSFinalObject::maxInlineCapacity())
+ inlineCapacity = JSFinalObject::maxInlineCapacity();
+
+ m_allocator = allocator;
+ m_structure.set(vm, owner,
+ vm.prototypeMap.emptyObjectStructureForPrototype(prototype, inlineCapacity));
+ }
+
+ Structure* structure() { return m_structure.get(); }
+ unsigned inlineCapacity() { return m_structure->inlineCapacity(); }
+
+ void clear()
+ {
+ m_allocator = 0;
+ m_structure.clear();
+ ASSERT(isNull());
+ }
+
+ void visitAggregate(SlotVisitor& visitor)
+ {
+ visitor.append(&m_structure);
+ }
+
+private:
+
+ unsigned possibleDefaultPropertyCount(VM& vm, JSObject* prototype)
+ {
+ if (prototype == prototype->globalObject()->objectPrototype())
+ return 0;
+
+ size_t count = 0;
+ PropertyNameArray propertyNameArray(&vm);
+ prototype->structure()->getPropertyNamesFromStructure(vm, propertyNameArray, ExcludeDontEnumProperties);
+ PropertyNameArrayData::PropertyNameVector& propertyNameVector = propertyNameArray.data()->propertyNameVector();
+ for (size_t i = 0; i < propertyNameVector.size(); ++i) {
+ JSValue value = prototype->getDirect(vm, propertyNameVector[i]);
+
+ // Functions are common, and are usually class-level objects that are not overridden.
+ if (jsDynamicCast<JSFunction*>(value))
+ continue;
+
+ ++count;
+
+ }
+ return count;
+ }
+
+ MarkedAllocator* m_allocator; // Precomputed to make things easier for generated code.
+ WriteBarrier<Structure> m_structure;
+};
+
+} // namespace JSC
+
+#endif // ObjectAllocationProfile_h
diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h
index 5fe28bc09..72ac51b4f 100644
--- a/Source/JavaScriptCore/bytecode/Opcode.h
+++ b/Source/JavaScriptCore/bytecode/Opcode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
*
* Redistribution and use in source and binary forms, with or without
@@ -39,260 +39,253 @@
namespace JSC {
- #define FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, extension__) \
- macro(op_enter, 1) \
- macro(op_create_activation, 2) \
- macro(op_init_lazy_reg, 2) \
- macro(op_create_arguments, 2) \
- macro(op_create_this, 3) \
- macro(op_get_callee, 3) \
- macro(op_convert_this, 3) \
- \
- macro(op_new_object, 2) \
- macro(op_new_array, 5) \
- macro(op_new_array_with_size, 4) \
- macro(op_new_array_buffer, 5) \
- macro(op_new_regexp, 3) \
- macro(op_mov, 3) \
- \
- macro(op_not, 3) \
- macro(op_eq, 4) \
- macro(op_eq_null, 3) \
- macro(op_neq, 4) \
- macro(op_neq_null, 3) \
- macro(op_stricteq, 4) \
- macro(op_nstricteq, 4) \
- macro(op_less, 4) \
- macro(op_lesseq, 4) \
- macro(op_greater, 4) \
- macro(op_greatereq, 4) \
- \
- macro(op_pre_inc, 2) \
- macro(op_pre_dec, 2) \
- macro(op_post_inc, 3) \
- macro(op_post_dec, 3) \
- macro(op_to_jsnumber, 3) \
- macro(op_negate, 3) \
- macro(op_add, 5) \
- macro(op_mul, 5) \
- macro(op_div, 5) \
- macro(op_mod, 4) \
- macro(op_sub, 5) \
- \
- macro(op_lshift, 4) \
- macro(op_rshift, 4) \
- macro(op_urshift, 4) \
- macro(op_bitand, 5) \
- macro(op_bitxor, 5) \
- macro(op_bitor, 5) \
- \
- macro(op_check_has_instance, 5) \
- macro(op_instanceof, 4) \
- macro(op_typeof, 3) \
- macro(op_is_undefined, 3) \
- macro(op_is_boolean, 3) \
- macro(op_is_number, 3) \
- macro(op_is_string, 3) \
- macro(op_is_object, 3) \
- macro(op_is_function, 3) \
- macro(op_in, 4) \
- \
- macro(op_resolve, 5) /* has value profiling */ \
- macro(op_resolve_global_property, 5) /* has value profiling */ \
- macro(op_resolve_global_var, 5) /* has value profiling */ \
- macro(op_resolve_scoped_var, 5) /* has value profiling */ \
- macro(op_resolve_scoped_var_on_top_scope, 5) /* has value profiling */ \
- macro(op_resolve_scoped_var_with_top_scope_check, 5) /* has value profiling */ \
- \
- macro(op_resolve_base_to_global, 7) /* has value profiling */ \
- macro(op_resolve_base_to_global_dynamic, 7) /* has value profiling */ \
- macro(op_resolve_base_to_scope, 7) /* has value profiling */ \
- macro(op_resolve_base_to_scope_with_top_scope_check, 7) /* has value profiling */ \
- macro(op_resolve_base, 7) /* has value profiling */ \
- \
- macro(op_ensure_property_exists, 3) \
- \
- macro(op_resolve_with_base, 7) /* has value profiling */ \
- \
- macro(op_resolve_with_this, 6) /* has value profiling */ \
- \
- macro(op_put_to_base, 5) \
- macro(op_put_to_base_variable, 5) \
- \
- macro(op_init_global_const_nop, 5) \
- macro(op_init_global_const, 5) \
- macro(op_init_global_const_check, 5) \
- macro(op_get_by_id, 9) /* has value profiling */ \
- macro(op_get_by_id_out_of_line, 9) /* has value profiling */ \
- macro(op_get_by_id_self, 9) /* has value profiling */ \
- macro(op_get_by_id_proto, 9) /* has value profiling */ \
- macro(op_get_by_id_chain, 9) /* has value profiling */ \
- macro(op_get_by_id_getter_self, 9) /* has value profiling */ \
- macro(op_get_by_id_getter_proto, 9) /* has value profiling */ \
- macro(op_get_by_id_getter_chain, 9) /* has value profiling */ \
- macro(op_get_by_id_custom_self, 9) /* has value profiling */ \
- macro(op_get_by_id_custom_proto, 9) /* has value profiling */ \
- macro(op_get_by_id_custom_chain, 9) /* has value profiling */ \
- macro(op_get_by_id_generic, 9) /* has value profiling */ \
- macro(op_get_array_length, 9) /* has value profiling */ \
- macro(op_get_string_length, 9) /* has value profiling */ \
- macro(op_get_arguments_length, 4) \
- macro(op_put_by_id, 9) \
- macro(op_put_by_id_out_of_line, 9) \
- macro(op_put_by_id_transition, 9) \
- macro(op_put_by_id_transition_direct, 9) \
- macro(op_put_by_id_transition_direct_out_of_line, 9) \
- macro(op_put_by_id_transition_normal, 9) \
- macro(op_put_by_id_transition_normal_out_of_line, 9) \
- macro(op_put_by_id_replace, 9) \
- macro(op_put_by_id_generic, 9) \
- macro(op_del_by_id, 4) \
- macro(op_get_by_val, 6) /* has value profiling */ \
- macro(op_get_argument_by_val, 6) /* must be the same size as op_get_by_val */ \
- macro(op_get_by_pname, 7) \
- macro(op_put_by_val, 5) \
- macro(op_del_by_val, 4) \
- macro(op_put_by_index, 4) \
- macro(op_put_getter_setter, 5) \
- \
- macro(op_jmp, 2) \
- macro(op_jtrue, 3) \
- macro(op_jfalse, 3) \
- macro(op_jeq_null, 3) \
- macro(op_jneq_null, 3) \
- macro(op_jneq_ptr, 4) \
- macro(op_jless, 4) \
- macro(op_jlesseq, 4) \
- macro(op_jgreater, 4) \
- macro(op_jgreatereq, 4) \
- macro(op_jnless, 4) \
- macro(op_jnlesseq, 4) \
- macro(op_jngreater, 4) \
- macro(op_jngreatereq, 4) \
- macro(op_jmp_scopes, 3) \
- macro(op_loop, 2) \
- macro(op_loop_if_true, 3) \
- macro(op_loop_if_false, 3) \
- macro(op_loop_if_less, 4) \
- macro(op_loop_if_lesseq, 4) \
- macro(op_loop_if_greater, 4) \
- macro(op_loop_if_greatereq, 4) \
- macro(op_loop_hint, 1) \
- macro(op_switch_imm, 4) \
- macro(op_switch_char, 4) \
- macro(op_switch_string, 4) \
- \
- macro(op_new_func, 4) \
- macro(op_new_func_exp, 3) \
- macro(op_call, 6) \
- macro(op_call_eval, 6) \
- macro(op_call_varargs, 5) \
- macro(op_tear_off_activation, 2) \
- macro(op_tear_off_arguments, 3) \
- macro(op_ret, 2) \
- macro(op_call_put_result, 3) /* has value profiling */ \
- macro(op_ret_object_or_this, 3) \
- \
- macro(op_construct, 6) \
- macro(op_strcat, 4) \
- macro(op_to_primitive, 3) \
- \
- macro(op_get_pnames, 6) \
- macro(op_next_pname, 7) \
- \
- macro(op_push_with_scope, 2) \
- macro(op_pop_scope, 1) \
- macro(op_push_name_scope, 4) \
- \
- macro(op_catch, 2) \
- macro(op_throw, 2) \
- macro(op_throw_static_error, 3) \
- \
- macro(op_debug, 5) \
- macro(op_profile_will_call, 2) \
- macro(op_profile_did_call, 2) \
- \
- extension__ \
- \
- macro(op_end, 2) // end must be the last opcode in the list
+#define FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, extension__) \
+ macro(op_enter, 1) \
+ macro(op_create_activation, 2) \
+ macro(op_init_lazy_reg, 2) \
+ macro(op_create_arguments, 2) \
+ macro(op_create_this, 4) \
+ macro(op_get_callee, 3) \
+ macro(op_convert_this, 3) \
+ \
+ macro(op_new_object, 4) \
+ macro(op_new_array, 5) \
+ macro(op_new_array_with_size, 4) \
+ macro(op_new_array_buffer, 5) \
+ macro(op_new_regexp, 3) \
+ macro(op_mov, 3) \
+ \
+ macro(op_not, 3) \
+ macro(op_eq, 4) \
+ macro(op_eq_null, 3) \
+ macro(op_neq, 4) \
+ macro(op_neq_null, 3) \
+ macro(op_stricteq, 4) \
+ macro(op_nstricteq, 4) \
+ macro(op_less, 4) \
+ macro(op_lesseq, 4) \
+ macro(op_greater, 4) \
+ macro(op_greatereq, 4) \
+ \
+ macro(op_inc, 2) \
+ macro(op_dec, 2) \
+ macro(op_to_number, 3) \
+ macro(op_negate, 3) \
+ macro(op_add, 5) \
+ macro(op_mul, 5) \
+ macro(op_div, 5) \
+ macro(op_mod, 4) \
+ macro(op_sub, 5) \
+ \
+ macro(op_lshift, 4) \
+ macro(op_rshift, 4) \
+ macro(op_urshift, 4) \
+ macro(op_bitand, 5) \
+ macro(op_bitxor, 5) \
+ macro(op_bitor, 5) \
+ \
+ macro(op_check_has_instance, 5) \
+ macro(op_instanceof, 4) \
+ macro(op_typeof, 3) \
+ macro(op_is_undefined, 3) \
+ macro(op_is_boolean, 3) \
+ macro(op_is_number, 3) \
+ macro(op_is_string, 3) \
+ macro(op_is_object, 3) \
+ macro(op_is_function, 3) \
+ macro(op_in, 4) \
+ \
+ macro(op_get_scoped_var, 5) /* has value profiling */ \
+ macro(op_put_scoped_var, 4) \
+ \
+ macro(op_resolve, 5) /* has value profiling */ \
+ macro(op_resolve_global_property, 5) /* has value profiling */ \
+ macro(op_resolve_global_var, 5) /* has value profiling */ \
+ macro(op_resolve_scoped_var, 5) /* has value profiling */ \
+ macro(op_resolve_scoped_var_on_top_scope, 5) /* has value profiling */ \
+ macro(op_resolve_scoped_var_with_top_scope_check, 5) /* has value profiling */ \
+ \
+ macro(op_resolve_base_to_global, 7) /* has value profiling */ \
+ macro(op_resolve_base_to_global_dynamic, 7) /* has value profiling */ \
+ macro(op_resolve_base_to_scope, 7) /* has value profiling */ \
+ macro(op_resolve_base_to_scope_with_top_scope_check, 7) /* has value profiling */ \
+ macro(op_resolve_base, 7) /* has value profiling */ \
+ \
+ macro(op_resolve_with_base, 7) /* has value profiling */ \
+ \
+ macro(op_resolve_with_this, 6) /* has value profiling */ \
+ \
+ macro(op_put_to_base, 5) \
+ macro(op_put_to_base_variable, 5) \
+ \
+ macro(op_init_global_const_nop, 5) \
+ macro(op_init_global_const, 5) \
+ macro(op_init_global_const_check, 5) \
+ macro(op_get_by_id, 9) /* has value profiling */ \
+ macro(op_get_by_id_out_of_line, 9) /* has value profiling */ \
+ macro(op_get_by_id_self, 9) /* has value profiling */ \
+ macro(op_get_by_id_proto, 9) /* has value profiling */ \
+ macro(op_get_by_id_chain, 9) /* has value profiling */ \
+ macro(op_get_by_id_getter_self, 9) /* has value profiling */ \
+ macro(op_get_by_id_getter_proto, 9) /* has value profiling */ \
+ macro(op_get_by_id_getter_chain, 9) /* has value profiling */ \
+ macro(op_get_by_id_custom_self, 9) /* has value profiling */ \
+ macro(op_get_by_id_custom_proto, 9) /* has value profiling */ \
+ macro(op_get_by_id_custom_chain, 9) /* has value profiling */ \
+ macro(op_get_by_id_generic, 9) /* has value profiling */ \
+ macro(op_get_array_length, 9) /* has value profiling */ \
+ macro(op_get_string_length, 9) /* has value profiling */ \
+ macro(op_get_arguments_length, 4) \
+ macro(op_put_by_id, 9) \
+ macro(op_put_by_id_out_of_line, 9) \
+ macro(op_put_by_id_transition, 9) \
+ macro(op_put_by_id_transition_direct, 9) \
+ macro(op_put_by_id_transition_direct_out_of_line, 9) \
+ macro(op_put_by_id_transition_normal, 9) \
+ macro(op_put_by_id_transition_normal_out_of_line, 9) \
+ macro(op_put_by_id_replace, 9) \
+ macro(op_put_by_id_generic, 9) \
+ macro(op_del_by_id, 4) \
+ macro(op_get_by_val, 6) /* has value profiling */ \
+ macro(op_get_argument_by_val, 6) /* must be the same size as op_get_by_val */ \
+ macro(op_get_by_pname, 7) \
+ macro(op_put_by_val, 5) \
+ macro(op_del_by_val, 4) \
+ macro(op_put_by_index, 4) \
+ macro(op_put_getter_setter, 5) \
+ \
+ macro(op_jmp, 2) \
+ macro(op_jtrue, 3) \
+ macro(op_jfalse, 3) \
+ macro(op_jeq_null, 3) \
+ macro(op_jneq_null, 3) \
+ macro(op_jneq_ptr, 4) \
+ macro(op_jless, 4) \
+ macro(op_jlesseq, 4) \
+ macro(op_jgreater, 4) \
+ macro(op_jgreatereq, 4) \
+ macro(op_jnless, 4) \
+ macro(op_jnlesseq, 4) \
+ macro(op_jngreater, 4) \
+ macro(op_jngreatereq, 4) \
+ \
+ macro(op_loop_hint, 1) \
+ \
+ macro(op_switch_imm, 4) \
+ macro(op_switch_char, 4) \
+ macro(op_switch_string, 4) \
+ \
+ macro(op_new_func, 4) \
+ macro(op_new_func_exp, 3) \
+ macro(op_call, 6) \
+ macro(op_call_eval, 6) \
+ macro(op_call_varargs, 5) \
+ macro(op_tear_off_activation, 2) \
+ macro(op_tear_off_arguments, 3) \
+ macro(op_ret, 2) \
+ macro(op_call_put_result, 3) /* has value profiling */ \
+ macro(op_ret_object_or_this, 3) \
+ \
+ macro(op_construct, 6) \
+ macro(op_strcat, 4) \
+ macro(op_to_primitive, 3) \
+ \
+ macro(op_get_pnames, 6) \
+ macro(op_next_pname, 7) \
+ \
+ macro(op_push_with_scope, 2) \
+ macro(op_pop_scope, 1) \
+ macro(op_push_name_scope, 4) \
+ \
+ macro(op_catch, 2) \
+ macro(op_throw, 2) \
+ macro(op_throw_static_error, 3) \
+ \
+ macro(op_debug, 5) \
+ macro(op_profile_will_call, 2) \
+ macro(op_profile_did_call, 2) \
+ \
+ extension__ \
+ \
+ macro(op_end, 2) // end must be the last opcode in the list
- #define FOR_EACH_CORE_OPCODE_ID(macro) \
- FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, /* No extension */ )
+#define FOR_EACH_CORE_OPCODE_ID(macro) \
+ FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, /* No extension */ )
- #define FOR_EACH_OPCODE_ID(macro) \
- FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION( \
- macro, \
- FOR_EACH_LLINT_OPCODE_EXTENSION(macro) \
- )
+#define FOR_EACH_OPCODE_ID(macro) \
+ FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION( \
+ macro, \
+ FOR_EACH_LLINT_OPCODE_EXTENSION(macro) \
+ )
- #define OPCODE_ID_ENUM(opcode, length) opcode,
- typedef enum { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) } OpcodeID;
- #undef OPCODE_ID_ENUM
+#define OPCODE_ID_ENUM(opcode, length) opcode,
+ typedef enum { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) } OpcodeID;
+#undef OPCODE_ID_ENUM
- const int maxOpcodeLength = 9;
- const int numOpcodeIDs = op_end + 1;
+const int maxOpcodeLength = 9;
+const int numOpcodeIDs = op_end + 1;
- #define OPCODE_ID_LENGTHS(id, length) const int id##_length = length;
- FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS);
- #undef OPCODE_ID_LENGTHS
-
- #define OPCODE_LENGTH(opcode) opcode##_length
+#define OPCODE_ID_LENGTHS(id, length) const int id##_length = length;
+ FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS);
+#undef OPCODE_ID_LENGTHS
+
+#define OPCODE_LENGTH(opcode) opcode##_length
- #define OPCODE_ID_LENGTH_MAP(opcode, length) length,
- const int opcodeLengths[numOpcodeIDs] = { FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTH_MAP) };
- #undef OPCODE_ID_LENGTH_MAP
+#define OPCODE_ID_LENGTH_MAP(opcode, length) length,
+ const int opcodeLengths[numOpcodeIDs] = { FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTH_MAP) };
+#undef OPCODE_ID_LENGTH_MAP
- #define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= op_end, ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID);
- FOR_EACH_OPCODE_ID(VERIFY_OPCODE_ID);
- #undef VERIFY_OPCODE_ID
+#define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= op_end, ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID);
+ FOR_EACH_OPCODE_ID(VERIFY_OPCODE_ID);
+#undef VERIFY_OPCODE_ID
#if ENABLE(COMPUTED_GOTO_OPCODES)
- typedef void* Opcode;
+typedef void* Opcode;
#else
- typedef OpcodeID Opcode;
+typedef OpcodeID Opcode;
#endif
#define PADDING_STRING " "
#define PADDING_STRING_LENGTH static_cast<unsigned>(strlen(PADDING_STRING))
- extern const char* const opcodeNames[];
+extern const char* const opcodeNames[];
- inline const char* padOpcodeName(OpcodeID op, unsigned width)
- {
- unsigned pad = width - strlen(opcodeNames[op]);
- pad = std::min(pad, PADDING_STRING_LENGTH);
- return PADDING_STRING + PADDING_STRING_LENGTH - pad;
- }
+inline const char* padOpcodeName(OpcodeID op, unsigned width)
+{
+ unsigned pad = width - strlen(opcodeNames[op]);
+ pad = std::min(pad, PADDING_STRING_LENGTH);
+ return PADDING_STRING + PADDING_STRING_LENGTH - pad;
+}
#undef PADDING_STRING_LENGTH
#undef PADDING_STRING
#if ENABLE(OPCODE_STATS)
- struct OpcodeStats {
- OpcodeStats();
- ~OpcodeStats();
- static long long opcodeCounts[numOpcodeIDs];
- static long long opcodePairCounts[numOpcodeIDs][numOpcodeIDs];
- static int lastOpcode;
-
- static void recordInstruction(int opcode);
- static void resetLastInstruction();
- };
+struct OpcodeStats {
+ OpcodeStats();
+ ~OpcodeStats();
+ static long long opcodeCounts[numOpcodeIDs];
+ static long long opcodePairCounts[numOpcodeIDs][numOpcodeIDs];
+ static int lastOpcode;
+
+ static void recordInstruction(int opcode);
+ static void resetLastInstruction();
+};
#endif
- inline size_t opcodeLength(OpcodeID opcode)
- {
- switch (opcode) {
+inline size_t opcodeLength(OpcodeID opcode)
+{
+ switch (opcode) {
#define OPCODE_ID_LENGTHS(id, length) case id: return OPCODE_LENGTH(id);
- FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS)
+ FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS)
#undef OPCODE_ID_LENGTHS
- }
- ASSERT_NOT_REACHED();
- return 0;
}
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/Operands.h b/Source/JavaScriptCore/bytecode/Operands.h
index 20f79ffd1..e7b3e241f 100644
--- a/Source/JavaScriptCore/bytecode/Operands.h
+++ b/Source/JavaScriptCore/bytecode/Operands.h
@@ -46,6 +46,8 @@ struct OperandValueTraits {
static void dump(const T& value, PrintStream& out) { value.dump(out); }
};
+enum OperandKind { ArgumentOperand, LocalOperand };
+
template<typename T, typename Traits = OperandValueTraits<T> >
class Operands {
public:
@@ -66,6 +68,28 @@ public:
T& local(size_t idx) { return m_locals[idx]; }
const T& local(size_t idx) const { return m_locals[idx]; }
+ template<OperandKind operandKind>
+ size_t sizeFor() const
+ {
+ if (operandKind == ArgumentOperand)
+ return numberOfArguments();
+ return numberOfLocals();
+ }
+ template<OperandKind operandKind>
+ T& atFor(size_t idx)
+ {
+ if (operandKind == ArgumentOperand)
+ return argument(idx);
+ return local(idx);
+ }
+ template<OperandKind operandKind>
+ const T& atFor(size_t idx) const
+ {
+ if (operandKind == ArgumentOperand)
+ return argument(idx);
+ return local(idx);
+ }
+
void ensureLocals(size_t size)
{
if (size <= m_locals.size())
@@ -192,15 +216,17 @@ private:
template<typename T, typename Traits>
void dumpOperands(const Operands<T, Traits>& operands, PrintStream& out)
{
- for (size_t argument = 0; argument < operands.numberOfArguments(); ++argument) {
- if (argument)
+ for (size_t argument = operands.numberOfArguments(); argument--;) {
+ if (argument != operands.numberOfArguments() - 1)
out.printf(" ");
+ out.print("arg", argument, ":");
Traits::dump(operands.argument(argument), out);
}
out.printf(" : ");
for (size_t local = 0; local < operands.numberOfLocals(); ++local) {
if (local)
out.printf(" ");
+ out.print("r", local, ":");
Traits::dump(operands.local(local), out);
}
}
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h b/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h
new file mode 100644
index 000000000..d1da89d77
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PolymorphicAccessStructureList_h
+#define PolymorphicAccessStructureList_h
+
+#include "JITStubRoutine.h"
+#include "Structure.h"
+#include "StructureChain.h"
+#include <wtf/Platform.h>
+
+#define POLYMORPHIC_LIST_CACHE_SIZE 8
+
+namespace JSC {
+
+// *Sigh*, If the JIT is enabled we need to track the stubRountine (of type CodeLocationLabel),
+// If the JIT is not in use we don't actually need the variable (that said, if the JIT is not in use we don't
+// curently actually use PolymorphicAccessStructureLists, which we should). Anyway, this seems like the best
+// solution for now - will need to something smarter if/when we actually want mixed-mode operation.
+
+#if ENABLE(JIT)
+// Structure used by op_get_by_id_self_list and op_get_by_id_proto_list instruction to hold data off the main opcode stream.
+struct PolymorphicAccessStructureList {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ struct PolymorphicStubInfo {
+ bool isChain;
+ bool isDirect;
+ RefPtr<JITStubRoutine> stubRoutine;
+ WriteBarrier<Structure> base;
+ union {
+ WriteBarrierBase<Structure> proto;
+ WriteBarrierBase<StructureChain> chain;
+ } u;
+
+ PolymorphicStubInfo()
+ {
+ u.proto.clear();
+ }
+
+ void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, bool isDirect)
+ {
+ stubRoutine = _stubRoutine;
+ base.set(vm, owner, _base);
+ u.proto.clear();
+ isChain = false;
+ this->isDirect = isDirect;
+ }
+
+ void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, Structure* _proto, bool isDirect)
+ {
+ stubRoutine = _stubRoutine;
+ base.set(vm, owner, _base);
+ u.proto.set(vm, owner, _proto);
+ isChain = false;
+ this->isDirect = isDirect;
+ }
+
+ void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, StructureChain* _chain, bool isDirect)
+ {
+ stubRoutine = _stubRoutine;
+ base.set(vm, owner, _base);
+ u.chain.set(vm, owner, _chain);
+ isChain = true;
+ this->isDirect = isDirect;
+ }
+ } list[POLYMORPHIC_LIST_CACHE_SIZE];
+
+ PolymorphicAccessStructureList()
+ {
+ }
+
+ PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, bool isDirect)
+ {
+ list[0].set(vm, owner, stubRoutine, firstBase, isDirect);
+ }
+
+ PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, Structure* firstProto, bool isDirect)
+ {
+ list[0].set(vm, owner, stubRoutine, firstBase, firstProto, isDirect);
+ }
+
+ PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, StructureChain* firstChain, bool isDirect)
+ {
+ list[0].set(vm, owner, stubRoutine, firstBase, firstChain, isDirect);
+ }
+
+ bool visitWeak(int count)
+ {
+ for (int i = 0; i < count; ++i) {
+ PolymorphicStubInfo& info = list[i];
+ if (!info.base) {
+ // We're being marked during initialisation of an entry
+ ASSERT(!info.u.proto);
+ continue;
+ }
+
+ if (!Heap::isMarked(info.base.get()))
+ return false;
+ if (info.u.proto && !info.isChain
+ && !Heap::isMarked(info.u.proto.get()))
+ return false;
+ if (info.u.chain && info.isChain
+ && !Heap::isMarked(info.u.chain.get()))
+ return false;
+ }
+
+ return true;
+ }
+};
+
+#endif // ENABLE(JIT)
+
+} // namespace JSC
+
+#endif // PolymorphicAccessStructureList_h
+
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp
index 3a87567d8..6a6ec8141 100644
--- a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp
+++ b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp
@@ -55,7 +55,7 @@ PutByIdAccess PutByIdAccess::fromStructureStubInfo(
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
return result;
@@ -77,7 +77,7 @@ bool PutByIdAccess::visitWeak() const
return false;
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
return false;
}
return true;
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h
index 4a20b6d1c..6e88e7062 100644
--- a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h
+++ b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h
@@ -55,7 +55,7 @@ public:
}
static PutByIdAccess transition(
- JSGlobalData& globalData,
+ VM& vm,
JSCell* owner,
Structure* oldStructure,
Structure* newStructure,
@@ -64,22 +64,22 @@ public:
{
PutByIdAccess result;
result.m_type = Transition;
- result.m_oldStructure.set(globalData, owner, oldStructure);
- result.m_newStructure.set(globalData, owner, newStructure);
- result.m_chain.set(globalData, owner, chain);
+ result.m_oldStructure.set(vm, owner, oldStructure);
+ result.m_newStructure.set(vm, owner, newStructure);
+ result.m_chain.set(vm, owner, chain);
result.m_stubRoutine = stubRoutine;
return result;
}
static PutByIdAccess replace(
- JSGlobalData& globalData,
+ VM& vm,
JSCell* owner,
Structure* structure,
PassRefPtr<JITStubRoutine> stubRoutine)
{
PutByIdAccess result;
result.m_type = Replace;
- result.m_oldStructure.set(globalData, owner, structure);
+ result.m_oldStructure.set(vm, owner, structure);
result.m_stubRoutine = stubRoutine;
return result;
}
diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp
new file mode 100644
index 000000000..0cdf51a98
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PreciseJumpTargets.h"
+
+namespace JSC {
+
+static void addSimpleSwitchTargets(SimpleJumpTable& jumpTable, unsigned bytecodeOffset, Vector<unsigned, 32>& out)
+{
+ for (unsigned i = jumpTable.branchOffsets.size(); i--;)
+ out.append(bytecodeOffset + jumpTable.branchOffsets[i]);
+}
+
+void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out)
+{
+ ASSERT(out.isEmpty());
+
+ // We will derive a superset of the jump targets that the code block thinks it has.
+ // So, if the code block claims there are none, then we are done.
+ if (!codeBlock->numberOfJumpTargets())
+ return;
+
+ for (unsigned i = codeBlock->numberOfExceptionHandlers(); i--;)
+ out.append(codeBlock->exceptionHandler(i).target);
+
+ Interpreter* interpreter = codeBlock->vm()->interpreter;
+ Instruction* instructionsBegin = codeBlock->instructions().begin();
+ unsigned instructionCount = codeBlock->instructions().size();
+ for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) {
+ OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
+ Instruction* current = instructionsBegin + bytecodeOffset;
+ switch (opcodeID) {
+ case op_jmp:
+ out.append(bytecodeOffset + current[1].u.operand);
+ break;
+ case op_jtrue:
+ case op_jfalse:
+ case op_jeq_null:
+ case op_jneq_null:
+ out.append(bytecodeOffset + current[2].u.operand);
+ break;
+ case op_jneq_ptr:
+ case op_jless:
+ case op_jlesseq:
+ case op_jgreater:
+ case op_jgreatereq:
+ case op_jnless:
+ case op_jnlesseq:
+ case op_jngreater:
+ case op_jngreatereq:
+ out.append(bytecodeOffset + current[3].u.operand);
+ break;
+ case op_switch_imm:
+ addSimpleSwitchTargets(codeBlock->immediateSwitchJumpTable(current[1].u.operand), bytecodeOffset, out);
+ out.append(bytecodeOffset + current[2].u.operand);
+ break;
+ case op_switch_char:
+ addSimpleSwitchTargets(codeBlock->characterSwitchJumpTable(current[1].u.operand), bytecodeOffset, out);
+ out.append(bytecodeOffset + current[2].u.operand);
+ break;
+ case op_switch_string: {
+ StringJumpTable& table = codeBlock->stringSwitchJumpTable(current[1].u.operand);
+ StringJumpTable::StringOffsetTable::iterator iter = table.offsetTable.begin();
+ StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
+ for (; iter != end; ++iter)
+ out.append(bytecodeOffset + iter->value.branchOffset);
+ out.append(bytecodeOffset + current[2].u.operand);
+ break;
+ }
+ case op_get_pnames:
+ out.append(bytecodeOffset + current[5].u.operand);
+ break;
+ case op_next_pname:
+ out.append(bytecodeOffset + current[6].u.operand);
+ break;
+ case op_check_has_instance:
+ out.append(bytecodeOffset + current[4].u.operand);
+ break;
+ case op_loop_hint:
+ out.append(bytecodeOffset);
+ break;
+ default:
+ break;
+ }
+ bytecodeOffset += opcodeLengths[opcodeID];
+ }
+
+ std::sort(out.begin(), out.end());
+
+ // We will have duplicates, and we must remove them.
+ unsigned toIndex = 0;
+ unsigned fromIndex = 0;
+ unsigned lastValue = UINT_MAX;
+ while (fromIndex < out.size()) {
+ unsigned value = out[fromIndex++];
+ if (value == lastValue)
+ continue;
+ out[toIndex++] = value;
+ lastValue = value;
+ }
+ out.resize(toIndex);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
new file mode 100644
index 000000000..109c40cea
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PreciseJumpTargets_h
+#define PreciseJumpTargets_h
+
+#include "CodeBlock.h"
+
+namespace JSC {
+
+void computePreciseJumpTargets(CodeBlock*, Vector<unsigned, 32>& out);
+
+} // namespace JSC
+
+#endif // PreciseJumpTargets_h
+
diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
index 7d6ba0987..24a57eb50 100644
--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
@@ -49,7 +49,7 @@ PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned
if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id)
|| instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_out_of_line)) {
- PropertyOffset offset = structure->get(*profiledBlock->globalData(), ident);
+ PropertyOffset offset = structure->get(*profiledBlock->vm(), ident);
if (!isValidOffset(offset))
return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
@@ -68,7 +68,7 @@ PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned
ASSERT(newStructure);
ASSERT(chain);
- PropertyOffset offset = newStructure->get(*profiledBlock->globalData(), ident);
+ PropertyOffset offset = newStructure->get(*profiledBlock->vm(), ident);
if (!isValidOffset(offset))
return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
@@ -94,13 +94,17 @@ PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec
if (!stubInfo.seen)
return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
+ if (stubInfo.resetByGC)
+ return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+
switch (stubInfo.accessType) {
case access_unset:
- return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
+ // If the JIT saw it but didn't optimize it, then assume that this takes slow path.
+ return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
case access_put_by_id_replace: {
PropertyOffset offset = stubInfo.u.putByIdReplace.baseObjectStructure->get(
- *profiledBlock->globalData(), ident);
+ *profiledBlock->vm(), ident);
if (isValidOffset(offset)) {
return PutByIdStatus(
SimpleReplace,
@@ -115,7 +119,7 @@ PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec
case access_put_by_id_transition_direct: {
ASSERT(stubInfo.u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated());
PropertyOffset offset = stubInfo.u.putByIdTransition.structure->get(
- *profiledBlock->globalData(), ident);
+ *profiledBlock->vm(), ident);
if (isValidOffset(offset)) {
return PutByIdStatus(
SimpleTransition,
@@ -135,7 +139,7 @@ PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec
#endif // ENABLE(JIT)
}
-PutByIdStatus PutByIdStatus::computeFor(JSGlobalData& globalData, JSGlobalObject* globalObject, Structure* structure, Identifier& ident, bool isDirect)
+PutByIdStatus PutByIdStatus::computeFor(VM& vm, JSGlobalObject* globalObject, Structure* structure, Identifier& ident, bool isDirect)
{
if (PropertyName(ident).asIndex() != PropertyName::NotAnIndex)
return PutByIdStatus(TakesSlowPath);
@@ -147,11 +151,16 @@ PutByIdStatus PutByIdStatus::computeFor(JSGlobalData& globalData, JSGlobalObject
return PutByIdStatus(TakesSlowPath);
unsigned attributes;
- JSCell* specificValueIgnored;
- PropertyOffset offset = structure->get(globalData, ident, attributes, specificValueIgnored);
+ JSCell* specificValue;
+ PropertyOffset offset = structure->get(vm, ident, attributes, specificValue);
if (isValidOffset(offset)) {
if (attributes & (Accessor | ReadOnly))
return PutByIdStatus(TakesSlowPath);
+ if (specificValue) {
+ // We need the PutById slow path to verify that we're storing the right value into
+ // the specialized slot.
+ return PutByIdStatus(TakesSlowPath);
+ }
return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
}
@@ -169,7 +178,7 @@ PutByIdStatus PutByIdStatus::computeFor(JSGlobalData& globalData, JSGlobalObject
if (!isDirect) {
// If the prototype chain has setters or read-only properties, then give up.
- if (structure->prototypeChainMayInterceptStoreTo(globalData, ident))
+ if (structure->prototypeChainMayInterceptStoreTo(vm, ident))
return PutByIdStatus(TakesSlowPath);
// If the prototype chain hasn't been normalized (i.e. there are proxies or dictionaries)
@@ -204,7 +213,7 @@ PutByIdStatus PutByIdStatus::computeFor(JSGlobalData& globalData, JSGlobalObject
return PutByIdStatus(
SimpleTransition, structure, transition,
- structure->prototypeChain(globalData, globalObject), offset);
+ structure->prototypeChain(vm, globalObject), offset);
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.h b/Source/JavaScriptCore/bytecode/PutByIdStatus.h
index fe22009fe..659e629d2 100644
--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.h
+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.h
@@ -33,7 +33,7 @@ namespace JSC {
class CodeBlock;
class Identifier;
-class JSGlobalData;
+class VM;
class JSGlobalObject;
class Structure;
class StructureChain;
@@ -91,7 +91,7 @@ public:
}
static PutByIdStatus computeFor(CodeBlock*, unsigned bytecodeIndex, Identifier&);
- static PutByIdStatus computeFor(JSGlobalData&, JSGlobalObject*, Structure*, Identifier&, bool isDirect);
+ static PutByIdStatus computeFor(VM&, JSGlobalObject*, Structure*, Identifier&, bool isDirect);
State state() const { return m_state; }
diff --git a/Source/JavaScriptCore/bytecode/ReduceWhitespace.cpp b/Source/JavaScriptCore/bytecode/ReduceWhitespace.cpp
new file mode 100644
index 000000000..1636cba1d
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ReduceWhitespace.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ReduceWhitespace.h"
+
+#include <wtf/text/StringBuilder.h>
+#include <wtf/text/WTFString.h>
+
+namespace JSC {
+
+String reduceWhitespace(const String& input)
+{
+ StringBuilder builder;
+
+ for (unsigned i = 0; i < input.length();) {
+ if (isASCIISpace(input[i])) {
+ while (i < input.length() && isASCIISpace(input[i]))
+ ++i;
+ builder.append(' ');
+ continue;
+ }
+ builder.append(input[i]);
+ ++i;
+ }
+
+ return builder.toString();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ReduceWhitespace.h b/Source/JavaScriptCore/bytecode/ReduceWhitespace.h
new file mode 100644
index 000000000..383dd3798
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ReduceWhitespace.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ReduceWhitespace_h
+#define ReduceWhitespace_h
+
+#include <wtf/text/WTFString.h>
+
+namespace JSC {
+
+// Replace all whitespace runs with a single space.
+String reduceWhitespace(const String&);
+
+} // namespace JSC
+
+#endif // ReduceWhitespace_h
diff --git a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp b/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp
index 7814f8c99..c02acb38d 100644
--- a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp
@@ -27,7 +27,8 @@
#include "ResolveGlobalStatus.h"
#include "CodeBlock.h"
-#include "JSValue.h"
+#include "JSCJSValue.h"
+#include "Operations.h"
#include "Structure.h"
namespace JSC {
@@ -36,7 +37,7 @@ static ResolveGlobalStatus computeForStructure(CodeBlock* codeBlock, Structure*
{
unsigned attributesIgnored;
JSCell* specificValue;
- PropertyOffset offset = structure->get(*codeBlock->globalData(), identifier, attributesIgnored, specificValue);
+ PropertyOffset offset = structure->get(*codeBlock->vm(), identifier, attributesIgnored, specificValue);
if (structure->isDictionary())
specificValue = 0;
if (!isValidOffset(offset))
diff --git a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h b/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h
index 46a9254e7..6763ff7c8 100644
--- a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h
+++ b/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h
@@ -26,7 +26,7 @@
#ifndef ResolveGlobalStatus_h
#define ResolveGlobalStatus_h
-#include "JSValue.h"
+#include "JSCJSValue.h"
#include "PropertyOffset.h"
#include <wtf/NotFound.h>
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.cpp b/Source/JavaScriptCore/bytecode/SamplingTool.cpp
index a76fee179..d18dbc1ff 100644
--- a/Source/JavaScriptCore/bytecode/SamplingTool.cpp
+++ b/Source/JavaScriptCore/bytecode/SamplingTool.cpp
@@ -297,13 +297,13 @@ void SamplingTool::sample()
s_samplingTool->doRun();
}
-void SamplingTool::notifyOfScope(JSGlobalData& globalData, ScriptExecutable* script)
+void SamplingTool::notifyOfScope(VM& vm, ScriptExecutable* script)
{
#if ENABLE(CODEBLOCK_SAMPLING)
MutexLocker locker(m_scriptSampleMapMutex);
- m_scopeSampleMap->set(script, adoptPtr(new ScriptSampleRecord(globalData, script)));
+ m_scopeSampleMap->set(script, adoptPtr(new ScriptSampleRecord(vm, script)));
#else
- UNUSED_PARAM(globalData);
+ UNUSED_PARAM(vm);
UNUSED_PARAM(script);
#endif
}
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.h b/Source/JavaScriptCore/bytecode/SamplingTool.h
index 6c9df62ee..516968a33 100644
--- a/Source/JavaScriptCore/bytecode/SamplingTool.h
+++ b/Source/JavaScriptCore/bytecode/SamplingTool.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,7 +30,6 @@
#define SamplingTool_h
#include "Strong.h"
-#include "Nodes.h"
#include "Opcode.h"
#include "SamplingCounter.h"
#include <wtf/Assertions.h>
@@ -186,8 +185,8 @@ namespace JSC {
struct Instruction;
struct ScriptSampleRecord {
- ScriptSampleRecord(JSGlobalData& globalData, ScriptExecutable* executable)
- : m_executable(globalData, executable)
+ ScriptSampleRecord(VM& vm, ScriptExecutable* executable)
+ : m_executable(vm, executable)
, m_codeBlock(0)
, m_sampleCount(0)
, m_opcodeSampleCount(0)
@@ -282,7 +281,7 @@ namespace JSC {
JS_EXPORT_PRIVATE void setup();
void dump(ExecState*);
- void notifyOfScope(JSGlobalData&, ScriptExecutable* scope);
+ void notifyOfScope(VM&, ScriptExecutable* scope);
void sample(CodeBlock* codeBlock, Instruction* vPC)
{
diff --git a/Source/JavaScriptCore/bytecode/SpeculatedType.cpp b/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
index a07ca2b22..0e33b650f 100644
--- a/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
+++ b/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,6 +32,8 @@
#include "Arguments.h"
#include "JSArray.h"
#include "JSFunction.h"
+#include "Operations.h"
+#include "StringObject.h"
#include "ValueProfile.h"
#include <wtf/BoundsCheckedPointer.h>
#include <wtf/StringPrintStream.h>
@@ -119,18 +121,18 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value)
else
isTop = false;
- if (value & SpecMyArguments)
- myOut.print("Myarguments");
+ if (value & SpecArguments)
+ myOut.print("Arguments");
else
isTop = false;
- if (value & SpecForeignArguments)
- myOut.print("Foreignarguments");
+ if (value & SpecString)
+ myOut.print("String");
else
isTop = false;
- if (value & SpecString)
- myOut.print("String");
+ if (value & SpecStringObject)
+ myOut.print("Stringobject");
else
isTop = false;
@@ -196,10 +198,12 @@ static const char* speculationToAbbreviatedString(SpeculatedType prediction)
return "<Float32array>";
if (isFloat64ArraySpeculation(prediction))
return "<Float64array>";
- if (isMyArgumentsSpeculation(prediction))
- return "<Myarguments>";
if (isArgumentsSpeculation(prediction))
return "<Arguments>";
+ if (isStringObjectSpeculation(prediction))
+ return "<StringObject>";
+ if (isStringOrStringObjectSpeculation(prediction))
+ return "<StringOrStringObject>";
if (isObjectSpeculation(prediction))
return "<Object>";
if (isCellSpeculation(prediction))
@@ -231,11 +235,13 @@ SpeculatedType speculationFromClassInfo(const ClassInfo* classInfo)
return SpecArray;
if (classInfo == &Arguments::s_info)
- return SpecArguments; // Cannot distinguish between MyArguments and ForeignArguments at this stage. That happens in the flow analysis.
+ return SpecArguments;
+
+ if (classInfo == &StringObject::s_info)
+ return SpecStringObject;
if (classInfo->isSubClassOf(&JSFunction::s_info))
return SpecFunction;
-
if (classInfo->typedArrayStorageType != TypedArrayNone) {
switch (classInfo->typedArrayStorageType) {
diff --git a/Source/JavaScriptCore/bytecode/SpeculatedType.h b/Source/JavaScriptCore/bytecode/SpeculatedType.h
index 261a26b0e..05788f0f1 100644
--- a/Source/JavaScriptCore/bytecode/SpeculatedType.h
+++ b/Source/JavaScriptCore/bytecode/SpeculatedType.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,7 +29,8 @@
#ifndef SpeculatedType_h
#define SpeculatedType_h
-#include "JSValue.h"
+#include "JSCJSValue.h"
+#include <wtf/PrintStream.h>
namespace JSC {
@@ -49,11 +50,10 @@ static const SpeculatedType SpecUint16Array = 0x00000200; // It's definite
static const SpeculatedType SpecUint32Array = 0x00000400; // It's definitely an Uint32Array or one of its subclasses.
static const SpeculatedType SpecFloat32Array = 0x00000800; // It's definitely an Uint16Array or one of its subclasses.
static const SpeculatedType SpecFloat64Array = 0x00001000; // It's definitely an Uint16Array or one of its subclasses.
-static const SpeculatedType SpecMyArguments = 0x00002000; // It's definitely an Arguments object, and it's definitely the one for my current frame.
-static const SpeculatedType SpecForeignArguments = 0x00004000; // It's definitely an Arguments object, and it's definitely not mine.
-static const SpeculatedType SpecArguments = 0x00006000; // It's definitely an Arguments object.
+static const SpeculatedType SpecArguments = 0x00002000; // It's definitely an Arguments object.
+static const SpeculatedType SpecStringObject = 0x00004000; // It's definitely a StringObject.
static const SpeculatedType SpecObjectOther = 0x00008000; // It's definitely an object but not JSFinalObject, JSArray, or JSFunction.
-static const SpeculatedType SpecObjectMask = 0x0000ffff; // Bitmask used for testing for any kind of object prediction.
+static const SpeculatedType SpecObject = 0x0000ffff; // Bitmask used for testing for any kind of object prediction.
static const SpeculatedType SpecString = 0x00010000; // It's definitely a JSString.
static const SpeculatedType SpecCellOther = 0x00020000; // It's definitely a JSCell but not a subclass of JSObject and definitely not a JSString.
static const SpeculatedType SpecCell = 0x0003ffff; // It's definitely a JSCell.
@@ -83,19 +83,14 @@ inline bool isCellSpeculation(SpeculatedType value)
return !!(value & SpecCell) && !(value & ~SpecCell);
}
-inline bool isNonStringCellSpeculation(SpeculatedType value)
-{
- return !!(value & (SpecCell & ~SpecString)) && !(value & ~(SpecCell & ~SpecString));
-}
-
-inline bool isNonStringCellOrOtherSpeculation(SpeculatedType value)
+inline bool isObjectSpeculation(SpeculatedType value)
{
- return !!(value & ((SpecCell & ~SpecString) | SpecOther)) && !(value & ~((SpecCell & ~SpecString) | SpecOther));
+ return !!(value & SpecObject) && !(value & ~SpecObject);
}
-inline bool isObjectSpeculation(SpeculatedType value)
+inline bool isObjectOrOtherSpeculation(SpeculatedType value)
{
- return !!(value & SpecObjectMask) && !(value & ~SpecObjectMask);
+ return !!(value & (SpecObject | SpecOther)) && !(value & ~(SpecObject | SpecOther));
}
inline bool isFinalObjectSpeculation(SpeculatedType value)
@@ -219,9 +214,14 @@ inline bool isArrayOrOtherSpeculation(SpeculatedType value)
return !!(value & (SpecArray | SpecOther)) && !(value & ~(SpecArray | SpecOther));
}
-inline bool isMyArgumentsSpeculation(SpeculatedType value)
+inline bool isStringObjectSpeculation(SpeculatedType value)
+{
+ return value == SpecStringObject;
+}
+
+inline bool isStringOrStringObjectSpeculation(SpeculatedType value)
{
- return value == SpecMyArguments;
+ return !!value && !(value & ~(SpecString | SpecStringObject));
}
inline bool isInt32Speculation(SpeculatedType value)
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
index 9238fefdc..70cf2fccb 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
+++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
@@ -63,7 +63,7 @@ void StructureStubInfo::deref()
// These instructions don't have to release any allocated memory
return;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
}
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.h b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
index 445ffe6c6..f5c39357c 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubInfo.h
+++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
@@ -36,282 +36,285 @@
#include "JITStubRoutine.h"
#include "MacroAssembler.h"
#include "Opcode.h"
+#include "PolymorphicAccessStructureList.h"
#include "Structure.h"
#include "StructureStubClearingWatchpoint.h"
#include <wtf/OwnPtr.h>
namespace JSC {
- class PolymorphicPutByIdList;
-
- enum AccessType {
- access_get_by_id_self,
- access_get_by_id_proto,
- access_get_by_id_chain,
- access_get_by_id_self_list,
- access_get_by_id_proto_list,
- access_put_by_id_transition_normal,
- access_put_by_id_transition_direct,
- access_put_by_id_replace,
- access_put_by_id_list,
- access_unset,
- access_get_by_id_generic,
- access_put_by_id_generic,
- access_get_array_length,
- access_get_string_length,
- };
-
- inline bool isGetByIdAccess(AccessType accessType)
- {
- switch (accessType) {
- case access_get_by_id_self:
- case access_get_by_id_proto:
- case access_get_by_id_chain:
- case access_get_by_id_self_list:
- case access_get_by_id_proto_list:
- case access_get_by_id_generic:
- case access_get_array_length:
- case access_get_string_length:
- return true;
- default:
- return false;
- }
+class PolymorphicPutByIdList;
+
+enum AccessType {
+ access_get_by_id_self,
+ access_get_by_id_proto,
+ access_get_by_id_chain,
+ access_get_by_id_self_list,
+ access_get_by_id_proto_list,
+ access_put_by_id_transition_normal,
+ access_put_by_id_transition_direct,
+ access_put_by_id_replace,
+ access_put_by_id_list,
+ access_unset,
+ access_get_by_id_generic,
+ access_put_by_id_generic,
+ access_get_array_length,
+ access_get_string_length,
+};
+
+inline bool isGetByIdAccess(AccessType accessType)
+{
+ switch (accessType) {
+ case access_get_by_id_self:
+ case access_get_by_id_proto:
+ case access_get_by_id_chain:
+ case access_get_by_id_self_list:
+ case access_get_by_id_proto_list:
+ case access_get_by_id_generic:
+ case access_get_array_length:
+ case access_get_string_length:
+ return true;
+ default:
+ return false;
}
+}
- inline bool isPutByIdAccess(AccessType accessType)
+inline bool isPutByIdAccess(AccessType accessType)
+{
+ switch (accessType) {
+ case access_put_by_id_transition_normal:
+ case access_put_by_id_transition_direct:
+ case access_put_by_id_replace:
+ case access_put_by_id_list:
+ case access_put_by_id_generic:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct StructureStubInfo {
+ StructureStubInfo()
+ : accessType(access_unset)
+ , seen(false)
+ , resetByGC(false)
+ {
+ }
+
+ void initGetByIdSelf(VM& vm, JSCell* owner, Structure* baseObjectStructure)
{
- switch (accessType) {
- case access_put_by_id_transition_normal:
- case access_put_by_id_transition_direct:
- case access_put_by_id_replace:
- case access_put_by_id_list:
- case access_put_by_id_generic:
- return true;
- default:
- return false;
- }
+ accessType = access_get_by_id_self;
+
+ u.getByIdSelf.baseObjectStructure.set(vm, owner, baseObjectStructure);
+ }
+
+ void initGetByIdProto(VM& vm, JSCell* owner, Structure* baseObjectStructure, Structure* prototypeStructure, bool isDirect)
+ {
+ accessType = access_get_by_id_proto;
+
+ u.getByIdProto.baseObjectStructure.set(vm, owner, baseObjectStructure);
+ u.getByIdProto.prototypeStructure.set(vm, owner, prototypeStructure);
+ u.getByIdProto.isDirect = isDirect;
+ }
+
+ void initGetByIdChain(VM& vm, JSCell* owner, Structure* baseObjectStructure, StructureChain* chain, unsigned count, bool isDirect)
+ {
+ accessType = access_get_by_id_chain;
+
+ u.getByIdChain.baseObjectStructure.set(vm, owner, baseObjectStructure);
+ u.getByIdChain.chain.set(vm, owner, chain);
+ u.getByIdChain.count = count;
+ u.getByIdChain.isDirect = isDirect;
+ }
+
+ void initGetByIdSelfList(PolymorphicAccessStructureList* structureList, int listSize)
+ {
+ accessType = access_get_by_id_self_list;
+
+ u.getByIdSelfList.structureList = structureList;
+ u.getByIdSelfList.listSize = listSize;
+ }
+
+ void initGetByIdProtoList(PolymorphicAccessStructureList* structureList, int listSize)
+ {
+ accessType = access_get_by_id_proto_list;
+
+ u.getByIdProtoList.structureList = structureList;
+ u.getByIdProtoList.listSize = listSize;
}
- struct StructureStubInfo {
- StructureStubInfo()
- : accessType(access_unset)
- , seen(false)
- {
- }
-
- void initGetByIdSelf(JSGlobalData& globalData, JSCell* owner, Structure* baseObjectStructure)
- {
- accessType = access_get_by_id_self;
-
- u.getByIdSelf.baseObjectStructure.set(globalData, owner, baseObjectStructure);
- }
-
- void initGetByIdProto(JSGlobalData& globalData, JSCell* owner, Structure* baseObjectStructure, Structure* prototypeStructure, bool isDirect)
- {
- accessType = access_get_by_id_proto;
-
- u.getByIdProto.baseObjectStructure.set(globalData, owner, baseObjectStructure);
- u.getByIdProto.prototypeStructure.set(globalData, owner, prototypeStructure);
- u.getByIdProto.isDirect = isDirect;
- }
-
- void initGetByIdChain(JSGlobalData& globalData, JSCell* owner, Structure* baseObjectStructure, StructureChain* chain, unsigned count, bool isDirect)
- {
- accessType = access_get_by_id_chain;
-
- u.getByIdChain.baseObjectStructure.set(globalData, owner, baseObjectStructure);
- u.getByIdChain.chain.set(globalData, owner, chain);
- u.getByIdChain.count = count;
- u.getByIdChain.isDirect = isDirect;
- }
-
- void initGetByIdSelfList(PolymorphicAccessStructureList* structureList, int listSize)
- {
- accessType = access_get_by_id_self_list;
-
- u.getByIdSelfList.structureList = structureList;
- u.getByIdSelfList.listSize = listSize;
- }
-
- void initGetByIdProtoList(PolymorphicAccessStructureList* structureList, int listSize)
- {
- accessType = access_get_by_id_proto_list;
-
- u.getByIdProtoList.structureList = structureList;
- u.getByIdProtoList.listSize = listSize;
- }
-
- // PutById*
-
- void initPutByIdTransition(JSGlobalData& globalData, JSCell* owner, Structure* previousStructure, Structure* structure, StructureChain* chain, bool isDirect)
- {
- if (isDirect)
- accessType = access_put_by_id_transition_direct;
- else
- accessType = access_put_by_id_transition_normal;
-
- u.putByIdTransition.previousStructure.set(globalData, owner, previousStructure);
- u.putByIdTransition.structure.set(globalData, owner, structure);
- u.putByIdTransition.chain.set(globalData, owner, chain);
- }
-
- void initPutByIdReplace(JSGlobalData& globalData, JSCell* owner, Structure* baseObjectStructure)
- {
- accessType = access_put_by_id_replace;
+ // PutById*
+
+ void initPutByIdTransition(VM& vm, JSCell* owner, Structure* previousStructure, Structure* structure, StructureChain* chain, bool isDirect)
+ {
+ if (isDirect)
+ accessType = access_put_by_id_transition_direct;
+ else
+ accessType = access_put_by_id_transition_normal;
+
+ u.putByIdTransition.previousStructure.set(vm, owner, previousStructure);
+ u.putByIdTransition.structure.set(vm, owner, structure);
+ u.putByIdTransition.chain.set(vm, owner, chain);
+ }
+
+ void initPutByIdReplace(VM& vm, JSCell* owner, Structure* baseObjectStructure)
+ {
+ accessType = access_put_by_id_replace;
- u.putByIdReplace.baseObjectStructure.set(globalData, owner, baseObjectStructure);
- }
+ u.putByIdReplace.baseObjectStructure.set(vm, owner, baseObjectStructure);
+ }
- void initPutByIdList(PolymorphicPutByIdList* list)
- {
- accessType = access_put_by_id_list;
- u.putByIdList.list = list;
- }
+ void initPutByIdList(PolymorphicPutByIdList* list)
+ {
+ accessType = access_put_by_id_list;
+ u.putByIdList.list = list;
+ }
- void reset()
- {
- deref();
- accessType = access_unset;
- stubRoutine.clear();
- watchpoints.clear();
- }
+ void reset()
+ {
+ deref();
+ accessType = access_unset;
+ stubRoutine.clear();
+ watchpoints.clear();
+ }
- void deref();
+ void deref();
- bool visitWeakReferences();
+ bool visitWeakReferences();
- bool seenOnce()
- {
- return seen;
- }
-
- void setSeen()
- {
- seen = true;
- }
+ bool seenOnce()
+ {
+ return seen;
+ }
+
+ void setSeen()
+ {
+ seen = true;
+ }
- StructureStubClearingWatchpoint* addWatchpoint(CodeBlock* codeBlock)
- {
- return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
- watchpoints, codeBlock, this);
- }
+ StructureStubClearingWatchpoint* addWatchpoint(CodeBlock* codeBlock)
+ {
+ return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
+ watchpoints, codeBlock, this);
+ }
- unsigned bytecodeIndex;
+ unsigned bytecodeIndex;
- int8_t accessType;
- int8_t seen;
+ int8_t accessType;
+ bool seen : 1;
+ bool resetByGC : 1;
#if ENABLE(DFG_JIT)
- CodeOrigin codeOrigin;
+ CodeOrigin codeOrigin;
#endif // ENABLE(DFG_JIT)
- union {
- struct {
- int8_t registersFlushed;
- int8_t baseGPR;
+ union {
+ struct {
+ int8_t registersFlushed;
+ int8_t baseGPR;
#if USE(JSVALUE32_64)
- int8_t valueTagGPR;
+ int8_t valueTagGPR;
#endif
- int8_t valueGPR;
- DFG::RegisterSetPOD usedRegisters;
- int32_t deltaCallToDone;
- int32_t deltaCallToStorageLoad;
- int32_t deltaCallToStructCheck;
- int32_t deltaCallToSlowCase;
- int32_t deltaCheckImmToCall;
+ int8_t valueGPR;
+ DFG::RegisterSetPOD usedRegisters;
+ int32_t deltaCallToDone;
+ int32_t deltaCallToStorageLoad;
+ int32_t deltaCallToStructCheck;
+ int32_t deltaCallToSlowCase;
+ int32_t deltaCheckImmToCall;
#if USE(JSVALUE64)
- int32_t deltaCallToLoadOrStore;
+ int32_t deltaCallToLoadOrStore;
#else
- int32_t deltaCallToTagLoadOrStore;
- int32_t deltaCallToPayloadLoadOrStore;
+ int32_t deltaCallToTagLoadOrStore;
+ int32_t deltaCallToPayloadLoadOrStore;
#endif
- } dfg;
- struct {
- union {
- struct {
- int16_t structureToCompare;
- int16_t structureCheck;
- int16_t propertyStorageLoad;
+ } dfg;
+ struct {
+ union {
+ struct {
+ int16_t structureToCompare;
+ int16_t structureCheck;
+ int16_t propertyStorageLoad;
#if USE(JSVALUE64)
- int16_t displacementLabel;
+ int16_t displacementLabel;
#else
- int16_t displacementLabel1;
- int16_t displacementLabel2;
+ int16_t displacementLabel1;
+ int16_t displacementLabel2;
#endif
- int16_t putResult;
- int16_t coldPathBegin;
- } get;
- struct {
- int16_t structureToCompare;
- int16_t propertyStorageLoad;
+ int16_t putResult;
+ int16_t coldPathBegin;
+ } get;
+ struct {
+ int16_t structureToCompare;
+ int16_t propertyStorageLoad;
#if USE(JSVALUE64)
- int16_t displacementLabel;
+ int16_t displacementLabel;
#else
- int16_t displacementLabel1;
- int16_t displacementLabel2;
+ int16_t displacementLabel1;
+ int16_t displacementLabel2;
#endif
- } put;
- } u;
- int16_t methodCheckProtoObj;
- int16_t methodCheckProtoStructureToCompare;
- int16_t methodCheckPutFunction;
- } baseline;
- } patch;
-
- union {
- struct {
- // It would be unwise to put anything here, as it will surely be overwritten.
- } unset;
- struct {
- WriteBarrierBase<Structure> baseObjectStructure;
- } getByIdSelf;
- struct {
- WriteBarrierBase<Structure> baseObjectStructure;
- WriteBarrierBase<Structure> prototypeStructure;
- bool isDirect;
- } getByIdProto;
- struct {
- WriteBarrierBase<Structure> baseObjectStructure;
- WriteBarrierBase<StructureChain> chain;
- unsigned count : 31;
- bool isDirect : 1;
- } getByIdChain;
- struct {
- PolymorphicAccessStructureList* structureList;
- int listSize;
- } getByIdSelfList;
- struct {
- PolymorphicAccessStructureList* structureList;
- int listSize;
- } getByIdProtoList;
- struct {
- WriteBarrierBase<Structure> previousStructure;
- WriteBarrierBase<Structure> structure;
- WriteBarrierBase<StructureChain> chain;
- } putByIdTransition;
- struct {
- WriteBarrierBase<Structure> baseObjectStructure;
- } putByIdReplace;
- struct {
- PolymorphicPutByIdList* list;
- } putByIdList;
- } u;
-
- RefPtr<JITStubRoutine> stubRoutine;
- CodeLocationCall callReturnLocation;
- CodeLocationLabel hotPathBegin;
- RefPtr<WatchpointsOnStructureStubInfo> watchpoints;
- };
-
- inline void* getStructureStubInfoReturnLocation(StructureStubInfo* structureStubInfo)
- {
- return structureStubInfo->callReturnLocation.executableAddress();
- }
-
- inline unsigned getStructureStubInfoBytecodeIndex(StructureStubInfo* structureStubInfo)
- {
- return structureStubInfo->bytecodeIndex;
- }
+ } put;
+ } u;
+ int16_t methodCheckProtoObj;
+ int16_t methodCheckProtoStructureToCompare;
+ int16_t methodCheckPutFunction;
+ } baseline;
+ } patch;
+
+ union {
+ struct {
+ // It would be unwise to put anything here, as it will surely be overwritten.
+ } unset;
+ struct {
+ WriteBarrierBase<Structure> baseObjectStructure;
+ } getByIdSelf;
+ struct {
+ WriteBarrierBase<Structure> baseObjectStructure;
+ WriteBarrierBase<Structure> prototypeStructure;
+ bool isDirect;
+ } getByIdProto;
+ struct {
+ WriteBarrierBase<Structure> baseObjectStructure;
+ WriteBarrierBase<StructureChain> chain;
+ unsigned count : 31;
+ bool isDirect : 1;
+ } getByIdChain;
+ struct {
+ PolymorphicAccessStructureList* structureList;
+ int listSize;
+ } getByIdSelfList;
+ struct {
+ PolymorphicAccessStructureList* structureList;
+ int listSize;
+ } getByIdProtoList;
+ struct {
+ WriteBarrierBase<Structure> previousStructure;
+ WriteBarrierBase<Structure> structure;
+ WriteBarrierBase<StructureChain> chain;
+ } putByIdTransition;
+ struct {
+ WriteBarrierBase<Structure> baseObjectStructure;
+ } putByIdReplace;
+ struct {
+ PolymorphicPutByIdList* list;
+ } putByIdList;
+ } u;
+
+ RefPtr<JITStubRoutine> stubRoutine;
+ CodeLocationCall callReturnLocation;
+ CodeLocationLabel hotPathBegin;
+ RefPtr<WatchpointsOnStructureStubInfo> watchpoints;
+};
+
+inline void* getStructureStubInfoReturnLocation(StructureStubInfo* structureStubInfo)
+{
+ return structureStubInfo->callReturnLocation.executableAddress();
+}
+
+inline unsigned getStructureStubInfoBytecodeIndex(StructureStubInfo* structureStubInfo)
+{
+ return structureStubInfo->bytecodeIndex;
+}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
index e98d4de0a..8aba1ff8b 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All Rights Reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,6 +32,8 @@
#include "CodeCache.h"
#include "Executable.h"
#include "JSString.h"
+#include "Operations.h"
+#include "Parser.h"
#include "SourceProvider.h"
#include "Structure.h"
#include "SymbolTable.h"
@@ -45,6 +47,29 @@ const ClassInfo UnlinkedProgramCodeBlock::s_info = { "UnlinkedProgramCodeBlock",
const ClassInfo UnlinkedEvalCodeBlock::s_info = { "UnlinkedEvalCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedEvalCodeBlock) };
const ClassInfo UnlinkedFunctionCodeBlock::s_info = { "UnlinkedFunctionCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionCodeBlock) };
+static UnlinkedFunctionCodeBlock* generateFunctionCodeBlock(VM& vm, JSScope* scope, UnlinkedFunctionExecutable* executable, const SourceCode& source, CodeSpecializationKind kind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error)
+{
+ RefPtr<FunctionBodyNode> body = parse<FunctionBodyNode>(&vm, source, executable->parameters(), executable->name(), executable->isInStrictContext() ? JSParseStrict : JSParseNormal, JSParseFunctionCode, error);
+
+ if (!body) {
+ ASSERT(error.m_type != ParserError::ErrorNone);
+ return 0;
+ }
+
+ if (executable->forceUsesArguments())
+ body->setUsesArguments();
+ body->finishParsing(executable->parameters(), executable->name(), executable->functionNameIsInScopeToggle());
+ executable->recordParse(body->features(), body->hasCapturedVariables(), body->lineNo(), body->lastLine());
+
+ UnlinkedFunctionCodeBlock* result = UnlinkedFunctionCodeBlock::create(&vm, FunctionCode, ExecutableInfo(body->needsActivation(), body->usesEval(), body->isStrictMode(), kind == CodeForConstruct));
+ OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(vm, scope, body.get(), result, debuggerMode, profilerMode)));
+ error = generator->generate();
+ body->destroyData();
+ if (error.m_type != ParserError::ErrorNone)
+ return 0;
+ return result;
+}
+
unsigned UnlinkedCodeBlock::addOrFindConstant(JSValue v)
{
unsigned numberOfConstants = numberOfConstantRegisters();
@@ -55,8 +80,8 @@ unsigned UnlinkedCodeBlock::addOrFindConstant(JSValue v)
return addConstant(v);
}
-UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(JSGlobalData* globalData, Structure* structure, const SourceCode& source, FunctionBodyNode* node)
- : Base(*globalData, structure)
+UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM* vm, Structure* structure, const SourceCode& source, FunctionBodyNode* node)
+ : Base(*vm, structure)
, m_numCapturedVariables(node->capturedVariableCount())
, m_forceUsesArguments(node->usesArguments())
, m_isInStrictContext(node->isStrictMode())
@@ -66,6 +91,8 @@ UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(JSGlobalData* globalData,
, m_parameters(node->parameters())
, m_firstLineOffset(node->firstLine() - source.firstLine())
, m_lineCount(node->lastLine() - node->firstLine())
+ , m_functionStartOffset(node->functionStart() - source.startOffset())
+ , m_functionStartColumn(node->startColumn())
, m_startOffset(node->source().startOffset() - source.startOffset())
, m_sourceLength(node->source().length())
, m_features(node->features())
@@ -73,6 +100,11 @@ UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(JSGlobalData* globalData,
{
}
+size_t UnlinkedFunctionExecutable::parameterCount() const
+{
+ return m_parameters->size();
+}
+
void UnlinkedFunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
UnlinkedFunctionExecutable* thisObject = jsCast<UnlinkedFunctionExecutable*>(cell);
@@ -80,24 +112,31 @@ void UnlinkedFunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visito
COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag);
ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren());
Base::visitChildren(thisObject, visitor);
+ visitor.append(&thisObject->m_codeBlockForCall);
+ visitor.append(&thisObject->m_codeBlockForConstruct);
visitor.append(&thisObject->m_nameValue);
visitor.append(&thisObject->m_symbolTableForCall);
visitor.append(&thisObject->m_symbolTableForConstruct);
}
-FunctionExecutable* UnlinkedFunctionExecutable::link(JSGlobalData& globalData, const SourceCode& source, size_t lineOffset, size_t sourceOffset)
+FunctionExecutable* UnlinkedFunctionExecutable::link(VM& vm, const SourceCode& source, size_t lineOffset, size_t sourceOffset)
{
unsigned firstLine = lineOffset + m_firstLineOffset;
unsigned startOffset = sourceOffset + m_startOffset;
- SourceCode code(source.provider(), startOffset, startOffset + m_sourceLength, firstLine);
- return FunctionExecutable::create(globalData, code, this, firstLine, firstLine + m_lineCount);
+ unsigned startColumn = m_functionStartColumn + 1; // startColumn should start from 1, not 0.
+ SourceCode code(source.provider(), startOffset, startOffset + m_sourceLength, firstLine, startColumn);
+ return FunctionExecutable::create(vm, code, this, firstLine, firstLine + m_lineCount, startColumn);
}
UnlinkedFunctionExecutable* UnlinkedFunctionExecutable::fromGlobalCode(const Identifier& name, ExecState* exec, Debugger*, const SourceCode& source, JSObject** exception)
{
ParserError error;
- CodeCache* codeCache = exec->globalData().codeCache();
- UnlinkedFunctionExecutable* executable = codeCache->getFunctionExecutableFromGlobalCode(exec->globalData(), name, source, error);
+ CodeCache* codeCache = exec->vm().codeCache();
+ UnlinkedFunctionExecutable* executable = codeCache->getFunctionExecutableFromGlobalCode(exec->vm(), name, source, error);
+
+ if (exec->lexicalGlobalObject()->hasDebugger())
+ exec->lexicalGlobalObject()->debugger()->sourceParsed(exec, source.provider(), error.m_line, error.m_message);
+
if (error.m_type != ParserError::ErrorNone) {
*exception = error.toErrorObject(exec->lexicalGlobalObject(), source);
return 0;
@@ -106,36 +145,32 @@ UnlinkedFunctionExecutable* UnlinkedFunctionExecutable::fromGlobalCode(const Ide
return executable;
}
-UnlinkedFunctionCodeBlock* UnlinkedFunctionExecutable::codeBlockFor(JSGlobalData& globalData, const SourceCode& source, CodeSpecializationKind specializationKind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error)
+UnlinkedFunctionCodeBlock* UnlinkedFunctionExecutable::codeBlockFor(VM& vm, JSScope* scope, const SourceCode& source, CodeSpecializationKind specializationKind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error)
{
switch (specializationKind) {
case CodeForCall:
- if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForCall.get()) {
- globalData.codeCache()->usedFunctionCode(globalData, codeBlock);
+ if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForCall.get())
return codeBlock;
- }
break;
case CodeForConstruct:
- if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForConstruct.get()) {
- globalData.codeCache()->usedFunctionCode(globalData, codeBlock);
+ if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForConstruct.get())
return codeBlock;
- }
break;
}
- UnlinkedFunctionCodeBlock* result = globalData.codeCache()->getFunctionCodeBlock(globalData, this, source, specializationKind, debuggerMode, profilerMode, error);
+ UnlinkedFunctionCodeBlock* result = generateFunctionCodeBlock(vm, scope, this, source, specializationKind, debuggerMode, profilerMode, error);
if (error.m_type != ParserError::ErrorNone)
return 0;
switch (specializationKind) {
case CodeForCall:
- m_codeBlockForCall = PassWeak<UnlinkedFunctionCodeBlock>(result);
- m_symbolTableForCall.set(globalData, this, result->symbolTable());
+ m_codeBlockForCall.set(vm, this, result);
+ m_symbolTableForCall.set(vm, this, result->symbolTable());
break;
case CodeForConstruct:
- m_codeBlockForConstruct = PassWeak<UnlinkedFunctionCodeBlock>(result);
- m_symbolTableForConstruct.set(globalData, this, result->symbolTable());
+ m_codeBlockForConstruct.set(vm, this, result);
+ m_symbolTableForConstruct.set(vm, this, result->symbolTable());
break;
}
return result;
@@ -148,18 +183,19 @@ String UnlinkedFunctionExecutable::paramString() const
for (size_t pos = 0; pos < parameters.size(); ++pos) {
if (!builder.isEmpty())
builder.appendLiteral(", ");
- builder.append(parameters[pos].string());
+ builder.append(parameters.at(pos).string());
}
return builder.toString();
}
-UnlinkedCodeBlock::UnlinkedCodeBlock(JSGlobalData* globalData, Structure* structure, CodeType codeType, const ExecutableInfo& info)
- : Base(*globalData, structure)
+UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info)
+ : Base(*vm, structure)
, m_numVars(0)
, m_numCalleeRegisters(0)
, m_numParameters(0)
- , m_globalData(globalData)
+ , m_vm(vm)
, m_argumentsRegister(-1)
+ , m_globalObjectRegister(-1)
, m_needsFullScopeChain(info.m_needsActivation)
, m_usesEval(info.m_usesEval)
, m_isNumericCompareFunction(false)
@@ -174,6 +210,7 @@ UnlinkedCodeBlock::UnlinkedCodeBlock(JSGlobalData* globalData, Structure* struct
, m_putToBaseOperationCount(1)
, m_arrayProfileCount(0)
, m_arrayAllocationProfileCount(0)
+ , m_objectAllocationProfileCount(0)
, m_valueProfileCount(0)
, m_llintCallLinkInfoCount(0)
#if ENABLE(BYTECODE_COMMENTS)
@@ -205,24 +242,17 @@ void UnlinkedCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
{
ASSERT(bytecodeOffset < instructions().size());
- Vector<LineInfo>& lineInfo = m_lineInfo;
-
- int low = 0;
- int high = lineInfo.size();
- while (low < high) {
- int mid = low + (high - low) / 2;
- if (lineInfo[mid].instructionOffset <= bytecodeOffset)
- low = mid + 1;
- else
- high = mid;
- }
-
- if (!low)
- return 0;
- return lineInfo[low - 1].lineNumber;
+ int divot;
+ int startOffset;
+ int endOffset;
+ unsigned line;
+ unsigned column;
+ expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
+ return line;
}
-void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset)
+void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset,
+ int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
{
ASSERT(bytecodeOffset < instructions().size());
@@ -230,6 +260,8 @@ void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset
startOffset = 0;
endOffset = 0;
divot = 0;
+ line = 0;
+ column = 0;
return;
}
@@ -245,17 +277,83 @@ void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset
high = mid;
}
- ASSERT(low);
- if (!low) {
+ if (!low)
+ low = 1;
+
+ ExpressionRangeInfo& info = expressionInfo[low - 1];
+ startOffset = info.startOffset;
+ endOffset = info.endOffset;
+ divot = info.divotPoint;
+
+ switch (info.mode) {
+ case ExpressionRangeInfo::FatLineMode:
+ info.decodeFatLineMode(line, column);
+ break;
+ case ExpressionRangeInfo::FatColumnMode:
+ info.decodeFatColumnMode(line, column);
+ break;
+ case ExpressionRangeInfo::FatLineAndColumnMode: {
+ unsigned fatIndex = info.position;
+ ExpressionRangeInfo::FatPosition& fatPos = m_rareData->m_expressionInfoFatPositions[fatIndex];
+ line = fatPos.line;
+ column = fatPos.column;
+ break;
+ }
+ } // switch
+}
+
+void UnlinkedCodeBlock::addExpressionInfo(unsigned instructionOffset,
+ int divot, int startOffset, int endOffset, unsigned line, unsigned column)
+{
+ if (divot > ExpressionRangeInfo::MaxDivot) {
+ // Overflow has occurred, we can only give line number info for errors for this region
+ divot = 0;
startOffset = 0;
endOffset = 0;
- divot = 0;
- return;
+ } else if (startOffset > ExpressionRangeInfo::MaxOffset) {
+ // If the start offset is out of bounds we clear both offsets
+ // so we only get the divot marker. Error message will have to be reduced
+ // to line and charPosition number.
+ startOffset = 0;
+ endOffset = 0;
+ } else if (endOffset > ExpressionRangeInfo::MaxOffset) {
+ // The end offset is only used for additional context, and is much more likely
+ // to overflow (eg. function call arguments) so we are willing to drop it without
+ // dropping the rest of the range.
+ endOffset = 0;
+ }
+
+ unsigned positionMode =
+ (line <= ExpressionRangeInfo::MaxFatLineModeLine && column <= ExpressionRangeInfo::MaxFatLineModeColumn)
+ ? ExpressionRangeInfo::FatLineMode
+ : (line <= ExpressionRangeInfo::MaxFatColumnModeLine && column <= ExpressionRangeInfo::MaxFatColumnModeColumn)
+ ? ExpressionRangeInfo::FatColumnMode
+ : ExpressionRangeInfo::FatLineAndColumnMode;
+
+ ExpressionRangeInfo info;
+ info.instructionOffset = instructionOffset;
+ info.divotPoint = divot;
+ info.startOffset = startOffset;
+ info.endOffset = endOffset;
+
+ info.mode = positionMode;
+ switch (positionMode) {
+ case ExpressionRangeInfo::FatLineMode:
+ info.encodeFatLineMode(line, column);
+ break;
+ case ExpressionRangeInfo::FatColumnMode:
+ info.encodeFatColumnMode(line, column);
+ break;
+ case ExpressionRangeInfo::FatLineAndColumnMode: {
+ createRareDataIfNecessary();
+ unsigned fatIndex = m_rareData->m_expressionInfoFatPositions.size();
+ ExpressionRangeInfo::FatPosition fatPos = { line, column };
+ m_rareData->m_expressionInfoFatPositions.append(fatPos);
+ info.position = fatIndex;
}
+ } // switch
- startOffset = expressionInfo[low - 1].startOffset;
- endOffset = expressionInfo[low - 1].endOffset;
- divot = expressionInfo[low - 1].divotPoint;
+ m_expressionInfo.append(info);
}
void UnlinkedProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
index 23937d773..634968313 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All Rights Reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,16 +27,18 @@
#define UnlinkedCodeBlock_h
#include "BytecodeConventions.h"
+#include "CodeCache.h"
#include "CodeSpecializationKind.h"
#include "CodeType.h"
#include "ExpressionRangeInfo.h"
#include "Identifier.h"
#include "JSCell.h"
+#include "JSString.h"
#include "LineInfo.h"
-#include "Nodes.h"
+#include "ParserModes.h"
#include "RegExp.h"
#include "SpecialPointer.h"
-#include "Weak.h"
+#include "SymbolTable.h"
#include <wtf/RefCountedArray.h>
#include <wtf/Vector.h>
@@ -47,6 +49,7 @@ class Debugger;
class FunctionBodyNode;
class FunctionExecutable;
class FunctionParameters;
+class JSScope;
struct ParserError;
class ScriptExecutable;
class SourceCode;
@@ -58,6 +61,7 @@ class UnlinkedFunctionCodeBlock;
typedef unsigned UnlinkedValueProfile;
typedef unsigned UnlinkedArrayProfile;
typedef unsigned UnlinkedArrayAllocationProfile;
+typedef unsigned UnlinkedObjectAllocationProfile;
typedef unsigned UnlinkedLLIntCallLinkInfo;
struct ExecutableInfo {
@@ -78,10 +82,10 @@ class UnlinkedFunctionExecutable : public JSCell {
public:
friend class CodeCache;
typedef JSCell Base;
- static UnlinkedFunctionExecutable* create(JSGlobalData* globalData, const SourceCode& source, FunctionBodyNode* node)
+ static UnlinkedFunctionExecutable* create(VM* vm, const SourceCode& source, FunctionBodyNode* node)
{
- UnlinkedFunctionExecutable* instance = new (NotNull, allocateCell<UnlinkedFunctionExecutable>(globalData->heap)) UnlinkedFunctionExecutable(globalData, globalData->unlinkedFunctionExecutableStructure.get(), source, node);
- instance->finishCreation(*globalData);
+ UnlinkedFunctionExecutable* instance = new (NotNull, allocateCell<UnlinkedFunctionExecutable>(vm->heap)) UnlinkedFunctionExecutable(vm, vm->unlinkedFunctionExecutableStructure.get(), source, node);
+ instance->finishCreation(*vm);
return instance;
}
@@ -92,22 +96,24 @@ public:
{
return (kind == CodeForCall) ? m_symbolTableForCall.get() : m_symbolTableForConstruct.get();
}
- size_t parameterCount() const { return m_parameters->size(); }
+ size_t parameterCount() const;
bool isInStrictContext() const { return m_isInStrictContext; }
FunctionNameIsInScopeToggle functionNameIsInScopeToggle() const { return m_functionNameIsInScopeToggle; }
unsigned firstLineOffset() const { return m_firstLineOffset; }
unsigned lineCount() const { return m_lineCount; }
+ unsigned functionStartOffset() const { return m_functionStartOffset; }
+ unsigned functionStartColumn() const { return m_functionStartColumn; }
unsigned startOffset() const { return m_startOffset; }
unsigned sourceLength() { return m_sourceLength; }
String paramString() const;
- UnlinkedFunctionCodeBlock* codeBlockFor(JSGlobalData&, const SourceCode&, CodeSpecializationKind, DebuggerMode, ProfilerMode, ParserError&);
+ UnlinkedFunctionCodeBlock* codeBlockFor(VM&, JSScope*, const SourceCode&, CodeSpecializationKind, DebuggerMode, ProfilerMode, ParserError&);
static UnlinkedFunctionExecutable* fromGlobalCode(const Identifier&, ExecState*, Debugger*, const SourceCode&, JSObject** exception);
- FunctionExecutable* link(JSGlobalData&, const SourceCode&, size_t lineOffset, size_t sourceOffset);
+ FunctionExecutable* link(VM&, const SourceCode&, size_t lineOffset, size_t sourceOffset);
void clearCodeForRecompilation()
{
@@ -136,9 +142,9 @@ public:
static void destroy(JSCell*);
private:
- UnlinkedFunctionExecutable(JSGlobalData*, Structure*, const SourceCode&, FunctionBodyNode*);
- Weak<UnlinkedFunctionCodeBlock> m_codeBlockForCall;
- Weak<UnlinkedFunctionCodeBlock> m_codeBlockForConstruct;
+ UnlinkedFunctionExecutable(VM*, Structure*, const SourceCode&, FunctionBodyNode*);
+ WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForCall;
+ WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForConstruct;
unsigned m_numCapturedVariables : 29;
bool m_forceUsesArguments : 1;
@@ -153,6 +159,8 @@ private:
RefPtr<FunctionParameters> m_parameters;
unsigned m_firstLineOffset;
unsigned m_lineCount;
+ unsigned m_functionStartOffset;
+ unsigned m_functionStartColumn;
unsigned m_startOffset;
unsigned m_sourceLength;
@@ -161,18 +169,18 @@ private:
FunctionNameIsInScopeToggle m_functionNameIsInScopeToggle;
protected:
- void finishCreation(JSGlobalData& globalData)
+ void finishCreation(VM& vm)
{
- Base::finishCreation(globalData);
- m_nameValue.set(globalData, this, jsString(&globalData, name().string()));
+ Base::finishCreation(vm);
+ m_nameValue.set(vm, this, jsString(&vm, name().string()));
}
static void visitChildren(JSCell*, SlotVisitor&);
public:
- static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue proto)
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
{
- return Structure::create(globalData, globalObject, proto, TypeInfo(UnlinkedFunctionExecutableType, StructureFlags), &s_info);
+ return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionExecutableType, StructureFlags), &s_info);
}
static const unsigned StructureFlags = OverridesVisitChildren | JSCell::StructureFlags;
@@ -239,19 +247,8 @@ public:
bool needsFullScopeChain() const { return m_needsFullScopeChain; }
void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; }
- void addExpressionInfo(const ExpressionRangeInfo& expressionInfo)
- {
- m_expressionInfo.append(expressionInfo);
- }
-
- void addLineInfo(unsigned bytecodeOffset, int lineNo)
- {
- Vector<LineInfo>& lineInfo = m_lineInfo;
- if (!lineInfo.size() || lineInfo.last().lineNumber != lineNo) {
- LineInfo info = { bytecodeOffset, lineNo };
- lineInfo.append(info);
- }
- }
+ void addExpressionInfo(unsigned instructionOffset, int divot,
+ int startOffset, int endOffset, unsigned line, unsigned column);
bool hasExpressionInfo() { return m_expressionInfo.size(); }
@@ -263,6 +260,11 @@ public:
bool usesArguments() const { return m_argumentsRegister != -1; }
int argumentsRegister() const { return m_argumentsRegister; }
+
+ bool usesGlobalObject() const { return m_globalObjectRegister != -1; }
+ void setGlobalObjectRegister(int globalObjectRegister) { m_globalObjectRegister = globalObjectRegister; }
+ int globalObjectRegister() const { return m_globalObjectRegister; }
+
// Parameter information
void setNumParameters(int newValue) { m_numParameters = newValue; }
void addParameter() { m_numParameters++; }
@@ -272,7 +274,7 @@ public:
{
createRareDataIfNecessary();
unsigned size = m_rareData->m_regexps.size();
- m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_globalData, this, r));
+ m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_vm, this, r));
return size;
}
unsigned numberOfRegExps() const
@@ -295,7 +297,7 @@ public:
{
unsigned result = m_constantRegisters.size();
m_constantRegisters.append(WriteBarrier<Unknown>());
- m_constantRegisters.last().set(*m_globalData, this, v);
+ m_constantRegisters.last().set(*m_vm, this, v);
return result;
}
unsigned addOrFindConstant(JSValue);
@@ -320,7 +322,6 @@ public:
m_constantRegisters.shrinkToFit();
m_functionDecls.shrinkToFit();
m_functionExprs.shrinkToFit();
- m_lineInfo.shrinkToFit();
m_propertyAccessInstructions.shrinkToFit();
m_expressionInfo.shrinkToFit();
@@ -334,6 +335,7 @@ public:
m_rareData->m_immediateSwitchJumpTables.shrinkToFit();
m_rareData->m_characterSwitchJumpTables.shrinkToFit();
m_rareData->m_stringSwitchJumpTables.shrinkToFit();
+ m_rareData->m_expressionInfoFatPositions.shrinkToFit();
}
}
@@ -363,7 +365,7 @@ public:
{
unsigned size = m_functionDecls.size();
m_functionDecls.append(WriteBarrier<UnlinkedFunctionExecutable>());
- m_functionDecls.last().set(*m_globalData, this, n);
+ m_functionDecls.last().set(*m_vm, this, n);
return size;
}
UnlinkedFunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
@@ -372,7 +374,7 @@ public:
{
unsigned size = m_functionExprs.size();
m_functionExprs.append(WriteBarrier<UnlinkedFunctionExecutable>());
- m_functionExprs.last().set(*m_globalData, this, n);
+ m_functionExprs.last().set(*m_vm, this, n);
return size;
}
UnlinkedFunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
@@ -385,7 +387,7 @@ public:
SharedSymbolTable* symbolTable() const { return m_symbolTable.get(); }
- JSGlobalData* globalData() const { return m_globalData; }
+ VM* vm() const { return m_vm; }
unsigned addResolve() { return m_resolveOperationCount++; }
unsigned numberOfResolveOperations() const { return m_resolveOperationCount; }
@@ -396,6 +398,8 @@ public:
unsigned numberOfArrayProfiles() { return m_arrayProfileCount; }
UnlinkedArrayAllocationProfile addArrayAllocationProfile() { return m_arrayAllocationProfileCount++; }
unsigned numberOfArrayAllocationProfiles() { return m_arrayAllocationProfileCount; }
+ UnlinkedObjectAllocationProfile addObjectAllocationProfile() { return m_objectAllocationProfileCount++; }
+ unsigned numberOfObjectAllocationProfiles() { return m_objectAllocationProfileCount; }
UnlinkedValueProfile addValueProfile() { return m_valueProfileCount++; }
unsigned numberOfValueProfiles() { return m_valueProfileCount; }
@@ -443,7 +447,8 @@ public:
int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
- void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);
+ void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
+ int& startOffset, int& endOffset, unsigned& line, unsigned& column);
void recordParse(CodeFeatures features, bool hasCapturedVariables, unsigned firstLine, unsigned lineCount)
{
@@ -458,16 +463,26 @@ public:
unsigned firstLine() const { return m_firstLine; }
unsigned lineCount() const { return m_lineCount; }
+ PassRefPtr<CodeCache> codeCacheForEval()
+ {
+ if (m_codeType == GlobalCode)
+ return m_vm->codeCache();
+ createRareDataIfNecessary();
+ if (!m_rareData->m_evalCodeCache)
+ m_rareData->m_evalCodeCache = CodeCache::create(CodeCache::NonGlobalCodeCache);
+ return m_rareData->m_evalCodeCache.get();
+ }
+
protected:
- UnlinkedCodeBlock(JSGlobalData*, Structure*, CodeType, const ExecutableInfo&);
+ UnlinkedCodeBlock(VM*, Structure*, CodeType, const ExecutableInfo&);
~UnlinkedCodeBlock();
- void finishCreation(JSGlobalData& globalData)
+ void finishCreation(VM& vm)
{
- Base::finishCreation(globalData);
+ Base::finishCreation(vm);
if (codeType() == GlobalCode)
return;
- m_symbolTable.set(globalData, this, SharedSymbolTable::create(globalData));
+ m_symbolTable.set(vm, this, SharedSymbolTable::create(vm));
}
private:
@@ -481,11 +496,12 @@ private:
RefCountedArray<UnlinkedInstruction> m_unlinkedInstructions;
int m_numParameters;
- JSGlobalData* m_globalData;
+ VM* m_vm;
int m_thisRegister;
int m_argumentsRegister;
int m_activationRegister;
+ int m_globalObjectRegister;
bool m_needsFullScopeChain : 1;
bool m_usesEval : 1;
@@ -510,8 +526,6 @@ private:
WriteBarrier<SharedSymbolTable> m_symbolTable;
- Vector<LineInfo> m_lineInfo;
-
Vector<unsigned> m_propertyAccessInstructions;
#if ENABLE(BYTECODE_COMMENTS)
@@ -523,6 +537,7 @@ private:
unsigned m_putToBaseOperationCount;
unsigned m_arrayProfileCount;
unsigned m_arrayAllocationProfileCount;
+ unsigned m_objectAllocationProfileCount;
unsigned m_valueProfileCount;
unsigned m_llintCallLinkInfoCount;
@@ -542,8 +557,9 @@ public:
Vector<UnlinkedSimpleJumpTable> m_immediateSwitchJumpTables;
Vector<UnlinkedSimpleJumpTable> m_characterSwitchJumpTables;
Vector<UnlinkedStringJumpTable> m_stringSwitchJumpTables;
+ RefPtr<CodeCache> m_evalCodeCache;
- // Expression info - present if debugging.
+ Vector<ExpressionRangeInfo::FatPosition> m_expressionInfoFatPositions;
};
private:
@@ -564,8 +580,8 @@ public:
typedef UnlinkedCodeBlock Base;
protected:
- UnlinkedGlobalCodeBlock(JSGlobalData* globalData, Structure* structure, CodeType codeType, const ExecutableInfo& info)
- : Base(globalData, structure, codeType, info)
+ UnlinkedGlobalCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info)
+ : Base(vm, structure, codeType, info)
{
}
@@ -577,10 +593,10 @@ protected:
class UnlinkedProgramCodeBlock : public UnlinkedGlobalCodeBlock {
private:
friend class CodeCache;
- static UnlinkedProgramCodeBlock* create(JSGlobalData* globalData, const ExecutableInfo& info)
+ static UnlinkedProgramCodeBlock* create(VM* vm, const ExecutableInfo& info)
{
- UnlinkedProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedProgramCodeBlock>(globalData->heap)) UnlinkedProgramCodeBlock(globalData, globalData->unlinkedProgramCodeBlockStructure.get(), info);
- instance->finishCreation(*globalData);
+ UnlinkedProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedProgramCodeBlock>(vm->heap)) UnlinkedProgramCodeBlock(vm, vm->unlinkedProgramCodeBlockStructure.get(), info);
+ instance->finishCreation(*vm);
return instance;
}
@@ -588,9 +604,9 @@ public:
typedef UnlinkedGlobalCodeBlock Base;
static void destroy(JSCell*);
- void addFunctionDeclaration(JSGlobalData& globalData, const Identifier& name, UnlinkedFunctionExecutable* functionExecutable)
+ void addFunctionDeclaration(VM& vm, const Identifier& name, UnlinkedFunctionExecutable* functionExecutable)
{
- m_functionDeclarations.append(std::make_pair(name, WriteBarrier<UnlinkedFunctionExecutable>(globalData, this, functionExecutable)));
+ m_functionDeclarations.append(std::make_pair(name, WriteBarrier<UnlinkedFunctionExecutable>(vm, this, functionExecutable)));
}
void addVariableDeclaration(const Identifier& name, bool isConstant)
@@ -607,8 +623,8 @@ public:
static void visitChildren(JSCell*, SlotVisitor&);
private:
- UnlinkedProgramCodeBlock(JSGlobalData* globalData, Structure* structure, const ExecutableInfo& info)
- : Base(globalData, structure, GlobalCode, info)
+ UnlinkedProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info)
+ : Base(vm, structure, GlobalCode, info)
{
}
@@ -616,9 +632,9 @@ private:
FunctionDeclations m_functionDeclarations;
public:
- static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue proto)
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
{
- return Structure::create(globalData, globalObject, proto, TypeInfo(UnlinkedProgramCodeBlockType, StructureFlags), &s_info);
+ return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedProgramCodeBlockType, StructureFlags), &s_info);
}
static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
@@ -630,10 +646,10 @@ class UnlinkedEvalCodeBlock : public UnlinkedGlobalCodeBlock {
private:
friend class CodeCache;
- static UnlinkedEvalCodeBlock* create(JSGlobalData* globalData, const ExecutableInfo& info)
+ static UnlinkedEvalCodeBlock* create(VM* vm, const ExecutableInfo& info)
{
- UnlinkedEvalCodeBlock* instance = new (NotNull, allocateCell<UnlinkedEvalCodeBlock>(globalData->heap)) UnlinkedEvalCodeBlock(globalData, globalData->unlinkedEvalCodeBlockStructure.get(), info);
- instance->finishCreation(*globalData);
+ UnlinkedEvalCodeBlock* instance = new (NotNull, allocateCell<UnlinkedEvalCodeBlock>(vm->heap)) UnlinkedEvalCodeBlock(vm, vm->unlinkedEvalCodeBlockStructure.get(), info);
+ instance->finishCreation(*vm);
return instance;
}
@@ -643,24 +659,24 @@ public:
const Identifier& variable(unsigned index) { return m_variables[index]; }
unsigned numVariables() { return m_variables.size(); }
- void adoptVariables(Vector<Identifier>& variables)
+ void adoptVariables(Vector<Identifier, 0, UnsafeVectorOverflow>& variables)
{
ASSERT(m_variables.isEmpty());
m_variables.swap(variables);
}
private:
- UnlinkedEvalCodeBlock(JSGlobalData* globalData, Structure* structure, const ExecutableInfo& info)
- : Base(globalData, structure, EvalCode, info)
+ UnlinkedEvalCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info)
+ : Base(vm, structure, EvalCode, info)
{
}
- Vector<Identifier> m_variables;
+ Vector<Identifier, 0, UnsafeVectorOverflow> m_variables;
public:
- static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue proto)
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
{
- return Structure::create(globalData, globalObject, proto, TypeInfo(UnlinkedEvalCodeBlockType, StructureFlags), &s_info);
+ return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedEvalCodeBlockType, StructureFlags), &s_info);
}
static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
@@ -669,30 +685,27 @@ public:
};
class UnlinkedFunctionCodeBlock : public UnlinkedCodeBlock {
-private:
- friend class CodeCache;
-
- static UnlinkedFunctionCodeBlock* create(JSGlobalData* globalData, CodeType codeType, const ExecutableInfo& info)
+public:
+ static UnlinkedFunctionCodeBlock* create(VM* vm, CodeType codeType, const ExecutableInfo& info)
{
- UnlinkedFunctionCodeBlock* instance = new (NotNull, allocateCell<UnlinkedFunctionCodeBlock>(globalData->heap)) UnlinkedFunctionCodeBlock(globalData, globalData->unlinkedFunctionCodeBlockStructure.get(), codeType, info);
- instance->finishCreation(*globalData);
+ UnlinkedFunctionCodeBlock* instance = new (NotNull, allocateCell<UnlinkedFunctionCodeBlock>(vm->heap)) UnlinkedFunctionCodeBlock(vm, vm->unlinkedFunctionCodeBlockStructure.get(), codeType, info);
+ instance->finishCreation(*vm);
return instance;
}
-public:
typedef UnlinkedCodeBlock Base;
static void destroy(JSCell*);
private:
- UnlinkedFunctionCodeBlock(JSGlobalData* globalData, Structure* structure, CodeType codeType, const ExecutableInfo& info)
- : Base(globalData, structure, codeType, info)
+ UnlinkedFunctionCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info)
+ : Base(vm, structure, codeType, info)
{
}
public:
- static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue proto)
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
{
- return Structure::create(globalData, globalObject, proto, TypeInfo(UnlinkedFunctionCodeBlockType, StructureFlags), &s_info);
+ return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionCodeBlockType, StructureFlags), &s_info);
}
static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
diff --git a/Source/JavaScriptCore/bytecode/ValueProfile.h b/Source/JavaScriptCore/bytecode/ValueProfile.h
index e56e6eb6e..028c1f696 100644
--- a/Source/JavaScriptCore/bytecode/ValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/ValueProfile.h
@@ -39,6 +39,7 @@
#include "Structure.h"
#include "WriteBarrier.h"
#include <wtf/PrintStream.h>
+#include <wtf/StringPrintStream.h>
namespace JSC {
@@ -110,6 +111,20 @@ struct ValueProfileBase {
return false;
}
+ CString briefDescription()
+ {
+ computeUpdatedPrediction();
+
+ StringPrintStream out;
+
+ if (m_singletonValueIsTop)
+ out.print("predicting ", SpeculationDump(m_prediction));
+ else if (m_singletonValue)
+ out.print("predicting ", m_singletonValue);
+
+ return out.toCString();
+ }
+
void dump(PrintStream& out)
{
out.print("samples = ", totalNumberOfSamples(), " prediction = ", SpeculationDump(m_prediction));
@@ -117,7 +132,7 @@ struct ValueProfileBase {
if (m_singletonValueIsTop)
out.printf("TOP");
else
- out.printf("%s", m_singletonValue.description());
+ out.print(m_singletonValue);
bool first = true;
for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
JSValue value = JSValue::decode(m_buckets[i]);
@@ -127,7 +142,7 @@ struct ValueProfileBase {
first = false;
} else
out.printf(", ");
- out.printf("%s", value.description());
+ out.print(value);
}
}
}
diff --git a/Source/JavaScriptCore/bytecode/ValueRecovery.h b/Source/JavaScriptCore/bytecode/ValueRecovery.h
index fc991a413..77d5a1030 100644
--- a/Source/JavaScriptCore/bytecode/ValueRecovery.h
+++ b/Source/JavaScriptCore/bytecode/ValueRecovery.h
@@ -27,7 +27,7 @@
#define ValueRecovery_h
#include "DataFormat.h"
-#include "JSValue.h"
+#include "JSCJSValue.h"
#include "MacroAssembler.h"
#include "VirtualRegister.h"
#include <stdio.h>
@@ -331,7 +331,7 @@ public:
out.printf("arguments");
break;
case Constant:
- out.printf("[%s]", constant().description());
+ out.print("[", constant(), "]");
break;
case DontKnow:
out.printf("!");