summaryrefslogtreecommitdiff
path: root/Source/JavaScriptCore/bytecode
diff options
context:
space:
mode:
authorLorry Tar Creator <lorry-tar-importer@lorry>2016-05-24 08:28:08 +0000
committerLorry Tar Creator <lorry-tar-importer@lorry>2016-05-24 08:28:08 +0000
commita4e969f4965059196ca948db781e52f7cfebf19e (patch)
tree6ca352808c8fdc52006a0f33f6ae3c593b23867d /Source/JavaScriptCore/bytecode
parent41386e9cb918eed93b3f13648cbef387e371e451 (diff)
downloadWebKitGtk-tarball-a4e969f4965059196ca948db781e52f7cfebf19e.tar.gz
webkitgtk-2.12.3webkitgtk-2.12.3
Diffstat (limited to 'Source/JavaScriptCore/bytecode')
-rw-r--r--Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp91
-rw-r--r--Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h75
-rw-r--r--Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp4
-rw-r--r--Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h2
-rw-r--r--Source/JavaScriptCore/bytecode/ArrayProfile.cpp42
-rw-r--r--Source/JavaScriptCore/bytecode/ArrayProfile.h68
-rw-r--r--Source/JavaScriptCore/bytecode/ByValInfo.h80
-rw-r--r--Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp57
-rw-r--r--Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h15
-rw-r--r--Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp76
-rw-r--r--Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h82
-rw-r--r--Source/JavaScriptCore/bytecode/BytecodeKills.h181
-rw-r--r--Source/JavaScriptCore/bytecode/BytecodeList.json182
-rw-r--r--Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp219
-rw-r--r--Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h18
-rw-r--r--Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h24
-rw-r--r--Source/JavaScriptCore/bytecode/BytecodeUseDef.h256
-rw-r--r--Source/JavaScriptCore/bytecode/CallEdge.cpp37
-rw-r--r--Source/JavaScriptCore/bytecode/CallEdge.h71
-rw-r--r--Source/JavaScriptCore/bytecode/CallLinkInfo.cpp102
-rw-r--r--Source/JavaScriptCore/bytecode/CallLinkInfo.h329
-rw-r--r--Source/JavaScriptCore/bytecode/CallLinkStatus.cpp332
-rw-r--r--Source/JavaScriptCore/bytecode/CallLinkStatus.h115
-rw-r--r--Source/JavaScriptCore/bytecode/CallMode.cpp49
-rw-r--r--Source/JavaScriptCore/bytecode/CallMode.h55
-rw-r--r--Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h2
-rw-r--r--Source/JavaScriptCore/bytecode/CallVariant.cpp97
-rw-r--r--Source/JavaScriptCore/bytecode/CallVariant.h203
-rw-r--r--Source/JavaScriptCore/bytecode/CodeBlock.cpp3123
-rw-r--r--Source/JavaScriptCore/bytecode/CodeBlock.h1061
-rw-r--r--Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp12
-rw-r--r--Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h7
-rw-r--r--Source/JavaScriptCore/bytecode/CodeOrigin.cpp132
-rw-r--r--Source/JavaScriptCore/bytecode/CodeOrigin.h88
-rw-r--r--Source/JavaScriptCore/bytecode/CodeType.cpp3
-rw-r--r--Source/JavaScriptCore/bytecode/CodeType.h4
-rw-r--r--Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp79
-rw-r--r--Source/JavaScriptCore/bytecode/ComplexGetStatus.h114
-rw-r--r--Source/JavaScriptCore/bytecode/DFGExitProfile.cpp8
-rw-r--r--Source/JavaScriptCore/bytecode/DFGExitProfile.h48
-rw-r--r--Source/JavaScriptCore/bytecode/DataFormat.cpp39
-rw-r--r--Source/JavaScriptCore/bytecode/DataFormat.h14
-rw-r--r--Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp38
-rw-r--r--Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h13
-rw-r--r--Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp66
-rw-r--r--Source/JavaScriptCore/bytecode/DeferredSourceDump.h (renamed from Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.h)41
-rw-r--r--Source/JavaScriptCore/bytecode/EvalCodeCache.h91
-rw-r--r--Source/JavaScriptCore/bytecode/ExecutableInfo.h80
-rw-r--r--Source/JavaScriptCore/bytecode/ExecutionCounter.cpp42
-rw-r--r--Source/JavaScriptCore/bytecode/ExecutionCounter.h56
-rw-r--r--Source/JavaScriptCore/bytecode/ExitKind.cpp45
-rw-r--r--Source/JavaScriptCore/bytecode/ExitKind.h34
-rw-r--r--Source/JavaScriptCore/bytecode/ExitingJITType.cpp52
-rw-r--r--Source/JavaScriptCore/bytecode/ExitingJITType.h62
-rw-r--r--Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h23
-rw-r--r--Source/JavaScriptCore/bytecode/GetByIdStatus.cpp473
-rw-r--r--Source/JavaScriptCore/bytecode/GetByIdStatus.h65
-rw-r--r--Source/JavaScriptCore/bytecode/GetByIdVariant.cpp145
-rw-r--r--Source/JavaScriptCore/bytecode/GetByIdVariant.h87
-rw-r--r--Source/JavaScriptCore/bytecode/HandlerInfo.h60
-rw-r--r--Source/JavaScriptCore/bytecode/InlineCallFrame.cpp124
-rw-r--r--Source/JavaScriptCore/bytecode/InlineCallFrame.h269
-rw-r--r--Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp3
-rw-r--r--Source/JavaScriptCore/bytecode/InlineCallFrameSet.h7
-rw-r--r--Source/JavaScriptCore/bytecode/Instruction.h41
-rw-r--r--Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h64
-rw-r--r--Source/JavaScriptCore/bytecode/JumpTable.cpp2
-rw-r--r--Source/JavaScriptCore/bytecode/JumpTable.h8
-rw-r--r--Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h2
-rw-r--r--Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp4
-rw-r--r--Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h3
-rw-r--r--Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp1
-rw-r--r--Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h2
-rw-r--r--Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h22
-rw-r--r--Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp160
-rw-r--r--Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h268
-rw-r--r--Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp368
-rw-r--r--Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h175
-rw-r--r--Source/JavaScriptCore/bytecode/Opcode.cpp15
-rw-r--r--Source/JavaScriptCore/bytecode/Opcode.h173
-rw-r--r--Source/JavaScriptCore/bytecode/Operands.h23
-rw-r--r--Source/JavaScriptCore/bytecode/OperandsInlines.h16
-rw-r--r--Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp1469
-rw-r--r--Source/JavaScriptCore/bytecode/PolymorphicAccess.h451
-rw-r--r--Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h139
-rw-r--r--Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp148
-rw-r--r--Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h195
-rw-r--r--Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp20
-rw-r--r--Source/JavaScriptCore/bytecode/PreciseJumpTargets.h2
-rw-r--r--Source/JavaScriptCore/bytecode/PropertyCondition.cpp364
-rw-r--r--Source/JavaScriptCore/bytecode/PropertyCondition.h338
-rw-r--r--Source/JavaScriptCore/bytecode/PutByIdFlags.cpp50
-rw-r--r--Source/JavaScriptCore/bytecode/PutByIdFlags.h105
-rw-r--r--Source/JavaScriptCore/bytecode/PutByIdStatus.cpp457
-rw-r--r--Source/JavaScriptCore/bytecode/PutByIdStatus.h90
-rw-r--r--Source/JavaScriptCore/bytecode/PutByIdVariant.cpp249
-rw-r--r--Source/JavaScriptCore/bytecode/PutByIdVariant.h148
-rw-r--r--Source/JavaScriptCore/bytecode/SamplingTool.cpp9
-rw-r--r--Source/JavaScriptCore/bytecode/SamplingTool.h12
-rw-r--r--Source/JavaScriptCore/bytecode/SpecialPointer.cpp1
-rw-r--r--Source/JavaScriptCore/bytecode/SpecialPointer.h5
-rw-r--r--Source/JavaScriptCore/bytecode/SpeculatedType.cpp221
-rw-r--r--Source/JavaScriptCore/bytecode/SpeculatedType.h217
-rw-r--r--Source/JavaScriptCore/bytecode/StructureSet.cpp109
-rw-r--r--Source/JavaScriptCore/bytecode/StructureSet.h157
-rw-r--r--Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp43
-rw-r--r--Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h44
-rw-r--r--Source/JavaScriptCore/bytecode/StructureStubInfo.cpp218
-rw-r--r--Source/JavaScriptCore/bytecode/StructureStubInfo.h287
-rw-r--r--Source/JavaScriptCore/bytecode/ToThisStatus.cpp72
-rw-r--r--Source/JavaScriptCore/bytecode/ToThisStatus.h50
-rw-r--r--Source/JavaScriptCore/bytecode/TrackedReferences.cpp (renamed from Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.cpp)71
-rw-r--r--Source/JavaScriptCore/bytecode/TrackedReferences.h56
-rw-r--r--Source/JavaScriptCore/bytecode/TypeLocation.h63
-rw-r--r--Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp261
-rw-r--r--Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h402
-rw-r--r--Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp225
-rw-r--r--Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h190
-rw-r--r--Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp82
-rw-r--r--Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h79
-rw-r--r--Source/JavaScriptCore/bytecode/ValueProfile.cpp68
-rw-r--r--Source/JavaScriptCore/bytecode/ValueProfile.h60
-rw-r--r--Source/JavaScriptCore/bytecode/ValueRecovery.cpp28
-rw-r--r--Source/JavaScriptCore/bytecode/ValueRecovery.h174
-rw-r--r--Source/JavaScriptCore/bytecode/VariableWatchpointSet.h109
-rw-r--r--Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp44
-rw-r--r--Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h55
-rw-r--r--Source/JavaScriptCore/bytecode/VirtualRegister.cpp65
-rw-r--r--Source/JavaScriptCore/bytecode/VirtualRegister.h51
-rw-r--r--Source/JavaScriptCore/bytecode/Watchpoint.cpp67
-rw-r--r--Source/JavaScriptCore/bytecode/Watchpoint.h205
131 files changed, 14081 insertions, 4763 deletions
diff --git a/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp
new file mode 100644
index 000000000..9f02e8cb6
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AdaptiveInferredPropertyValueWatchpointBase.h"
+
+#include "JSCellInlines.h"
+#include "StructureInlines.h"
+
+namespace JSC {
+
+AdaptiveInferredPropertyValueWatchpointBase::AdaptiveInferredPropertyValueWatchpointBase(const ObjectPropertyCondition& key)
+ : m_key(key)
+{
+ RELEASE_ASSERT(key.kind() == PropertyCondition::Equivalence);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::install()
+{
+ RELEASE_ASSERT(m_key.isWatchable());
+
+ m_key.object()->structure()->addTransitionWatchpoint(&m_structureWatchpoint);
+
+ PropertyOffset offset = m_key.object()->structure()->getConcurrently(m_key.uid());
+ WatchpointSet* set = m_key.object()->structure()->propertyReplacementWatchpointSet(offset);
+ set->add(&m_propertyWatchpoint);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::fire(const FireDetail& detail)
+{
+ // We need to defer GC here otherwise we might trigger a GC that could destroy the owner
+ // CodeBlock. In particular, this can happen when we add rare data to a structure when
+ // we EnsureWatchability.
+ DeferGCForAWhile defer(*Heap::heap(m_key.object()));
+ // One of the watchpoints fired, but the other one didn't. Make sure that neither of them are
+ // in any set anymore. This simplifies things by allowing us to reinstall the watchpoints
+ // wherever from scratch.
+ if (m_structureWatchpoint.isOnList())
+ m_structureWatchpoint.remove();
+ if (m_propertyWatchpoint.isOnList())
+ m_propertyWatchpoint.remove();
+
+ if (m_key.isWatchable(PropertyCondition::EnsureWatchability)) {
+ install();
+ return;
+ }
+
+ handleFire(detail);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::StructureWatchpoint::fireInternal(const FireDetail& detail)
+{
+ ptrdiff_t myOffset = OBJECT_OFFSETOF(AdaptiveInferredPropertyValueWatchpointBase, m_structureWatchpoint);
+
+ AdaptiveInferredPropertyValueWatchpointBase* parent = bitwise_cast<AdaptiveInferredPropertyValueWatchpointBase*>(bitwise_cast<char*>(this) - myOffset);
+
+ parent->fire(detail);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::PropertyWatchpoint::fireInternal(const FireDetail& detail)
+{
+ ptrdiff_t myOffset = OBJECT_OFFSETOF(AdaptiveInferredPropertyValueWatchpointBase, m_propertyWatchpoint);
+
+ AdaptiveInferredPropertyValueWatchpointBase* parent = bitwise_cast<AdaptiveInferredPropertyValueWatchpointBase*>(bitwise_cast<char*>(this) - myOffset);
+
+ parent->fire(detail);
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h
new file mode 100644
index 000000000..3fd022303
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AdaptiveInferredPropertyValueWatchpointBase_h
+#define AdaptiveInferredPropertyValueWatchpointBase_h
+
+#include "ObjectPropertyCondition.h"
+#include "Watchpoint.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class AdaptiveInferredPropertyValueWatchpointBase {
+ WTF_MAKE_NONCOPYABLE(AdaptiveInferredPropertyValueWatchpointBase);
+ WTF_MAKE_FAST_ALLOCATED;
+
+public:
+ AdaptiveInferredPropertyValueWatchpointBase(const ObjectPropertyCondition&);
+
+ const ObjectPropertyCondition& key() const { return m_key; }
+
+ void install();
+
+ virtual ~AdaptiveInferredPropertyValueWatchpointBase() = default;
+
+protected:
+ virtual void handleFire(const FireDetail&) = 0;
+
+private:
+ class StructureWatchpoint : public Watchpoint {
+ public:
+ StructureWatchpoint() { }
+ protected:
+ virtual void fireInternal(const FireDetail&) override;
+ };
+ class PropertyWatchpoint : public Watchpoint {
+ public:
+ PropertyWatchpoint() { }
+ protected:
+ virtual void fireInternal(const FireDetail&) override;
+ };
+
+ void fire(const FireDetail&);
+
+ ObjectPropertyCondition m_key;
+ StructureWatchpoint m_structureWatchpoint;
+ PropertyWatchpoint m_propertyWatchpoint;
+};
+
+} // namespace JSC
+
+#endif /* AdaptiveInferredPropertyValueWatchpointBase_h */
diff --git a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp
index 4a008e083..905b5bd3c 100644
--- a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp
@@ -26,7 +26,7 @@
#include "config.h"
#include "ArrayAllocationProfile.h"
-#include "Operations.h"
+#include "JSCInlines.h"
namespace JSC {
@@ -49,7 +49,7 @@ void ArrayAllocationProfile::updateIndexingType()
JSArray* lastArray = m_lastArray;
if (!lastArray)
return;
- m_currentIndexingType = leastUpperBoundOfIndexingTypes(m_currentIndexingType, lastArray->structure()->indexingType());
+ m_currentIndexingType = leastUpperBoundOfIndexingTypes(m_currentIndexingType, lastArray->indexingType());
m_lastArray = 0;
}
diff --git a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h
index f77b92a2f..f03763f70 100644
--- a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h
+++ b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h
@@ -42,7 +42,7 @@ public:
IndexingType selectIndexingType()
{
JSArray* lastArray = m_lastArray;
- if (lastArray && UNLIKELY(lastArray->structure()->indexingType() != m_currentIndexingType))
+ if (lastArray && UNLIKELY(lastArray->indexingType() != m_currentIndexingType))
updateIndexingType();
return m_currentIndexingType;
}
diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
index 4c055fea5..b8ade2223 100644
--- a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
@@ -27,6 +27,7 @@
#include "ArrayProfile.h"
#include "CodeBlock.h"
+#include "JSCInlines.h"
#include <wtf/CommaPrinter.h>
#include <wtf/StringExtras.h>
#include <wtf/StringPrintStream.h>
@@ -72,28 +73,53 @@ void dumpArrayModes(PrintStream& out, ArrayModes arrayModes)
out.print(comma, "ArrayWithArrayStorage");
if (arrayModes & asArrayModes(ArrayWithSlowPutArrayStorage))
out.print(comma, "ArrayWithSlowPutArrayStorage");
+
+ if (arrayModes & Int8ArrayMode)
+ out.print(comma, "Int8ArrayMode");
+ if (arrayModes & Int16ArrayMode)
+ out.print(comma, "Int16ArrayMode");
+ if (arrayModes & Int32ArrayMode)
+ out.print(comma, "Int32ArrayMode");
+ if (arrayModes & Uint8ArrayMode)
+ out.print(comma, "Uint8ArrayMode");
+ if (arrayModes & Uint8ClampedArrayMode)
+ out.print(comma, "Uint8ClampedArrayMode");
+ if (arrayModes & Uint16ArrayMode)
+ out.print(comma, "Uint16ArrayMode");
+ if (arrayModes & Uint32ArrayMode)
+ out.print(comma, "Uint32ArrayMode");
+ if (arrayModes & Float32ArrayMode)
+ out.print(comma, "Float32ArrayMode");
+ if (arrayModes & Float64ArrayMode)
+ out.print(comma, "Float64ArrayMode");
}
-void ArrayProfile::computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock* codeBlock)
+void ArrayProfile::computeUpdatedPrediction(const ConcurrentJITLocker& locker, CodeBlock* codeBlock)
{
- if (!m_lastSeenStructure)
+ if (!m_lastSeenStructureID)
return;
- m_observedArrayModes |= arrayModeFromStructure(m_lastSeenStructure);
+ Structure* lastSeenStructure = codeBlock->heap()->structureIDTable().get(m_lastSeenStructureID);
+ computeUpdatedPrediction(locker, codeBlock, lastSeenStructure);
+ m_lastSeenStructureID = 0;
+}
+
+void ArrayProfile::computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock* codeBlock, Structure* lastSeenStructure)
+{
+ m_observedArrayModes |= arrayModeFromStructure(lastSeenStructure);
if (!m_didPerformFirstRunPruning
&& hasTwoOrMoreBitsSet(m_observedArrayModes)) {
- m_observedArrayModes = arrayModeFromStructure(m_lastSeenStructure);
+ m_observedArrayModes = arrayModeFromStructure(lastSeenStructure);
m_didPerformFirstRunPruning = true;
}
m_mayInterceptIndexedAccesses |=
- m_lastSeenStructure->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero();
+ lastSeenStructure->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero();
JSGlobalObject* globalObject = codeBlock->globalObject();
- if (!globalObject->isOriginalArrayStructure(m_lastSeenStructure)
- && !globalObject->isOriginalTypedArrayStructure(m_lastSeenStructure))
+ if (!globalObject->isOriginalArrayStructure(lastSeenStructure)
+ && !globalObject->isOriginalTypedArrayStructure(lastSeenStructure))
m_usesOriginalArrayStructures = false;
- m_lastSeenStructure = 0;
}
CString ArrayProfile::briefDescription(const ConcurrentJITLocker& locker, CodeBlock* codeBlock)
diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.h b/Source/JavaScriptCore/bytecode/ArrayProfile.h
index c23230e06..66b295da7 100644
--- a/Source/JavaScriptCore/bytecode/ArrayProfile.h
+++ b/Source/JavaScriptCore/bytecode/ArrayProfile.h
@@ -37,20 +37,44 @@ namespace JSC {
class CodeBlock;
class LLIntOffsetsExtractor;
-// This is a bitfield where each bit represents an IndexingType that we have seen.
-// There are 32 indexing types, so an unsigned is enough.
+// This is a bitfield where each bit represents an type of array access that we have seen.
+// There are 16 indexing types that use the lower bits.
+// There are 9 typed array types taking the bits 16 to 25.
typedef unsigned ArrayModes;
+const ArrayModes Int8ArrayMode = 1 << 16;
+const ArrayModes Int16ArrayMode = 1 << 17;
+const ArrayModes Int32ArrayMode = 1 << 18;
+const ArrayModes Uint8ArrayMode = 1 << 19;
+const ArrayModes Uint8ClampedArrayMode = 1 << 20;
+const ArrayModes Uint16ArrayMode = 1 << 21;
+const ArrayModes Uint32ArrayMode = 1 << 22;
+const ArrayModes Float32ArrayMode = 1 << 23;
+const ArrayModes Float64ArrayMode = 1 << 24;
+
#define asArrayModes(type) \
(static_cast<unsigned>(1) << static_cast<unsigned>(type))
+#define ALL_TYPED_ARRAY_MODES \
+ (Int8ArrayMode \
+ | Int16ArrayMode \
+ | Int32ArrayMode \
+ | Uint8ArrayMode \
+ | Uint8ClampedArrayMode \
+ | Uint16ArrayMode \
+ | Uint32ArrayMode \
+ | Float32ArrayMode \
+ | Float64ArrayMode \
+ )
+
#define ALL_NON_ARRAY_ARRAY_MODES \
(asArrayModes(NonArray) \
| asArrayModes(NonArrayWithInt32) \
| asArrayModes(NonArrayWithDouble) \
| asArrayModes(NonArrayWithContiguous) \
| asArrayModes(NonArrayWithArrayStorage) \
- | asArrayModes(NonArrayWithSlowPutArrayStorage))
+ | asArrayModes(NonArrayWithSlowPutArrayStorage) \
+ | ALL_TYPED_ARRAY_MODES)
#define ALL_ARRAY_ARRAY_MODES \
(asArrayModes(ArrayClass) \
@@ -65,6 +89,29 @@ typedef unsigned ArrayModes;
inline ArrayModes arrayModeFromStructure(Structure* structure)
{
+ switch (structure->classInfo()->typedArrayStorageType) {
+ case TypeInt8:
+ return Int8ArrayMode;
+ case TypeUint8:
+ return Uint8ArrayMode;
+ case TypeUint8Clamped:
+ return Uint8ClampedArrayMode;
+ case TypeInt16:
+ return Int16ArrayMode;
+ case TypeUint16:
+ return Uint16ArrayMode;
+ case TypeInt32:
+ return Int32ArrayMode;
+ case TypeUint32:
+ return Uint32ArrayMode;
+ case TypeFloat32:
+ return Float32ArrayMode;
+ case TypeFloat64:
+ return Float64ArrayMode;
+ case TypeDataView:
+ case NotTypedArray:
+ break;
+ }
return asArrayModes(structure->indexingType());
}
@@ -135,7 +182,7 @@ class ArrayProfile {
public:
ArrayProfile()
: m_bytecodeOffset(std::numeric_limits<unsigned>::max())
- , m_lastSeenStructure(0)
+ , m_lastSeenStructureID(0)
, m_mayStoreToHole(false)
, m_outOfBounds(false)
, m_mayInterceptIndexedAccesses(false)
@@ -147,7 +194,7 @@ public:
ArrayProfile(unsigned bytecodeOffset)
: m_bytecodeOffset(bytecodeOffset)
- , m_lastSeenStructure(0)
+ , m_lastSeenStructureID(0)
, m_mayStoreToHole(false)
, m_outOfBounds(false)
, m_mayInterceptIndexedAccesses(false)
@@ -159,17 +206,20 @@ public:
unsigned bytecodeOffset() const { return m_bytecodeOffset; }
- Structure** addressOfLastSeenStructure() { return &m_lastSeenStructure; }
+ StructureID* addressOfLastSeenStructureID() { return &m_lastSeenStructureID; }
ArrayModes* addressOfArrayModes() { return &m_observedArrayModes; }
bool* addressOfMayStoreToHole() { return &m_mayStoreToHole; }
+
+ void setOutOfBounds() { m_outOfBounds = true; }
bool* addressOfOutOfBounds() { return &m_outOfBounds; }
void observeStructure(Structure* structure)
{
- m_lastSeenStructure = structure;
+ m_lastSeenStructureID = structure->id();
}
void computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock*);
+ void computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock*, Structure* lastSeenStructure);
ArrayModes observedArrayModes(const ConcurrentJITLocker&) const { return m_observedArrayModes; }
bool mayInterceptIndexedAccesses(const ConcurrentJITLocker&) const { return m_mayInterceptIndexedAccesses; }
@@ -188,7 +238,7 @@ private:
static Structure* polymorphicStructure() { return static_cast<Structure*>(reinterpret_cast<void*>(1)); }
unsigned m_bytecodeOffset;
- Structure* m_lastSeenStructure;
+ StructureID m_lastSeenStructureID;
bool m_mayStoreToHole; // This flag may become overloaded to indicate other special cases that were encountered during array access, as it depends on indexing type. Since we currently have basically just one indexing type (two variants of ArrayStorage), this flag for now just means exactly what its name implies.
bool m_outOfBounds;
bool m_mayInterceptIndexedAccesses : 1;
@@ -197,7 +247,7 @@ private:
ArrayModes m_observedArrayModes;
};
-typedef SegmentedVector<ArrayProfile, 4, 0> ArrayProfileVector;
+typedef SegmentedVector<ArrayProfile, 4> ArrayProfileVector;
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ByValInfo.h b/Source/JavaScriptCore/bytecode/ByValInfo.h
index 35fae0c60..20518300c 100644
--- a/Source/JavaScriptCore/bytecode/ByValInfo.h
+++ b/Source/JavaScriptCore/bytecode/ByValInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,23 +26,25 @@
#ifndef ByValInfo_h
#define ByValInfo_h
-#include <wtf/Platform.h>
-
-#if ENABLE(JIT)
-
#include "ClassInfo.h"
#include "CodeLocation.h"
+#include "CodeOrigin.h"
#include "IndexingType.h"
#include "JITStubRoutine.h"
#include "Structure.h"
+#include "StructureStubInfo.h"
namespace JSC {
+#if ENABLE(JIT)
+
enum JITArrayMode {
JITInt32,
JITDouble,
JITContiguous,
JITArrayStorage,
+ JITDirectArguments,
+ JITScopedArguments,
JITInt8Array,
JITInt16Array,
JITInt32Array,
@@ -67,6 +69,17 @@ inline bool isOptimizableIndexingType(IndexingType indexingType)
}
}
+inline bool hasOptimizableIndexingForJSType(JSType type)
+{
+ switch (type) {
+ case DirectArgumentsType:
+ case ScopedArgumentsType:
+ return true;
+ default:
+ return false;
+ }
+}
+
inline bool hasOptimizableIndexingForClassInfo(const ClassInfo* classInfo)
{
return isTypedView(classInfo->typedArrayStorageType);
@@ -75,6 +88,7 @@ inline bool hasOptimizableIndexingForClassInfo(const ClassInfo* classInfo)
inline bool hasOptimizableIndexing(Structure* structure)
{
return isOptimizableIndexingType(structure->indexingType())
+ || hasOptimizableIndexingForJSType(structure->typeInfo().type())
|| hasOptimizableIndexingForClassInfo(structure->classInfo());
}
@@ -95,6 +109,19 @@ inline JITArrayMode jitArrayModeForIndexingType(IndexingType indexingType)
}
}
+inline JITArrayMode jitArrayModeForJSType(JSType type)
+{
+ switch (type) {
+ case DirectArgumentsType:
+ return JITDirectArguments;
+ case ScopedArgumentsType:
+ return JITScopedArguments;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return JITContiguous;
+ }
+}
+
inline JITArrayMode jitArrayModeForClassInfo(const ClassInfo* classInfo)
{
switch (classInfo->typedArrayStorageType) {
@@ -122,6 +149,19 @@ inline JITArrayMode jitArrayModeForClassInfo(const ClassInfo* classInfo)
}
}
+inline bool jitArrayModePermitsPut(JITArrayMode mode)
+{
+ switch (mode) {
+ case JITDirectArguments:
+ case JITScopedArguments:
+ // We could support put_by_val on these at some point, but it's just not that profitable
+ // at the moment.
+ return false;
+ default:
+ return true;
+ }
+}
+
inline TypedArrayType typedArrayTypeForJITArrayMode(JITArrayMode mode)
{
switch (mode) {
@@ -154,30 +194,46 @@ inline JITArrayMode jitArrayModeForStructure(Structure* structure)
if (isOptimizableIndexingType(structure->indexingType()))
return jitArrayModeForIndexingType(structure->indexingType());
+ if (hasOptimizableIndexingForJSType(structure->typeInfo().type()))
+ return jitArrayModeForJSType(structure->typeInfo().type());
+
ASSERT(hasOptimizableIndexingForClassInfo(structure->classInfo()));
return jitArrayModeForClassInfo(structure->classInfo());
}
struct ByValInfo {
ByValInfo() { }
-
- ByValInfo(unsigned bytecodeIndex, CodeLocationJump badTypeJump, JITArrayMode arrayMode, int16_t badTypeJumpToDone, int16_t returnAddressToSlowPath)
+
+ ByValInfo(unsigned bytecodeIndex, CodeLocationJump notIndexJump, CodeLocationJump badTypeJump, JITArrayMode arrayMode, ArrayProfile* arrayProfile, int16_t badTypeJumpToDone, int16_t badTypeJumpToNextHotPath, int16_t returnAddressToSlowPath)
: bytecodeIndex(bytecodeIndex)
+ , notIndexJump(notIndexJump)
, badTypeJump(badTypeJump)
, arrayMode(arrayMode)
+ , arrayProfile(arrayProfile)
, badTypeJumpToDone(badTypeJumpToDone)
+ , badTypeJumpToNextHotPath(badTypeJumpToNextHotPath)
, returnAddressToSlowPath(returnAddressToSlowPath)
, slowPathCount(0)
+ , stubInfo(nullptr)
+ , tookSlowPath(false)
+ , seen(false)
{
}
-
+
unsigned bytecodeIndex;
+ CodeLocationJump notIndexJump;
CodeLocationJump badTypeJump;
JITArrayMode arrayMode; // The array mode that was baked into the inline JIT code.
+ ArrayProfile* arrayProfile;
int16_t badTypeJumpToDone;
+ int16_t badTypeJumpToNextHotPath;
int16_t returnAddressToSlowPath;
unsigned slowPathCount;
RefPtr<JITStubRoutine> stubRoutine;
+ Identifier cachedId;
+ StructureStubInfo* stubInfo;
+ bool tookSlowPath : 1;
+ bool seen : 1;
};
inline unsigned getByValInfoBytecodeIndex(ByValInfo* info)
@@ -185,9 +241,15 @@ inline unsigned getByValInfoBytecodeIndex(ByValInfo* info)
return info->bytecodeIndex;
}
-} // namespace JSC
+typedef HashMap<CodeOrigin, ByValInfo*, CodeOriginApproximateHash> ByValInfoMap;
+
+#else // ENABLE(JIT)
+
+typedef HashMap<int, void*> ByValInfoMap;
#endif // ENABLE(JIT)
+} // namespace JSC
+
#endif // ByValInfo_h
diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp
index d7489d31a..7f17c0ef1 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,10 +27,17 @@
#include "BytecodeBasicBlock.h"
#include "CodeBlock.h"
+#include "JSCInlines.h"
#include "PreciseJumpTargets.h"
namespace JSC {
+void BytecodeBasicBlock::shrinkToFit()
+{
+ m_bytecodeOffsets.shrinkToFit();
+ m_successors.shrinkToFit();
+}
+
static bool isBranch(OpcodeID opcodeID)
{
switch (opcodeID) {
@@ -51,9 +58,7 @@ static bool isBranch(OpcodeID opcodeID)
case op_switch_imm:
case op_switch_char:
case op_switch_string:
- case op_get_pnames:
- case op_next_pname:
- case op_check_has_instance:
+ case op_save:
return true;
default:
return false;
@@ -74,7 +79,6 @@ static bool isTerminal(OpcodeID opcodeID)
{
switch (opcodeID) {
case op_ret:
- case op_ret_object_or_this:
case op_end:
return true;
default:
@@ -93,38 +97,36 @@ static bool isThrow(OpcodeID opcodeID)
}
}
-static bool isJumpTarget(OpcodeID opcodeID, Vector<unsigned, 32>& jumpTargets, unsigned bytecodeOffset)
+static bool isJumpTarget(OpcodeID opcodeID, const Vector<unsigned, 32>& jumpTargets, unsigned bytecodeOffset)
{
if (opcodeID == op_catch)
return true;
- for (unsigned i = 0; i < jumpTargets.size(); i++) {
- if (bytecodeOffset == jumpTargets[i])
- return true;
- }
- return false;
+ return std::binary_search(jumpTargets.begin(), jumpTargets.end(), bytecodeOffset);
}
static void linkBlocks(BytecodeBasicBlock* predecessor, BytecodeBasicBlock* successor)
{
predecessor->addSuccessor(successor);
- successor->addPredecessor(predecessor);
}
-void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks)
+void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)
{
Vector<unsigned, 32> jumpTargets;
computePreciseJumpTargets(codeBlock, jumpTargets);
// Create the entry and exit basic blocks.
- BytecodeBasicBlock* entry = new BytecodeBasicBlock(BytecodeBasicBlock::EntryBlock);
- basicBlocks.append(adoptRef(entry));
- BytecodeBasicBlock* exit = new BytecodeBasicBlock(BytecodeBasicBlock::ExitBlock);
+ basicBlocks.reserveCapacity(jumpTargets.size() + 2);
+
+ auto entry = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock);
+ auto firstBlock = std::make_unique<BytecodeBasicBlock>(0, 0);
+ linkBlocks(entry.get(), firstBlock.get());
+
+ basicBlocks.append(WTFMove(entry));
+ BytecodeBasicBlock* current = firstBlock.get();
+ basicBlocks.append(WTFMove(firstBlock));
- // Find basic block boundaries.
- BytecodeBasicBlock* current = new BytecodeBasicBlock(0, 0);
- linkBlocks(entry, current);
- basicBlocks.append(adoptRef(current));
+ auto exit = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::ExitBlock);
bool nextInstructionIsLeader = false;
@@ -138,9 +140,9 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasi
bool createdBlock = false;
// If the current bytecode is a jump target, then it's the leader of its own basic block.
if (isJumpTarget(opcodeID, jumpTargets, bytecodeOffset) || nextInstructionIsLeader) {
- BytecodeBasicBlock* block = new BytecodeBasicBlock(bytecodeOffset, opcodeLength);
- basicBlocks.append(adoptRef(block));
- current = block;
+ auto newBlock = std::make_unique<BytecodeBasicBlock>(bytecodeOffset, opcodeLength);
+ current = newBlock.get();
+ basicBlocks.append(WTFMove(newBlock));
createdBlock = true;
nextInstructionIsLeader = false;
bytecodeOffset += opcodeLength;
@@ -173,7 +175,7 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasi
// If we found a terminal bytecode, link to the exit block.
if (isTerminal(opcodeID)) {
ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength());
- linkBlocks(block, exit);
+ linkBlocks(block, exit.get());
fallsThrough = false;
break;
}
@@ -186,7 +188,7 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasi
HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset);
fallsThrough = false;
if (!handler) {
- linkBlocks(block, exit);
+ linkBlocks(block, exit.get());
break;
}
for (unsigned i = 0; i < basicBlocks.size(); i++) {
@@ -227,7 +229,10 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasi
}
}
- basicBlocks.append(adoptRef(exit));
+ basicBlocks.append(WTFMove(exit));
+
+ for (auto& basicBlock : basicBlocks)
+ basicBlock->shrinkToFit();
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h
index 736ba8540..bd7d3ae9b 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,11 +36,13 @@ namespace JSC {
class CodeBlock;
-class BytecodeBasicBlock : public RefCounted<BytecodeBasicBlock> {
+class BytecodeBasicBlock {
+ WTF_MAKE_FAST_ALLOCATED;
public:
enum SpecialBlockType { EntryBlock, ExitBlock };
BytecodeBasicBlock(unsigned start, unsigned length);
BytecodeBasicBlock(SpecialBlockType);
+ void shrinkToFit();
bool isEntryBlock() { return !m_leaderBytecodeOffset && !m_totalBytecodeLength; }
bool isExitBlock() { return m_leaderBytecodeOffset == UINT_MAX && m_totalBytecodeLength == UINT_MAX; }
@@ -51,11 +53,8 @@ public:
Vector<unsigned>& bytecodeOffsets() { return m_bytecodeOffsets; }
void addBytecodeLength(unsigned);
- void addPredecessor(BytecodeBasicBlock* block) { m_predecessors.append(block); }
- void addSuccessor(BytecodeBasicBlock* block) { m_successors.append(block); }
-
- Vector<BytecodeBasicBlock*>& predecessors() { return m_predecessors; }
Vector<BytecodeBasicBlock*>& successors() { return m_successors; }
+ void addSuccessor(BytecodeBasicBlock* block) { m_successors.append(block); }
FastBitVector& in() { return m_in; }
FastBitVector& out() { return m_out; }
@@ -65,15 +64,13 @@ private:
unsigned m_totalBytecodeLength;
Vector<unsigned> m_bytecodeOffsets;
-
- Vector<BytecodeBasicBlock*> m_predecessors;
Vector<BytecodeBasicBlock*> m_successors;
FastBitVector m_in;
FastBitVector m_out;
};
-void computeBytecodeBasicBlocks(CodeBlock*, Vector<RefPtr<BytecodeBasicBlock> >&);
+void computeBytecodeBasicBlocks(CodeBlock*, Vector<std::unique_ptr<BytecodeBasicBlock>>&);
inline BytecodeBasicBlock::BytecodeBasicBlock(unsigned start, unsigned length)
: m_leaderBytecodeOffset(start)
diff --git a/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp
new file mode 100644
index 000000000..57740ac3e
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeIntrinsicRegistry.h"
+#include "BytecodeGenerator.h"
+#include "JSArrayIterator.h"
+#include "JSCJSValueInlines.h"
+#include "JSPromise.h"
+#include "Nodes.h"
+#include "StrongInlines.h"
+
+namespace JSC {
+
+#define INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET(name) m_bytecodeIntrinsicMap.add(vm.propertyNames->name##PrivateName.impl(), &BytecodeIntrinsicNode::emit_intrinsic_##name);
+
+BytecodeIntrinsicRegistry::BytecodeIntrinsicRegistry(VM& vm)
+ : m_vm(vm)
+ , m_bytecodeIntrinsicMap()
+{
+ JSC_COMMON_BYTECODE_INTRINSIC_FUNCTIONS_EACH_NAME(INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET)
+ JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET)
+
+ m_undefined.set(m_vm, jsUndefined());
+ m_arrayIterationKindKey.set(m_vm, jsNumber(ArrayIterateKey));
+ m_arrayIterationKindValue.set(m_vm, jsNumber(ArrayIterateValue));
+ m_arrayIterationKindKeyValue.set(m_vm, jsNumber(ArrayIterateKeyValue));
+ m_promiseStatePending.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Pending)));
+ m_promiseStateFulfilled.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Fulfilled)));
+ m_promiseStateRejected.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Rejected)));
+ m_symbolIterator.set(m_vm, Symbol::create(m_vm, static_cast<SymbolImpl&>(*m_vm.propertyNames->iteratorSymbol.impl())));
+ m_symbolSearch.set(m_vm, Symbol::create(m_vm, static_cast<SymbolImpl&>(*m_vm.propertyNames->searchSymbol.impl())));
+}
+
+BytecodeIntrinsicNode::EmitterType BytecodeIntrinsicRegistry::lookup(const Identifier& ident) const
+{
+ if (!m_vm.propertyNames->isPrivateName(ident))
+ return nullptr;
+ auto iterator = m_bytecodeIntrinsicMap.find(ident.impl());
+ if (iterator == m_bytecodeIntrinsicMap.end())
+ return nullptr;
+ return iterator->value;
+}
+
+#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) \
+ JSValue BytecodeIntrinsicRegistry::name##Value(BytecodeGenerator&) \
+ { \
+ return m_##name.get(); \
+ }
+ JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS)
+#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h
new file mode 100644
index 000000000..bd44dd39e
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef BytecodeIntrinsicRegistry_h
+#define BytecodeIntrinsicRegistry_h
+
+#include "Identifier.h"
+#include <wtf/HashTable.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class CommonIdentifiers;
+class BytecodeGenerator;
+class BytecodeIntrinsicNode;
+class RegisterID;
+class Identifier;
+
+#define JSC_COMMON_BYTECODE_INTRINSIC_FUNCTIONS_EACH_NAME(macro) \
+ macro(assert) \
+ macro(isObject) \
+ macro(putByValDirect) \
+ macro(toString)
+
+#define JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(macro) \
+ macro(undefined) \
+ macro(arrayIterationKindKey) \
+ macro(arrayIterationKindValue) \
+ macro(arrayIterationKindKeyValue) \
+ macro(promiseStatePending) \
+ macro(promiseStateFulfilled) \
+ macro(promiseStateRejected) \
+ macro(symbolIterator) \
+ macro(symbolSearch)
+
+class BytecodeIntrinsicRegistry {
+ WTF_MAKE_NONCOPYABLE(BytecodeIntrinsicRegistry);
+public:
+ explicit BytecodeIntrinsicRegistry(VM&);
+
+ typedef RegisterID* (BytecodeIntrinsicNode::* EmitterType)(BytecodeGenerator&, RegisterID*);
+
+ EmitterType lookup(const Identifier&) const;
+
+#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) JSValue name##Value(BytecodeGenerator&);
+ JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS)
+#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS
+
+private:
+ VM& m_vm;
+ HashMap<RefPtr<UniquedStringImpl>, EmitterType, IdentifierRepHash> m_bytecodeIntrinsicMap;
+
+#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) Strong<Unknown> m_##name;
+ JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS)
+#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS
+};
+
+} // namespace JSC
+
+#endif // BytecodeIntrinsicRegistry_h
diff --git a/Source/JavaScriptCore/bytecode/BytecodeKills.h b/Source/JavaScriptCore/bytecode/BytecodeKills.h
new file mode 100644
index 000000000..d073ded25
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeKills.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef BytecodeKills_h
+#define BytecodeKills_h
+
+#include "CodeBlock.h"
+#include <wtf/FastBitVector.h>
+
+namespace JSC {
+
+class BytecodeLivenessAnalysis;
+
+class BytecodeKills {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ BytecodeKills()
+ : m_codeBlock(nullptr)
+ {
+ }
+
+ // By convention, we say that non-local operands are never killed.
+ bool operandIsKilled(unsigned bytecodeIndex, int operand) const
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size());
+ VirtualRegister reg(operand);
+ if (reg.isLocal())
+ return m_killSets[bytecodeIndex].contains(operand);
+ return false;
+ }
+
+ bool operandIsKilled(Instruction* instruction, int operand) const
+ {
+ return operandIsKilled(instruction - m_codeBlock->instructions().begin(), operand);
+ }
+
+ template<typename Functor>
+ void forEachOperandKilledAt(unsigned bytecodeIndex, const Functor& functor) const
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size());
+ m_killSets[bytecodeIndex].forEachLocal(
+ [&] (unsigned local) {
+ functor(virtualRegisterForLocal(local));
+ });
+ }
+
+ template<typename Functor>
+ void forEachOperandKilledAt(Instruction* pc, const Functor& functor) const
+ {
+ forEachOperandKilledAt(pc - m_codeBlock->instructions().begin(), functor);
+ }
+
+private:
+ friend class BytecodeLivenessAnalysis;
+
+ class KillSet {
+ public:
+ KillSet()
+ : m_word(0)
+ {
+ }
+
+ ~KillSet()
+ {
+ if (hasVector())
+ delete vector();
+ }
+
+ void add(unsigned local)
+ {
+ if (isEmpty()) {
+ setOneItem(local);
+ return;
+ }
+ if (hasOneItem()) {
+ ASSERT(oneItem() != local);
+ Vector<unsigned>* vector = new Vector<unsigned>();
+ vector->append(oneItem());
+ vector->append(local);
+ setVector(vector);
+ return;
+ }
+ ASSERT(!vector()->contains(local));
+ vector()->append(local);
+ }
+
+ template<typename Functor>
+ void forEachLocal(const Functor& functor)
+ {
+ if (isEmpty())
+ return;
+ if (hasOneItem()) {
+ functor(oneItem());
+ return;
+ }
+ for (unsigned local : *vector())
+ functor(local);
+ }
+
+ bool contains(unsigned expectedLocal)
+ {
+ if (isEmpty())
+ return false;
+ if (hasOneItem())
+ return oneItem() == expectedLocal;
+ for (unsigned local : *vector()) {
+ if (local == expectedLocal)
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ bool isEmpty() const
+ {
+ return !m_word;
+ }
+
+ bool hasOneItem() const
+ {
+ return m_word & 1;
+ }
+
+ unsigned oneItem() const
+ {
+ return m_word >> 1;
+ }
+
+ void setOneItem(unsigned value)
+ {
+ m_word = (value << 1) | 1;
+ }
+
+ bool hasVector() const
+ {
+ return !isEmpty() && !hasOneItem();
+ }
+
+ Vector<unsigned>* vector()
+ {
+ return bitwise_cast<Vector<unsigned>*>(m_word);
+ }
+
+ void setVector(Vector<unsigned>* value)
+ {
+ m_word = bitwise_cast<uintptr_t>(value);
+ }
+
+ uintptr_t m_word;
+ };
+
+ CodeBlock* m_codeBlock;
+ std::unique_ptr<KillSet[]> m_killSets;
+};
+
+} // namespace JSC
+
+#endif // BytecodeKills_h
+
diff --git a/Source/JavaScriptCore/bytecode/BytecodeList.json b/Source/JavaScriptCore/bytecode/BytecodeList.json
new file mode 100644
index 000000000..053b8dc9b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeList.json
@@ -0,0 +1,182 @@
+[
+ {
+ "section" : "Bytecodes", "emitInHFile" : true, "emitInASMFile" : true,
+ "macroNameComponent" : "BYTECODE", "asmPrefix" : "llint_",
+ "bytecodes" : [
+ { "name" : "op_enter", "length" : 1 },
+ { "name" : "op_get_scope", "length" : 2 },
+ { "name" : "op_create_direct_arguments", "length" : 2 },
+ { "name" : "op_create_scoped_arguments", "length" : 3 },
+ { "name" : "op_create_out_of_band_arguments", "length" : 2 },
+ { "name" : "op_create_this", "length" : 5 },
+ { "name" : "op_to_this", "length" : 4 },
+ { "name" : "op_check_tdz", "length" : 2 },
+ { "name" : "op_new_object", "length" : 4 },
+ { "name" : "op_new_array", "length" : 5 },
+ { "name" : "op_new_array_with_size", "length" : 4 },
+ { "name" : "op_new_array_buffer", "length" : 5 },
+ { "name" : "op_new_regexp", "length" : 3 },
+ { "name" : "op_mov", "length" : 3 },
+ { "name" : "op_not", "length" : 3 },
+ { "name" : "op_eq", "length" : 4 },
+ { "name" : "op_eq_null", "length" : 3 },
+ { "name" : "op_neq", "length" : 4 },
+ { "name" : "op_neq_null", "length" : 3 },
+ { "name" : "op_stricteq", "length" : 4 },
+ { "name" : "op_nstricteq", "length" : 4 },
+ { "name" : "op_less", "length" : 4 },
+ { "name" : "op_lesseq", "length" : 4 },
+ { "name" : "op_greater", "length" : 4 },
+ { "name" : "op_greatereq", "length" : 4 },
+ { "name" : "op_inc", "length" : 2 },
+ { "name" : "op_dec", "length" : 2 },
+ { "name" : "op_to_number", "length" : 3 },
+ { "name" : "op_to_string", "length" : 3 },
+ { "name" : "op_negate", "length" : 3 },
+ { "name" : "op_add", "length" : 5 },
+ { "name" : "op_mul", "length" : 5 },
+ { "name" : "op_div", "length" : 5 },
+ { "name" : "op_mod", "length" : 4 },
+ { "name" : "op_sub", "length" : 5 },
+ { "name" : "op_lshift", "length" : 4 },
+ { "name" : "op_rshift", "length" : 4 },
+ { "name" : "op_urshift", "length" : 4 },
+ { "name" : "op_unsigned", "length" : 3 },
+ { "name" : "op_bitand", "length" : 5 },
+ { "name" : "op_bitxor", "length" : 5 },
+ { "name" : "op_bitor", "length" : 5 },
+ { "name" : "op_overrides_has_instance", "length" : 4 },
+ { "name" : "op_instanceof", "length" : 4 },
+ { "name" : "op_instanceof_custom", "length" : 5 },
+ { "name" : "op_typeof", "length" : 3 },
+ { "name" : "op_is_undefined", "length" : 3 },
+ { "name" : "op_is_boolean", "length" : 3 },
+ { "name" : "op_is_number", "length" : 3 },
+ { "name" : "op_is_string", "length" : 3 },
+ { "name" : "op_is_object", "length" : 3 },
+ { "name" : "op_is_object_or_null", "length" : 3 },
+ { "name" : "op_is_function", "length" : 3 },
+ { "name" : "op_in", "length" : 4 },
+ { "name" : "op_get_by_id", "length" : 9 },
+ { "name" : "op_get_array_length", "length" : 9 },
+ { "name" : "op_put_by_id", "length" : 9 },
+ { "name" : "op_del_by_id", "length" : 4 },
+ { "name" : "op_get_by_val", "length" : 6 },
+ { "name" : "op_put_by_val", "length" : 5 },
+ { "name" : "op_put_by_val_direct", "length" : 5 },
+ { "name" : "op_del_by_val", "length" : 4 },
+ { "name" : "op_put_by_index", "length" : 4 },
+ { "name" : "op_put_getter_by_id", "length" : 5 },
+ { "name" : "op_put_setter_by_id", "length" : 5 },
+ { "name" : "op_put_getter_setter_by_id", "length" : 6 },
+ { "name" : "op_put_getter_by_val", "length" : 5 },
+ { "name" : "op_put_setter_by_val", "length" : 5 },
+ { "name" : "op_jmp", "length" : 2 },
+ { "name" : "op_jtrue", "length" : 3 },
+ { "name" : "op_jfalse", "length" : 3 },
+ { "name" : "op_jeq_null", "length" : 3 },
+ { "name" : "op_jneq_null", "length" : 3 },
+ { "name" : "op_jneq_ptr", "length" : 4 },
+ { "name" : "op_jless", "length" : 4 },
+ { "name" : "op_jlesseq", "length" : 4 },
+ { "name" : "op_jgreater", "length" : 4 },
+ { "name" : "op_jgreatereq", "length" : 4 },
+ { "name" : "op_jnless", "length" : 4 },
+ { "name" : "op_jnlesseq", "length" : 4 },
+ { "name" : "op_jngreater", "length" : 4 },
+ { "name" : "op_jngreatereq", "length" : 4 },
+ { "name" : "op_loop_hint", "length" : 1 },
+ { "name" : "op_switch_imm", "length" : 4 },
+ { "name" : "op_switch_char", "length" : 4 },
+ { "name" : "op_switch_string", "length" : 4 },
+ { "name" : "op_new_func", "length" : 4 },
+ { "name" : "op_new_func_exp", "length" : 4 },
+ { "name" : "op_new_generator_func", "length" : 4 },
+ { "name" : "op_new_generator_func_exp", "length" : 4 },
+ { "name" : "op_new_arrow_func_exp", "length" : 4 },
+ { "name" : "op_call", "length" : 9 },
+ { "name" : "op_tail_call", "length" : 9 },
+ { "name" : "op_call_eval", "length" : 9 },
+ { "name" : "op_call_varargs", "length" : 9 },
+ { "name" : "op_tail_call_varargs", "length" : 9 },
+ { "name" : "op_ret", "length" : 2 },
+ { "name" : "op_construct", "length" : 9 },
+ { "name" : "op_construct_varargs", "length" : 9 },
+ { "name" : "op_strcat", "length" : 4 },
+ { "name" : "op_to_primitive", "length" : 3 },
+ { "name" : "op_resolve_scope", "length" : 7 },
+ { "name" : "op_get_from_scope", "length" : 8 },
+ { "name" : "op_put_to_scope", "length" : 7 },
+ { "name" : "op_get_from_arguments", "length" : 5 },
+ { "name" : "op_put_to_arguments", "length" : 4 },
+ { "name" : "op_push_with_scope", "length" : 4 },
+ { "name" : "op_create_lexical_environment", "length" : 5 },
+ { "name" : "op_get_parent_scope", "length" : 3 },
+ { "name" : "op_catch", "length" : 3 },
+ { "name" : "op_throw", "length" : 2 },
+ { "name" : "op_throw_static_error", "length" : 3 },
+ { "name" : "op_debug", "length" : 3 },
+ { "name" : "op_profile_will_call", "length" : 2 },
+ { "name" : "op_profile_did_call", "length" : 2 },
+ { "name" : "op_end", "length" : 2 },
+ { "name" : "op_profile_type", "length" : 6 },
+ { "name" : "op_profile_control_flow", "length" : 2 },
+ { "name" : "op_get_enumerable_length", "length" : 3 },
+ { "name" : "op_has_indexed_property", "length" : 5 },
+ { "name" : "op_has_structure_property", "length" : 5 },
+ { "name" : "op_has_generic_property", "length" : 4 },
+ { "name" : "op_get_direct_pname", "length" : 7 },
+ { "name" : "op_get_property_enumerator", "length" : 3 },
+ { "name" : "op_enumerator_structure_pname", "length" : 4 },
+ { "name" : "op_enumerator_generic_pname", "length" : 4 },
+ { "name" : "op_to_index_string", "length" : 3 },
+ { "name" : "op_assert", "length" : 3 },
+ { "name" : "op_copy_rest", "length": 4 },
+ { "name" : "op_get_rest_length", "length": 3 },
+ { "name" : "op_save", "length" : 4 },
+ { "name" : "op_resume", "length" : 3 },
+ { "name" : "op_watchdog", "length" : 1 }
+ ]
+ },
+ {
+ "section" : "CLoopHelpers", "emitInHFile" : true, "emitInASMFile" : false, "defaultLength" : 1,
+ "macroNameComponent" : "CLOOP_BYTECODE_HELPER",
+ "bytecodes" : [
+ { "name" : "llint_entry" },
+ { "name" : "getHostCallReturnValue" },
+ { "name" : "llint_return_to_host" },
+ { "name" : "llint_vm_entry_to_javascript" },
+ { "name" : "llint_vm_entry_to_native" },
+ { "name" : "llint_cloop_did_return_from_js_1" },
+ { "name" : "llint_cloop_did_return_from_js_2" },
+ { "name" : "llint_cloop_did_return_from_js_3" },
+ { "name" : "llint_cloop_did_return_from_js_4" },
+ { "name" : "llint_cloop_did_return_from_js_5" },
+ { "name" : "llint_cloop_did_return_from_js_6" },
+ { "name" : "llint_cloop_did_return_from_js_7" },
+ { "name" : "llint_cloop_did_return_from_js_8" },
+ { "name" : "llint_cloop_did_return_from_js_9" },
+ { "name" : "llint_cloop_did_return_from_js_10" },
+ { "name" : "llint_cloop_did_return_from_js_11" }
+ ]
+ },
+ {
+ "section" : "NativeHelpers", "emitInHFile" : true, "emitInASMFile" : true, "defaultLength" : 1,
+ "macroNameComponent" : "BYTECODE_HELPER",
+ "bytecodes" : [
+ { "name" : "llint_program_prologue" },
+ { "name" : "llint_eval_prologue" },
+ { "name" : "llint_module_program_prologue" },
+ { "name" : "llint_function_for_call_prologue" },
+ { "name" : "llint_function_for_construct_prologue" },
+ { "name" : "llint_function_for_call_arity_check" },
+ { "name" : "llint_function_for_construct_arity_check" },
+ { "name" : "llint_generic_return_point" },
+ { "name" : "llint_throw_from_slow_path_trampoline" },
+ { "name" : "llint_throw_during_call_trampoline" },
+ { "name" : "llint_native_call_trampoline" },
+ { "name" : "llint_native_construct_trampoline" },
+ { "name" : "handleUncaughtException" }
+ ]
+ }
+]
diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
index 926334c44..7228b0333 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,7 @@
#include "config.h"
#include "BytecodeLivenessAnalysis.h"
+#include "BytecodeKills.h"
#include "BytecodeLivenessAnalysisInlines.h"
#include "BytecodeUseDef.h"
#include "CodeBlock.h"
@@ -47,56 +48,17 @@ static bool isValidRegisterForLiveness(CodeBlock* codeBlock, int operand)
return false;
VirtualRegister virtualReg(operand);
- if (!virtualReg.isLocal())
- return false;
-
- if (codeBlock->captureCount()
- && operand <= codeBlock->captureStart()
- && operand > codeBlock->captureEnd())
- return false;
-
- return true;
+ return virtualReg.isLocal();
}
-static void setForOperand(CodeBlock* codeBlock, FastBitVector& bits, int operand)
-{
- ASSERT(isValidRegisterForLiveness(codeBlock, operand));
- VirtualRegister virtualReg(operand);
- if (virtualReg.offset() > codeBlock->captureStart())
- bits.set(virtualReg.toLocal());
- else
- bits.set(virtualReg.toLocal() - codeBlock->captureCount());
-}
-
-namespace {
-
-class SetBit {
-public:
- SetBit(FastBitVector& bits)
- : m_bits(bits)
- {
- }
-
- void operator()(CodeBlock* codeBlock, Instruction*, OpcodeID, int operand)
- {
- if (isValidRegisterForLiveness(codeBlock, operand))
- setForOperand(codeBlock, m_bits, operand);
- }
-
-private:
- FastBitVector& m_bits;
-};
-
-} // anonymous namespace
-
-static unsigned getLeaderOffsetForBasicBlock(RefPtr<BytecodeBasicBlock>* basicBlock)
+static unsigned getLeaderOffsetForBasicBlock(std::unique_ptr<BytecodeBasicBlock>* basicBlock)
{
return (*basicBlock)->leaderBytecodeOffset();
}
-static BytecodeBasicBlock* findBasicBlockWithLeaderOffset(Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks, unsigned leaderOffset)
+static BytecodeBasicBlock* findBasicBlockWithLeaderOffset(Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned leaderOffset)
{
- return (*tryBinarySearch<RefPtr<BytecodeBasicBlock>, unsigned>(basicBlocks, basicBlocks.size(), leaderOffset, getLeaderOffsetForBasicBlock)).get();
+ return (*tryBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>(basicBlocks, basicBlocks.size(), leaderOffset, getLeaderOffsetForBasicBlock)).get();
}
static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, unsigned bytecodeOffset)
@@ -105,7 +67,7 @@ static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, unsigned byte
return bytecodeOffset >= leaderOffset && bytecodeOffset < leaderOffset + block->totalBytecodeLength();
}
-static BytecodeBasicBlock* findBasicBlockForBytecodeOffset(Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks, unsigned bytecodeOffset)
+static BytecodeBasicBlock* findBasicBlockForBytecodeOffset(Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset)
{
/*
for (unsigned i = 0; i < basicBlocks.size(); i++) {
@@ -114,7 +76,7 @@ static BytecodeBasicBlock* findBasicBlockForBytecodeOffset(Vector<RefPtr<Bytecod
}
return 0;
*/
- RefPtr<BytecodeBasicBlock>* basicBlock = approximateBinarySearch<RefPtr<BytecodeBasicBlock>, unsigned>(
+ std::unique_ptr<BytecodeBasicBlock>* basicBlock = approximateBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>(
basicBlocks, basicBlocks.size(), bytecodeOffset, getLeaderOffsetForBasicBlock);
// We found the block we were looking for.
if (blockContainsBytecodeOffset((*basicBlock).get(), bytecodeOffset))
@@ -133,52 +95,82 @@ static BytecodeBasicBlock* findBasicBlockForBytecodeOffset(Vector<RefPtr<Bytecod
return basicBlock[1].get();
}
-static void stepOverInstruction(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset, FastBitVector& uses, FastBitVector& defs, FastBitVector& out)
+// Simplified interface to bytecode use/def, which determines defs first and then uses, and includes
+// exception handlers in the uses.
+template<typename UseFunctor, typename DefFunctor>
+static void stepOverInstruction(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset, const UseFunctor& use, const DefFunctor& def)
{
- uses.clearAll();
- defs.clearAll();
-
- SetBit setUses(uses);
- SetBit setDefs(defs);
- computeUsesForBytecodeOffset(codeBlock, bytecodeOffset, setUses);
- computeDefsForBytecodeOffset(codeBlock, bytecodeOffset, setDefs);
-
- out.exclude(defs);
- out.merge(uses);
+ // This abstractly execute the instruction in reverse. Instructions logically first use operands and
+ // then define operands. This logical ordering is necessary for operations that use and def the same
+ // operand, like:
+ //
+ // op_add loc1, loc1, loc2
+ //
+ // The use of loc1 happens before the def of loc1. That's a semantic requirement since the add
+ // operation cannot travel forward in time to read the value that it will produce after reading that
+ // value. Since we are executing in reverse, this means that we must do defs before uses (reverse of
+ // uses before defs).
+ //
+ // Since this is a liveness analysis, this ordering ends up being particularly important: if we did
+ // uses before defs, then the add operation above would appear to not have loc1 live, since we'd
+ // first add it to the out set (the use), and then we'd remove it (the def).
+ computeDefsForBytecodeOffset(
+ codeBlock, block, bytecodeOffset,
+ [&] (CodeBlock* codeBlock, Instruction*, OpcodeID, int operand) {
+ if (isValidRegisterForLiveness(codeBlock, operand))
+ def(VirtualRegister(operand).toLocal());
+ });
+
+ computeUsesForBytecodeOffset(
+ codeBlock, block, bytecodeOffset,
+ [&] (CodeBlock* codeBlock, Instruction*, OpcodeID, int operand) {
+ if (isValidRegisterForLiveness(codeBlock, operand))
+ use(VirtualRegister(operand).toLocal());
+ });
+
// If we have an exception handler, we want the live-in variables of the
// exception handler block to be included in the live-in of this particular bytecode.
if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset)) {
BytecodeBasicBlock* handlerBlock = findBasicBlockWithLeaderOffset(basicBlocks, handler->target);
ASSERT(handlerBlock);
- out.merge(handlerBlock->in());
+ handlerBlock->in().forEachSetBit(use);
}
}
-static void computeLocalLivenessForBytecodeOffset(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks, unsigned targetOffset, FastBitVector& result)
+static void stepOverInstruction(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset, FastBitVector& out)
+{
+ stepOverInstruction(
+ codeBlock, block, basicBlocks, bytecodeOffset,
+ [&] (unsigned bitIndex) {
+ // This is the use functor, so we set the bit.
+ out.set(bitIndex);
+ },
+ [&] (unsigned bitIndex) {
+ // This is the def functor, so we clear the bit.
+ out.clear(bitIndex);
+ });
+}
+
+static void computeLocalLivenessForBytecodeOffset(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned targetOffset, FastBitVector& result)
{
ASSERT(!block->isExitBlock());
ASSERT(!block->isEntryBlock());
FastBitVector out = block->out();
- FastBitVector uses;
- FastBitVector defs;
- uses.resize(out.numBits());
- defs.resize(out.numBits());
-
for (int i = block->bytecodeOffsets().size() - 1; i >= 0; i--) {
unsigned bytecodeOffset = block->bytecodeOffsets()[i];
if (targetOffset > bytecodeOffset)
break;
- stepOverInstruction(codeBlock, basicBlocks, bytecodeOffset, uses, defs, out);
+ stepOverInstruction(codeBlock, block, basicBlocks, bytecodeOffset, out);
}
result.set(out);
}
-static void computeLocalLivenessForBlock(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks)
+static void computeLocalLivenessForBlock(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)
{
if (block->isExitBlock() || block->isEntryBlock())
return;
@@ -188,8 +180,7 @@ static void computeLocalLivenessForBlock(CodeBlock* codeBlock, BytecodeBasicBloc
void BytecodeLivenessAnalysis::runLivenessFixpoint()
{
UnlinkedCodeBlock* unlinkedCodeBlock = m_codeBlock->unlinkedCodeBlock();
- unsigned numberOfVariables =
- unlinkedCodeBlock->m_numCalleeRegisters - m_codeBlock->captureCount();
+ unsigned numberOfVariables = unlinkedCodeBlock->m_numCalleeLocals;
for (unsigned i = 0; i < m_basicBlocks.size(); i++) {
BytecodeBasicBlock* block = m_basicBlocks[i].get();
@@ -204,7 +195,7 @@ void BytecodeLivenessAnalysis::runLivenessFixpoint()
newOut.resize(m_basicBlocks.last()->out().numBits());
do {
changed = false;
- for (int i = m_basicBlocks.size() - 2; i >= 0; i--) {
+ for (unsigned i = m_basicBlocks.size() - 1; i--;) {
BytecodeBasicBlock* block = m_basicBlocks[i].get();
newOut.clearAll();
for (unsigned j = 0; j < block->successors().size(); j++)
@@ -216,7 +207,7 @@ void BytecodeLivenessAnalysis::runLivenessFixpoint()
} while (changed);
}
-void BytecodeLivenessAnalysis::getLivenessInfoForNonCapturedVarsAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result)
+void BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result)
{
BytecodeBasicBlock* block = findBasicBlockForBytecodeOffset(m_basicBlocks, bytecodeOffset);
ASSERT(block);
@@ -228,60 +219,47 @@ void BytecodeLivenessAnalysis::getLivenessInfoForNonCapturedVarsAtBytecodeOffset
bool BytecodeLivenessAnalysis::operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset)
{
- if (operandIsAlwaysLive(m_codeBlock, operand))
+ if (operandIsAlwaysLive(operand))
return true;
FastBitVector result;
- getLivenessInfoForNonCapturedVarsAtBytecodeOffset(bytecodeOffset, result);
- return operandThatIsNotAlwaysLiveIsLive(m_codeBlock, result, operand);
+ getLivenessInfoAtBytecodeOffset(bytecodeOffset, result);
+ return operandThatIsNotAlwaysLiveIsLive(result, operand);
}
-FastBitVector getLivenessInfo(CodeBlock* codeBlock, const FastBitVector& out)
+FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset)
{
- FastBitVector result;
-
- unsigned numCapturedVars = codeBlock->captureCount();
- if (numCapturedVars) {
- int firstCapturedLocal = VirtualRegister(codeBlock->captureStart()).toLocal();
- result.resize(out.numBits() + numCapturedVars);
- for (unsigned i = 0; i < numCapturedVars; ++i)
- result.set(firstCapturedLocal + i);
- } else
- result.resize(out.numBits());
-
- int outLength = out.numBits();
- ASSERT(outLength >= 0);
- for (int i = 0; i < outLength; i++) {
- if (!out.get(i))
- continue;
-
- if (!numCapturedVars) {
- result.set(i);
- continue;
- }
-
- if (virtualRegisterForLocal(i).offset() > codeBlock->captureStart())
- result.set(i);
- else
- result.set(numCapturedVars + i);
- }
- return result;
+ FastBitVector out;
+ getLivenessInfoAtBytecodeOffset(bytecodeOffset, out);
+ return out;
}
-FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset)
+void BytecodeLivenessAnalysis::computeFullLiveness(FullBytecodeLiveness& result)
{
FastBitVector out;
- getLivenessInfoForNonCapturedVarsAtBytecodeOffset(bytecodeOffset, out);
- return getLivenessInfo(m_codeBlock, out);
+
+ result.m_map.resize(m_codeBlock->instructions().size());
+
+ for (unsigned i = m_basicBlocks.size(); i--;) {
+ BytecodeBasicBlock* block = m_basicBlocks[i].get();
+ if (block->isEntryBlock() || block->isExitBlock())
+ continue;
+
+ out = block->out();
+
+ for (unsigned i = block->bytecodeOffsets().size(); i--;) {
+ unsigned bytecodeOffset = block->bytecodeOffsets()[i];
+ stepOverInstruction(m_codeBlock, block, m_basicBlocks, bytecodeOffset, out);
+ result.m_map[bytecodeOffset] = out;
+ }
+ }
}
-void BytecodeLivenessAnalysis::computeFullLiveness(FullBytecodeLiveness& result)
+void BytecodeLivenessAnalysis::computeKills(BytecodeKills& result)
{
FastBitVector out;
- FastBitVector uses;
- FastBitVector defs;
result.m_codeBlock = m_codeBlock;
- result.m_map.clear();
+ result.m_killSets = std::make_unique<BytecodeKills::KillSet[]>(m_codeBlock->instructions().size());
for (unsigned i = m_basicBlocks.size(); i--;) {
BytecodeBasicBlock* block = m_basicBlocks[i].get();
@@ -289,13 +267,22 @@ void BytecodeLivenessAnalysis::computeFullLiveness(FullBytecodeLiveness& result)
continue;
out = block->out();
- uses.resize(out.numBits());
- defs.resize(out.numBits());
for (unsigned i = block->bytecodeOffsets().size(); i--;) {
unsigned bytecodeOffset = block->bytecodeOffsets()[i];
- stepOverInstruction(m_codeBlock, m_basicBlocks, bytecodeOffset, uses, defs, out);
- result.m_map.add(bytecodeOffset, out);
+ stepOverInstruction(
+ m_codeBlock, block, m_basicBlocks, bytecodeOffset,
+ [&] (unsigned index) {
+ // This is for uses.
+ if (out.get(index))
+ return;
+ result.m_killSets[bytecodeOffset].add(index);
+ out.set(index);
+ },
+ [&] (unsigned index) {
+ // This is for defs.
+ out.clear(index);
+ });
}
}
}
@@ -307,12 +294,6 @@ void BytecodeLivenessAnalysis::dumpResults()
for (unsigned i = 0; i < m_basicBlocks.size(); i++) {
BytecodeBasicBlock* block = m_basicBlocks[i].get();
dataLogF("\nBytecode basic block %u: %p (offset: %u, length: %u)\n", i, block, block->leaderBytecodeOffset(), block->totalBytecodeLength());
- dataLogF("Predecessors: ");
- for (unsigned j = 0; j < block->predecessors().size(); j++) {
- BytecodeBasicBlock* predecessor = block->predecessors()[j];
- dataLogF("%p ", predecessor);
- }
- dataLogF("\n");
dataLogF("Successors: ");
for (unsigned j = 0; j < block->successors().size(); j++) {
BytecodeBasicBlock* successor = block->successors()[j];
diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h
index 349912175..ece16f21f 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,10 +33,13 @@
namespace JSC {
+class BytecodeKills;
class CodeBlock;
class FullBytecodeLiveness;
class BytecodeLivenessAnalysis {
+ WTF_MAKE_FAST_ALLOCATED;
+ WTF_MAKE_NONCOPYABLE(BytecodeLivenessAnalysis);
public:
BytecodeLivenessAnalysis(CodeBlock*);
@@ -44,23 +47,22 @@ public:
FastBitVector getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset);
void computeFullLiveness(FullBytecodeLiveness& result);
+ void computeKills(BytecodeKills& result);
private:
void compute();
void runLivenessFixpoint();
void dumpResults();
- void getLivenessInfoForNonCapturedVarsAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector&);
+ void getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector&);
CodeBlock* m_codeBlock;
- Vector<RefPtr<BytecodeBasicBlock> > m_basicBlocks;
+ Vector<std::unique_ptr<BytecodeBasicBlock>> m_basicBlocks;
};
-inline bool operandIsAlwaysLive(CodeBlock*, int operand);
-inline bool operandThatIsNotAlwaysLiveIsLive(CodeBlock*, const FastBitVector& out, int operand);
-inline bool operandIsLive(CodeBlock*, const FastBitVector& out, int operand);
-
-FastBitVector getLivenessInfo(CodeBlock*, const FastBitVector& out);
+inline bool operandIsAlwaysLive(int operand);
+inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand);
+inline bool operandIsLive(const FastBitVector& out, int operand);
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h
index 8824bd85c..9b5c755fc 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,30 +28,26 @@
#include "BytecodeLivenessAnalysis.h"
#include "CodeBlock.h"
+#include "Operations.h"
namespace JSC {
-inline bool operandIsAlwaysLive(CodeBlock* codeBlock, int operand)
+inline bool operandIsAlwaysLive(int operand)
{
- if (VirtualRegister(operand).isArgument())
- return true;
- return operand <= codeBlock->captureStart() && operand > codeBlock->captureEnd();
+ return !VirtualRegister(operand).isLocal();
}
-inline bool operandThatIsNotAlwaysLiveIsLive(CodeBlock* codeBlock, const FastBitVector& out, int operand)
+inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand)
{
- VirtualRegister virtualReg(operand);
- if (virtualReg.offset() > codeBlock->captureStart())
- return out.get(virtualReg.toLocal());
- size_t index = virtualReg.toLocal() - codeBlock->captureCount();
- if (index >= out.numBits())
+ unsigned local = VirtualRegister(operand).toLocal();
+ if (local >= out.numBits())
return false;
- return out.get(index);
+ return out.get(local);
}
-inline bool operandIsLive(CodeBlock* codeBlock, const FastBitVector& out, int operand)
+inline bool operandIsLive(const FastBitVector& out, int operand)
{
- return operandIsAlwaysLive(codeBlock, operand) || operandThatIsNotAlwaysLiveIsLive(codeBlock, out, operand);
+ return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(out, operand);
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h
index 45cb91a1c..14a69f68a 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,7 +32,7 @@ namespace JSC {
template<typename Functor>
void computeUsesForBytecodeOffset(
- CodeBlock* codeBlock, unsigned bytecodeOffset, Functor& functor)
+ CodeBlock* codeBlock, BytecodeBasicBlock* block, unsigned bytecodeOffset, const Functor& functor)
{
Interpreter* interpreter = codeBlock->vm()->interpreter;
Instruction* instructionsBegin = codeBlock->instructions().begin();
@@ -44,29 +44,26 @@ void computeUsesForBytecodeOffset(
case op_new_array_buffer:
case op_throw_static_error:
case op_debug:
- case op_resolve_scope:
- case op_pop_scope:
case op_jneq_ptr:
- case op_new_func_exp:
case op_loop_hint:
case op_jmp:
case op_new_object:
- case op_init_lazy_reg:
- case op_get_callee:
case op_enter:
case op_catch:
- case op_touch_entry:
+ case op_profile_control_flow:
+ case op_create_direct_arguments:
+ case op_create_out_of_band_arguments:
+ case op_get_rest_length:
+ case op_watchdog:
return;
- case op_new_func:
- case op_new_captured_func:
- case op_create_activation:
- case op_create_arguments:
+ case op_assert:
+ case op_get_scope:
case op_to_this:
- case op_tear_off_activation:
+ case op_check_tdz:
case op_profile_will_call:
case op_profile_did_call:
+ case op_profile_type:
case op_throw:
- case op_push_with_scope:
case op_end:
case op_ret:
case op_jtrue:
@@ -74,11 +71,12 @@ void computeUsesForBytecodeOffset(
case op_jeq_null:
case op_jneq_null:
case op_dec:
- case op_inc: {
+ case op_inc:
+ case op_resume: {
+ ASSERT(opcodeLengths[opcodeID] > 1);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
return;
}
- case op_ret_object_or_this:
case op_jlesseq:
case op_jgreater:
case op_jgreatereq:
@@ -86,86 +84,100 @@ void computeUsesForBytecodeOffset(
case op_jnlesseq:
case op_jngreater:
case op_jngreatereq:
- case op_jless: {
+ case op_jless:
+ case op_copy_rest: {
+ ASSERT(opcodeLengths[opcodeID] > 2);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
return;
}
case op_put_by_val_direct:
case op_put_by_val: {
+ ASSERT(opcodeLengths[opcodeID] > 3);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
return;
}
case op_put_by_index:
- case op_put_by_id_replace:
- case op_put_by_id_transition:
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_normal_out_of_line:
- case op_put_by_id_generic:
- case op_put_by_id_out_of_line:
case op_put_by_id:
- case op_put_to_scope: {
+ case op_put_to_scope:
+ case op_put_to_arguments: {
+ ASSERT(opcodeLengths[opcodeID] > 3);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
return;
}
- case op_put_getter_setter: {
+ case op_put_getter_by_id:
+ case op_put_setter_by_id: {
+ ASSERT(opcodeLengths[opcodeID] > 4);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
return;
}
- case op_init_global_const_nop:
- case op_init_global_const:
- case op_push_name_scope:
+ case op_put_getter_setter_by_id: {
+ ASSERT(opcodeLengths[opcodeID] > 5);
+ functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
+ return;
+ }
+ case op_put_getter_by_val:
+ case op_put_setter_by_val: {
+ ASSERT(opcodeLengths[opcodeID] > 4);
+ functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+ return;
+ }
+ case op_get_property_enumerator:
+ case op_get_enumerable_length:
+ case op_new_func_exp:
+ case op_new_generator_func_exp:
+ case op_new_arrow_func_exp:
+ case op_to_index_string:
+ case op_create_lexical_environment:
+ case op_resolve_scope:
case op_get_from_scope:
case op_to_primitive:
case op_get_by_id:
- case op_get_by_id_out_of_line:
- case op_get_by_id_self:
- case op_get_by_id_proto:
- case op_get_by_id_chain:
- case op_get_by_id_getter_self:
- case op_get_by_id_getter_proto:
- case op_get_by_id_getter_chain:
- case op_get_by_id_custom_self:
- case op_get_by_id_custom_proto:
- case op_get_by_id_custom_chain:
- case op_get_by_id_generic:
case op_get_array_length:
- case op_get_string_length:
- case op_get_arguments_length:
case op_typeof:
case op_is_undefined:
case op_is_boolean:
case op_is_number:
case op_is_string:
case op_is_object:
+ case op_is_object_or_null:
case op_is_function:
case op_to_number:
+ case op_to_string:
case op_negate:
case op_neq_null:
case op_eq_null:
case op_not:
case op_mov:
- case op_captured_mov:
case op_new_array_with_size:
case op_create_this:
- case op_get_pnames:
case op_del_by_id:
- case op_unsigned: {
+ case op_unsigned:
+ case op_new_func:
+ case op_new_generator_func:
+ case op_get_parent_scope:
+ case op_create_scoped_arguments:
+ case op_get_from_arguments: {
+ ASSERT(opcodeLengths[opcodeID] > 2);
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
return;
}
+ case op_has_generic_property:
+ case op_has_indexed_property:
+ case op_enumerator_structure_pname:
+ case op_enumerator_generic_pname:
case op_get_by_val:
- case op_get_argument_by_val:
case op_in:
+ case op_overrides_has_instance:
case op_instanceof:
- case op_check_has_instance:
case op_add:
case op_mul:
case op_div:
@@ -185,35 +197,36 @@ void computeUsesForBytecodeOffset(
case op_stricteq:
case op_neq:
case op_eq:
+ case op_push_with_scope:
case op_del_by_val: {
+ ASSERT(opcodeLengths[opcodeID] > 3);
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
return;
}
- case op_call_varargs: {
- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
- return;
- }
- case op_next_pname: {
+ case op_instanceof_custom:
+ case op_has_structure_property:
+ case op_construct_varargs:
+ case op_call_varargs:
+ case op_tail_call_varargs: {
+ ASSERT(opcodeLengths[opcodeID] > 4);
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
- functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
return;
}
- case op_get_by_pname: {
+ case op_get_direct_pname: {
+ ASSERT(opcodeLengths[opcodeID] > 5);
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
- functor(codeBlock, instruction, opcodeID, instruction[6].u.operand);
return;
}
case op_switch_string:
case op_switch_char:
case op_switch_imm: {
+ ASSERT(opcodeLengths[opcodeID] > 3);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
return;
}
@@ -227,19 +240,30 @@ void computeUsesForBytecodeOffset(
}
case op_construct:
case op_call_eval:
- case op_call: {
+ case op_call:
+ case op_tail_call: {
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
int argCount = instruction[3].u.operand;
int registerOffset = -instruction[4].u.operand;
int lastArg = registerOffset + CallFrame::thisArgumentOffset();
- for (int i = opcodeID == op_construct ? 1 : 0; i < argCount; i++)
+ for (int i = 0; i < argCount; i++)
functor(codeBlock, instruction, opcodeID, lastArg + i);
return;
}
- case op_tear_off_arguments: {
+ case op_save: {
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
- functor(codeBlock, instruction, opcodeID, unmodifiedArgumentsRegister(VirtualRegister(instruction[1].u.operand)).offset());
- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+ unsigned mergePointBytecodeOffset = bytecodeOffset + instruction[3].u.operand;
+ BytecodeBasicBlock* mergePointBlock = nullptr;
+ for (BytecodeBasicBlock* successor : block->successors()) {
+ if (successor->leaderBytecodeOffset() == mergePointBytecodeOffset) {
+ mergePointBlock = successor;
+ break;
+ }
+ }
+ ASSERT(mergePointBlock);
+ mergePointBlock->in().forEachSetBit([&](unsigned local) {
+ functor(codeBlock, instruction, opcodeID, virtualRegisterForLocal(local).offset());
+ });
return;
}
default:
@@ -249,7 +273,7 @@ void computeUsesForBytecodeOffset(
}
template<typename Functor>
-void computeDefsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset, Functor& functor)
+void computeDefsForBytecodeOffset(CodeBlock* codeBlock, BytecodeBasicBlock* block, unsigned bytecodeOffset, const Functor& functor)
{
Interpreter* interpreter = codeBlock->vm()->interpreter;
Instruction* instructionsBegin = codeBlock->instructions().begin();
@@ -257,20 +281,17 @@ void computeDefsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset,
OpcodeID opcodeID = interpreter->getOpcodeID(instruction->u.opcode);
switch (opcodeID) {
// These don't define anything.
- case op_init_global_const:
- case op_init_global_const_nop:
- case op_push_name_scope:
- case op_push_with_scope:
+ case op_copy_rest:
case op_put_to_scope:
- case op_pop_scope:
case op_end:
case op_profile_will_call:
case op_profile_did_call:
case op_throw:
case op_throw_static_error:
+ case op_save:
+ case op_assert:
case op_debug:
case op_ret:
- case op_ret_object_or_this:
case op_jmp:
case op_jtrue:
case op_jfalse:
@@ -290,73 +311,73 @@ void computeDefsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset,
case op_switch_char:
case op_switch_string:
case op_put_by_id:
- case op_put_by_id_out_of_line:
- case op_put_by_id_replace:
- case op_put_by_id_transition:
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_normal_out_of_line:
- case op_put_by_id_generic:
- case op_put_getter_setter:
+ case op_put_getter_by_id:
+ case op_put_setter_by_id:
+ case op_put_getter_setter_by_id:
+ case op_put_getter_by_val:
+ case op_put_setter_by_val:
case op_put_by_val:
case op_put_by_val_direct:
case op_put_by_index:
- case op_tear_off_arguments:
- case op_touch_entry:
+ case op_profile_type:
+ case op_profile_control_flow:
+ case op_put_to_arguments:
+ case op_watchdog:
#define LLINT_HELPER_OPCODES(opcode, length) case opcode:
FOR_EACH_LLINT_OPCODE_EXTENSION(LLINT_HELPER_OPCODES);
#undef LLINT_HELPER_OPCODES
return;
// These all have a single destination for the first argument.
- case op_next_pname:
+ case op_to_index_string:
+ case op_get_enumerable_length:
+ case op_has_indexed_property:
+ case op_has_structure_property:
+ case op_has_generic_property:
+ case op_get_direct_pname:
+ case op_get_property_enumerator:
+ case op_enumerator_structure_pname:
+ case op_enumerator_generic_pname:
+ case op_get_parent_scope:
+ case op_push_with_scope:
+ case op_create_lexical_environment:
case op_resolve_scope:
case op_strcat:
- case op_tear_off_activation:
case op_to_primitive:
- case op_catch:
case op_create_this:
case op_new_array:
case op_new_array_buffer:
case op_new_array_with_size:
case op_new_regexp:
case op_new_func:
- case op_new_captured_func:
case op_new_func_exp:
+ case op_new_generator_func:
+ case op_new_generator_func_exp:
+ case op_new_arrow_func_exp:
case op_call_varargs:
+ case op_tail_call_varargs:
+ case op_construct_varargs:
case op_get_from_scope:
case op_call:
+ case op_tail_call:
case op_call_eval:
case op_construct:
case op_get_by_id:
- case op_get_by_id_out_of_line:
- case op_get_by_id_self:
- case op_get_by_id_proto:
- case op_get_by_id_chain:
- case op_get_by_id_getter_self:
- case op_get_by_id_getter_proto:
- case op_get_by_id_getter_chain:
- case op_get_by_id_custom_self:
- case op_get_by_id_custom_proto:
- case op_get_by_id_custom_chain:
- case op_get_by_id_generic:
case op_get_array_length:
- case op_get_string_length:
- case op_check_has_instance:
+ case op_overrides_has_instance:
case op_instanceof:
+ case op_instanceof_custom:
case op_get_by_val:
- case op_get_argument_by_val:
- case op_get_by_pname:
- case op_get_arguments_length:
case op_typeof:
case op_is_undefined:
case op_is_boolean:
case op_is_number:
case op_is_string:
case op_is_object:
+ case op_is_object_or_null:
case op_is_function:
case op_in:
case op_to_number:
+ case op_to_string:
case op_negate:
case op_add:
case op_mul:
@@ -383,30 +404,41 @@ void computeDefsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset,
case op_eq_null:
case op_not:
case op_mov:
- case op_captured_mov:
case op_new_object:
case op_to_this:
- case op_get_callee:
- case op_init_lazy_reg:
- case op_create_activation:
- case op_create_arguments:
+ case op_check_tdz:
+ case op_get_scope:
+ case op_create_direct_arguments:
+ case op_create_scoped_arguments:
+ case op_create_out_of_band_arguments:
case op_del_by_id:
case op_del_by_val:
- case op_unsigned: {
+ case op_unsigned:
+ case op_get_from_arguments:
+ case op_get_rest_length: {
+ ASSERT(opcodeLengths[opcodeID] > 1);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
return;
}
- case op_get_pnames: {
+ case op_catch: {
+ ASSERT(opcodeLengths[opcodeID] > 2);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
return;
}
case op_enter: {
for (unsigned i = codeBlock->m_numVars; i--;)
functor(codeBlock, instruction, opcodeID, virtualRegisterForLocal(i).offset());
return;
- } }
+ }
+ case op_resume: {
+ RELEASE_ASSERT(block->successors().size() == 1);
+ block->successors()[0]->in().forEachSetBit([&](unsigned local) {
+ functor(codeBlock, instruction, opcodeID, virtualRegisterForLocal(local).offset());
+ });
+ return;
+ }
+ }
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CallEdge.cpp b/Source/JavaScriptCore/bytecode/CallEdge.cpp
new file mode 100644
index 000000000..dffff6dfd
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallEdge.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallEdge.h"
+
+namespace JSC {
+
+void CallEdge::dump(PrintStream& out) const
+{
+ out.print("<", m_callee, ", count: ", m_count, ">");
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/CallEdge.h b/Source/JavaScriptCore/bytecode/CallEdge.h
new file mode 100644
index 000000000..304520951
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallEdge.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CallEdge_h
+#define CallEdge_h
+
+#include "CallVariant.h"
+
+namespace JSC {
+
+class CallEdge {
+public:
+ CallEdge();
+ CallEdge(CallVariant, uint32_t);
+
+ bool operator!() const { return !m_callee; }
+
+ CallVariant callee() const { return m_callee; }
+ uint32_t count() const { return m_count; }
+
+ CallEdge despecifiedClosure() const
+ {
+ return CallEdge(m_callee.despecifiedClosure(), m_count);
+ }
+
+ void dump(PrintStream&) const;
+
+private:
+ CallVariant m_callee;
+ uint32_t m_count;
+};
+
+inline CallEdge::CallEdge(CallVariant callee, uint32_t count)
+ : m_callee(callee)
+ , m_count(count)
+{
+}
+
+inline CallEdge::CallEdge()
+ : CallEdge(CallVariant(), 0)
+{
+}
+
+typedef Vector<CallEdge, 1> CallEdgeList;
+
+} // namespace JSC
+
+#endif // CallEdge_h
+
diff --git a/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
index a4baa6100..0579d4250 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
+++ b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,35 +26,105 @@
#include "config.h"
#include "CallLinkInfo.h"
+#include "CallFrameShuffleData.h"
#include "DFGOperations.h"
#include "DFGThunks.h"
-#include "RepatchBuffer.h"
+#include "JSCInlines.h"
+#include "Repatch.h"
+#include <wtf/ListDump.h>
+#include <wtf/NeverDestroyed.h>
#if ENABLE(JIT)
namespace JSC {
-void CallLinkInfo::unlink(VM& vm, RepatchBuffer& repatchBuffer)
+CallLinkInfo::CallLinkInfo()
+ : m_hasSeenShouldRepatch(false)
+ , m_hasSeenClosure(false)
+ , m_clearedByGC(false)
+ , m_allowStubs(true)
+ , m_callType(None)
+ , m_maxNumArguments(0)
+ , m_slowPathCount(0)
{
- ASSERT(isLinked());
+}
+
+CallLinkInfo::~CallLinkInfo()
+{
+ clearStub();
+
+ if (isOnList())
+ remove();
+}
+
+void CallLinkInfo::clearStub()
+{
+ if (!stub())
+ return;
+
+ m_stub->clearCallNodesFor(this);
+ m_stub = nullptr;
+}
+
+void CallLinkInfo::unlink(VM& vm)
+{
+ if (!isLinked()) {
+ // We could be called even if we're not linked anymore because of how polymorphic calls
+ // work. Each callsite within the polymorphic call stub may separately ask us to unlink().
+ RELEASE_ASSERT(!isOnList());
+ return;
+ }
- repatchBuffer.revertJumpReplacementToBranchPtrWithPatch(RepatchBuffer::startOfBranchPtrWithPatchOnRegister(hotPathBegin), static_cast<MacroAssembler::RegisterID>(calleeGPR), 0);
- if (isDFG) {
-#if ENABLE(DFG_JIT)
- repatchBuffer.relink(callReturnLocation, (callType == Construct ? vm.getCTIStub(linkConstructThunkGenerator) : vm.getCTIStub(linkCallThunkGenerator)).code());
-#else
- RELEASE_ASSERT_NOT_REACHED();
-#endif
- } else
- repatchBuffer.relink(callReturnLocation, callType == Construct ? vm.getCTIStub(linkConstructThunkGenerator).code() : vm.getCTIStub(linkCallThunkGenerator).code());
- hasSeenShouldRepatch = false;
- callee.clear();
- stub.clear();
+ unlinkFor(vm, *this);
// It will be on a list if the callee has a code block.
if (isOnList())
remove();
}
+void CallLinkInfo::visitWeak(VM& vm)
+{
+ auto handleSpecificCallee = [&] (JSFunction* callee) {
+ if (Heap::isMarked(callee->executable()))
+ m_hasSeenClosure = true;
+ else
+ m_clearedByGC = true;
+ };
+
+ if (isLinked()) {
+ if (stub()) {
+ if (!stub()->visitWeak(vm)) {
+ if (Options::verboseOSR()) {
+ dataLog(
+ "Clearing closure call to ",
+ listDump(stub()->variants()), ", stub routine ", RawPointer(stub()),
+ ".\n");
+ }
+ unlink(vm);
+ m_clearedByGC = true;
+ }
+ } else if (!Heap::isMarked(m_callee.get())) {
+ if (Options::verboseOSR()) {
+ dataLog(
+ "Clearing call to ",
+ RawPointer(m_callee.get()), " (",
+ m_callee.get()->executable()->hashFor(specializationKind()),
+ ").\n");
+ }
+ handleSpecificCallee(m_callee.get());
+ unlink(vm);
+ }
+ }
+ if (haveLastSeenCallee() && !Heap::isMarked(lastSeenCallee())) {
+ handleSpecificCallee(lastSeenCallee());
+ clearLastSeenCallee();
+ }
+}
+
+void CallLinkInfo::setFrameShuffleData(const CallFrameShuffleData& shuffleData)
+{
+ m_frameShuffleData = std::make_unique<CallFrameShuffleData>(shuffleData);
+}
+
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/CallLinkInfo.h b/Source/JavaScriptCore/bytecode/CallLinkInfo.h
index 0244497df..beeeaa12c 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkInfo.h
+++ b/Source/JavaScriptCore/bytecode/CallLinkInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,89 +26,336 @@
#ifndef CallLinkInfo_h
#define CallLinkInfo_h
-#include "ClosureCallStubRoutine.h"
+#include "CallMode.h"
#include "CodeLocation.h"
#include "CodeSpecializationKind.h"
#include "JITWriteBarrier.h"
#include "JSFunction.h"
#include "Opcode.h"
+#include "PolymorphicCallStubRoutine.h"
#include "WriteBarrier.h"
-#include <wtf/Platform.h>
#include <wtf/SentinelLinkedList.h>
namespace JSC {
#if ENABLE(JIT)
-class RepatchBuffer;
+struct CallFrameShuffleData;
-struct CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> {
- enum CallType { None, Call, CallVarargs, Construct };
+class CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> {
+public:
+ enum CallType { None, Call, CallVarargs, Construct, ConstructVarargs, TailCall, TailCallVarargs };
static CallType callTypeFor(OpcodeID opcodeID)
{
if (opcodeID == op_call || opcodeID == op_call_eval)
return Call;
+ if (opcodeID == op_call_varargs)
+ return CallVarargs;
if (opcodeID == op_construct)
return Construct;
- ASSERT(opcodeID == op_call_varargs);
- return CallVarargs;
+ if (opcodeID == op_construct_varargs)
+ return ConstructVarargs;
+ if (opcodeID == op_tail_call)
+ return TailCall;
+ ASSERT(opcodeID == op_tail_call_varargs);
+ return TailCallVarargs;
}
-
- CallLinkInfo()
- : hasSeenShouldRepatch(false)
- , isDFG(false)
- , hasSeenClosure(false)
- , callType(None)
+
+ static bool isVarargsCallType(CallType callType)
{
+ switch (callType) {
+ case CallVarargs:
+ case ConstructVarargs:
+ case TailCallVarargs:
+ return true;
+
+ default:
+ return false;
+ }
}
+
+ CallLinkInfo();
- ~CallLinkInfo()
+ ~CallLinkInfo();
+
+ static CodeSpecializationKind specializationKindFor(CallType callType)
{
- if (isOnList())
- remove();
+ return specializationFromIsConstruct(callType == Construct || callType == ConstructVarargs);
}
-
CodeSpecializationKind specializationKind() const
{
- return specializationFromIsConstruct(callType == Construct);
+ return specializationKindFor(static_cast<CallType>(m_callType));
+ }
+
+ static CallMode callModeFor(CallType callType)
+ {
+ switch (callType) {
+ case Call:
+ case CallVarargs:
+ return CallMode::Regular;
+ case TailCall:
+ case TailCallVarargs:
+ return CallMode::Tail;
+ case Construct:
+ case ConstructVarargs:
+ return CallMode::Construct;
+ case None:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ CallMode callMode() const
+ {
+ return callModeFor(static_cast<CallType>(m_callType));
+ }
+
+ bool isTailCall() const
+ {
+ return callMode() == CallMode::Tail;
+ }
+
+ bool isVarargs() const
+ {
+ return isVarargsCallType(static_cast<CallType>(m_callType));
+ }
+
+ bool isLinked() { return m_stub || m_callee; }
+ void unlink(VM&);
+
+ void setUpCall(CallType callType, CodeOrigin codeOrigin, unsigned calleeGPR)
+ {
+ m_callType = callType;
+ m_codeOrigin = codeOrigin;
+ m_calleeGPR = calleeGPR;
+ }
+
+ void setCallLocations(CodeLocationNearCall callReturnLocation, CodeLocationDataLabelPtr hotPathBegin,
+ CodeLocationNearCall hotPathOther)
+ {
+ m_callReturnLocation = callReturnLocation;
+ m_hotPathBegin = hotPathBegin;
+ m_hotPathOther = hotPathOther;
+ }
+
+ bool allowStubs() const { return m_allowStubs; }
+
+ void disallowStubs()
+ {
+ m_allowStubs = false;
+ }
+
+ void setUpCallFromFTL(CallType callType, CodeOrigin codeOrigin,
+ CodeLocationNearCall callReturnLocation, CodeLocationDataLabelPtr hotPathBegin,
+ CodeLocationNearCall hotPathOther, unsigned calleeGPR)
+ {
+ m_callType = callType;
+ m_codeOrigin = codeOrigin;
+ m_callReturnLocation = callReturnLocation;
+ m_hotPathBegin = hotPathBegin;
+ m_hotPathOther = hotPathOther;
+ m_calleeGPR = calleeGPR;
+ }
+
+ CodeLocationNearCall callReturnLocation()
+ {
+ return m_callReturnLocation;
+ }
+
+ CodeLocationDataLabelPtr hotPathBegin()
+ {
+ return m_hotPathBegin;
+ }
+
+ CodeLocationNearCall hotPathOther()
+ {
+ return m_hotPathOther;
+ }
+
+ void setCallee(VM& vm, CodeLocationDataLabelPtr location, JSCell* owner, JSFunction* callee)
+ {
+ m_callee.set(vm, location, owner, callee);
+ }
+
+ void clearCallee()
+ {
+ m_callee.clear();
+ }
+
+ JSFunction* callee()
+ {
+ return m_callee.get();
+ }
+
+ void setLastSeenCallee(VM& vm, const JSCell* owner, JSFunction* callee)
+ {
+ m_lastSeenCallee.set(vm, owner, callee);
+ }
+
+ void clearLastSeenCallee()
+ {
+ m_lastSeenCallee.clear();
+ }
+
+ JSFunction* lastSeenCallee()
+ {
+ return m_lastSeenCallee.get();
+ }
+
+ bool haveLastSeenCallee()
+ {
+ return !!m_lastSeenCallee;
+ }
+
+ void setStub(PassRefPtr<PolymorphicCallStubRoutine> newStub)
+ {
+ clearStub();
+ m_stub = newStub;
}
- CodeLocationNearCall callReturnLocation;
- CodeLocationDataLabelPtr hotPathBegin;
- CodeLocationNearCall hotPathOther;
- JITWriteBarrier<JSFunction> callee;
- WriteBarrier<JSFunction> lastSeenCallee;
- RefPtr<ClosureCallStubRoutine> stub;
- bool hasSeenShouldRepatch : 1;
- bool isDFG : 1;
- bool hasSeenClosure : 1;
- unsigned callType : 5; // CallType
- unsigned calleeGPR : 8;
- CodeOrigin codeOrigin;
+ void clearStub();
- bool isLinked() { return stub || callee; }
- void unlink(VM&, RepatchBuffer&);
+ PolymorphicCallStubRoutine* stub()
+ {
+ return m_stub.get();
+ }
+
+ void setSlowStub(PassRefPtr<JITStubRoutine> newSlowStub)
+ {
+ m_slowStub = newSlowStub;
+ }
+
+ void clearSlowStub()
+ {
+ m_slowStub = nullptr;
+ }
+
+ JITStubRoutine* slowStub()
+ {
+ return m_slowStub.get();
+ }
bool seenOnce()
{
- return hasSeenShouldRepatch;
+ return m_hasSeenShouldRepatch;
+ }
+
+ void clearSeen()
+ {
+ m_hasSeenShouldRepatch = false;
}
void setSeen()
{
- hasSeenShouldRepatch = true;
+ m_hasSeenShouldRepatch = true;
+ }
+
+ bool hasSeenClosure()
+ {
+ return m_hasSeenClosure;
+ }
+
+ void setHasSeenClosure()
+ {
+ m_hasSeenClosure = true;
+ }
+
+ bool clearedByGC()
+ {
+ return m_clearedByGC;
+ }
+
+ void setCallType(CallType callType)
+ {
+ m_callType = callType;
+ }
+
+ CallType callType()
+ {
+ return static_cast<CallType>(m_callType);
+ }
+
+ uint8_t* addressOfMaxNumArguments()
+ {
+ return &m_maxNumArguments;
}
+
+ uint8_t maxNumArguments()
+ {
+ return m_maxNumArguments;
+ }
+
+ static ptrdiff_t offsetOfSlowPathCount()
+ {
+ return OBJECT_OFFSETOF(CallLinkInfo, m_slowPathCount);
+ }
+
+ void setCalleeGPR(unsigned calleeGPR)
+ {
+ m_calleeGPR = calleeGPR;
+ }
+
+ unsigned calleeGPR()
+ {
+ return m_calleeGPR;
+ }
+
+ uint32_t slowPathCount()
+ {
+ return m_slowPathCount;
+ }
+
+ void setCodeOrigin(CodeOrigin codeOrigin)
+ {
+ m_codeOrigin = codeOrigin;
+ }
+
+ CodeOrigin codeOrigin()
+ {
+ return m_codeOrigin;
+ }
+
+ void visitWeak(VM&);
+
+ void setFrameShuffleData(const CallFrameShuffleData&);
+
+ const CallFrameShuffleData* frameShuffleData()
+ {
+ return m_frameShuffleData.get();
+ }
+
+private:
+ CodeLocationNearCall m_callReturnLocation;
+ CodeLocationDataLabelPtr m_hotPathBegin;
+ CodeLocationNearCall m_hotPathOther;
+ JITWriteBarrier<JSFunction> m_callee;
+ WriteBarrier<JSFunction> m_lastSeenCallee;
+ RefPtr<PolymorphicCallStubRoutine> m_stub;
+ RefPtr<JITStubRoutine> m_slowStub;
+ std::unique_ptr<CallFrameShuffleData> m_frameShuffleData;
+ bool m_hasSeenShouldRepatch : 1;
+ bool m_hasSeenClosure : 1;
+ bool m_clearedByGC : 1;
+ bool m_allowStubs : 1;
+ unsigned m_callType : 4; // CallType
+ unsigned m_calleeGPR : 8;
+ uint8_t m_maxNumArguments; // Only used for varargs calls.
+ uint32_t m_slowPathCount;
+ CodeOrigin m_codeOrigin;
};
-inline void* getCallLinkInfoReturnLocation(CallLinkInfo* callLinkInfo)
+inline CodeOrigin getCallLinkInfoCodeOrigin(CallLinkInfo& callLinkInfo)
{
- return callLinkInfo->callReturnLocation.executableAddress();
+ return callLinkInfo.codeOrigin();
}
-inline unsigned getCallLinkInfoBytecodeIndex(CallLinkInfo* callLinkInfo)
-{
- return callLinkInfo->codeOrigin.bytecodeIndex;
-}
+typedef HashMap<CodeOrigin, CallLinkInfo*, CodeOriginApproximateHash> CallLinkInfoMap;
+
+#else // ENABLE(JIT)
+
+typedef HashMap<int, void*> CallLinkInfoMap;
+
#endif // ENABLE(JIT)
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
index b64c967e9..8ffc23d13 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,103 +26,305 @@
#include "config.h"
#include "CallLinkStatus.h"
+#include "CallLinkInfo.h"
#include "CodeBlock.h"
+#include "DFGJITCode.h"
+#include "InlineCallFrame.h"
#include "LLIntCallLinkInfo.h"
-#include "Operations.h"
+#include "JSCInlines.h"
#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
namespace JSC {
+static const bool verbose = false;
+
CallLinkStatus::CallLinkStatus(JSValue value)
- : m_callTarget(value)
- , m_executable(0)
- , m_structure(0)
- , m_couldTakeSlowPath(false)
+ : m_couldTakeSlowPath(false)
, m_isProved(false)
{
- if (!value || !value.isCell())
- return;
-
- m_structure = value.asCell()->structure();
-
- if (!value.asCell()->inherits(JSFunction::info()))
+ if (!value || !value.isCell()) {
+ m_couldTakeSlowPath = true;
return;
+ }
- m_executable = jsCast<JSFunction*>(value.asCell())->executable();
+ m_variants.append(CallVariant(value.asCell()));
}
-JSFunction* CallLinkStatus::function() const
+CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
- if (!m_callTarget || !m_callTarget.isCell())
- return 0;
-
- if (!m_callTarget.asCell()->inherits(JSFunction::info()))
- return 0;
-
- return jsCast<JSFunction*>(m_callTarget.asCell());
-}
+ UNUSED_PARAM(profiledBlock);
+ UNUSED_PARAM(bytecodeIndex);
+#if ENABLE(DFG_JIT)
+ if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell))) {
+ // We could force this to be a closure call, but instead we'll just assume that it
+ // takes slow path.
+ return takesSlowPath();
+ }
+#else
+ UNUSED_PARAM(locker);
+#endif
-InternalFunction* CallLinkStatus::internalFunction() const
-{
- if (!m_callTarget || !m_callTarget.isCell())
- return 0;
+ VM& vm = *profiledBlock->vm();
- if (!m_callTarget.asCell()->inherits(InternalFunction::info()))
- return 0;
+ Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+ OpcodeID op = vm.interpreter->getOpcodeID(instruction[0].u.opcode);
+ if (op != op_call && op != op_construct && op != op_tail_call)
+ return CallLinkStatus();
- return jsCast<InternalFunction*>(m_callTarget.asCell());
-}
-
-Intrinsic CallLinkStatus::intrinsicFor(CodeSpecializationKind kind) const
-{
- if (!m_executable)
- return NoIntrinsic;
+ LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;
- return m_executable->intrinsicFor(kind);
+ return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
}
-CallLinkStatus CallLinkStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex)
+CallLinkStatus CallLinkStatus::computeFor(
+ CodeBlock* profiledBlock, unsigned bytecodeIndex, const CallLinkInfoMap& map)
{
+ ConcurrentJITLocker locker(profiledBlock->m_lock);
+
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
-#if ENABLE(LLINT)
- Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
- LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;
+ UNUSED_PARAM(map);
+#if ENABLE(DFG_JIT)
+ ExitSiteData exitSiteData = computeExitSiteData(locker, profiledBlock, bytecodeIndex);
- return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
+ CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex));
+ if (!callLinkInfo) {
+ if (exitSiteData.takesSlowPath)
+ return takesSlowPath();
+ return computeFromLLInt(locker, profiledBlock, bytecodeIndex);
+ }
+
+ return computeFor(locker, profiledBlock, *callLinkInfo, exitSiteData);
#else
return CallLinkStatus();
#endif
}
-CallLinkStatus CallLinkStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex)
+CallLinkStatus::ExitSiteData CallLinkStatus::computeExitSiteData(
+ const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
- ConcurrentJITLocker locker(profiledBlock->m_lock);
+ ExitSiteData exitSiteData;
+#if ENABLE(DFG_JIT)
+ exitSiteData.takesSlowPath =
+ profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadType))
+ || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable));
+ exitSiteData.badFunction =
+ profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell));
+#else
+ UNUSED_PARAM(locker);
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
+#endif
+
+ return exitSiteData;
+}
+
#if ENABLE(JIT)
- if (!profiledBlock->hasBaselineJITProfiling())
- return computeFromLLInt(profiledBlock, bytecodeIndex);
+CallLinkStatus CallLinkStatus::computeFor(
+ const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo)
+{
+ // We don't really need this, but anytime we have to debug this code, it becomes indispensable.
+ UNUSED_PARAM(profiledBlock);
+
+ CallLinkStatus result = computeFromCallLinkInfo(locker, callLinkInfo);
+ result.m_maxNumArguments = callLinkInfo.maxNumArguments();
+ return result;
+}
+
+CallLinkStatus CallLinkStatus::computeFromCallLinkInfo(
+ const ConcurrentJITLocker&, CallLinkInfo& callLinkInfo)
+{
+ if (callLinkInfo.clearedByGC())
+ return takesSlowPath();
- if (profiledBlock->couldTakeSlowCase(bytecodeIndex))
- return CallLinkStatus::takesSlowPath();
+ // Note that despite requiring that the locker is held, this code is racy with respect
+ // to the CallLinkInfo: it may get cleared while this code runs! This is because
+ // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns
+ // the CallLinkInfo and currently we save space by not having CallLinkInfos know who owns
+ // them. So, there is no way for either the caller of CallLinkInfo::unlock() or unlock()
+ // itself to figure out which lock to lock.
+ //
+ // Fortunately, that doesn't matter. The only things we ask of CallLinkInfo - the slow
+ // path count, the stub, and the target - can all be asked racily. Stubs and targets can
+ // only be deleted at next GC, so if we load a non-null one, then it must contain data
+ // that is still marginally valid (i.e. the pointers ain't stale). This kind of raciness
+ // is probably OK for now.
- CallLinkInfo& callLinkInfo = profiledBlock->getCallLinkInfo(bytecodeIndex);
- if (callLinkInfo.stub)
- return CallLinkStatus(callLinkInfo.stub->executable(), callLinkInfo.stub->structure());
+ // PolymorphicCallStubRoutine is a GCAwareJITStubRoutine, so if non-null, it will stay alive
+ // until next GC even if the CallLinkInfo is concurrently cleared. Also, the variants list is
+ // never mutated after the PolymorphicCallStubRoutine is instantiated. We have some conservative
+ // fencing in place to make sure that we see the variants list after construction.
+ if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub()) {
+ WTF::loadLoadFence();
+
+ CallEdgeList edges = stub->edges();
+
+ // Now that we've loaded the edges list, there are no further concurrency concerns. We will
+ // just manipulate and prune this list to our liking - mostly removing entries that are too
+ // infrequent and ensuring that it's sorted in descending order of frequency.
+
+ RELEASE_ASSERT(edges.size());
+
+ std::sort(
+ edges.begin(), edges.end(),
+ [] (CallEdge a, CallEdge b) {
+ return a.count() > b.count();
+ });
+ RELEASE_ASSERT(edges.first().count() >= edges.last().count());
+
+ double totalCallsToKnown = 0;
+ double totalCallsToUnknown = callLinkInfo.slowPathCount();
+ CallVariantList variants;
+ for (size_t i = 0; i < edges.size(); ++i) {
+ CallEdge edge = edges[i];
+ // If the call is at the tail of the distribution, then we don't optimize it and we
+ // treat it as if it was a call to something unknown. We define the tail as being either
+ // a call that doesn't belong to the N most frequent callees (N =
+ // maxPolymorphicCallVariantsForInlining) or that has a total call count that is too
+ // small.
+ if (i >= Options::maxPolymorphicCallVariantsForInlining()
+ || edge.count() < Options::frequentCallThreshold())
+ totalCallsToUnknown += edge.count();
+ else {
+ totalCallsToKnown += edge.count();
+ variants.append(edge.callee());
+ }
+ }
+
+ // Bail if we didn't find any calls that qualified.
+ RELEASE_ASSERT(!!totalCallsToKnown == !!variants.size());
+ if (variants.isEmpty())
+ return takesSlowPath();
+
+ // We require that the distribution of callees is skewed towards a handful of common ones.
+ if (totalCallsToKnown / totalCallsToUnknown < Options::minimumCallToKnownRate())
+ return takesSlowPath();
+
+ RELEASE_ASSERT(totalCallsToKnown);
+ RELEASE_ASSERT(variants.size());
+
+ CallLinkStatus result;
+ result.m_variants = variants;
+ result.m_couldTakeSlowPath = !!totalCallsToUnknown;
+ result.m_isBasedOnStub = true;
+ return result;
+ }
- JSFunction* target = callLinkInfo.lastSeenCallee.get();
- if (!target)
- return computeFromLLInt(profiledBlock, bytecodeIndex);
+ CallLinkStatus result;
- if (callLinkInfo.hasSeenClosure)
- return CallLinkStatus(target->executable(), target->structure());
+ if (JSFunction* target = callLinkInfo.lastSeenCallee()) {
+ CallVariant variant(target);
+ if (callLinkInfo.hasSeenClosure())
+ variant = variant.despecifiedClosure();
+ result.m_variants.append(variant);
+ }
+
+ result.m_couldTakeSlowPath = !!callLinkInfo.slowPathCount();
- return CallLinkStatus(target);
-#else
- return CallLinkStatus();
+ return result;
+}
+
+CallLinkStatus CallLinkStatus::computeFor(
+ const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo,
+ ExitSiteData exitSiteData)
+{
+ CallLinkStatus result = computeFor(locker, profiledBlock, callLinkInfo);
+ if (exitSiteData.badFunction) {
+ if (result.isBasedOnStub()) {
+ // If we have a polymorphic stub, then having an exit site is not quite so useful. In
+ // most cases, the information in the stub has higher fidelity.
+ result.makeClosureCall();
+ } else {
+ // We might not have a polymorphic stub for any number of reasons. When this happens, we
+ // are in less certain territory, so exit sites mean a lot.
+ result.m_couldTakeSlowPath = true;
+ }
+ }
+ if (exitSiteData.takesSlowPath)
+ result.m_couldTakeSlowPath = true;
+
+ return result;
+}
#endif
+
+void CallLinkStatus::computeDFGStatuses(
+ CodeBlock* dfgCodeBlock, CallLinkStatus::ContextMap& map)
+{
+#if ENABLE(DFG_JIT)
+ RELEASE_ASSERT(dfgCodeBlock->jitType() == JITCode::DFGJIT);
+ CodeBlock* baselineCodeBlock = dfgCodeBlock->alternative();
+ for (auto iter = dfgCodeBlock->callLinkInfosBegin(); !!iter; ++iter) {
+ CallLinkInfo& info = **iter;
+ CodeOrigin codeOrigin = info.codeOrigin();
+
+ // Check if we had already previously made a terrible mistake in the FTL for this
+ // code origin. Note that this is approximate because we could have a monovariant
+ // inline in the FTL that ended up failing. We should fix that at some point by
+ // having data structures to track the context of frequent exits. This is currently
+ // challenging because it would require creating a CodeOrigin-based database in
+ // baseline CodeBlocks, but those CodeBlocks don't really have a place to put the
+ // InlineCallFrames.
+ CodeBlock* currentBaseline =
+ baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
+ ExitSiteData exitSiteData;
+ {
+ ConcurrentJITLocker locker(currentBaseline->m_lock);
+ exitSiteData = computeExitSiteData(
+ locker, currentBaseline, codeOrigin.bytecodeIndex);
+ }
+
+ {
+ ConcurrentJITLocker locker(dfgCodeBlock->m_lock);
+ map.add(info.codeOrigin(), computeFor(locker, dfgCodeBlock, info, exitSiteData));
+ }
+ }
+#else
+ UNUSED_PARAM(dfgCodeBlock);
+#endif // ENABLE(DFG_JIT)
+
+ if (verbose) {
+ dataLog("Context map:\n");
+ ContextMap::iterator iter = map.begin();
+ ContextMap::iterator end = map.end();
+ for (; iter != end; ++iter) {
+ dataLog(" ", iter->key, ":\n");
+ dataLog(" ", iter->value, "\n");
+ }
+ }
+}
+
+CallLinkStatus CallLinkStatus::computeFor(
+ CodeBlock* profiledBlock, CodeOrigin codeOrigin,
+ const CallLinkInfoMap& baselineMap, const CallLinkStatus::ContextMap& dfgMap)
+{
+ auto iter = dfgMap.find(codeOrigin);
+ if (iter != dfgMap.end())
+ return iter->value;
+
+ return computeFor(profiledBlock, codeOrigin.bytecodeIndex, baselineMap);
+}
+
+void CallLinkStatus::setProvenConstantCallee(CallVariant variant)
+{
+ m_variants = CallVariantList{ variant };
+ m_couldTakeSlowPath = false;
+ m_isProved = true;
+}
+
+bool CallLinkStatus::isClosureCall() const
+{
+ for (unsigned i = m_variants.size(); i--;) {
+ if (m_variants[i].isClosureCall())
+ return true;
+ }
+ return false;
+}
+
+void CallLinkStatus::makeClosureCall()
+{
+ m_variants = despecifiedVariantList(m_variants);
}
void CallLinkStatus::dump(PrintStream& out) const
@@ -140,17 +342,11 @@ void CallLinkStatus::dump(PrintStream& out) const
if (m_couldTakeSlowPath)
out.print(comma, "Could Take Slow Path");
- if (m_callTarget)
- out.print(comma, "Known target: ", m_callTarget);
-
- if (m_executable) {
- out.print(comma, "Executable/CallHash: ", RawPointer(m_executable));
- if (!isCompilationThread())
- out.print("/", m_executable->hashFor(CodeForCall));
- }
+ if (!m_variants.isEmpty())
+ out.print(comma, listDump(m_variants));
- if (m_structure)
- out.print(comma, "Structure: ", RawPointer(m_structure));
+ if (m_maxNumArguments)
+ out.print(comma, "maxNumArguments = ", m_maxNumArguments);
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.h b/Source/JavaScriptCore/bytecode/CallLinkStatus.h
index 51965fe4a..d3c1eee0c 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.h
+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,7 +26,12 @@
#ifndef CallLinkStatus_h
#define CallLinkStatus_h
+#include "CallLinkInfo.h"
+#include "CallVariant.h"
+#include "CodeOrigin.h"
#include "CodeSpecializationKind.h"
+#include "ConcurrentJITLock.h"
+#include "ExitingJITType.h"
#include "Intrinsic.h"
#include "JSCJSValue.h"
@@ -37,14 +42,12 @@ class ExecutableBase;
class InternalFunction;
class JSFunction;
class Structure;
+class CallLinkInfo;
class CallLinkStatus {
+ WTF_MAKE_FAST_ALLOCATED;
public:
CallLinkStatus()
- : m_executable(0)
- , m_structure(0)
- , m_couldTakeSlowPath(false)
- , m_isProved(false)
{
}
@@ -57,75 +60,75 @@ public:
explicit CallLinkStatus(JSValue);
- CallLinkStatus(ExecutableBase* executable, Structure* structure)
- : m_executable(executable)
- , m_structure(structure)
- , m_couldTakeSlowPath(false)
- , m_isProved(false)
+ CallLinkStatus(CallVariant variant)
+ : m_variants(1, variant)
{
- ASSERT(!!executable == !!structure);
}
- CallLinkStatus& setIsProved(bool isProved)
- {
- m_isProved = isProved;
- return *this;
- }
+ static CallLinkStatus computeFor(
+ CodeBlock*, unsigned bytecodeIndex, const CallLinkInfoMap&);
+
+ struct ExitSiteData {
+ bool takesSlowPath { false };
+ bool badFunction { false };
+ };
+ static ExitSiteData computeExitSiteData(const ConcurrentJITLocker&, CodeBlock*, unsigned bytecodeIndex);
- static CallLinkStatus computeFor(CodeBlock*, unsigned bytecodeIndex);
+#if ENABLE(JIT)
+ // Computes the status assuming that we never took slow path and never previously
+ // exited.
+ static CallLinkStatus computeFor(const ConcurrentJITLocker&, CodeBlock*, CallLinkInfo&);
+ static CallLinkStatus computeFor(
+ const ConcurrentJITLocker&, CodeBlock*, CallLinkInfo&, ExitSiteData);
+#endif
- CallLinkStatus& setHasBadFunctionExitSite(bool didHaveExitSite)
- {
- ASSERT(!m_isProved);
- if (didHaveExitSite) {
- // Turn this into a closure call.
- m_callTarget = JSValue();
- }
- return *this;
- }
+ typedef HashMap<CodeOrigin, CallLinkStatus, CodeOriginApproximateHash> ContextMap;
- CallLinkStatus& setHasBadCacheExitSite(bool didHaveExitSite)
- {
- ASSERT(!m_isProved);
- if (didHaveExitSite)
- *this = takesSlowPath();
- return *this;
- }
+ // Computes all of the statuses of the DFG code block. Doesn't include statuses that had
+ // no information. Currently we use this when compiling FTL code, to enable polyvariant
+ // inlining.
+ static void computeDFGStatuses(CodeBlock* dfgCodeBlock, ContextMap&);
- CallLinkStatus& setHasBadExecutableExitSite(bool didHaveExitSite)
- {
- ASSERT(!m_isProved);
- if (didHaveExitSite)
- *this = takesSlowPath();
- return *this;
- }
+ // Helper that first consults the ContextMap and then does computeFor().
+ static CallLinkStatus computeFor(
+ CodeBlock*, CodeOrigin, const CallLinkInfoMap&, const ContextMap&);
- bool isSet() const { return m_callTarget || m_executable || m_couldTakeSlowPath; }
+ void setProvenConstantCallee(CallVariant);
+
+ bool isSet() const { return !m_variants.isEmpty() || m_couldTakeSlowPath; }
bool operator!() const { return !isSet(); }
bool couldTakeSlowPath() const { return m_couldTakeSlowPath; }
- bool isClosureCall() const { return m_executable && !m_callTarget; }
-
- JSValue callTarget() const { return m_callTarget; }
- JSFunction* function() const;
- InternalFunction* internalFunction() const;
- Intrinsic intrinsicFor(CodeSpecializationKind) const;
- ExecutableBase* executable() const { return m_executable; }
- Structure* structure() const { return m_structure; }
+
+ CallVariantList variants() const { return m_variants; }
+ unsigned size() const { return m_variants.size(); }
+ CallVariant at(unsigned i) const { return m_variants[i]; }
+ CallVariant operator[](unsigned i) const { return at(i); }
bool isProved() const { return m_isProved; }
- bool canOptimize() const { return (m_callTarget || m_executable) && !m_couldTakeSlowPath; }
+ bool isBasedOnStub() const { return m_isBasedOnStub; }
+ bool canOptimize() const { return !m_variants.isEmpty(); }
+
+ bool isClosureCall() const; // Returns true if any callee is a closure call.
+
+ unsigned maxNumArguments() const { return m_maxNumArguments; }
void dump(PrintStream&) const;
private:
- static CallLinkStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex);
+ void makeClosureCall();
+
+ static CallLinkStatus computeFromLLInt(const ConcurrentJITLocker&, CodeBlock*, unsigned bytecodeIndex);
+#if ENABLE(JIT)
+ static CallLinkStatus computeFromCallLinkInfo(
+ const ConcurrentJITLocker&, CallLinkInfo&);
+#endif
- JSValue m_callTarget;
- ExecutableBase* m_executable;
- Structure* m_structure;
- bool m_couldTakeSlowPath;
- bool m_isProved;
+ CallVariantList m_variants;
+ bool m_couldTakeSlowPath { false };
+ bool m_isProved { false };
+ bool m_isBasedOnStub { false };
+ unsigned m_maxNumArguments { 0 };
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CallMode.cpp b/Source/JavaScriptCore/bytecode/CallMode.cpp
new file mode 100644
index 000000000..5757b1850
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallMode.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallMode.h"
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::CallMode callMode)
+{
+ switch (callMode) {
+ case JSC::CallMode::Tail:
+ out.print("TailCall");
+ return;
+ case JSC::CallMode::Regular:
+ out.print("Call");
+ return;
+ case JSC::CallMode::Construct:
+ out.print("Construct");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/CallMode.h b/Source/JavaScriptCore/bytecode/CallMode.h
new file mode 100644
index 000000000..bf21d8634
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallMode.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CallMode_h
+#define CallMode_h
+
+#include "CodeSpecializationKind.h"
+
+namespace JSC {
+
+enum class CallMode { Regular, Tail, Construct };
+
+enum FrameAction { KeepTheFrame = 0, ReuseTheFrame };
+
+inline CodeSpecializationKind specializationKindFor(CallMode callMode)
+{
+ if (callMode == CallMode::Construct)
+ return CodeForConstruct;
+
+ return CodeForCall;
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::CallMode);
+
+} // namespace WTF
+
+#endif // CallMode_h
+
diff --git a/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h b/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h
index 3a7448efd..496738f09 100644
--- a/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h
+++ b/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h
@@ -26,8 +26,6 @@
#ifndef CallReturnOffsetToBytecodeOffset_h
#define CallReturnOffsetToBytecodeOffset_h
-#include <wtf/Platform.h>
-
namespace JSC {
#if ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/CallVariant.cpp b/Source/JavaScriptCore/bytecode/CallVariant.cpp
new file mode 100644
index 000000000..9745dde2b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallVariant.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallVariant.h"
+
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+void CallVariant::dump(PrintStream& out) const
+{
+ if (!*this) {
+ out.print("null");
+ return;
+ }
+
+ if (InternalFunction* internalFunction = this->internalFunction()) {
+ out.print("InternalFunction: ", JSValue(internalFunction));
+ return;
+ }
+
+ if (JSFunction* function = this->function()) {
+ out.print("(Function: ", JSValue(function), "; Executable: ", *executable(), ")");
+ return;
+ }
+
+ out.print("Executable: ", *executable());
+}
+
+CallVariantList variantListWithVariant(const CallVariantList& list, CallVariant variantToAdd)
+{
+ ASSERT(variantToAdd);
+ CallVariantList result;
+ for (CallVariant variant : list) {
+ ASSERT(variant);
+ if (!!variantToAdd) {
+ if (variant == variantToAdd)
+ variantToAdd = CallVariant();
+ else if (variant.despecifiedClosure() == variantToAdd.despecifiedClosure()) {
+ variant = variant.despecifiedClosure();
+ variantToAdd = CallVariant();
+ }
+ }
+ result.append(variant);
+ }
+ if (!!variantToAdd)
+ result.append(variantToAdd);
+
+ if (!ASSERT_DISABLED) {
+ for (unsigned i = 0; i < result.size(); ++i) {
+ for (unsigned j = i + 1; j < result.size(); ++j) {
+ if (result[i] != result[j])
+ continue;
+
+ dataLog("variantListWithVariant(", listDump(list), ", ", variantToAdd, ") failed: got duplicates in result: ", listDump(result), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+ }
+
+ return result;
+}
+
+CallVariantList despecifiedVariantList(const CallVariantList& list)
+{
+ CallVariantList result;
+ for (CallVariant variant : list)
+ result = variantListWithVariant(result, variant.despecifiedClosure());
+ return result;
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/CallVariant.h b/Source/JavaScriptCore/bytecode/CallVariant.h
new file mode 100644
index 000000000..2514f72b8
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallVariant.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CallVariant_h
+#define CallVariant_h
+
+#include "Executable.h"
+#include "JSCell.h"
+#include "JSFunction.h"
+
+namespace JSC {
+
+// The CallVariant class is meant to encapsulate a callee in a way that is useful for call linking
+// and inlining. Because JavaScript has closures, and because JSC implements the notion of internal
+// non-function objects that nevertheless provide call traps, the call machinery wants to see a
+// callee in one of the following four forms:
+//
+// JSFunction callee: This means that we expect the callsite to always call a particular function
+// instance, that is associated with a particular lexical environment. This pinpoints not
+// just the code that will be called (i.e. the executable) but also the scope within which
+// the code runs.
+//
+// Executable callee: This corresponds to a call to a closure. In this case, we know that the
+// callsite will call a JSFunction, but we do not know which particular JSFunction. We do know
+// what code will be called - i.e. we know the executable.
+//
+// InternalFunction callee: JSC supports a special kind of native functions that support bizarre
+// semantics. These are always singletons. If we know that the callee is an InternalFunction
+// then we know both the code that will be called and the scope; in fact the "scope" is really
+// just the InternalFunction itself.
+//
+// Something else: It's possible call all manner of rubbish in JavaScript. This implicitly supports
+// bizarre object callees, but it can't really tell you anything interesting about them other
+// than the fact that they don't fall into any of the above categories.
+//
+// This class serves as a kind of union over these four things. It does so by just holding a
+// JSCell*. We determine which of the modes its in by doing type checks on the cell. Note that we
+// cannot use WriteBarrier<> here because this gets used inside the compiler.
+
+class CallVariant {
+public:
+ explicit CallVariant(JSCell* callee = nullptr)
+ : m_callee(callee)
+ {
+ }
+
+ CallVariant(WTF::HashTableDeletedValueType)
+ : m_callee(deletedToken())
+ {
+ }
+
+ bool operator!() const { return !m_callee; }
+
+ // If this variant refers to a function, change it to refer to its executable.
+ ALWAYS_INLINE CallVariant despecifiedClosure() const
+ {
+ if (m_callee->type() == JSFunctionType)
+ return CallVariant(jsCast<JSFunction*>(m_callee)->executable());
+ return *this;
+ }
+
+ JSCell* rawCalleeCell() const { return m_callee; }
+
+ InternalFunction* internalFunction() const
+ {
+ return jsDynamicCast<InternalFunction*>(m_callee);
+ }
+
+ JSFunction* function() const
+ {
+ return jsDynamicCast<JSFunction*>(m_callee);
+ }
+
+ bool isClosureCall() const { return !!jsDynamicCast<ExecutableBase*>(m_callee); }
+
+ ExecutableBase* executable() const
+ {
+ if (JSFunction* function = this->function())
+ return function->executable();
+ return jsDynamicCast<ExecutableBase*>(m_callee);
+ }
+
+ JSCell* nonExecutableCallee() const
+ {
+ RELEASE_ASSERT(!isClosureCall());
+ return m_callee;
+ }
+
+ Intrinsic intrinsicFor(CodeSpecializationKind kind) const
+ {
+ if (ExecutableBase* executable = this->executable())
+ return executable->intrinsicFor(kind);
+ return NoIntrinsic;
+ }
+
+ FunctionExecutable* functionExecutable() const
+ {
+ if (ExecutableBase* executable = this->executable())
+ return jsDynamicCast<FunctionExecutable*>(executable);
+ return nullptr;
+ }
+
+ void dump(PrintStream& out) const;
+
+ bool isHashTableDeletedValue() const
+ {
+ return m_callee == deletedToken();
+ }
+
+ bool operator==(const CallVariant& other) const
+ {
+ return m_callee == other.m_callee;
+ }
+
+ bool operator!=(const CallVariant& other) const
+ {
+ return !(*this == other);
+ }
+
+ bool operator<(const CallVariant& other) const
+ {
+ return m_callee < other.m_callee;
+ }
+
+ bool operator>(const CallVariant& other) const
+ {
+ return other < *this;
+ }
+
+ bool operator<=(const CallVariant& other) const
+ {
+ return !(*this < other);
+ }
+
+ bool operator>=(const CallVariant& other) const
+ {
+ return other <= *this;
+ }
+
+ unsigned hash() const
+ {
+ return WTF::PtrHash<JSCell*>::hash(m_callee);
+ }
+
+private:
+ static JSCell* deletedToken() { return bitwise_cast<JSCell*>(static_cast<uintptr_t>(1)); }
+
+ JSCell* m_callee;
+};
+
+struct CallVariantHash {
+ static unsigned hash(const CallVariant& key) { return key.hash(); }
+ static bool equal(const CallVariant& a, const CallVariant& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+typedef Vector<CallVariant, 1> CallVariantList;
+
+// Returns a new variant list by attempting to either append the given variant or merge it with one
+// of the variants we already have by despecifying closures.
+CallVariantList variantListWithVariant(const CallVariantList&, CallVariant);
+
+// Returns a new list where every element is despecified, and the list is deduplicated.
+CallVariantList despecifiedVariantList(const CallVariantList&);
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::CallVariant> {
+ typedef JSC::CallVariantHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::CallVariant> : SimpleClassHashTraits<JSC::CallVariant> { };
+
+} // namespace WTF
+
+#endif // CallVariant_h
+
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index eec5b7076..ba6a4bdf9 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2010, 2012-2015 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
*
* Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -30,35 +30,47 @@
#include "config.h"
#include "CodeBlock.h"
+#include "BasicBlockLocation.h"
#include "BytecodeGenerator.h"
#include "BytecodeUseDef.h"
#include "CallLinkStatus.h"
#include "DFGCapabilities.h"
#include "DFGCommon.h"
#include "DFGDriver.h"
-#include "DFGNode.h"
+#include "DFGJITCode.h"
#include "DFGWorklist.h"
#include "Debugger.h"
+#include "FunctionExecutableDump.h"
+#include "GetPutInfo.h"
+#include "InlineCallFrame.h"
#include "Interpreter.h"
#include "JIT.h"
-#include "JITStubs.h"
-#include "JSActivation.h"
#include "JSCJSValue.h"
#include "JSFunction.h"
-#include "JSNameScope.h"
+#include "JSLexicalEnvironment.h"
+#include "JSModuleEnvironment.h"
#include "LLIntEntrypoint.h"
#include "LowLevelInterpreter.h"
-#include "Operations.h"
-#include "PolymorphicPutByIdList.h"
+#include "JSCInlines.h"
+#include "PCToCodeOriginMap.h"
+#include "PolymorphicAccess.h"
+#include "ProfilerDatabase.h"
#include "ReduceWhitespace.h"
#include "Repatch.h"
-#include "RepatchBuffer.h"
#include "SlotVisitorInlines.h"
+#include "StackVisitor.h"
+#include "TypeLocationCache.h"
+#include "TypeProfiler.h"
#include "UnlinkedInstructionStream.h"
#include <wtf/BagToHashMap.h>
#include <wtf/CommaPrinter.h>
#include <wtf/StringExtras.h>
#include <wtf/StringPrintStream.h>
+#include <wtf/text/UniquedStringImpl.h>
+
+#if ENABLE(JIT)
+#include "RegisterAtOffsetList.h"
+#endif
#if ENABLE(DFG_JIT)
#include "DFGOperations.h"
@@ -70,6 +82,70 @@
namespace JSC {
+const ClassInfo CodeBlock::s_info = {
+ "CodeBlock", 0, 0,
+ CREATE_METHOD_TABLE(CodeBlock)
+};
+
+const ClassInfo FunctionCodeBlock::s_info = {
+ "FunctionCodeBlock", &Base::s_info, 0,
+ CREATE_METHOD_TABLE(FunctionCodeBlock)
+};
+
+#if ENABLE(WEBASSEMBLY)
+const ClassInfo WebAssemblyCodeBlock::s_info = {
+ "WebAssemblyCodeBlock", &Base::s_info, 0,
+ CREATE_METHOD_TABLE(WebAssemblyCodeBlock)
+};
+#endif
+
+const ClassInfo GlobalCodeBlock::s_info = {
+ "GlobalCodeBlock", &Base::s_info, 0,
+ CREATE_METHOD_TABLE(GlobalCodeBlock)
+};
+
+const ClassInfo ProgramCodeBlock::s_info = {
+ "ProgramCodeBlock", &Base::s_info, 0,
+ CREATE_METHOD_TABLE(ProgramCodeBlock)
+};
+
+const ClassInfo ModuleProgramCodeBlock::s_info = {
+ "ModuleProgramCodeBlock", &Base::s_info, 0,
+ CREATE_METHOD_TABLE(ModuleProgramCodeBlock)
+};
+
+const ClassInfo EvalCodeBlock::s_info = {
+ "EvalCodeBlock", &Base::s_info, 0,
+ CREATE_METHOD_TABLE(EvalCodeBlock)
+};
+
+void FunctionCodeBlock::destroy(JSCell* cell)
+{
+ jsCast<FunctionCodeBlock*>(cell)->~FunctionCodeBlock();
+}
+
+#if ENABLE(WEBASSEMBLY)
+void WebAssemblyCodeBlock::destroy(JSCell* cell)
+{
+ jsCast<WebAssemblyCodeBlock*>(cell)->~WebAssemblyCodeBlock();
+}
+#endif
+
+void ProgramCodeBlock::destroy(JSCell* cell)
+{
+ jsCast<ProgramCodeBlock*>(cell)->~ProgramCodeBlock();
+}
+
+void ModuleProgramCodeBlock::destroy(JSCell* cell)
+{
+ jsCast<ModuleProgramCodeBlock*>(cell)->~ModuleProgramCodeBlock();
+}
+
+void EvalCodeBlock::destroy(JSCell* cell)
+{
+ jsCast<EvalCodeBlock*>(cell)->~EvalCodeBlock();
+}
+
CString CodeBlock::inferredName() const
{
switch (codeType()) {
@@ -79,6 +155,8 @@ CString CodeBlock::inferredName() const
return "<eval>";
case FunctionCode:
return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
+ case ModuleCode:
+ return "<module>";
default:
CRASH();
return CString("", 0);
@@ -99,7 +177,7 @@ CodeBlockHash CodeBlock::hash() const
{
if (!m_hash) {
RELEASE_ASSERT(isSafeToComputeHash());
- m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
+ m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
}
return m_hash;
}
@@ -107,7 +185,7 @@ CodeBlockHash CodeBlock::hash() const
CString CodeBlock::sourceCodeForTools() const
{
if (codeType() != FunctionCode)
- return ownerExecutable()->source().toUTF8();
+ return ownerScriptExecutable()->source().toUTF8();
SourceProvider* provider = source();
FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
@@ -119,7 +197,7 @@ CString CodeBlock::sourceCodeForTools() const
unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
return toCString(
"function ",
- provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
+ provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
}
CString CodeBlock::sourceCodeOnOneLine() const
@@ -127,22 +205,38 @@ CString CodeBlock::sourceCodeOnOneLine() const
return reduceWhitespace(sourceCodeForTools());
}
-void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
+CString CodeBlock::hashAsStringIfPossible() const
{
if (hasHash() || isSafeToComputeHash())
- out.print(inferredName(), "#", hash(), ":[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
- else
- out.print(inferredName(), "#<no-hash>:[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
+ return toCString(hash());
+ return "<no-hash>";
+}
+
+void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
+{
+ out.print(inferredName(), "#", hashAsStringIfPossible());
+ out.print(":[", RawPointer(this), "->");
+ if (!!m_alternative)
+ out.print(RawPointer(alternative()), "->");
+ out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
if (codeType() == FunctionCode)
out.print(specializationKind());
out.print(", ", instructionCount());
if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
- out.print(" (SABI)");
- if (ownerExecutable()->neverInline())
+ out.print(" (ShouldAlwaysBeInlined)");
+ if (ownerScriptExecutable()->neverInline())
out.print(" (NeverInline)");
- if (ownerExecutable()->isStrictMode())
+ if (ownerScriptExecutable()->neverOptimize())
+ out.print(" (NeverOptimize)");
+ if (ownerScriptExecutable()->didTryToEnterInLoop())
+ out.print(" (DidTryToEnterInLoop)");
+ if (ownerScriptExecutable()->isStrictMode())
out.print(" (StrictMode)");
+ if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
+ out.print(" (FTLFail)");
+ if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
+ out.print(" (HadFTLReplacement)");
out.print("]");
}
@@ -151,11 +245,6 @@ void CodeBlock::dump(PrintStream& out) const
dumpAssumingJITType(out, jitType());
}
-static CString constantName(int k, JSValue value)
-{
- return toCString(value, "(@k", k - FirstConstantRegisterIndex, ")");
-}
-
static CString idName(int id0, const Identifier& ident)
{
return toCString(ident.impl(), "(@id", id0, ")");
@@ -163,19 +252,16 @@ static CString idName(int id0, const Identifier& ident)
CString CodeBlock::registerName(int r) const
{
- if (r == missingThisObjectMarker())
- return "<null>";
-
if (isConstantRegisterIndex(r))
- return constantName(r, getConstant(r));
+ return constantName(r);
- if (operandIsArgument(r)) {
- if (!VirtualRegister(r).toArgument())
- return "this";
- return toCString("arg", VirtualRegister(r).toArgument());
- }
+ return toCString(VirtualRegister(r));
+}
- return toCString("loc", VirtualRegister(r).toLocal());
+CString CodeBlock::constantName(int index) const
+{
+ JSValue value = getConstant(index);
+ return toCString(value, "(", VirtualRegister(index), ")");
}
static CString regexpToSourceString(RegExp* regExp)
@@ -251,48 +337,14 @@ void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location,
case op_get_by_id:
op = "get_by_id";
break;
- case op_get_by_id_out_of_line:
- op = "get_by_id_out_of_line";
- break;
- case op_get_by_id_self:
- op = "get_by_id_self";
- break;
- case op_get_by_id_proto:
- op = "get_by_id_proto";
- break;
- case op_get_by_id_chain:
- op = "get_by_id_chain";
- break;
- case op_get_by_id_getter_self:
- op = "get_by_id_getter_self";
- break;
- case op_get_by_id_getter_proto:
- op = "get_by_id_getter_proto";
- break;
- case op_get_by_id_getter_chain:
- op = "get_by_id_getter_chain";
- break;
- case op_get_by_id_custom_self:
- op = "get_by_id_custom_self";
- break;
- case op_get_by_id_custom_proto:
- op = "get_by_id_custom_proto";
- break;
- case op_get_by_id_custom_chain:
- op = "get_by_id_custom_chain";
- break;
- case op_get_by_id_generic:
- op = "get_by_id_generic";
- break;
case op_get_array_length:
op = "array_length";
break;
- case op_get_string_length:
- op = "string_length";
- break;
default:
RELEASE_ASSERT_NOT_REACHED();
+#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
op = 0;
+#endif
}
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
@@ -302,22 +354,19 @@ void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location,
it += 4; // Increment up to the value profiler.
}
-#if ENABLE(JIT) || ENABLE(LLINT) // unused in some configurations
-static void dumpStructure(PrintStream& out, const char* name, ExecState* exec, Structure* structure, const Identifier& ident)
+static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
{
if (!structure)
return;
out.printf("%s = %p", name, structure);
- PropertyOffset offset = structure->getConcurrently(exec->vm(), ident.impl());
+ PropertyOffset offset = structure->getConcurrently(ident.impl());
if (offset != invalidOffset)
out.printf(" (offset = %d)", offset);
}
-#endif
-#if ENABLE(JIT) // unused when not ENABLE(JIT), leading to silly warnings
-static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain, const Identifier& ident)
+static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
{
out.printf("chain = %p: [", chain);
bool first = true;
@@ -328,11 +377,10 @@ static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain,
first = false;
else
out.printf(", ");
- dumpStructure(out, "struct", exec, currentStructure->get(), ident);
+ dumpStructure(out, "struct", currentStructure->get(), ident);
}
out.printf("]");
}
-#endif
void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
{
@@ -342,116 +390,117 @@ void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int l
UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
-#if ENABLE(LLINT)
if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
out.printf(" llint(array_length)");
- else if (Structure* structure = instruction[4].u.structure.get()) {
+ else if (StructureID structureID = instruction[4].u.structureID) {
+ Structure* structure = m_vm->heap.structureIDTable().get(structureID);
out.printf(" llint(");
- dumpStructure(out, "struct", exec, structure, ident);
+ dumpStructure(out, "struct", structure, ident);
out.printf(")");
}
-#endif
#if ENABLE(JIT)
if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
StructureStubInfo& stubInfo = *stubPtr;
- if (stubInfo.seen) {
- out.printf(" jit(");
-
- Structure* baseStructure = 0;
- Structure* prototypeStructure = 0;
- StructureChain* chain = 0;
- PolymorphicAccessStructureList* structureList = 0;
- int listSize = 0;
-
- switch (stubInfo.accessType) {
- case access_get_by_id_self:
- out.printf("self");
- baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
- break;
- case access_get_by_id_proto:
- out.printf("proto");
- baseStructure = stubInfo.u.getByIdProto.baseObjectStructure.get();
- prototypeStructure = stubInfo.u.getByIdProto.prototypeStructure.get();
- break;
- case access_get_by_id_chain:
- out.printf("chain");
- baseStructure = stubInfo.u.getByIdChain.baseObjectStructure.get();
- chain = stubInfo.u.getByIdChain.chain.get();
- break;
- case access_get_by_id_self_list:
- out.printf("self_list");
- structureList = stubInfo.u.getByIdSelfList.structureList;
- listSize = stubInfo.u.getByIdSelfList.listSize;
- break;
- case access_get_by_id_proto_list:
- out.printf("proto_list");
- structureList = stubInfo.u.getByIdProtoList.structureList;
- listSize = stubInfo.u.getByIdProtoList.listSize;
- break;
- case access_unset:
- out.printf("unset");
- break;
- case access_get_by_id_generic:
- out.printf("generic");
- break;
- case access_get_array_length:
- out.printf("array_length");
- break;
- case access_get_string_length:
- out.printf("string_length");
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-
- if (baseStructure) {
- out.printf(", ");
- dumpStructure(out, "struct", exec, baseStructure, ident);
- }
+ if (stubInfo.resetByGC)
+ out.print(" (Reset By GC)");
+
+ out.printf(" jit(");
- if (prototypeStructure) {
- out.printf(", ");
- dumpStructure(out, "prototypeStruct", exec, baseStructure, ident);
- }
+ Structure* baseStructure = nullptr;
+ PolymorphicAccess* stub = nullptr;
- if (chain) {
- out.printf(", ");
- dumpChain(out, exec, chain, ident);
- }
+ switch (stubInfo.cacheType) {
+ case CacheType::GetByIdSelf:
+ out.printf("self");
+ baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get();
+ break;
+ case CacheType::Stub:
+ out.printf("stub");
+ stub = stubInfo.u.stub;
+ break;
+ case CacheType::Unset:
+ out.printf("unset");
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
- if (structureList) {
- out.printf(", list = %p: [", structureList);
- for (int i = 0; i < listSize; ++i) {
- if (i)
- out.printf(", ");
- out.printf("(");
- dumpStructure(out, "base", exec, structureList->list[i].base.get(), ident);
- if (structureList->list[i].isChain) {
- if (structureList->list[i].u.chain.get()) {
- out.printf(", ");
- dumpChain(out, exec, structureList->list[i].u.chain.get(), ident);
- }
- } else {
- if (structureList->list[i].u.proto.get()) {
- out.printf(", ");
- dumpStructure(out, "proto", exec, structureList->list[i].u.proto.get(), ident);
- }
- }
- out.printf(")");
- }
- out.printf("]");
+ if (baseStructure) {
+ out.printf(", ");
+ dumpStructure(out, "struct", baseStructure, ident);
+ }
+
+ if (stub)
+ out.print(", ", *stub);
+
+ out.printf(")");
+ }
+#else
+ UNUSED_PARAM(map);
+#endif
+}
+
+void CodeBlock::printPutByIdCacheStatus(PrintStream& out, int location, const StubInfoMap& map)
+{
+ Instruction* instruction = instructions().begin() + location;
+
+ const Identifier& ident = identifier(instruction[2].u.operand);
+
+ UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
+
+ out.print(", ", instruction[8].u.putByIdFlags);
+
+ if (StructureID structureID = instruction[4].u.structureID) {
+ Structure* structure = m_vm->heap.structureIDTable().get(structureID);
+ out.print(" llint(");
+ if (StructureID newStructureID = instruction[6].u.structureID) {
+ Structure* newStructure = m_vm->heap.structureIDTable().get(newStructureID);
+ dumpStructure(out, "prev", structure, ident);
+ out.print(", ");
+ dumpStructure(out, "next", newStructure, ident);
+ if (StructureChain* chain = instruction[7].u.structureChain.get()) {
+ out.print(", ");
+ dumpChain(out, chain, ident);
}
- out.printf(")");
+ } else
+ dumpStructure(out, "struct", structure, ident);
+ out.print(")");
+ }
+
+#if ENABLE(JIT)
+ if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
+ StructureStubInfo& stubInfo = *stubPtr;
+ if (stubInfo.resetByGC)
+ out.print(" (Reset By GC)");
+
+ out.printf(" jit(");
+
+ switch (stubInfo.cacheType) {
+ case CacheType::PutByIdReplace:
+ out.print("replace, ");
+ dumpStructure(out, "struct", stubInfo.u.byIdSelf.baseObjectStructure.get(), ident);
+ break;
+ case CacheType::Stub: {
+ out.print("stub, ", *stubInfo.u.stub);
+ break;
}
+ case CacheType::Unset:
+ out.printf("unset");
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ out.printf(")");
}
#else
UNUSED_PARAM(map);
#endif
}
-void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling)
+void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
{
int dst = (++it)->u.operand;
int func = (++it)->u.operand;
@@ -460,7 +509,6 @@ void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, con
printLocationAndOp(out, exec, location, it, op);
out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
if (cacheDumpMode == DumpCaches) {
-#if ENABLE(LLINT)
LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
if (callLinkInfo->lastSeenCallee) {
out.printf(
@@ -468,17 +516,21 @@ void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, con
callLinkInfo->lastSeenCallee.get(),
callLinkInfo->lastSeenCallee->executable());
}
-#endif
#if ENABLE(JIT)
- if (numberOfCallLinkInfos()) {
- JSFunction* target = getCallLinkInfo(location).lastSeenCallee.get();
+ if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
+ JSFunction* target = info->lastSeenCallee();
if (target)
out.printf(" jit(%p, exec %p)", target, target->executable());
}
+
+ if (jitType() != JITCode::FTLJIT)
+ out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
+#else
+ UNUSED_PARAM(map);
#endif
- out.print(" status(", CallLinkStatus::computeFor(this, location), ")");
}
++it;
+ ++it;
dumpArrayProfiling(out, it, hasPrintedProfiling);
dumpValueProfiling(out, it, hasPrintedProfiling);
}
@@ -493,6 +545,31 @@ void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location,
it += 5;
}
+void CodeBlock::dumpSource()
+{
+ dumpSource(WTF::dataFile());
+}
+
+void CodeBlock::dumpSource(PrintStream& out)
+{
+ ScriptExecutable* executable = ownerScriptExecutable();
+ if (executable->isFunctionExecutable()) {
+ FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
+ StringView source = functionExecutable->source().provider()->getRange(
+ functionExecutable->parametersStartOffset(),
+ functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
+
+ out.print("function ", inferredName(), source);
+ return;
+ }
+ out.print(executable->source().view());
+}
+
+void CodeBlock::dumpBytecode()
+{
+ dumpBytecode(WTF::dataFile());
+}
+
void CodeBlock::dumpBytecode(PrintStream& out)
{
// We only use the ExecState* for things that don't actually lead to JS execution,
@@ -509,34 +586,18 @@ void CodeBlock::dumpBytecode(PrintStream& out)
": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
static_cast<unsigned long>(instructions().size()),
static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
- m_numParameters, m_numCalleeRegisters, m_numVars);
- if (symbolTable() && symbolTable()->captureCount()) {
- out.printf(
- "; %d captured var(s) (from r%d to r%d, inclusive)",
- symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() + 1);
- }
- if (usesArguments()) {
- out.printf(
- "; uses arguments, in r%d, r%d",
- argumentsRegister().offset(),
- unmodifiedArgumentsRegister(argumentsRegister()).offset());
- }
- if (needsFullScopeChain() && codeType() == FunctionCode)
- out.printf("; activation in r%d", activationRegister().offset());
+ m_numParameters, m_numCalleeLocals, m_numVars);
out.printf("\n");
StubInfoMap stubInfos;
-#if ENABLE(JIT)
- {
- ConcurrentJITLocker locker(m_lock);
- getStubInfoMap(locker, stubInfos);
- }
-#endif
+ CallLinkInfoMap callLinkInfos;
+ getStubInfoMap(stubInfos);
+ getCallLinkInfoMap(callLinkInfos);
const Instruction* begin = instructions().begin();
const Instruction* end = instructions().end();
for (const Instruction* it = begin; it != end; ++it)
- dumpBytecode(out, exec, begin, it, stubInfos);
+ dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);
if (numberOfIdentifiers()) {
out.printf("\nIdentifiers:\n");
@@ -551,7 +612,19 @@ void CodeBlock::dumpBytecode(PrintStream& out)
out.printf("\nConstants:\n");
size_t i = 0;
do {
- out.printf(" k%u = %s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data());
+ const char* sourceCodeRepresentationDescription = nullptr;
+ switch (m_constantsSourceCodeRepresentation[i]) {
+ case SourceCodeRepresentation::Double:
+ sourceCodeRepresentationDescription = ": in source as double";
+ break;
+ case SourceCodeRepresentation::Integer:
+ sourceCodeRepresentationDescription = ": in source as integer";
+ break;
+ case SourceCodeRepresentation::Other:
+ sourceCodeRepresentationDescription = "";
+ break;
+ }
+ out.printf(" k%u = %s%s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription);
++i;
} while (i < m_constantRegisters.size());
}
@@ -565,14 +638,7 @@ void CodeBlock::dumpBytecode(PrintStream& out)
} while (i < count);
}
- if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
- out.printf("\nException Handlers:\n");
- unsigned i = 0;
- do {
- out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target, m_rareData->m_exceptionHandlers[i].scopeDepth);
- ++i;
- } while (i < m_rareData->m_exceptionHandlers.size());
- }
+ dumpExceptionHandlers(out);
if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
out.printf("Switch Jump Tables:\n");
@@ -598,15 +664,41 @@ void CodeBlock::dumpBytecode(PrintStream& out)
out.printf(" %1d = {\n", i);
StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
- out.printf("\t\t\"%s\" => %04d\n", String(iter->key).utf8().data(), iter->value.branchOffset);
+ out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
out.printf(" }\n");
++i;
} while (i < m_rareData->m_stringSwitchJumpTables.size());
}
+ if (m_rareData && !m_rareData->m_liveCalleeLocalsAtYield.isEmpty()) {
+ out.printf("\nLive Callee Locals:\n");
+ unsigned i = 0;
+ do {
+ const FastBitVector& liveness = m_rareData->m_liveCalleeLocalsAtYield[i];
+ out.printf(" live%1u = ", i);
+ liveness.dump(out);
+ out.printf("\n");
+ ++i;
+ } while (i < m_rareData->m_liveCalleeLocalsAtYield.size());
+ }
+
out.printf("\n");
}
+void CodeBlock::dumpExceptionHandlers(PrintStream& out)
+{
+ if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
+ out.printf("\nException Handlers:\n");
+ unsigned i = 0;
+ do {
+ HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+ out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n",
+ i + 1, handler.start, handler.end, handler.target, handler.typeName());
+ ++i;
+ } while (i < m_rareData->m_exceptionHandlers.size());
+ }
+}
+
void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
{
if (hasPrintedProfiling) {
@@ -653,52 +745,100 @@ void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCase
out.print(name, profile->m_counter);
}
-void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it, const StubInfoMap& map)
+void CodeBlock::dumpResultProfile(PrintStream& out, ResultProfile* profile, bool& hasPrintedProfiling)
+{
+ if (!profile)
+ return;
+
+ beginDumpProfiling(out, hasPrintedProfiling);
+ out.print("results: ", *profile);
+}
+
+void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
+{
+ out.printf("[%4d] %-17s ", location, op);
+}
+
+void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
+{
+ printLocationAndOp(out, exec, location, it, op);
+ out.printf("%s", registerName(operand).data());
+}
+
+void CodeBlock::dumpBytecode(
+ PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
+ const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
int location = it - begin;
bool hasPrintedProfiling = false;
- switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
+ OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
+ switch (opcode) {
case op_enter: {
printLocationAndOp(out, exec, location, it, "enter");
break;
}
- case op_touch_entry: {
- printLocationAndOp(out, exec, location, it, "touch_entry");
+ case op_get_scope: {
+ int r0 = (++it)->u.operand;
+ printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
break;
}
- case op_create_activation: {
+ case op_create_direct_arguments: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "create_activation", r0);
+ printLocationAndOp(out, exec, location, it, "create_direct_arguments");
+ out.printf("%s", registerName(r0).data());
break;
}
- case op_create_arguments: {
+ case op_create_scoped_arguments: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "create_arguments", r0);
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "create_scoped_arguments");
+ out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
break;
}
- case op_init_lazy_reg: {
+ case op_create_out_of_band_arguments: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "init_lazy_reg", r0);
+ printLocationAndOp(out, exec, location, it, "create_out_of_band_arguments");
+ out.printf("%s", registerName(r0).data());
break;
}
- case op_get_callee: {
+ case op_copy_rest: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "get_callee", r0);
- ++it;
+ int r1 = (++it)->u.operand;
+ unsigned argumentOffset = (++it)->u.unsignedValue;
+ printLocationAndOp(out, exec, location, it, "copy_rest");
+ out.printf("%s, %s, ", registerName(r0).data(), registerName(r1).data());
+ out.printf("ArgumentsOffset: %u", argumentOffset);
+ break;
+ }
+ case op_get_rest_length: {
+ int r0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "get_rest_length");
+ out.printf("%s, ", registerName(r0).data());
+ unsigned argumentOffset = (++it)->u.unsignedValue;
+ out.printf("ArgumentsOffset: %u", argumentOffset);
break;
}
case op_create_this: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
unsigned inferredInlineCapacity = (++it)->u.operand;
+ unsigned cachedFunction = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "create_this");
- out.printf("%s, %s, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity);
+ out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction);
break;
}
case op_to_this: {
int r0 = (++it)->u.operand;
printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
- ++it; // Skip value profile.
+ Structure* structure = (++it)->u.structure.get();
+ if (structure)
+ out.print(", cache(struct = ", RawPointer(structure), ")");
+ out.print(", ", (++it)->u.toThisStatus);
+ break;
+ }
+ case op_check_tdz: {
+ int r0 = (++it)->u.operand;
+ printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0);
break;
}
case op_new_object: {
@@ -753,12 +893,20 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
break;
}
- case op_captured_mov: {
+ case op_profile_type: {
int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "captured_mov");
- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
++it;
+ ++it;
+ ++it;
+ ++it;
+ printLocationAndOp(out, exec, location, it, "op_profile_type");
+ out.printf("%s", registerName(r0).data());
+ break;
+ }
+ case op_profile_control_flow: {
+ BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
+ printLocationAndOp(out, exec, location, it, "profile_control_flow");
+ out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
break;
}
case op_not: {
@@ -819,6 +967,10 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
printUnaryOp(out, exec, location, it, "to_number");
break;
}
+ case op_to_string: {
+ printUnaryOp(out, exec, location, it, "to_string");
+ break;
+ }
case op_negate: {
printUnaryOp(out, exec, location, it, "negate");
break;
@@ -874,13 +1026,12 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
++it;
break;
}
- case op_check_has_instance: {
+ case op_overrides_has_instance: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- int offset = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "check_has_instance");
- out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
+ printLocationAndOp(out, exec, location, it, "overrides_has_instance");
+ out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
break;
}
case op_instanceof: {
@@ -891,6 +1042,15 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
break;
}
+ case op_instanceof_custom: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ int r3 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "instanceof_custom");
+ out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
+ break;
+ }
case op_unsigned: {
printUnaryOp(out, exec, location, it, "unsigned");
break;
@@ -919,6 +1079,10 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
printUnaryOp(out, exec, location, it, "is_object");
break;
}
+ case op_is_object_or_null: {
+ printUnaryOp(out, exec, location, it, "is_object_or_null");
+ break;
+ }
case op_is_function: {
printUnaryOp(out, exec, location, it, "is_function");
break;
@@ -927,90 +1091,62 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
printBinaryOp(out, exec, location, it, "in");
break;
}
- case op_init_global_const_nop: {
- printLocationAndOp(out, exec, location, it, "init_global_const_nop");
- it++;
- it++;
- it++;
- it++;
- break;
- }
- case op_init_global_const: {
- WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
- int r0 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "init_global_const");
- out.printf("g%d(%p), %s", m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(r0).data());
- it++;
- it++;
- break;
- }
case op_get_by_id:
- case op_get_by_id_out_of_line:
- case op_get_by_id_self:
- case op_get_by_id_proto:
- case op_get_by_id_chain:
- case op_get_by_id_getter_self:
- case op_get_by_id_getter_proto:
- case op_get_by_id_getter_chain:
- case op_get_by_id_custom_self:
- case op_get_by_id_custom_proto:
- case op_get_by_id_custom_chain:
- case op_get_by_id_generic:
- case op_get_array_length:
- case op_get_string_length: {
+ case op_get_array_length: {
printGetByIdOp(out, exec, location, it);
- printGetByIdCacheStatus(out, exec, location, map);
+ printGetByIdCacheStatus(out, exec, location, stubInfos);
dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
- case op_get_arguments_length: {
- printUnaryOp(out, exec, location, it, "get_arguments_length");
- it++;
- break;
- }
case op_put_by_id: {
printPutByIdOp(out, exec, location, it, "put_by_id");
+ printPutByIdCacheStatus(out, location, stubInfos);
break;
}
- case op_put_by_id_out_of_line: {
- printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
- break;
- }
- case op_put_by_id_replace: {
- printPutByIdOp(out, exec, location, it, "put_by_id_replace");
- break;
- }
- case op_put_by_id_transition: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition");
- break;
- }
- case op_put_by_id_transition_direct: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
- break;
- }
- case op_put_by_id_transition_direct_out_of_line: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
+ case op_put_getter_by_id: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_getter_by_id");
+ out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
break;
}
- case op_put_by_id_transition_normal: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
+ case op_put_setter_by_id: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_setter_by_id");
+ out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
break;
}
- case op_put_by_id_transition_normal_out_of_line: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
+ case op_put_getter_setter_by_id: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_getter_setter_by_id");
+ out.printf("%s, %s, %d, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data(), registerName(r2).data());
break;
}
- case op_put_by_id_generic: {
- printPutByIdOp(out, exec, location, it, "put_by_id_generic");
+ case op_put_getter_by_val: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_getter_by_val");
+ out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
break;
}
- case op_put_getter_setter: {
+ case op_put_setter_by_val: {
int r0 = (++it)->u.operand;
- int id0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "put_getter_setter");
- out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
+ printLocationAndOp(out, exec, location, it, "put_setter_by_val");
+ out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
break;
}
case op_del_by_id: {
@@ -1031,27 +1167,6 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
- case op_get_argument_by_val: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- int r2 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "get_argument_by_val");
- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
- ++it;
- dumpValueProfiling(out, it, hasPrintedProfiling);
- break;
- }
- case op_get_by_pname: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- int r2 = (++it)->u.operand;
- int r3 = (++it)->u.operand;
- int r4 = (++it)->u.operand;
- int r5 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "get_by_pname");
- out.printf("%s, %s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data(), registerName(r5).data());
- break;
- }
case op_put_by_val: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
@@ -1184,6 +1299,10 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
printLocationAndOp(out, exec, location, it, "loop_hint");
break;
}
+ case op_watchdog: {
+ printLocationAndOp(out, exec, location, it, "watchdog");
+ break;
+ }
case op_switch_imm: {
int tableIndex = (++it)->u.operand;
int defaultTarget = (++it)->u.operand;
@@ -1210,73 +1329,80 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
}
case op_new_func: {
int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
int f0 = (++it)->u.operand;
- int shouldCheck = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "new_func");
- out.printf("%s, f%d, %s", registerName(r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
break;
}
- case op_new_captured_func: {
+ case op_new_generator_func: {
int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
int f0 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "new_captured_func");
- out.printf("%s, f%d", registerName(r0).data(), f0);
- ++it;
+ printLocationAndOp(out, exec, location, it, "new_generator_func");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+ break;
+ }
+ case op_new_arrow_func_exp: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int f0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "op_new_arrow_func_exp");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
break;
}
case op_new_func_exp: {
int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
int f0 = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "new_func_exp");
- out.printf("%s, f%d", registerName(r0).data(), f0);
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+ break;
+ }
+ case op_new_generator_func_exp: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int f0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "new_generator_func_exp");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
break;
}
case op_call: {
- printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling);
+ printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
+ break;
+ }
+ case op_tail_call: {
+ printCallOp(out, exec, location, it, "tail_call", DumpCaches, hasPrintedProfiling, callLinkInfos);
break;
}
case op_call_eval: {
- printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling);
+ printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
break;
}
- case op_call_varargs: {
+
+ case op_construct_varargs:
+ case op_call_varargs:
+ case op_tail_call_varargs: {
int result = (++it)->u.operand;
int callee = (++it)->u.operand;
int thisValue = (++it)->u.operand;
int arguments = (++it)->u.operand;
int firstFreeRegister = (++it)->u.operand;
+ int varArgOffset = (++it)->u.operand;
++it;
- printLocationAndOp(out, exec, location, it, "call_varargs");
- out.printf("%s, %s, %s, %s, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister);
+ printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : opcode == op_construct_varargs ? "construct_varargs" : "tail_call_varargs");
+ out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
- case op_tear_off_activation: {
- int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "tear_off_activation", r0);
- break;
- }
- case op_tear_off_arguments: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "tear_off_arguments");
- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
- break;
- }
+
case op_ret: {
int r0 = (++it)->u.operand;
printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
break;
}
- case op_ret_object_or_this: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "constructor_ret");
- out.printf("%s %s", registerName(r0).data(), registerName(r1).data());
- break;
- }
case op_construct: {
- printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling);
+ printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
break;
}
case op_strcat: {
@@ -1294,49 +1420,120 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
break;
}
- case op_get_pnames: {
- int r0 = it[1].u.operand;
- int r1 = it[2].u.operand;
- int r2 = it[3].u.operand;
- int r3 = it[4].u.operand;
- int offset = it[5].u.operand;
- printLocationAndOp(out, exec, location, it, "get_pnames");
- out.printf("%s, %s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), offset, location + offset);
- it += OPCODE_LENGTH(op_get_pnames) - 1;
+ case op_get_enumerable_length: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_get_enumerable_length");
+ out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
+ it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
break;
}
- case op_next_pname: {
- int dest = it[1].u.operand;
+ case op_has_indexed_property: {
+ int dst = it[1].u.operand;
int base = it[2].u.operand;
- int i = it[3].u.operand;
- int size = it[4].u.operand;
- int iter = it[5].u.operand;
- int offset = it[6].u.operand;
- printLocationAndOp(out, exec, location, it, "next_pname");
- out.printf("%s, %s, %s, %s, %s, %d(->%d)", registerName(dest).data(), registerName(base).data(), registerName(i).data(), registerName(size).data(), registerName(iter).data(), offset, location + offset);
- it += OPCODE_LENGTH(op_next_pname) - 1;
+ int propertyName = it[3].u.operand;
+ ArrayProfile* arrayProfile = it[4].u.arrayProfile;
+ printLocationAndOp(out, exec, location, it, "op_has_indexed_property");
+ out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
+ it += OPCODE_LENGTH(op_has_indexed_property) - 1;
+ break;
+ }
+ case op_has_structure_property: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ int propertyName = it[3].u.operand;
+ int enumerator = it[4].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_has_structure_property");
+ out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
+ it += OPCODE_LENGTH(op_has_structure_property) - 1;
+ break;
+ }
+ case op_has_generic_property: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ int propertyName = it[3].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_has_generic_property");
+ out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
+ it += OPCODE_LENGTH(op_has_generic_property) - 1;
+ break;
+ }
+ case op_get_direct_pname: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ int propertyName = it[3].u.operand;
+ int index = it[4].u.operand;
+ int enumerator = it[5].u.operand;
+ ValueProfile* profile = it[6].u.profile;
+ printLocationAndOp(out, exec, location, it, "op_get_direct_pname");
+ out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
+ it += OPCODE_LENGTH(op_get_direct_pname) - 1;
+ break;
+
+ }
+ case op_get_property_enumerator: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_get_property_enumerator");
+ out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
+ it += OPCODE_LENGTH(op_get_property_enumerator) - 1;
+ break;
+ }
+ case op_enumerator_structure_pname: {
+ int dst = it[1].u.operand;
+ int enumerator = it[2].u.operand;
+ int index = it[3].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname");
+ out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
+ it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1;
+ break;
+ }
+ case op_enumerator_generic_pname: {
+ int dst = it[1].u.operand;
+ int enumerator = it[2].u.operand;
+ int index = it[3].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname");
+ out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
+ it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1;
+ break;
+ }
+ case op_to_index_string: {
+ int dst = it[1].u.operand;
+ int index = it[2].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_to_index_string");
+ out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
+ it += OPCODE_LENGTH(op_to_index_string) - 1;
break;
}
case op_push_with_scope: {
- int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "push_with_scope", r0);
+ int dst = (++it)->u.operand;
+ int newScope = (++it)->u.operand;
+ int currentScope = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "push_with_scope");
+ out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data());
break;
}
- case op_pop_scope: {
- printLocationAndOp(out, exec, location, it, "pop_scope");
+ case op_get_parent_scope: {
+ int dst = (++it)->u.operand;
+ int parentScope = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "get_parent_scope");
+ out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data());
break;
}
- case op_push_name_scope: {
- int id0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- unsigned attributes = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "push_name_scope");
- out.printf("%s, %s, %u", idName(id0, identifier(id0)).data(), registerName(r1).data(), attributes);
+ case op_create_lexical_environment: {
+ int dst = (++it)->u.operand;
+ int scope = (++it)->u.operand;
+ int symbolTable = (++it)->u.operand;
+ int initialValue = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "create_lexical_environment");
+ out.printf("%s, %s, %s, %s",
+ registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data());
break;
}
case op_catch: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "catch", r0);
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "catch");
+ out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
break;
}
case op_throw: {
@@ -1348,14 +1545,42 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
int k0 = (++it)->u.operand;
int k1 = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "throw_static_error");
- out.printf("%s, %s", constantName(k0, getConstant(k0)).data(), k1 ? "true" : "false");
+ out.printf("%s, %s", constantName(k0).data(), k1 ? "true" : "false");
break;
}
case op_debug: {
int debugHookID = (++it)->u.operand;
int hasBreakpointFlag = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "debug");
- out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag);
+ out.printf("%s, %d", debugHookName(debugHookID), hasBreakpointFlag);
+ break;
+ }
+ case op_save: {
+ int generator = (++it)->u.operand;
+ unsigned liveCalleeLocalsIndex = (++it)->u.unsignedValue;
+ int offset = (++it)->u.operand;
+ const FastBitVector& liveness = m_rareData->m_liveCalleeLocalsAtYield[liveCalleeLocalsIndex];
+ printLocationAndOp(out, exec, location, it, "save");
+ out.printf("%s, ", registerName(generator).data());
+ liveness.dump(out);
+ out.printf("(@live%1u), %d(->%d)", liveCalleeLocalsIndex, offset, location + offset);
+ break;
+ }
+ case op_resume: {
+ int generator = (++it)->u.operand;
+ unsigned liveCalleeLocalsIndex = (++it)->u.unsignedValue;
+ const FastBitVector& liveness = m_rareData->m_liveCalleeLocalsAtYield[liveCalleeLocalsIndex];
+ printLocationAndOp(out, exec, location, it, "resume");
+ out.printf("%s, ", registerName(generator).data());
+ liveness.dump(out);
+ out.printf("(@live%1u)", liveCalleeLocalsIndex);
+ break;
+ }
+ case op_assert: {
+ int condition = (++it)->u.operand;
+ int line = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "assert");
+ out.printf("%s, %d", registerName(condition).data(), line);
break;
}
case op_profile_will_call: {
@@ -1375,45 +1600,71 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
}
case op_resolve_scope: {
int r0 = (++it)->u.operand;
+ int scope = (++it)->u.operand;
int id0 = (++it)->u.operand;
- int resolveModeAndType = (++it)->u.operand;
- ++it; // depth
+ ResolveType resolveType = static_cast<ResolveType>((++it)->u.operand);
+ int depth = (++it)->u.operand;
+ void* pointer = (++it)->u.pointer;
printLocationAndOp(out, exec, location, it, "resolve_scope");
- out.printf("%s, %s, %d", registerName(r0).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
- ++it;
+ out.printf("%s, %s, %s, <%s>, %d, %p", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(), resolveTypeName(resolveType), depth, pointer);
break;
}
case op_get_from_scope: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- int resolveModeAndType = (++it)->u.operand;
+ GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
++it; // Structure
- ++it; // Operand
- ++it; // Skip value profile.
+ int operand = (++it)->u.operand; // Operand
printLocationAndOp(out, exec, location, it, "get_from_scope");
- out.printf("%s, %s, %s, %d", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
+ out.print(registerName(r0), ", ", registerName(r1));
+ if (static_cast<unsigned>(id0) == UINT_MAX)
+ out.print(", anonymous");
+ else
+ out.print(", ", idName(id0, identifier(id0)));
+ out.print(", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, ", operand);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
case op_put_to_scope: {
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- int resolveModeAndType = (++it)->u.operand;
+ GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
++it; // Structure
- ++it; // Operand
+ int operand = (++it)->u.operand; // Operand
printLocationAndOp(out, exec, location, it, "put_to_scope");
- out.printf("%s, %s, %s, %d", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), resolveModeAndType);
+ out.print(registerName(r0));
+ if (static_cast<unsigned>(id0) == UINT_MAX)
+ out.print(", anonymous");
+ else
+ out.print(", ", idName(id0, identifier(id0)));
+ out.print(", ", registerName(r1), ", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, <structure>, ", operand);
+ break;
+ }
+ case op_get_from_arguments: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "get_from_arguments");
+ out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
+ break;
+ }
+ case op_put_to_arguments: {
+ int r0 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_to_arguments");
+ out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
break;
}
-#if ENABLE(LLINT_C_LOOP)
default:
RELEASE_ASSERT_NOT_REACHED();
-#endif
}
dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
- dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
+ dumpResultProfile(out, resultProfileForBytecodeOffset(location), hasPrintedProfiling);
#if ENABLE(DFG_JIT)
Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
@@ -1421,7 +1672,7 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
out.print(" !! frequent exits: ");
CommaPrinter comma;
for (unsigned i = 0; i < exitSites.size(); ++i)
- out.print(comma, exitSites[i].kind());
+ out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType());
}
#else // ENABLE(DFG_JIT)
UNUSED_PARAM(location);
@@ -1429,11 +1680,13 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
out.print("\n");
}
-void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset)
+void CodeBlock::dumpBytecode(
+ PrintStream& out, unsigned bytecodeOffset,
+ const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
ExecState* exec = m_globalObject->globalExec();
const Instruction* it = instructions().begin() + bytecodeOffset;
- dumpBytecode(out, exec, instructions().begin(), it);
+ dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
}
#define FOR_EACH_MEMBER_VECTOR(macro) \
@@ -1461,46 +1714,76 @@ static size_t sizeInBytes(const Vector<T>& vector)
return vector.capacity() * sizeof(T);
}
-CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
- : m_globalObject(other.m_globalObject)
- , m_heap(other.m_heap)
- , m_numCalleeRegisters(other.m_numCalleeRegisters)
+namespace {
+
+class PutToScopeFireDetail : public FireDetail {
+public:
+ PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
+ : m_codeBlock(codeBlock)
+ , m_ident(ident)
+ {
+ }
+
+ virtual void dump(PrintStream& out) const override
+ {
+ out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
+ }
+
+private:
+ CodeBlock* m_codeBlock;
+ const Identifier& m_ident;
+};
+
+} // anonymous namespace
+
+CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
+ : JSCell(*vm, structure)
+ , m_globalObject(other.m_globalObject)
+ , m_numCalleeLocals(other.m_numCalleeLocals)
, m_numVars(other.m_numVars)
- , m_isConstructor(other.m_isConstructor)
, m_shouldAlwaysBeInlined(true)
+#if ENABLE(JIT)
+ , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
+#endif
, m_didFailFTLCompilation(false)
- , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
+ , m_hasBeenCompiledWithFTL(false)
+ , m_isConstructor(other.m_isConstructor)
+ , m_isStrictMode(other.m_isStrictMode)
+ , m_codeType(other.m_codeType)
+ , m_unlinkedCode(*other.m_vm, this, other.m_unlinkedCode.get())
+ , m_hasDebuggerStatement(false)
, m_steppingMode(SteppingModeDisabled)
, m_numBreakpoints(0)
- , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
+ , m_ownerExecutable(*other.m_vm, this, other.m_ownerExecutable.get())
, m_vm(other.m_vm)
, m_instructions(other.m_instructions)
, m_thisRegister(other.m_thisRegister)
- , m_argumentsRegister(other.m_argumentsRegister)
- , m_activationRegister(other.m_activationRegister)
- , m_isStrictMode(other.m_isStrictMode)
- , m_needsActivation(other.m_needsActivation)
+ , m_scopeRegister(other.m_scopeRegister)
+ , m_hash(other.m_hash)
, m_source(other.m_source)
, m_sourceOffset(other.m_sourceOffset)
, m_firstLineColumnOffset(other.m_firstLineColumnOffset)
- , m_codeType(other.m_codeType)
, m_constantRegisters(other.m_constantRegisters)
+ , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
, m_functionDecls(other.m_functionDecls)
, m_functionExprs(other.m_functionExprs)
, m_osrExitCounter(0)
, m_optimizationDelayCounter(0)
, m_reoptimizationRetryCounter(0)
- , m_hash(other.m_hash)
-#if ENABLE(JIT)
- , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
-#endif
+ , m_creationTime(std::chrono::steady_clock::now())
{
- ASSERT(m_heap->isDeferred());
-
- if (SymbolTable* symbolTable = other.symbolTable())
- m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
-
+ m_visitWeaklyHasBeenCalled.store(false, std::memory_order_relaxed);
+
+ ASSERT(heap()->isDeferred());
+ ASSERT(m_scopeRegister.isLocal());
+
setNumParameters(other.numParameters());
+}
+
+void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
+{
+ Base::finishCreation(vm);
+
optimizeAfterWarmUp();
jitAfterWarmUp();
@@ -1511,89 +1794,121 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
+ m_rareData->m_liveCalleeLocalsAtYield = other.m_rareData->m_liveCalleeLocalsAtYield;
}
- m_heap->m_codeBlocks.add(this);
- m_heap->reportExtraMemoryCost(sizeof(CodeBlock));
+ heap()->m_codeBlocks.add(this);
}
-CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
- : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
- , m_heap(&m_globalObject->vm().heap)
- , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
+CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+ : JSCell(*vm, structure)
+ , m_globalObject(scope->globalObject()->vm(), this, scope->globalObject())
+ , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals)
, m_numVars(unlinkedCodeBlock->m_numVars)
- , m_isConstructor(unlinkedCodeBlock->isConstructor())
, m_shouldAlwaysBeInlined(true)
+#if ENABLE(JIT)
+ , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
+#endif
, m_didFailFTLCompilation(false)
- , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
+ , m_hasBeenCompiledWithFTL(false)
+ , m_isConstructor(unlinkedCodeBlock->isConstructor())
+ , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
+ , m_codeType(unlinkedCodeBlock->codeType())
+ , m_unlinkedCode(m_globalObject->vm(), this, unlinkedCodeBlock)
+ , m_hasDebuggerStatement(false)
, m_steppingMode(SteppingModeDisabled)
, m_numBreakpoints(0)
- , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
+ , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable)
, m_vm(unlinkedCodeBlock->vm())
, m_thisRegister(unlinkedCodeBlock->thisRegister())
- , m_argumentsRegister(unlinkedCodeBlock->argumentsRegister())
- , m_activationRegister(unlinkedCodeBlock->activationRegister())
- , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
- , m_needsActivation(unlinkedCodeBlock->needsFullScopeChain() && unlinkedCodeBlock->codeType() == FunctionCode)
+ , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
, m_source(sourceProvider)
, m_sourceOffset(sourceOffset)
, m_firstLineColumnOffset(firstLineColumnOffset)
- , m_codeType(unlinkedCodeBlock->codeType())
, m_osrExitCounter(0)
, m_optimizationDelayCounter(0)
, m_reoptimizationRetryCounter(0)
-#if ENABLE(JIT)
- , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
-#endif
+ , m_creationTime(std::chrono::steady_clock::now())
{
- ASSERT(m_heap->isDeferred());
+ m_visitWeaklyHasBeenCalled.store(false, std::memory_order_relaxed);
+
+ ASSERT(heap()->isDeferred());
+ ASSERT(m_scopeRegister.isLocal());
- bool didCloneSymbolTable = false;
-
- if (SymbolTable* symbolTable = unlinkedCodeBlock->symbolTable()) {
- if (codeType() == FunctionCode && symbolTable->captureCount()) {
- m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable->clone(*m_vm));
- didCloneSymbolTable = true;
- } else
- m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
- }
-
ASSERT(m_source);
setNumParameters(unlinkedCodeBlock->numParameters());
+}
- setConstantRegisters(unlinkedCodeBlock->constantRegisters());
+void CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
+ JSScope* scope)
+{
+ Base::finishCreation(vm);
+
+ if (vm.typeProfiler() || vm.controlFlowProfiler())
+ vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
+
+ setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
if (unlinkedCodeBlock->usesGlobalObject())
- m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().offset()].set(*m_vm, ownerExecutable, m_globalObject.get());
- m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
+ m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, this, m_globalObject.get());
+
+ for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
+ LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
+ if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
+ m_constantRegisters[registerIndex].set(*m_vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
+ }
+
+#if !ASSERT_DISABLED
+ HashSet<int, WTF::IntHash<int>, WTF::UnsignedWithZeroKeyHashTraits<int>> clonedConstantSymbolTables;
+#endif
+ {
+#if !ASSERT_DISABLED
+ HashSet<SymbolTable*> clonedSymbolTables;
+#endif
+ bool hasTypeProfiler = !!vm.typeProfiler();
+ for (unsigned i = 0; i < m_constantRegisters.size(); i++) {
+ if (m_constantRegisters[i].get().isEmpty())
+ continue;
+ if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(m_constantRegisters[i].get())) {
+ ASSERT(clonedSymbolTables.add(symbolTable).isNewEntry);
+ if (hasTypeProfiler) {
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ symbolTable->prepareForTypeProfiling(locker);
+ }
+ m_constantRegisters[i].set(*m_vm, this, symbolTable->cloneScopePart(*m_vm));
+#if !ASSERT_DISABLED
+ clonedConstantSymbolTables.add(i + FirstConstantRegisterIndex);
+#endif
+ }
+ }
+ }
+
+ // We already have the cloned symbol table for the module environment since we need to instantiate
+ // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
+ if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(unlinkedCodeBlock)) {
+ SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
+ if (m_vm->typeProfiler()) {
+ ConcurrentJITLocker locker(clonedSymbolTable->m_lock);
+ clonedSymbolTable->prepareForTypeProfiling(locker);
+ }
+ replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
+ }
+
+ bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
+ m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
- unsigned lineCount = unlinkedExecutable->lineCount();
- unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
- bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
- unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
- bool endColumnIsOnStartLine = !lineCount;
- unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
- unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
- unsigned sourceLength = unlinkedExecutable->sourceLength();
- SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
- FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
- m_functionDecls[i].set(*m_vm, ownerExecutable, executable);
- }
-
- m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
+ if (shouldUpdateFunctionHasExecutedCache)
+ vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
+ m_functionDecls[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
+ }
+
+ m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
- unsigned lineCount = unlinkedExecutable->lineCount();
- unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
- bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
- unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
- bool endColumnIsOnStartLine = !lineCount;
- unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
- unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
- unsigned sourceLength = unlinkedExecutable->sourceLength();
- SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
- FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
- m_functionExprs[i].set(*m_vm, ownerExecutable, executable);
+ if (shouldUpdateFunctionHasExecutedCache)
+ vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
+ m_functionExprs[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
}
if (unlinkedCodeBlock->hasRareData()) {
@@ -1607,15 +1922,13 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
}
if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
m_rareData->m_exceptionHandlers.resizeToFit(count);
- size_t nonLocalScopeDepth = scope->depth();
for (size_t i = 0; i < count; i++) {
- const UnlinkedHandlerInfo& handler = unlinkedCodeBlock->exceptionHandler(i);
- m_rareData->m_exceptionHandlers[i].start = handler.start;
- m_rareData->m_exceptionHandlers[i].end = handler.end;
- m_rareData->m_exceptionHandlers[i].target = handler.target;
- m_rareData->m_exceptionHandlers[i].scopeDepth = nonLocalScopeDepth + handler.scopeDepth;
-#if ENABLE(JIT) && ENABLE(LLINT)
- m_rareData->m_exceptionHandlers[i].nativeCode = CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(llint_op_catch)));
+ const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
+ HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+#if ENABLE(JIT)
+ handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
+#else
+ handler.initialize(unlinkedHandler);
#endif
}
}
@@ -1645,46 +1958,65 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
}
// Allocate metadata buffers for the bytecode
-#if ENABLE(LLINT)
if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
- m_llintCallLinkInfos.resizeToFit(size);
-#endif
+ m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
m_arrayProfiles.grow(size);
if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
- m_arrayAllocationProfiles.resizeToFit(size);
+ m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
- m_valueProfiles.resizeToFit(size);
+ m_valueProfiles = RefCountedArray<ValueProfile>(size);
if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
- m_objectAllocationProfiles.resizeToFit(size);
+ m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);
+
+#if ENABLE(JIT)
+ setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
+#endif
// Copy and translate the UnlinkedInstructions
unsigned instructionCount = unlinkedCodeBlock->instructions().count();
UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
- Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
+ // Bookkeep the strongly referenced module environments.
+ HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
+
+ // Bookkeep the merge point bytecode offsets.
+ Vector<size_t> mergePointBytecodeOffsets;
+
+ RefCountedArray<Instruction> instructions(instructionCount);
+
for (unsigned i = 0; !instructionReader.atEnd(); ) {
const UnlinkedInstruction* pc = instructionReader.next();
unsigned opLength = opcodeLength(pc[0].u.opcode);
- instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
+ instructions[i] = vm.interpreter->getOpcode(pc[0].u.opcode);
for (size_t j = 1; j < opLength; ++j) {
if (sizeof(int32_t) != sizeof(intptr_t))
instructions[i + j].u.pointer = 0;
instructions[i + j].u.operand = pc[j].u.operand;
}
switch (pc[0].u.opcode) {
+ case op_has_indexed_property: {
+ int arrayProfileIndex = pc[opLength - 1].u.operand;
+ m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
+
+ instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
+ break;
+ }
case op_call_varargs:
- case op_get_by_val:
- case op_get_argument_by_val: {
+ case op_tail_call_varargs:
+ case op_construct_varargs:
+ case op_get_by_val: {
int arrayProfileIndex = pc[opLength - 2].u.operand;
m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
FALLTHROUGH;
}
- case op_get_by_id: {
+ case op_get_direct_pname:
+ case op_get_by_id:
+ case op_get_from_arguments: {
ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
ASSERT(profile->m_bytecodeOffset == -1);
profile->m_bytecodeOffset = i;
@@ -1717,12 +2049,13 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
int inferredInlineCapacity = pc[opLength - 2].u.operand;
instructions[i + opLength - 1] = objectAllocationProfile;
- objectAllocationProfile->initialize(*vm(),
- m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
+ objectAllocationProfile->initialize(vm,
+ this, m_globalObject->objectPrototype(), inferredInlineCapacity);
break;
}
case op_call:
+ case op_tail_call:
case op_call_eval: {
ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
ASSERT(profile->m_bytecodeOffset == -1);
@@ -1731,57 +2064,49 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
int arrayProfileIndex = pc[opLength - 2].u.operand;
m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
-#if ENABLE(LLINT)
instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
-#endif
break;
}
case op_construct: {
-#if ENABLE(LLINT)
instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
-#endif
ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
ASSERT(profile->m_bytecodeOffset == -1);
profile->m_bytecodeOffset = i;
instructions[i + opLength - 1] = profile;
break;
}
- case op_get_by_id_out_of_line:
- case op_get_by_id_self:
- case op_get_by_id_proto:
- case op_get_by_id_chain:
- case op_get_by_id_getter_self:
- case op_get_by_id_getter_proto:
- case op_get_by_id_getter_chain:
- case op_get_by_id_custom_self:
- case op_get_by_id_custom_proto:
- case op_get_by_id_custom_chain:
- case op_get_by_id_generic:
case op_get_array_length:
- case op_get_string_length:
CRASH();
- case op_init_global_const_nop: {
- ASSERT(codeType() == GlobalCode);
- Identifier ident = identifier(pc[4].u.operand);
- SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
- if (entry.isNull())
- break;
-
- instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
- instructions[i + 1] = &m_globalObject->registerAt(entry.getIndex());
+#if !ASSERT_DISABLED
+ case op_create_lexical_environment: {
+ int symbolTableIndex = pc[3].u.operand;
+ ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex));
break;
}
+#endif
case op_resolve_scope: {
- const Identifier& ident = identifier(pc[2].u.operand);
- ResolveType type = static_cast<ResolveType>(pc[3].u.operand);
-
- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, type);
- instructions[i + 3].u.operand = op.type;
- instructions[i + 4].u.operand = op.depth;
- if (op.activation)
- instructions[i + 5].u.activation.set(*vm(), ownerExecutable, op.activation);
+ const Identifier& ident = identifier(pc[3].u.operand);
+ ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
+ RELEASE_ASSERT(type != LocalClosureVar);
+ int localScopeDepth = pc[5].u.operand;
+
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, NotInitialization);
+ instructions[i + 4].u.operand = op.type;
+ instructions[i + 5].u.operand = op.depth;
+ if (op.lexicalEnvironment) {
+ if (op.type == ModuleVar) {
+ // Keep the linked module environment strongly referenced.
+ if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
+ addConstant(op.lexicalEnvironment);
+ instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
+ } else
+ instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
+ } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
+ instructions[i + 6].u.jsCell.set(vm, this, constantScope);
+ else
+ instructions[i + 6].u.pointer = nullptr;
break;
}
@@ -1791,51 +2116,169 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
profile->m_bytecodeOffset = i;
instructions[i + opLength - 1] = profile;
- // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
+ // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
+
+ int localScopeDepth = pc[5].u.operand;
+ instructions[i + 5].u.pointer = nullptr;
+
+ GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
+ ASSERT(getPutInfo.initializationMode() == NotInitialization);
+ if (getPutInfo.resolveType() == LocalClosureVar) {
+ instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
+ break;
+ }
+
const Identifier& ident = identifier(pc[3].u.operand);
- ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, modeAndType.type());
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), NotInitialization);
- instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
- if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
+ instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
+ if (op.type == ModuleVar)
+ instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
+ if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
instructions[i + 5].u.watchpointSet = op.watchpointSet;
else if (op.structure)
- instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
+ instructions[i + 5].u.structure.set(vm, this, op.structure);
instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
break;
}
case op_put_to_scope: {
- // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
+ // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
+ GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
+ if (getPutInfo.resolveType() == LocalClosureVar) {
+ // Only do watching if the property we're putting to is not anonymous.
+ if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
+ int symbolTableIndex = pc[5].u.operand;
+ ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex));
+ SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
+ const Identifier& ident = identifier(pc[2].u.operand);
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ auto iter = symbolTable->find(locker, ident.impl());
+ ASSERT(iter != symbolTable->end(locker));
+ iter->value.prepareToWatch();
+ instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
+ } else
+ instructions[i + 5].u.watchpointSet = nullptr;
+ break;
+ }
+
const Identifier& ident = identifier(pc[2].u.operand);
- ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Put, modeAndType.type());
+ int localScopeDepth = pc[5].u.operand;
+ instructions[i + 5].u.pointer = nullptr;
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());
- instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
- if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
+ instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
+ if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
instructions[i + 5].u.watchpointSet = op.watchpointSet;
else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
if (op.watchpointSet)
- op.watchpointSet->invalidate();
+ op.watchpointSet->invalidate(PutToScopeFireDetail(this, ident));
} else if (op.structure)
- instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
+ instructions[i + 5].u.structure.set(vm, this, op.structure);
instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
+
break;
}
-
- case op_captured_mov:
- case op_new_captured_func: {
- if (pc[3].u.index == UINT_MAX) {
- instructions[i + 3].u.watchpointSet = 0;
+
+ case op_profile_type: {
+ RELEASE_ASSERT(vm.typeProfiler());
+ // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
+ size_t instructionOffset = i + opLength - 1;
+ unsigned divotStart, divotEnd;
+ GlobalVariableID globalVariableID = 0;
+ RefPtr<TypeSet> globalTypeSet;
+ bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
+ VirtualRegister profileRegister(pc[1].u.operand);
+ ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
+ SymbolTable* symbolTable = nullptr;
+
+ switch (flag) {
+ case ProfileTypeBytecodeClosureVar: {
+ const Identifier& ident = identifier(pc[4].u.operand);
+ int localScopeDepth = pc[2].u.operand;
+ ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
+ // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
+ // we're abstractly "read"ing from a JSScope.
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, NotInitialization);
+
+ if (op.type == ClosureVar || op.type == ModuleVar)
+ symbolTable = op.lexicalEnvironment->symbolTable();
+ else if (op.type == GlobalVar)
+ symbolTable = m_globalObject.get()->symbolTable();
+
+ UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
+ if (symbolTable) {
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
+ symbolTable->prepareForTypeProfiling(locker);
+ globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
+ globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
+ } else
+ globalVariableID = TypeProfilerNoGlobalIDExists;
+
+ break;
+ }
+ case ProfileTypeBytecodeLocallyResolved: {
+ int symbolTableIndex = pc[2].u.operand;
+ ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex));
+ SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
+ const Identifier& ident = identifier(pc[4].u.operand);
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
+ globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
+ globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
+
+ break;
+ }
+ case ProfileTypeBytecodeDoesNotHaveGlobalID:
+ case ProfileTypeBytecodeFunctionArgument: {
+ globalVariableID = TypeProfilerNoGlobalIDExists;
+ break;
+ }
+ case ProfileTypeBytecodeFunctionReturnStatement: {
+ RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
+ globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
+ globalVariableID = TypeProfilerReturnStatement;
+ if (!shouldAnalyze) {
+ // Because a return statement can be added implicitly to return undefined at the end of a function,
+ // and these nodes don't emit expression ranges because they aren't in the actual source text of
+ // the user's program, give the type profiler some range to identify these return statements.
+ // Currently, the text offset that is used as identification is "f" in the function keyword
+ // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
+ divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
+ shouldAnalyze = true;
+ }
break;
}
- StringImpl* uid = identifier(pc[3].u.index).impl();
- RELEASE_ASSERT(didCloneSymbolTable);
- ConcurrentJITLocker locker(m_symbolTable->m_lock);
- SymbolTable::Map::iterator iter = m_symbolTable->find(locker, uid);
- ASSERT(iter != m_symbolTable->end(locker));
- iter->value.prepareToWatch();
- instructions[i + 3].u.watchpointSet = iter->value.watchpointSet();
+ }
+
+ std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
+ ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, &vm);
+ TypeLocation* location = locationPair.first;
+ bool isNewLocation = locationPair.second;
+
+ if (flag == ProfileTypeBytecodeFunctionReturnStatement)
+ location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
+
+ if (shouldAnalyze && isNewLocation)
+ vm.typeProfiler()->insertNewLocation(location);
+
+ instructions[i + 2].u.location = location;
+ break;
+ }
+
+ case op_debug: {
+ if (pc[1].u.index == DidReachBreakpoint)
+ m_hasDebuggerStatement = true;
+ break;
+ }
+
+ case op_save: {
+ unsigned liveCalleeLocalsIndex = pc[2].u.index;
+ int offset = pc[3].u.operand;
+ if (liveCalleeLocalsIndex >= mergePointBytecodeOffsets.size())
+ mergePointBytecodeOffsets.resize(liveCalleeLocalsIndex + 1);
+ mergePointBytecodeOffsets[liveCalleeLocalsIndex] = i + offset;
break;
}
@@ -1844,7 +2287,25 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
}
i += opLength;
}
- m_instructions = WTF::RefCountedArray<Instruction>(instructions);
+
+ if (vm.controlFlowProfiler())
+ insertBasicBlockBoundariesForControlFlowProfiler(instructions);
+
+ m_instructions = WTFMove(instructions);
+
+ // Perform bytecode liveness analysis to determine which locals are live and should be resumed when executing op_resume.
+ if (unlinkedCodeBlock->parseMode() == SourceParseMode::GeneratorBodyMode) {
+ if (size_t count = mergePointBytecodeOffsets.size()) {
+ createRareDataIfNecessary();
+ BytecodeLivenessAnalysis liveness(this);
+ m_rareData->m_liveCalleeLocalsAtYield.grow(count);
+ size_t liveCalleeLocalsIndex = 0;
+ for (size_t bytecodeOffset : mergePointBytecodeOffsets) {
+ m_rareData->m_liveCalleeLocalsAtYield[liveCalleeLocalsIndex] = liveness.getLivenessInfoAtBytecodeOffset(bytecodeOffset);
+ ++liveCalleeLocalsIndex;
+ }
+ }
+ }
// Set optimization thresholds only after m_instructions is initialized, since these
// rely on the instruction count (and are in theory permitted to also inspect the
@@ -1854,26 +2315,51 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
// If the concurrent thread will want the code block's hash, then compute it here
// synchronously.
- if (Options::showDisassembly()
- || Options::showDFGDisassembly()
- || Options::dumpBytecodeAtDFGTime()
- || Options::dumpGraphAtEachPhase()
- || Options::verboseCompilation()
- || Options::logCompilationChanges()
- || Options::validateGraph()
- || Options::validateGraphAtEachPhase()
- || Options::verboseOSR()
- || Options::verboseCompilationQueue()
- || Options::reportCompileTimes()
- || Options::verboseCFA())
+ if (Options::alwaysComputeHash())
hash();
if (Options::dumpGeneratedBytecodes())
dumpBytecode();
+
+ heap()->m_codeBlocks.add(this);
+ heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));
+}
+
+#if ENABLE(WEBASSEMBLY)
+CodeBlock::CodeBlock(VM* vm, Structure* structure, WebAssemblyExecutable* ownerExecutable, JSGlobalObject* globalObject)
+ : JSCell(*vm, structure)
+ , m_globalObject(globalObject->vm(), this, globalObject)
+ , m_numCalleeLocals(0)
+ , m_numVars(0)
+ , m_shouldAlwaysBeInlined(false)
+#if ENABLE(JIT)
+ , m_capabilityLevelState(DFG::CannotCompile)
+#endif
+ , m_didFailFTLCompilation(false)
+ , m_hasBeenCompiledWithFTL(false)
+ , m_isConstructor(false)
+ , m_isStrictMode(false)
+ , m_codeType(FunctionCode)
+ , m_hasDebuggerStatement(false)
+ , m_steppingMode(SteppingModeDisabled)
+ , m_numBreakpoints(0)
+ , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable)
+ , m_vm(vm)
+ , m_osrExitCounter(0)
+ , m_optimizationDelayCounter(0)
+ , m_reoptimizationRetryCounter(0)
+ , m_creationTime(std::chrono::steady_clock::now())
+{
+ ASSERT(heap()->isDeferred());
+}
- m_heap->m_codeBlocks.add(this);
- m_heap->reportExtraMemoryCost(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
+void CodeBlock::finishCreation(VM& vm, WebAssemblyExecutable*, JSGlobalObject*)
+{
+ Base::finishCreation(vm);
+
+ heap()->m_codeBlocks.add(this);
}
+#endif
CodeBlock::~CodeBlock()
{
@@ -1884,34 +2370,37 @@ CodeBlock::~CodeBlock()
dumpValueProfiles();
#endif
-#if ENABLE(LLINT)
- while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
- m_incomingLLIntCalls.begin()->remove();
-#endif // ENABLE(LLINT)
-#if ENABLE(JIT)
// We may be destroyed before any CodeBlocks that refer to us are destroyed.
// Consider that two CodeBlocks become unreachable at the same time. There
// is no guarantee about the order in which the CodeBlocks are destroyed.
// So, if we don't remove incoming calls, and get destroyed before the
// CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
// destructor will try to remove nodes from our (no longer valid) linked list.
- while (m_incomingCalls.begin() != m_incomingCalls.end())
- m_incomingCalls.begin()->remove();
+ unlinkIncomingCalls();
// Note that our outgoing calls will be removed from other CodeBlocks'
// m_incomingCalls linked lists through the execution of the ~CallLinkInfo
// destructors.
- for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
- (*iter)->deref();
+#if ENABLE(JIT)
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+ StructureStubInfo* stub = *iter;
+ stub->aboutToDie();
+ stub->deref();
+ }
#endif // ENABLE(JIT)
}
+void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
+{
+ m_alternative.set(vm, this, alternative);
+}
+
void CodeBlock::setNumParameters(int newValue)
{
m_numParameters = newValue;
- m_argumentValueProfiles.resizeToFit(newValue);
+ m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue);
}
void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
@@ -1927,77 +2416,44 @@ CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
if (jitType() != JITCode::DFGJIT)
return 0;
DFG::JITCode* jitCode = m_jitCode->dfg();
- return jitCode->osrEntryBlock.get();
+ return jitCode->osrEntryBlock();
#else // ENABLE(FTL_JIT)
return 0;
#endif // ENABLE(FTL_JIT)
}
-void CodeBlock::visitAggregate(SlotVisitor& visitor)
-{
-#if ENABLE(PARALLEL_GC)
- // I may be asked to scan myself more than once, and it may even happen concurrently.
- // To this end, use a CAS loop to check if I've been called already. Only one thread
- // may proceed past this point - whichever one wins the CAS race.
- unsigned oldValue;
- do {
- oldValue = m_visitAggregateHasBeenCalled;
- if (oldValue) {
- // Looks like someone else won! Return immediately to ensure that we don't
- // trace the same CodeBlock concurrently. Doing so is hazardous since we will
- // be mutating the state of ValueProfiles, which contain JSValues, which can
- // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
- // that are nearly impossible to track down.
-
- // Also note that it must be safe to return early as soon as we see the
- // value true (well, (unsigned)1), since once a GC thread is in this method
- // and has won the CAS race (i.e. was responsible for setting the value true)
- // it will definitely complete the rest of this method before declaring
- // termination.
- return;
- }
- } while (!WTF::weakCompareAndSwap(&m_visitAggregateHasBeenCalled, 0, 1));
-#endif // ENABLE(PARALLEL_GC)
-
- if (!!m_alternative)
- m_alternative->visitAggregate(visitor);
-
- if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
- otherBlock->visitAggregate(visitor);
+void CodeBlock::visitWeakly(SlotVisitor& visitor)
+{
+ bool setByMe = m_visitWeaklyHasBeenCalled.compareExchangeStrong(false, true);
+ if (!setByMe)
+ return;
- visitor.reportExtraMemoryUsage(ownerExecutable(), sizeof(CodeBlock));
- if (m_jitCode)
- visitor.reportExtraMemoryUsage(ownerExecutable(), m_jitCode->size());
- if (m_instructions.size()) {
- // Divide by refCount() because m_instructions points to something that is shared
- // by multiple CodeBlocks, and we only want to count it towards the heap size once.
- // Having each CodeBlock report only its proportional share of the size is one way
- // of accomplishing this.
- visitor.reportExtraMemoryUsage(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
+ if (Heap::isMarked(this))
+ return;
+
+ if (shouldVisitStrongly()) {
+ visitor.appendUnbarrieredReadOnlyPointer(this);
+ return;
}
- visitor.append(&m_unlinkedCode);
+ // There are two things that may use unconditional finalizers: inline cache clearing
+ // and jettisoning. The probability of us wanting to do at least one of those things
+ // is probably quite close to 1. So we add one no matter what and when it runs, it
+ // figures out whether it has any work to do.
+ visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
- // There are three things that may use unconditional finalizers: lazy bytecode freeing,
- // inline cache clearing, and jettisoning. The probability of us wanting to do at
- // least one of those things is probably quite close to 1. So we add one no matter what
- // and when it runs, it figures out whether it has any work to do.
- visitor.addUnconditionalFinalizer(this);
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return;
+
+ // If we jettison ourselves we'll install our alternative, so make sure that it
+ // survives GC even if we don't.
+ visitor.append(&m_alternative);
// There are two things that we use weak reference harvesters for: DFG fixpoint for
// jettisoning, and trying to find structures that would be live based on some
// inline cache. So it makes sense to register them regardless.
- visitor.addWeakReferenceHarvester(this);
- m_allTransitionsHaveBeenMarked = false;
-
- if (shouldImmediatelyAssumeLivenessDuringScan()) {
- // This code block is live, so scan all references strongly and return.
- stronglyVisitStrongReferences(visitor);
- stronglyVisitWeakReferences(visitor);
- propagateTransitions(visitor);
- return;
- }
-
+ visitor.addWeakReferenceHarvester(&m_weakReferenceHarvester);
+
#if ENABLE(DFG_JIT)
// We get here if we're live in the sense that our owner executable is live,
// but we're not yet live for sure in another sense: we may yet decide that this
@@ -2007,16 +2463,97 @@ void CodeBlock::visitAggregate(SlotVisitor& visitor)
// either us marking additional objects, or by other objects being marked for
// other reasons, that this iteration should run again; it will notify us of this
// decision by calling harvestWeakReferences().
-
- m_jitCode->dfgCommon()->livenessHasBeenProved = false;
-
+
+ m_allTransitionsHaveBeenMarked = false;
propagateTransitions(visitor);
+
+ m_jitCode->dfgCommon()->livenessHasBeenProved = false;
determineLiveness(visitor);
-#else // ENABLE(DFG_JIT)
- RELEASE_ASSERT_NOT_REACHED();
#endif // ENABLE(DFG_JIT)
}
+size_t CodeBlock::estimatedSize(JSCell* cell)
+{
+ CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
+ size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
+ if (thisObject->m_jitCode)
+ extraMemoryAllocated += thisObject->m_jitCode->size();
+ return Base::estimatedSize(cell) + extraMemoryAllocated;
+}
+
+void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+ CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
+ ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+ JSCell::visitChildren(thisObject, visitor);
+ thisObject->visitChildren(visitor);
+}
+
+void CodeBlock::visitChildren(SlotVisitor& visitor)
+{
+ // There are two things that may use unconditional finalizers: inline cache clearing
+ // and jettisoning. The probability of us wanting to do at least one of those things
+ // is probably quite close to 1. So we add one no matter what and when it runs, it
+ // figures out whether it has any work to do.
+ visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
+
+ if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
+ visitor.appendUnbarrieredReadOnlyPointer(otherBlock);
+
+ if (m_jitCode)
+ visitor.reportExtraMemoryVisited(m_jitCode->size());
+ if (m_instructions.size())
+ visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
+
+ stronglyVisitStrongReferences(visitor);
+ stronglyVisitWeakReferences(visitor);
+
+ m_allTransitionsHaveBeenMarked = false;
+ propagateTransitions(visitor);
+}
+
+bool CodeBlock::shouldVisitStrongly()
+{
+ if (Options::forceCodeBlockLiveness())
+ return true;
+
+ if (shouldJettisonDueToOldAge())
+ return false;
+
+ // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
+ // their weak references go stale. So if a basline JIT CodeBlock gets
+ // scanned, we can assume that this means that it's live.
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return true;
+
+ return false;
+}
+
+bool CodeBlock::shouldJettisonDueToWeakReference()
+{
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return false;
+ return !Heap::isMarked(this);
+}
+
+bool CodeBlock::shouldJettisonDueToOldAge()
+{
+ return false;
+}
+
+#if ENABLE(DFG_JIT)
+static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
+{
+ if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
+ return false;
+
+ if (!Heap::isMarked(transition.m_from.get()))
+ return false;
+
+ return true;
+}
+#endif // ENABLE(DFG_JIT)
+
void CodeBlock::propagateTransitions(SlotVisitor& visitor)
{
UNUSED_PARAM(visitor);
@@ -2026,19 +2563,23 @@ void CodeBlock::propagateTransitions(SlotVisitor& visitor)
bool allAreMarkedSoFar = true;
-#if ENABLE(LLINT)
Interpreter* interpreter = m_vm->interpreter;
if (jitType() == JITCode::InterpreterThunk) {
const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal_out_of_line: {
- if (Heap::isMarked(instruction[4].u.structure.get()))
- visitor.append(&instruction[6].u.structure);
+ case op_put_by_id: {
+ StructureID oldStructureID = instruction[4].u.structureID;
+ StructureID newStructureID = instruction[6].u.structureID;
+ if (!oldStructureID || !newStructureID)
+ break;
+ Structure* oldStructure =
+ m_vm->heap.structureIDTable().get(oldStructureID);
+ Structure* newStructure =
+ m_vm->heap.structureIDTable().get(newStructureID);
+ if (Heap::isMarked(oldStructure))
+ visitor.appendUnbarrieredReadOnlyPointer(newStructure);
else
allAreMarkedSoFar = false;
break;
@@ -2048,45 +2589,27 @@ void CodeBlock::propagateTransitions(SlotVisitor& visitor)
}
}
}
-#endif // ENABLE(LLINT)
#if ENABLE(JIT)
if (JITCode::isJIT(jitType())) {
for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
StructureStubInfo& stubInfo = **iter;
- switch (stubInfo.accessType) {
- case access_put_by_id_transition_normal:
- case access_put_by_id_transition_direct: {
- JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
- if ((!origin || Heap::isMarked(origin))
- && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
- visitor.append(&stubInfo.u.putByIdTransition.structure);
- else
- allAreMarkedSoFar = false;
- break;
+ if (stubInfo.cacheType != CacheType::Stub)
+ continue;
+ PolymorphicAccess* list = stubInfo.u.stub;
+ JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
+ if (origin && !Heap::isMarked(origin)) {
+ allAreMarkedSoFar = false;
+ continue;
}
-
- case access_put_by_id_list: {
- PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
- JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
- if (origin && !Heap::isMarked(origin)) {
+ for (unsigned j = list->size(); j--;) {
+ const AccessCase& access = list->at(j);
+ if (access.type() != AccessCase::Transition)
+ continue;
+ if (Heap::isMarked(access.structure()))
+ visitor.appendUnbarrieredReadOnlyPointer(access.newStructure());
+ else
allAreMarkedSoFar = false;
- break;
- }
- for (unsigned j = list->size(); j--;) {
- PutByIdAccess& access = list->m_list[j];
- if (!access.isTransition())
- continue;
- if (Heap::isMarked(access.oldStructure()))
- visitor.append(&access.m_newStructure);
- else
- allAreMarkedSoFar = false;
- }
- break;
- }
-
- default:
- break;
}
}
}
@@ -2095,21 +2618,28 @@ void CodeBlock::propagateTransitions(SlotVisitor& visitor)
#if ENABLE(DFG_JIT)
if (JITCode::isOptimizingJIT(jitType())) {
DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+
for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
- if ((!dfgCommon->transitions[i].m_codeOrigin
- || Heap::isMarked(dfgCommon->transitions[i].m_codeOrigin.get()))
- && Heap::isMarked(dfgCommon->transitions[i].m_from.get())) {
+ if (shouldMarkTransition(dfgCommon->transitions[i])) {
// If the following three things are live, then the target of the
// transition is also live:
+ //
// - This code block. We know it's live already because otherwise
// we wouldn't be scanning ourselves.
+ //
// - The code origin of the transition. Transitions may arise from
// code that was inlined. They are not relevant if the user's
// object that is required for the inlinee to run is no longer
// live.
+ //
// - The source of the transition. The transition checks if some
// heap location holds the source, and if so, stores the target.
// Hence the source must be live for the transition to be live.
+ //
+ // We also short-circuit the liveness if the structure is harmless
+ // to mark (i.e. its global object and prototype are both already
+ // live).
+
visitor.append(&dfgCommon->transitions[i].m_to);
} else
allAreMarkedSoFar = false;
@@ -2125,9 +2655,6 @@ void CodeBlock::determineLiveness(SlotVisitor& visitor)
{
UNUSED_PARAM(visitor);
- if (shouldImmediatelyAssumeLivenessDuringScan())
- return;
-
#if ENABLE(DFG_JIT)
// Check if we have any remaining work to do.
DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
@@ -2144,6 +2671,14 @@ void CodeBlock::determineLiveness(SlotVisitor& visitor)
break;
}
}
+ if (allAreLiveSoFar) {
+ for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
+ if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
+ allAreLiveSoFar = false;
+ break;
+ }
+ }
+ }
// If some weak references are dead, then this fixpoint iteration was
// unsuccessful.
@@ -2153,246 +2688,271 @@ void CodeBlock::determineLiveness(SlotVisitor& visitor)
// All weak references are live. Record this information so we don't
// come back here again, and scan the strong references.
dfgCommon->livenessHasBeenProved = true;
- stronglyVisitStrongReferences(visitor);
+ visitor.appendUnbarrieredReadOnlyPointer(this);
#endif // ENABLE(DFG_JIT)
}
-void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
+void CodeBlock::WeakReferenceHarvester::visitWeakReferences(SlotVisitor& visitor)
{
- propagateTransitions(visitor);
- determineLiveness(visitor);
+ CodeBlock* codeBlock =
+ bitwise_cast<CodeBlock*>(
+ bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_weakReferenceHarvester));
+
+ codeBlock->propagateTransitions(visitor);
+ codeBlock->determineLiveness(visitor);
}
-void CodeBlock::finalizeUnconditionally()
+void CodeBlock::finalizeLLIntInlineCaches()
{
+#if ENABLE(WEBASSEMBLY)
+ if (m_ownerExecutable->isWebAssemblyExecutable())
+ return;
+#endif
+
Interpreter* interpreter = m_vm->interpreter;
- if (JITCode::couldBeInterpreted(jitType())) {
- const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
- for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
- Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
- switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
- case op_get_by_id:
- case op_get_by_id_out_of_line:
- case op_put_by_id:
- case op_put_by_id_out_of_line:
- if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
- curInstruction[4].u.structure.clear();
- curInstruction[5].u.operand = 0;
+ const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
+ for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
+ Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
+ switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
+ case op_get_by_id: {
+ StructureID oldStructureID = curInstruction[4].u.structureID;
+ if (!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID)))
break;
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal_out_of_line:
- if (Heap::isMarked(curInstruction[4].u.structure.get())
- && Heap::isMarked(curInstruction[6].u.structure.get())
- && Heap::isMarked(curInstruction[7].u.structureChain.get()))
- break;
- if (Options::verboseOSR()) {
- dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
- curInstruction[4].u.structure.get(),
- curInstruction[6].u.structure.get(),
- curInstruction[7].u.structureChain.get());
- }
- curInstruction[4].u.structure.clear();
- curInstruction[6].u.structure.clear();
- curInstruction[7].u.structureChain.clear();
- curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt property access.\n");
+ curInstruction[4].u.structureID = 0;
+ curInstruction[5].u.operand = 0;
+ break;
+ }
+ case op_put_by_id: {
+ StructureID oldStructureID = curInstruction[4].u.structureID;
+ StructureID newStructureID = curInstruction[6].u.structureID;
+ StructureChain* chain = curInstruction[7].u.structureChain.get();
+ if ((!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID))) &&
+ (!newStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(newStructureID))) &&
+ (!chain || Heap::isMarked(chain)))
break;
- case op_get_array_length:
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt put transition.\n");
+ curInstruction[4].u.structureID = 0;
+ curInstruction[5].u.operand = 0;
+ curInstruction[6].u.structureID = 0;
+ curInstruction[7].u.structureChain.clear();
+ break;
+ }
+ case op_get_array_length:
+ break;
+ case op_to_this:
+ if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
break;
- case op_to_this:
- if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
- curInstruction[2].u.structure.clear();
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
+ curInstruction[2].u.structure.clear();
+ curInstruction[3].u.toThisStatus = merge(
+ curInstruction[3].u.toThisStatus, ToThisClearedByGC);
+ break;
+ case op_create_this: {
+ auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
+ if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
break;
- case op_get_callee:
- if (!curInstruction[2].u.jsCell || Heap::isMarked(curInstruction[2].u.jsCell.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing LLInt get callee with function %p.\n", curInstruction[2].u.jsCell.get());
- curInstruction[2].u.jsCell.clear();
+ JSCell* cachedFunction = cacheWriteBarrier.get();
+ if (Heap::isMarked(cachedFunction))
break;
- case op_resolve_scope: {
- WriteBarrierBase<JSActivation>& activation = curInstruction[5].u.activation;
- if (!activation || Heap::isMarked(activation.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing dead activation %p.\n", activation.get());
- activation.clear();
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
+ cacheWriteBarrier.clear();
+ break;
+ }
+ case op_resolve_scope: {
+ // Right now this isn't strictly necessary. Any symbol tables that this will refer to
+ // are for outer functions, and we refer to those functions strongly, and they refer
+ // to the symbol table strongly. But it's nice to be on the safe side.
+ WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
+ if (!symbolTable || Heap::isMarked(symbolTable.get()))
break;
- }
- case op_get_from_scope:
- case op_put_to_scope: {
- ResolveModeAndType modeAndType =
- ResolveModeAndType(curInstruction[4].u.operand);
- if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks)
- continue;
- WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
- if (!structure || Heap::isMarked(structure.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing scope access with structure %p.\n", structure.get());
- structure.clear();
+ if (Options::verboseOSR())
+ dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
+ symbolTable.clear();
+ break;
+ }
+ case op_get_from_scope:
+ case op_put_to_scope: {
+ GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
+ if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks
+ || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
+ continue;
+ WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
+ if (!structure || Heap::isMarked(structure.get()))
break;
- }
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
+ if (Options::verboseOSR())
+ dataLogF("Clearing scope access with structure %p.\n", structure.get());
+ structure.clear();
+ break;
+ }
+ default:
+ OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
+ ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
}
+ }
-#if ENABLE(LLINT)
- for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
- if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
- if (Options::verboseOSR())
- dataLog("Clearing LLInt call from ", *this, "\n");
- m_llintCallLinkInfos[i].unlink();
- }
- if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
- m_llintCallLinkInfos[i].lastSeenCallee.clear();
+ for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
+ if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
+ if (Options::verboseOSR())
+ dataLog("Clearing LLInt call from ", *this, "\n");
+ m_llintCallLinkInfos[i].unlink();
}
-#endif // ENABLE(LLINT)
+ if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
+ m_llintCallLinkInfos[i].lastSeenCallee.clear();
}
+}
-#if ENABLE(DFG_JIT)
- // Check if we're not live. If we are, then jettison.
- if (!(shouldImmediatelyAssumeLivenessDuringScan() || m_jitCode->dfgCommon()->livenessHasBeenProved)) {
- if (Options::verboseOSR())
- dataLog(*this, " has dead weak references, jettisoning during GC.\n");
+void CodeBlock::finalizeBaselineJITInlineCaches()
+{
+#if ENABLE(JIT)
+ for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
+ (*iter)->visitWeak(*vm());
- if (DFG::shouldShowDisassembly()) {
- dataLog(*this, " will be jettisoned because of the following dead references:\n");
- DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
- for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
- DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
- JSCell* origin = transition.m_codeOrigin.get();
- JSCell* from = transition.m_from.get();
- JSCell* to = transition.m_to.get();
- if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
- continue;
- dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
- }
- for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
- JSCell* weak = dfgCommon->weakReferences[i].get();
- if (Heap::isMarked(weak))
- continue;
- dataLog(" Weak reference ", RawPointer(weak), ".\n");
- }
- }
-
- jettison();
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+ StructureStubInfo& stubInfo = **iter;
+ stubInfo.visitWeakReferences(this);
+ }
+#endif
+}
+
+void CodeBlock::UnconditionalFinalizer::finalizeUnconditionally()
+{
+ CodeBlock* codeBlock = bitwise_cast<CodeBlock*>(
+ bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_unconditionalFinalizer));
+
+#if ENABLE(DFG_JIT)
+ if (codeBlock->shouldJettisonDueToWeakReference()) {
+ codeBlock->jettison(Profiler::JettisonDueToWeakReference);
return;
}
#endif // ENABLE(DFG_JIT)
-#if ENABLE(JIT)
- // Handle inline caches.
- if (!!jitCode()) {
- RepatchBuffer repatchBuffer(this);
- for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) {
- if (callLinkInfo(i).isLinked()) {
- if (ClosureCallStubRoutine* stub = callLinkInfo(i).stub.get()) {
- if (!Heap::isMarked(stub->structure())
- || !Heap::isMarked(stub->executable())) {
- if (Options::verboseOSR()) {
- dataLog(
- "Clearing closure call from ", *this, " to ",
- stub->executable()->hashFor(callLinkInfo(i).specializationKind()),
- ", stub routine ", RawPointer(stub), ".\n");
- }
- callLinkInfo(i).unlink(*m_vm, repatchBuffer);
- }
- } else if (!Heap::isMarked(callLinkInfo(i).callee.get())) {
- if (Options::verboseOSR()) {
- dataLog(
- "Clearing call from ", *this, " to ",
- RawPointer(callLinkInfo(i).callee.get()), " (",
- callLinkInfo(i).callee.get()->executable()->hashFor(
- callLinkInfo(i).specializationKind()),
- ").\n");
- }
- callLinkInfo(i).unlink(*m_vm, repatchBuffer);
- }
- }
- if (!!callLinkInfo(i).lastSeenCallee
- && !Heap::isMarked(callLinkInfo(i).lastSeenCallee.get()))
- callLinkInfo(i).lastSeenCallee.clear();
- }
- for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
- StructureStubInfo& stubInfo = **iter;
-
- if (stubInfo.visitWeakReferences())
- continue;
-
- resetStubDuringGCInternal(repatchBuffer, stubInfo);
- }
+ if (codeBlock->shouldJettisonDueToOldAge()) {
+ codeBlock->jettison(Profiler::JettisonDueToOldAge);
+ return;
}
+
+ if (JITCode::couldBeInterpreted(codeBlock->jitType()))
+ codeBlock->finalizeLLIntInlineCaches();
+
+#if ENABLE(JIT)
+ if (!!codeBlock->jitCode())
+ codeBlock->finalizeBaselineJITInlineCaches();
#endif
}
+void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
+{
#if ENABLE(JIT)
-StructureStubInfo* CodeBlock::addStubInfo()
+ toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
+#else
+ UNUSED_PARAM(result);
+#endif
+}
+
+void CodeBlock::getStubInfoMap(StubInfoMap& result)
{
ConcurrentJITLocker locker(m_lock);
- return m_stubInfos.add();
+ getStubInfoMap(locker, result);
}
-void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
+void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result)
{
- toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
+#if ENABLE(JIT)
+ toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
+#else
+ UNUSED_PARAM(result);
+#endif
}
-void CodeBlock::resetStub(StructureStubInfo& stubInfo)
+void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
{
- if (stubInfo.accessType == access_unset)
- return;
-
ConcurrentJITLocker locker(m_lock);
-
- RepatchBuffer repatchBuffer(this);
- resetStubInternal(repatchBuffer, stubInfo);
+ getCallLinkInfoMap(locker, result);
}
-void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+void CodeBlock::getByValInfoMap(const ConcurrentJITLocker&, ByValInfoMap& result)
{
- AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
-
- if (Options::verboseOSR()) {
- // This can be called from GC destructor calls, so we don't try to do a full dump
- // of the CodeBlock.
- dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
- }
-
- RELEASE_ASSERT(JITCode::isJIT(jitType()));
-
- if (isGetByIdAccess(accessType))
- resetGetByID(repatchBuffer, stubInfo);
- else if (isPutByIdAccess(accessType))
- resetPutByID(repatchBuffer, stubInfo);
- else {
- RELEASE_ASSERT(isInAccess(accessType));
- resetIn(repatchBuffer, stubInfo);
+#if ENABLE(JIT)
+ for (auto* byValInfo : m_byValInfos)
+ result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
+#else
+ UNUSED_PARAM(result);
+#endif
+}
+
+void CodeBlock::getByValInfoMap(ByValInfoMap& result)
+{
+ ConcurrentJITLocker locker(m_lock);
+ getByValInfoMap(locker, result);
+}
+
+#if ENABLE(JIT)
+StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
+{
+ ConcurrentJITLocker locker(m_lock);
+ return m_stubInfos.add(accessType);
+}
+
+StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
+{
+ for (StructureStubInfo* stubInfo : m_stubInfos) {
+ if (stubInfo->codeOrigin == codeOrigin)
+ return stubInfo;
}
-
- stubInfo.reset();
+ return nullptr;
+}
+
+ByValInfo* CodeBlock::addByValInfo()
+{
+ ConcurrentJITLocker locker(m_lock);
+ return m_byValInfos.add();
+}
+
+CallLinkInfo* CodeBlock::addCallLinkInfo()
+{
+ ConcurrentJITLocker locker(m_lock);
+ return m_callLinkInfos.add();
}
-void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
{
- resetStubInternal(repatchBuffer, stubInfo);
- stubInfo.resetByGC = true;
+ for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
+ if ((*iter)->codeOrigin() == CodeOrigin(index))
+ return *iter;
+ }
+ return nullptr;
}
#endif
+void CodeBlock::visitOSRExitTargets(SlotVisitor& visitor)
+{
+ // We strongly visit OSR exits targets because we don't want to deal with
+ // the complexity of generating an exit target CodeBlock on demand and
+ // guaranteeing that it matches the details of the CodeBlock we compiled
+ // the OSR exit against.
+
+ visitor.append(&m_alternative);
+
+#if ENABLE(DFG_JIT)
+ DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+ if (dfgCommon->inlineCallFrames) {
+ for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
+ ASSERT(inlineCallFrame->baselineCodeBlock);
+ visitor.append(&inlineCallFrame->baselineCodeBlock);
+ }
+ }
+#endif
+}
+
void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
{
visitor.append(&m_globalObject);
visitor.append(&m_ownerExecutable);
- visitor.append(&m_symbolTable);
visitor.append(&m_unlinkedCode);
if (m_rareData)
m_rareData->m_evalCodeCache.visitAggregate(visitor);
@@ -2404,6 +2964,11 @@ void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
m_objectAllocationProfiles[i].visitAggregate(visitor);
+#if ENABLE(DFG_JIT)
+ if (JITCode::isOptimizingJIT(jitType()))
+ visitOSRExitTargets(visitor);
+#endif
+
updateAllPredictions();
}
@@ -2426,6 +2991,11 @@ void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
visitor.append(&dfgCommon->weakReferences[i]);
+
+ for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i)
+ visitor.append(&dfgCommon->weakStructureReferences[i]);
+
+ dfgCommon->livenessHasBeenProved = true;
#endif
}
@@ -2474,87 +3044,70 @@ bool CodeBlock::hasOptimizedReplacement()
}
#endif
-bool CodeBlock::isCaptured(VirtualRegister operand, InlineCallFrame* inlineCallFrame) const
+HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
{
- if (operand.isArgument())
- return operand.toArgument() && usesArguments();
-
- if (inlineCallFrame)
- return inlineCallFrame->capturedVars.get(operand.toLocal());
-
- // The activation object isn't in the captured region, but it's "captured"
- // in the sense that stores to its location can be observed indirectly.
- if (needsActivation() && operand == activationRegister())
- return true;
-
- // Ditto for the arguments object.
- if (usesArguments() && operand == argumentsRegister())
- return true;
-
- // Ditto for the arguments object.
- if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
- return true;
-
- // We're in global code so there are no locals to capture
- if (!symbolTable())
- return false;
-
- return symbolTable()->isCaptured(operand.offset());
+ RELEASE_ASSERT(bytecodeOffset < instructions().size());
+ return handlerForIndex(bytecodeOffset, requiredHandler);
}
-int CodeBlock::framePointerOffsetToGetActivationRegisters(int machineCaptureStart)
+HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
{
- // We'll be adding this to the stack pointer to get a registers pointer that looks
- // like it would have looked in the baseline engine. For example, if bytecode would
- // have put the first captured variable at offset -5 but we put it at offset -1, then
- // we'll have an offset of 4.
- int32_t offset = 0;
-
- // Compute where we put the captured variables. This offset will point the registers
- // pointer directly at the first captured var.
- offset += machineCaptureStart;
-
- // Now compute the offset needed to make the runtime see the captured variables at the
- // same offset that the bytecode would have used.
- offset -= symbolTable()->captureStart();
+ if (!m_rareData)
+ return 0;
- return offset;
+ Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
+ for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
+ HandlerInfo& handler = exceptionHandlers[i];
+ if ((requiredHandler == RequiredHandler::CatchHandler) && !handler.isCatchHandler())
+ continue;
+
+ // Handlers are ordered innermost first, so the first handler we encounter
+ // that contains the source address is the correct handler to use.
+ // This index used is either the BytecodeOffset or a CallSiteIndex.
+ if (handler.start <= index && handler.end > index)
+ return &handler;
+ }
+
+ return 0;
}
-int CodeBlock::framePointerOffsetToGetActivationRegisters()
+CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
{
- if (!JITCode::isOptimizingJIT(jitType()))
- return 0;
#if ENABLE(DFG_JIT)
- return framePointerOffsetToGetActivationRegisters(jitCode()->dfgCommon()->machineCaptureStart);
+ RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
+ RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
+ ASSERT(!!handlerForIndex(originalCallSite.bits()));
+ CodeOrigin originalOrigin = codeOrigin(originalCallSite);
+ return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
#else
+ // We never create new on-the-fly exception handling
+ // call sites outside the DFG/FTL inline caches.
+ UNUSED_PARAM(originalCallSite);
RELEASE_ASSERT_NOT_REACHED();
- return 0;
+ return CallSiteIndex(0u);
#endif
}
-HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
+void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
{
- RELEASE_ASSERT(bytecodeOffset < instructions().size());
-
- if (!m_rareData)
- return 0;
-
+ RELEASE_ASSERT(m_rareData);
Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
+ unsigned index = callSiteIndex.bits();
for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
- // Handlers are ordered innermost first, so the first handler we encounter
- // that contains the source address is the correct handler to use.
- if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end > bytecodeOffset)
- return &exceptionHandlers[i];
+ HandlerInfo& handler = exceptionHandlers[i];
+ if (handler.start <= index && handler.end > index) {
+ exceptionHandlers.remove(i);
+ return;
+ }
}
- return 0;
+ RELEASE_ASSERT_NOT_REACHED();
}
unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
{
RELEASE_ASSERT(bytecodeOffset < instructions().size());
- return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
+ return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
}
unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
@@ -2573,7 +3126,7 @@ void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& d
m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
divot += m_sourceOffset;
column += line ? 1 : firstLineColumnOffset();
- line += m_ownerExecutable->lineNo();
+ line += ownerScriptExecutable()->firstLine();
}
bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
@@ -2600,187 +3153,177 @@ bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
{
m_rareCaseProfiles.shrinkToFit();
- m_specialFastCaseProfiles.shrinkToFit();
+ m_resultProfiles.shrinkToFit();
if (shrinkMode == EarlyShrink) {
m_constantRegisters.shrinkToFit();
+ m_constantsSourceCodeRepresentation.shrinkToFit();
if (m_rareData) {
m_rareData->m_switchJumpTables.shrinkToFit();
m_rareData->m_stringSwitchJumpTables.shrinkToFit();
+ m_rareData->m_liveCalleeLocalsAtYield.shrinkToFit();
}
} // else don't shrink these, because we would have already pointed pointers into these tables.
}
-unsigned CodeBlock::addOrFindConstant(JSValue v)
-{
- unsigned result;
- if (findConstant(v, result))
- return result;
- return addConstant(v);
-}
-
-bool CodeBlock::findConstant(JSValue v, unsigned& index)
-{
- unsigned numberOfConstants = numberOfConstantRegisters();
- for (unsigned i = 0; i < numberOfConstants; ++i) {
- if (getConstant(FirstConstantRegisterIndex + i) == v) {
- index = i;
- return true;
- }
- }
- index = numberOfConstants;
- return false;
-}
-
#if ENABLE(JIT)
-void CodeBlock::unlinkCalls()
+void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
{
- if (!!m_alternative)
- m_alternative->unlinkCalls();
-#if ENABLE(LLINT)
- for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
- if (m_llintCallLinkInfos[i].isLinked())
- m_llintCallLinkInfos[i].unlink();
- }
-#endif
- if (!m_callLinkInfos.size())
- return;
- if (!m_vm->canUseJIT())
- return;
- RepatchBuffer repatchBuffer(this);
- for (size_t i = 0; i < m_callLinkInfos.size(); i++) {
- if (!m_callLinkInfos[i].isLinked())
- continue;
- m_callLinkInfos[i].unlink(*m_vm, repatchBuffer);
- }
+ noticeIncomingCall(callerFrame);
+ m_incomingCalls.push(incoming);
}
-void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
+void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
{
noticeIncomingCall(callerFrame);
- m_incomingCalls.push(incoming);
+ m_incomingPolymorphicCalls.push(incoming);
}
#endif // ENABLE(JIT)
void CodeBlock::unlinkIncomingCalls()
{
-#if ENABLE(LLINT)
while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
m_incomingLLIntCalls.begin()->unlink();
-#endif // ENABLE(LLINT)
#if ENABLE(JIT)
- if (m_incomingCalls.isEmpty())
- return;
- RepatchBuffer repatchBuffer(this);
while (m_incomingCalls.begin() != m_incomingCalls.end())
- m_incomingCalls.begin()->unlink(*m_vm, repatchBuffer);
+ m_incomingCalls.begin()->unlink(*vm());
+ while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
+ m_incomingPolymorphicCalls.begin()->unlink(*vm());
#endif // ENABLE(JIT)
}
-#if ENABLE(LLINT)
void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
{
noticeIncomingCall(callerFrame);
m_incomingLLIntCalls.push(incoming);
}
-#endif // ENABLE(LLINT)
-void CodeBlock::clearEvalCache()
+CodeBlock* CodeBlock::newReplacement()
{
- if (!!m_alternative)
- m_alternative->clearEvalCache();
- if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
- otherBlock->clearEvalCache();
- if (!m_rareData)
- return;
- m_rareData->m_evalCodeCache.clear();
+ return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
}
-void CodeBlock::install()
+#if ENABLE(JIT)
+CodeBlock* CodeBlock::replacement()
{
- ownerExecutable()->installCode(this);
-}
+ const ClassInfo* classInfo = this->classInfo();
-PassRefPtr<CodeBlock> CodeBlock::newReplacement()
-{
- return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
-}
+ if (classInfo == FunctionCodeBlock::info())
+ return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
-const SlowArgument* CodeBlock::machineSlowArguments()
-{
- if (!JITCode::isOptimizingJIT(jitType()))
- return symbolTable()->slowArguments();
-
-#if ENABLE(DFG_JIT)
- return jitCode()->dfgCommon()->slowArguments.get();
-#else // ENABLE(DFG_JIT)
- return 0;
-#endif // ENABLE(DFG_JIT)
-}
+ if (classInfo == EvalCodeBlock::info())
+ return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
-#if ENABLE(JIT)
-CodeBlock* ProgramCodeBlock::replacement()
-{
- return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
-}
+ if (classInfo == ProgramCodeBlock::info())
+ return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
-CodeBlock* EvalCodeBlock::replacement()
-{
- return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
-}
+ if (classInfo == ModuleProgramCodeBlock::info())
+ return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
-CodeBlock* FunctionCodeBlock::replacement()
-{
- return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
-}
+#if ENABLE(WEBASSEMBLY)
+ if (classInfo == WebAssemblyCodeBlock::info())
+ return nullptr;
+#endif
-DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
-{
- return DFG::programCapabilityLevel(this);
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
}
-DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
+DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
{
- return DFG::evalCapabilityLevel(this);
+ const ClassInfo* classInfo = this->classInfo();
+
+ if (classInfo == FunctionCodeBlock::info()) {
+ if (m_isConstructor)
+ return DFG::functionForConstructCapabilityLevel(this);
+ return DFG::functionForCallCapabilityLevel(this);
+ }
+
+ if (classInfo == EvalCodeBlock::info())
+ return DFG::evalCapabilityLevel(this);
+
+ if (classInfo == ProgramCodeBlock::info())
+ return DFG::programCapabilityLevel(this);
+
+ if (classInfo == ModuleProgramCodeBlock::info())
+ return DFG::programCapabilityLevel(this);
+
+#if ENABLE(WEBASSEMBLY)
+ if (classInfo == WebAssemblyCodeBlock::info())
+ return DFG::CannotCompile;
+#endif
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return DFG::CannotCompile;
}
-DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
+#endif // ENABLE(JIT)
+
+void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
{
- if (m_isConstructor)
- return DFG::functionForConstructCapabilityLevel(this);
- return DFG::functionForCallCapabilityLevel(this);
-}
+#if !ENABLE(DFG_JIT)
+ UNUSED_PARAM(mode);
+ UNUSED_PARAM(detail);
#endif
-void CodeBlock::jettison(ReoptimizationMode mode)
-{
+ RELEASE_ASSERT(reason != Profiler::NotJettisoned);
+
#if ENABLE(DFG_JIT)
- if (DFG::shouldShowDisassembly()) {
+ if (DFG::shouldDumpDisassembly()) {
dataLog("Jettisoning ", *this);
if (mode == CountReoptimization)
dataLog(" and counting reoptimization");
+ dataLog(" due to ", reason);
+ if (detail)
+ dataLog(", ", *detail);
dataLog(".\n");
}
- DeferGCForAWhile deferGC(*m_heap);
- RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
+ if (reason == Profiler::JettisonDueToWeakReference) {
+ if (DFG::shouldDumpDisassembly()) {
+ dataLog(*this, " will be jettisoned because of the following dead references:\n");
+ DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+ for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
+ DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
+ JSCell* origin = transition.m_codeOrigin.get();
+ JSCell* from = transition.m_from.get();
+ JSCell* to = transition.m_to.get();
+ if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
+ continue;
+ dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
+ }
+ for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
+ JSCell* weak = dfgCommon->weakReferences[i].get();
+ if (Heap::isMarked(weak))
+ continue;
+ dataLog(" Weak reference ", RawPointer(weak), ".\n");
+ }
+ }
+ }
+#endif // ENABLE(DFG_JIT)
+
+ DeferGCForAWhile deferGC(*heap());
// We want to accomplish two things here:
// 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
// we should OSR exit at the top of the next bytecode instruction after the return.
// 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
-
- // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
- // whether the invalidation has already happened.
- if (!jitCode()->dfgCommon()->invalidate()) {
- // Nothing to do since we've already been invalidated. That means that we cannot be
- // the optimized replacement.
- RELEASE_ASSERT(this != replacement());
- return;
+
+#if ENABLE(DFG_JIT)
+ if (reason != Profiler::JettisonDueToOldAge) {
+ if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
+ compilation->setJettisonReason(reason, detail);
+
+ // This accomplishes (1), and does its own book-keeping about whether it has already happened.
+ if (!jitCode()->dfgCommon()->invalidate()) {
+ // We've already been invalidated.
+ RELEASE_ASSERT(this != replacement());
+ return;
+ }
}
- if (DFG::shouldShowDisassembly())
+ if (DFG::shouldDumpDisassembly())
dataLog(" Did invalidate ", *this, "\n");
// Count the reoptimization if that's what the user wanted.
@@ -2788,24 +3331,30 @@ void CodeBlock::jettison(ReoptimizationMode mode)
// FIXME: Maybe this should call alternative().
// https://bugs.webkit.org/show_bug.cgi?id=123677
baselineAlternative()->countReoptimization();
- if (DFG::shouldShowDisassembly())
+ if (DFG::shouldDumpDisassembly())
dataLog(" Did count reoptimization for ", *this, "\n");
}
- // Now take care of the entrypoint.
if (this != replacement()) {
// This means that we were never the entrypoint. This can happen for OSR entry code
// blocks.
return;
}
- alternative()->optimizeAfterWarmUp();
- tallyFrequentExitSites();
- alternative()->install();
- if (DFG::shouldShowDisassembly())
+
+ if (alternative())
+ alternative()->optimizeAfterWarmUp();
+
+ if (reason != Profiler::JettisonDueToOldAge)
+ tallyFrequentExitSites();
+#endif // ENABLE(DFG_JIT)
+
+ // This accomplishes (2).
+ ownerScriptExecutable()->installCode(
+ m_globalObject->vm(), alternative(), codeType(), specializationKind());
+
+#if ENABLE(DFG_JIT)
+ if (DFG::shouldDumpDisassembly())
dataLog(" Did install baseline version of ", *this, "\n");
-#else // ENABLE(DFG_JIT)
- UNUSED_PARAM(mode);
- UNREACHABLE_FOR_PLATFORM();
#endif // ENABLE(DFG_JIT)
}
@@ -2813,28 +3362,82 @@ JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
{
if (!codeOrigin.inlineCallFrame)
return globalObject();
- return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
+ return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
}
+class RecursionCheckFunctor {
+public:
+ RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
+ : m_startCallFrame(startCallFrame)
+ , m_codeBlock(codeBlock)
+ , m_depthToCheck(depthToCheck)
+ , m_foundStartCallFrame(false)
+ , m_didRecurse(false)
+ { }
+
+ StackVisitor::Status operator()(StackVisitor& visitor)
+ {
+ CallFrame* currentCallFrame = visitor->callFrame();
+
+ if (currentCallFrame == m_startCallFrame)
+ m_foundStartCallFrame = true;
+
+ if (m_foundStartCallFrame) {
+ if (visitor->callFrame()->codeBlock() == m_codeBlock) {
+ m_didRecurse = true;
+ return StackVisitor::Done;
+ }
+
+ if (!m_depthToCheck--)
+ return StackVisitor::Done;
+ }
+
+ return StackVisitor::Continue;
+ }
+
+ bool didRecurse() const { return m_didRecurse; }
+
+private:
+ CallFrame* m_startCallFrame;
+ CodeBlock* m_codeBlock;
+ unsigned m_depthToCheck;
+ bool m_foundStartCallFrame;
+ bool m_didRecurse;
+};
+
void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
{
CodeBlock* callerCodeBlock = callerFrame->codeBlock();
if (Options::verboseCallLink())
- dataLog("Noticing call link from ", *callerCodeBlock, " to ", *this, "\n");
+ dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
+#if ENABLE(DFG_JIT)
if (!m_shouldAlwaysBeInlined)
return;
+
+ if (!callerCodeBlock) {
+ m_shouldAlwaysBeInlined = false;
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because caller is native.\n");
+ return;
+ }
-#if ENABLE(DFG_JIT)
if (!hasBaselineJITProfiling())
return;
if (!DFG::mightInlineFunction(this))
return;
- if (!canInline(m_capabilityLevelState))
+ if (!canInline(capabilityLevelState()))
+ return;
+
+ if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
+ m_shouldAlwaysBeInlined = false;
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because caller is too large.\n");
return;
+ }
if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
// If the caller is still in the interpreter, then we can't expect inlining to
@@ -2843,7 +3446,14 @@ void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
// any of its callers.
m_shouldAlwaysBeInlined = false;
if (Options::verboseCallLink())
- dataLog(" Marking SABI because caller is in LLInt.\n");
+ dataLog(" Clearing SABI because caller is in LLInt.\n");
+ return;
+ }
+
+ if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
+ m_shouldAlwaysBeInlined = false;
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI bcause caller was already optimized.\n");
return;
}
@@ -2853,40 +3463,72 @@ void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
// delay eval optimization by a *lot*.
m_shouldAlwaysBeInlined = false;
if (Options::verboseCallLink())
- dataLog(" Marking SABI because caller is not a function.\n");
+ dataLog(" Clearing SABI because caller is not a function.\n");
return;
}
-
- ExecState* frame = callerFrame;
- for (unsigned i = Options::maximumInliningDepth(); i--; frame = frame->callerFrame()) {
- if (frame->isVMEntrySentinel())
- break;
- if (frame->codeBlock() == this) {
- // Recursive calls won't be inlined.
- if (Options::verboseCallLink())
- dataLog(" Marking SABI because recursion was detected.\n");
- m_shouldAlwaysBeInlined = false;
- return;
- }
+
+ // Recursive calls won't be inlined.
+ RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
+ vm()->topCallFrame->iterate(functor);
+
+ if (functor.didRecurse()) {
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because recursion was detected.\n");
+ m_shouldAlwaysBeInlined = false;
+ return;
}
- RELEASE_ASSERT(callerCodeBlock->m_capabilityLevelState != DFG::CapabilityLevelNotSet);
+ if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
+ dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n");
+ CRASH();
+ }
- if (canCompile(callerCodeBlock->m_capabilityLevelState))
+ if (canCompile(callerCodeBlock->capabilityLevelState()))
return;
if (Options::verboseCallLink())
- dataLog(" Marking SABI because the caller is not a DFG candidate.\n");
+ dataLog(" Clearing SABI because the caller is not a DFG candidate.\n");
m_shouldAlwaysBeInlined = false;
#endif
}
-#if ENABLE(JIT)
unsigned CodeBlock::reoptimizationRetryCounter() const
{
+#if ENABLE(JIT)
ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
return m_reoptimizationRetryCounter;
+#else
+ return 0;
+#endif // ENABLE(JIT)
+}
+
+#if ENABLE(JIT)
+void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
+{
+ m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
+}
+
+void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
+{
+ m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
+}
+
+static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
+{
+ static const unsigned cpuRegisterSize = sizeof(void*);
+ return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register));
+
+}
+
+size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
+{
+ return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
+}
+
+size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
+{
+ return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
}
void CodeBlock::countReoptimization()
@@ -2899,6 +3541,11 @@ void CodeBlock::countReoptimization()
unsigned CodeBlock::numberOfDFGCompiles()
{
ASSERT(JITCode::isBaselineCode(jitType()));
+ if (Options::testTheFTL()) {
+ if (m_didFailFTLCompilation)
+ return 1000000;
+ return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
+ }
return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
}
@@ -2979,13 +3626,16 @@ double CodeBlock::optimizationThresholdScalingFactor()
ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
+
+ result *= codeTypeThresholdMultiplier();
+
if (Options::verboseOSR()) {
dataLog(
*this, ": instruction count is ", instructionCount,
", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
"\n");
}
- return result * codeTypeThresholdMultiplier();
+ return result;
}
static int32_t clipThreshold(double threshold)
@@ -3010,7 +3660,7 @@ int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
bool CodeBlock::checkIfOptimizationThresholdReached()
{
#if ENABLE(DFG_JIT)
- if (DFG::Worklist* worklist = m_vm->worklist.get()) {
+ if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
== DFG::Worklist::Compiled) {
optimizeNextInvocation();
@@ -3076,8 +3726,22 @@ void CodeBlock::forceOptimizationSlowPathConcurrently()
#if ENABLE(DFG_JIT)
void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
{
- RELEASE_ASSERT(jitType() == JITCode::BaselineJIT);
- RELEASE_ASSERT((result == CompilationSuccessful) == (replacement() != this));
+ JITCode::JITType type = jitType();
+ if (type != JITCode::BaselineJIT) {
+ dataLog(*this, ": expected to have baseline code but have ", type, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ CodeBlock* theReplacement = replacement();
+ if ((result == CompilationSuccessful) != (theReplacement != this)) {
+ dataLog(*this, ": we have result = ", result, " but ");
+ if (theReplacement == this)
+ dataLog("we are our own replacement.\n");
+ else
+ dataLog("our replacement is ", pointerDump(theReplacement), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
switch (result) {
case CompilationSuccessful:
RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
@@ -3100,6 +3764,8 @@ void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResu
optimizeAfterWarmUp();
return;
}
+
+ dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
RELEASE_ASSERT_NOT_REACHED();
}
@@ -3158,6 +3824,30 @@ ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
return addArrayProfile(bytecodeOffset);
}
+#if ENABLE(DFG_JIT)
+Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
+{
+ return m_jitCode->dfgCommon()->codeOrigins;
+}
+
+size_t CodeBlock::numberOfDFGIdentifiers() const
+{
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return 0;
+
+ return m_jitCode->dfgCommon()->dfgIdentifiers.size();
+}
+
+const Identifier& CodeBlock::identifier(int index) const
+{
+ size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
+ if (static_cast<unsigned>(index) < unlinkedIdentifiers)
+ return m_unlinkedCode->identifier(index);
+ ASSERT(JITCode::isOptimizingJIT(jitType()));
+ return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
+}
+#endif // ENABLE(DFG_JIT)
+
void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
{
ConcurrentJITLocker locker(m_lock);
@@ -3204,6 +3894,10 @@ void CodeBlock::updateAllArrayPredictions()
void CodeBlock::updateAllPredictions()
{
+#if ENABLE(WEBASSEMBLY)
+ if (m_ownerExecutable->isWebAssemblyExecutable())
+ return;
+#endif
updateAllValueProfilePredictions();
updateAllArrayPredictions();
}
@@ -3255,9 +3949,7 @@ void CodeBlock::tallyFrequentExitSites()
DFG::JITCode* jitCode = m_jitCode->dfg();
for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
DFG::OSRExit& exit = jitCode->osrExit[i];
-
- if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
- continue;
+ exit.considerAddingAsFrequentExitSite(profiledBlock);
}
break;
}
@@ -3270,9 +3962,7 @@ void CodeBlock::tallyFrequentExitSites()
FTL::JITCode* jitCode = m_jitCode->ftl();
for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
FTL::OSRExit& exit = jitCode->osrExit[i];
-
- if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
- continue;
+ exit.considerAddingAsFrequentExitSite(profiledBlock);
}
break;
}
@@ -3308,10 +3998,10 @@ void CodeBlock::dumpValueProfiles()
RareCaseProfile* profile = rareCaseProfile(i);
dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
}
- dataLog("SpecialFastCaseProfile for ", *this, ":\n");
- for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
- RareCaseProfile* profile = specialFastCaseProfile(i);
- dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
+ dataLog("ResultProfile for ", *this, ":\n");
+ for (unsigned i = 0; i < numberOfResultProfiles(); ++i) {
+ const ResultProfile& profile = *resultProfile(i);
+ dataLog(" bc = ", profile.bytecodeOffset(), ": ", profile, "\n");
}
}
#endif // ENABLE(VERBOSE_VALUE_PROFILE)
@@ -3319,10 +4009,8 @@ void CodeBlock::dumpValueProfiles()
unsigned CodeBlock::frameRegisterCount()
{
switch (jitType()) {
-#if ENABLE(LLINT)
case JITCode::InterpreterThunk:
return LLInt::frameRegisterCountFor(this);
-#endif // ENABLE(LLINT)
#if ENABLE(JIT)
case JITCode::BaselineJIT:
@@ -3341,6 +4029,11 @@ unsigned CodeBlock::frameRegisterCount()
}
}
+int CodeBlock::stackPointerOffset()
+{
+ return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
+}
+
size_t CodeBlock::predictedMachineCodeSize()
{
// This will be called from CodeBlock::CodeBlock before either m_vm or the
@@ -3400,72 +4093,40 @@ bool CodeBlock::usesOpcode(OpcodeID opcodeID)
String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
{
- ConcurrentJITLocker locker(symbolTable()->m_lock);
- SymbolTable::Map::iterator end = symbolTable()->end(locker);
- for (SymbolTable::Map::iterator ptr = symbolTable()->begin(locker); ptr != end; ++ptr) {
- if (ptr->value.getIndex() == virtualRegister.offset()) {
- // FIXME: This won't work from the compilation thread.
- // https://bugs.webkit.org/show_bug.cgi?id=115300
- return String(ptr->key);
+ for (unsigned i = 0; i < m_constantRegisters.size(); i++) {
+ if (m_constantRegisters[i].get().isEmpty())
+ continue;
+ if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(m_constantRegisters[i].get())) {
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ auto end = symbolTable->end(locker);
+ for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
+ if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
+ // FIXME: This won't work from the compilation thread.
+ // https://bugs.webkit.org/show_bug.cgi?id=115300
+ return ptr->key.get();
+ }
+ }
}
}
- if (needsActivation() && virtualRegister == activationRegister())
- return ASCIILiteral("activation");
if (virtualRegister == thisRegister())
return ASCIILiteral("this");
- if (usesArguments()) {
- if (virtualRegister == argumentsRegister())
- return ASCIILiteral("arguments");
- if (unmodifiedArgumentsRegister(argumentsRegister()) == virtualRegister)
- return ASCIILiteral("real arguments");
- }
if (virtualRegister.isArgument())
- return String::format("arguments[%3d]", virtualRegister.toArgument()).impl();
+ return String::format("arguments[%3d]", virtualRegister.toArgument());
return "";
}
-namespace {
-
-struct VerifyCapturedDef {
- void operator()(CodeBlock* codeBlock, Instruction* instruction, OpcodeID opcodeID, int operand)
- {
- unsigned bytecodeOffset = instruction - codeBlock->instructions().begin();
-
- if (codeBlock->isConstantRegisterIndex(operand)) {
- codeBlock->beginValidationDidFail();
- dataLog(" At bc#", bytecodeOffset, " encountered a definition of a constant.\n");
- codeBlock->endValidationDidFail();
- return;
- }
-
- switch (opcodeID) {
- case op_enter:
- case op_captured_mov:
- case op_init_lazy_reg:
- case op_create_arguments:
- case op_new_captured_func:
- return;
- default:
- break;
- }
-
- VirtualRegister virtualReg(operand);
- if (!virtualReg.isLocal())
- return;
-
- if (codeBlock->captureCount() && codeBlock->symbolTable()->isCaptured(operand)) {
- codeBlock->beginValidationDidFail();
- dataLog(" At bc#", bytecodeOffset, " encountered invalid assignment to captured variable loc", virtualReg.toLocal(), ".\n");
- codeBlock->endValidationDidFail();
- return;
- }
-
- return;
- }
-};
-
-} // anonymous namespace
+ValueProfile* CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
+{
+ ValueProfile* result = binarySearch<ValueProfile, int>(
+ m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
+ getValueProfileBytecodeOffset<ValueProfile>);
+ ASSERT(result->m_bytecodeOffset != -1);
+ ASSERT(instructions()[bytecodeOffset + opcodeLength(
+ m_vm->interpreter->getOpcodeID(
+ instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
+ return result;
+}
void CodeBlock::validate()
{
@@ -3473,7 +4134,7 @@ void CodeBlock::validate()
FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0);
- if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeRegisters)) {
+ if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
beginValidationDidFail();
dataLog(" Wrong number of bits in result!\n");
dataLog(" Result: ", liveAtHead, "\n");
@@ -3481,39 +4142,16 @@ void CodeBlock::validate()
endValidationDidFail();
}
- for (unsigned i = m_numCalleeRegisters; i--;) {
- bool isCaptured = false;
+ for (unsigned i = m_numCalleeLocals; i--;) {
VirtualRegister reg = virtualRegisterForLocal(i);
- if (captureCount())
- isCaptured = reg.offset() <= captureStart() && reg.offset() > captureEnd();
-
- if (isCaptured) {
- if (!liveAtHead.get(i)) {
- beginValidationDidFail();
- dataLog(" Variable loc", i, " is expected to be live because it is captured, but it isn't live.\n");
- dataLog(" Result: ", liveAtHead, "\n");
- endValidationDidFail();
- }
- } else {
- if (liveAtHead.get(i)) {
- beginValidationDidFail();
- dataLog(" Variable loc", i, " is expected to be dead.\n");
- dataLog(" Result: ", liveAtHead, "\n");
- endValidationDidFail();
- }
+ if (liveAtHead.get(i)) {
+ beginValidationDidFail();
+ dataLog(" Variable ", reg, " is expected to be dead.\n");
+ dataLog(" Result: ", liveAtHead, "\n");
+ endValidationDidFail();
}
}
-
- for (unsigned bytecodeOffset = 0; bytecodeOffset < instructions().size();) {
- Instruction* currentInstruction = instructions().begin() + bytecodeOffset;
- OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);
-
- VerifyCapturedDef verifyCapturedDef;
- computeDefsForBytecodeOffset(this, bytecodeOffset, verifyCapturedDef);
-
- bytecodeOffset += opcodeLength(opcodeID);
- }
}
void CodeBlock::beginValidationDidFail()
@@ -3535,15 +4173,144 @@ void CodeBlock::addBreakpoint(unsigned numBreakpoints)
{
m_numBreakpoints += numBreakpoints;
ASSERT(m_numBreakpoints);
- if (jitType() == JITCode::DFGJIT)
- jettison();
+ if (JITCode::isOptimizingJIT(jitType()))
+ jettison(Profiler::JettisonDueToDebuggerBreakpoint);
}
void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
{
m_steppingMode = mode;
- if (mode == SteppingModeEnabled && jitType() == JITCode::DFGJIT)
- jettison();
+ if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
+ jettison(Profiler::JettisonDueToDebuggerStepping);
+}
+
+RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
+{
+ return tryBinarySearch<RareCaseProfile, int>(
+ m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
+ getRareCaseProfileBytecodeOffset);
+}
+
+unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
+{
+ RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
+ if (profile)
+ return profile->m_counter;
+ return 0;
}
+ResultProfile* CodeBlock::resultProfileForBytecodeOffset(int bytecodeOffset)
+{
+ if (!m_bytecodeOffsetToResultProfileIndexMap)
+ return nullptr;
+ auto iterator = m_bytecodeOffsetToResultProfileIndexMap->find(bytecodeOffset);
+ if (iterator == m_bytecodeOffsetToResultProfileIndexMap->end())
+ return nullptr;
+ return &m_resultProfiles[iterator->value];
+}
+
+#if ENABLE(JIT)
+DFG::CapabilityLevel CodeBlock::capabilityLevel()
+{
+ DFG::CapabilityLevel result = computeCapabilityLevel();
+ m_capabilityLevelState = result;
+ return result;
+}
+#endif
+
+void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions)
+{
+ if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
+ return;
+ const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
+ for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
+ // Because op_profile_control_flow is emitted at the beginning of every basic block, finding
+ // the next op_profile_control_flow will give us the text range of a single basic block.
+ size_t startIdx = bytecodeOffsets[i];
+ RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[startIdx].u.opcode) == op_profile_control_flow);
+ int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
+ int basicBlockEndOffset;
+ if (i + 1 < offsetsLength) {
+ size_t endIdx = bytecodeOffsets[i + 1];
+ RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[endIdx].u.opcode) == op_profile_control_flow);
+ basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
+ } else {
+ basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
+ basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before.
+ }
+
+ // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
+ // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than
+ // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node
+ // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different
+ // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript
+ // program. The condition:
+ // (basicBlockEndOffset < basicBlockStartOffset)
+ // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic
+ // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These
+ // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same
+ // internal data structure, so if any of them execute, it will record the same textual basic block in the
+ // JavaScript program as executing.
+ // At the bytecode level, this situation looks like:
+ // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
+ // ...
+ // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
+ // ...
+ // m: op_profile_control_flow
+ if (basicBlockEndOffset < basicBlockStartOffset) {
+ RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
+ instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
+ continue;
+ }
+
+ BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
+
+ // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
+ // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
+ // This is necessary because in the original source text of a JavaScript program,
+ // function literals form new basic blocks boundaries, but they aren't represented
+ // inside the CodeBlock's instruction stream.
+ auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
+ const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
+ int functionStart = executable->typeProfilingStartOffset();
+ int functionEnd = executable->typeProfilingEndOffset();
+ if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
+ basicBlockLocation->insertGap(functionStart, functionEnd);
+ };
+
+ for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
+ insertFunctionGaps(executable);
+ for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
+ insertFunctionGaps(executable);
+
+ instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
+ }
+}
+
+#if ENABLE(JIT)
+void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map)
+{
+ m_pcToCodeOriginMap = WTFMove(map);
+}
+
+Optional<CodeOrigin> CodeBlock::findPC(void* pc)
+{
+ if (m_pcToCodeOriginMap) {
+ if (Optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
+ return codeOrigin;
+ }
+
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+ StructureStubInfo* stub = *iter;
+ if (stub->containsPC(pc))
+ return Optional<CodeOrigin>(stub->codeOrigin);
+ }
+
+ if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
+ return codeOrigin;
+
+ return Nullopt;
+}
+#endif // ENABLE(JIT)
+
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index 0d9868079..96cee40c7 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2015 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
*
* Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -38,34 +38,30 @@
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
#include "CodeBlockSet.h"
-#include "ConcurrentJITLock.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompactJITCodeMap.h"
+#include "ConcurrentJITLock.h"
#include "DFGCommon.h"
-#include "DFGCommonData.h"
#include "DFGExitProfile.h"
-#include "DFGMinifiedGraph.h"
-#include "DFGOSREntry.h"
-#include "DFGOSRExit.h"
-#include "DFGVariableEventStream.h"
#include "DeferredCompilationCallback.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "HandlerInfo.h"
-#include "ObjectAllocationProfile.h"
-#include "Options.h"
-#include "Operations.h"
-#include "PutPropertySlot.h"
#include "Instruction.h"
#include "JITCode.h"
#include "JITWriteBarrier.h"
+#include "JSCell.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
+#include "ObjectAllocationProfile.h"
+#include "Options.h"
#include "ProfilerCompilation.h"
+#include "ProfilerJettisonReason.h"
+#include "PutPropertySlot.h"
#include "RegExpObject.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
@@ -73,8 +69,8 @@
#include "VirtualRegister.h"
#include "Watchpoint.h"
#include <wtf/Bag.h>
+#include <wtf/FastBitVector.h>
#include <wtf/FastMalloc.h>
-#include <wtf/PassOwnPtr.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
@@ -85,31 +81,51 @@ namespace JSC {
class ExecState;
class LLIntOffsetsExtractor;
-class RepatchBuffer;
-
-inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }
-
-static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
+class RegisterAtOffsetList;
+class TypeLocation;
+class JSModuleEnvironment;
+class PCToCodeOriginMap;
enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };
-class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
- WTF_MAKE_FAST_ALLOCATED;
+class CodeBlock : public JSCell {
+ typedef JSCell Base;
friend class BytecodeLivenessAnalysis;
friend class JIT;
friend class LLIntOffsetsExtractor;
+
+ class UnconditionalFinalizer : public JSC::UnconditionalFinalizer {
+ virtual void finalizeUnconditionally() override;
+ };
+
+ class WeakReferenceHarvester : public JSC::WeakReferenceHarvester {
+ virtual void visitWeakReferences(SlotVisitor&) override;
+ };
+
public:
enum CopyParsedBlockTag { CopyParsedBlock };
+
+ static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+ DECLARE_INFO;
+
protected:
- CodeBlock(CopyParsedBlockTag, CodeBlock& other);
-
- CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);
+ CodeBlock(VM*, Structure*, CopyParsedBlockTag, CodeBlock& other);
+ CodeBlock(VM*, Structure*, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);
+#if ENABLE(WEBASSEMBLY)
+ CodeBlock(VM*, Structure*, WebAssemblyExecutable* ownerExecutable, JSGlobalObject*);
+#endif
+
+ void finishCreation(VM&, CopyParsedBlockTag, CodeBlock& other);
+ void finishCreation(VM&, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*);
+#if ENABLE(WEBASSEMBLY)
+ void finishCreation(VM&, WebAssemblyExecutable* ownerExecutable, JSGlobalObject*);
+#endif
WriteBarrier<JSGlobalObject> m_globalObject;
- Heap* m_heap;
public:
- JS_EXPORT_PRIVATE virtual ~CodeBlock();
+ JS_EXPORT_PRIVATE ~CodeBlock();
UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }
@@ -117,6 +133,7 @@ public:
CodeBlockHash hash() const;
bool hasHash() const;
bool isSafeToComputeHash() const;
+ CString hashAsStringIfPossible() const;
CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
@@ -125,28 +142,58 @@ public:
int numParameters() const { return m_numParameters; }
void setNumParameters(int newValue);
+ int numCalleeLocals() const { return m_numCalleeLocals; }
+
int* addressOfNumParameters() { return &m_numParameters; }
static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
- CodeBlock* alternative() { return m_alternative.get(); }
- PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
- void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
+ CodeBlock* alternative() const { return static_cast<CodeBlock*>(m_alternative.get()); }
+ void setAlternative(VM&, CodeBlock*);
+
+ template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
+ {
+ Functor f(std::forward<Functor>(functor));
+ Vector<CodeBlock*, 4> codeBlocks;
+ codeBlocks.append(this);
+
+ while (!codeBlocks.isEmpty()) {
+ CodeBlock* currentCodeBlock = codeBlocks.takeLast();
+ f(currentCodeBlock);
+
+ if (CodeBlock* alternative = currentCodeBlock->alternative())
+ codeBlocks.append(alternative);
+ if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
+ codeBlocks.append(osrEntryBlock);
+ }
+ }
CodeSpecializationKind specializationKind() const
{
return specializationFromIsConstruct(m_isConstructor);
}
-
- CodeBlock* baselineAlternative();
+
+ CodeBlock* alternativeForJettison();
+ JS_EXPORT_PRIVATE CodeBlock* baselineAlternative();
// FIXME: Get rid of this.
// https://bugs.webkit.org/show_bug.cgi?id=123677
CodeBlock* baselineVersion();
- void visitAggregate(SlotVisitor&);
-
- void dumpBytecode(PrintStream& = WTF::dataFile());
- void dumpBytecode(PrintStream&, unsigned bytecodeOffset);
+ static size_t estimatedSize(JSCell*);
+ static void visitChildren(JSCell*, SlotVisitor&);
+ void visitChildren(SlotVisitor&);
+ void visitWeakly(SlotVisitor&);
+ void clearVisitWeaklyHasBeenCalled();
+
+ void dumpSource();
+ void dumpSource(PrintStream&);
+
+ void dumpBytecode();
+ void dumpBytecode(PrintStream&);
+ void dumpBytecode(
+ PrintStream&, unsigned bytecodeOffset,
+ const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
+ void dumpExceptionHandlers(PrintStream&);
void printStructures(PrintStream&, const Instruction*);
void printStructure(PrintStream&, const char* name, const Instruction*, int operand);
@@ -169,72 +216,72 @@ public:
return index >= m_numVars;
}
- HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
+ enum class RequiredHandler {
+ CatchHandler,
+ AnyHandler
+ };
+ HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
+ HandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
+ void removeExceptionHandlerForCallSite(CallSiteIndex);
unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
int& startOffset, int& endOffset, unsigned& line, unsigned& column);
-#if ENABLE(JIT)
- StructureStubInfo* addStubInfo();
- Bag<StructureStubInfo>::iterator begin() { return m_stubInfos.begin(); }
- Bag<StructureStubInfo>::iterator end() { return m_stubInfos.end(); }
+ void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
+ void getStubInfoMap(StubInfoMap& result);
+
+ void getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result);
+ void getCallLinkInfoMap(CallLinkInfoMap& result);
- void resetStub(StructureStubInfo&);
+ void getByValInfoMap(const ConcurrentJITLocker&, ByValInfoMap& result);
+ void getByValInfoMap(ByValInfoMap& result);
- void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
+#if ENABLE(JIT)
+ StructureStubInfo* addStubInfo(AccessType);
+ Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); }
+ Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); }
+
+ // O(n) operation. Use getStubInfoMap() unless you really only intend to get one
+ // stub info.
+ StructureStubInfo* findStubInfo(CodeOrigin);
- ByValInfo& getByValInfo(unsigned bytecodeIndex)
- {
- return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
- }
+ ByValInfo* addByValInfo();
- CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
- {
- return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation));
- }
+ CallLinkInfo* addCallLinkInfo();
+ Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); }
+ Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); }
- CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
- {
- ASSERT(!JITCode::isOptimizingJIT(jitType()));
- return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex));
- }
+ // This is a slow function call used primarily for compiling OSR exits in the case
+ // that there had been inlining. Chances are if you want to use this, you're really
+ // looking for a CallLinkInfoMap to amortize the cost of calling this.
+ CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
#endif // ENABLE(JIT)
void unlinkIncomingCalls();
#if ENABLE(JIT)
- void unlinkCalls();
-
void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
-
- bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
- {
- return m_incomingCalls.isOnList(incoming);
- }
+ void linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode*);
#endif // ENABLE(JIT)
-#if ENABLE(LLINT)
void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
-#endif // ENABLE(LLINT)
- void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
+ void setJITCodeMap(std::unique_ptr<CompactJITCodeMap> jitCodeMap)
{
- m_jitCodeMap = jitCodeMap;
+ m_jitCodeMap = WTFMove(jitCodeMap);
}
CompactJITCodeMap* jitCodeMap()
{
return m_jitCodeMap.get();
}
-
+
unsigned bytecodeOffset(Instruction* returnAddress)
{
RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
return static_cast<Instruction*>(returnAddress) - instructions().begin();
}
- bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }
-
unsigned numberOfInstructions() const { return m_instructions.size(); }
RefCountedArray<Instruction>& instructions() { return m_instructions; }
const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
@@ -245,28 +292,19 @@ public:
unsigned instructionCount() const { return m_instructions.size(); }
- int argumentIndexAfterCapture(size_t argument);
-
- bool hasSlowArguments();
- const SlowArgument* machineSlowArguments();
-
- // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
- void install();
-
// Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
- PassRefPtr<CodeBlock> newReplacement();
+ CodeBlock* newReplacement();
- void setJITCode(PassRefPtr<JITCode> code, MacroAssemblerCodePtr codeWithArityCheck)
+ void setJITCode(PassRefPtr<JITCode> code)
{
- ASSERT(m_heap->isDeferred());
- m_heap->reportExtraMemoryCost(code->size());
+ ASSERT(heap()->isDeferred());
+ heap()->reportExtraMemoryAllocated(code->size());
ConcurrentJITLocker locker(m_lock);
WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
m_jitCode = code;
- m_jitCodeWithArityCheck = codeWithArityCheck;
}
PassRefPtr<JITCode> jitCode() { return m_jitCode; }
- MacroAssemblerCodePtr jitCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
+ static ptrdiff_t jitCodeOffset() { return OBJECT_OFFSETOF(CodeBlock, m_jitCode); }
JITCode::JITType jitType() const
{
JITCode* jitCode = m_jitCode.get();
@@ -282,24 +320,20 @@ public:
}
#if ENABLE(JIT)
- virtual CodeBlock* replacement() = 0;
+ CodeBlock* replacement();
- virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
- DFG::CapabilityLevel capabilityLevel()
- {
- DFG::CapabilityLevel result = capabilityLevelInternal();
- m_capabilityLevelState = result;
- return result;
- }
- DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }
+ DFG::CapabilityLevel computeCapabilityLevel();
+ DFG::CapabilityLevel capabilityLevel();
+ DFG::CapabilityLevel capabilityLevelState() { return static_cast<DFG::CapabilityLevel>(m_capabilityLevelState); }
bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif
- void jettison(ReoptimizationMode = DontCountReoptimization);
+ void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr);
- ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
+ ExecutableBase* ownerExecutable() const { return m_ownerExecutable.get(); }
+ ScriptExecutable* ownerScriptExecutable() const { return jsCast<ScriptExecutable*>(m_ownerExecutable.get()); }
void setVM(VM* vm) { m_vm = vm; }
VM* vm() { return m_vm; }
@@ -307,78 +341,24 @@ public:
void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
VirtualRegister thisRegister() const { return m_thisRegister; }
- bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
bool usesEval() const { return m_unlinkedCode->usesEval(); }
- void setArgumentsRegister(VirtualRegister argumentsRegister)
- {
- ASSERT(argumentsRegister.isValid());
- m_argumentsRegister = argumentsRegister;
- ASSERT(usesArguments());
- }
- VirtualRegister argumentsRegister() const
- {
- ASSERT(usesArguments());
- return m_argumentsRegister;
- }
- VirtualRegister uncheckedArgumentsRegister()
- {
- if (!usesArguments())
- return VirtualRegister();
- return argumentsRegister();
- }
- void setActivationRegister(VirtualRegister activationRegister)
- {
- m_activationRegister = activationRegister;
- }
-
- VirtualRegister activationRegister() const
- {
- ASSERT(needsFullScopeChain());
- return m_activationRegister;
- }
-
- VirtualRegister uncheckedActivationRegister()
+ void setScopeRegister(VirtualRegister scopeRegister)
{
- if (!needsFullScopeChain())
- return VirtualRegister();
- return activationRegister();
+ ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid());
+ m_scopeRegister = scopeRegister;
}
- bool usesArguments() const { return m_argumentsRegister.isValid(); }
-
- bool needsActivation() const
- {
- return m_needsActivation;
- }
-
- unsigned captureCount() const
+ VirtualRegister scopeRegister() const
{
- if (!symbolTable())
- return 0;
- return symbolTable()->captureCount();
- }
-
- int captureStart() const
- {
- if (!symbolTable())
- return 0;
- return symbolTable()->captureStart();
+ return m_scopeRegister;
}
- int captureEnd() const
+ CodeType codeType() const
{
- if (!symbolTable())
- return 0;
- return symbolTable()->captureEnd();
+ return static_cast<CodeType>(m_codeType);
}
- bool isCaptured(VirtualRegister operand, InlineCallFrame* = 0) const;
-
- int framePointerOffsetToGetActivationRegisters(int machineCaptureStart);
- int framePointerOffsetToGetActivationRegisters();
-
- CodeType codeType() const { return m_unlinkedCode->codeType(); }
PutPropertySlot::Context putByIdContext() const
{
if (codeType() == EvalCode)
@@ -393,20 +373,8 @@ public:
size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }
- void clearEvalCache();
-
String nameForRegister(VirtualRegister);
-#if ENABLE(JIT)
- void setNumberOfByValInfos(size_t size) { m_byValInfos.resizeToFit(size); }
- size_t numberOfByValInfos() const { return m_byValInfos.size(); }
- ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
-
- void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.resizeToFit(size); }
- size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
- CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
-#endif
-
unsigned numberOfArgumentValueProfiles()
{
ASSERT(m_numParameters >= 0);
@@ -422,17 +390,7 @@ public:
unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
- ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
- {
- ValueProfile* result = binarySearch<ValueProfile, int>(
- m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
- getValueProfileBytecodeOffset<ValueProfile>);
- ASSERT(result->m_bytecodeOffset != -1);
- ASSERT(instructions()[bytecodeOffset + opcodeLength(
- m_vm->interpreter->getOpcodeID(
- instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
- return result;
- }
+ ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset);
SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
{
return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
@@ -455,19 +413,14 @@ public:
return &m_rareCaseProfiles.last();
}
unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
- RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
- RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
- {
- return tryBinarySearch<RareCaseProfile, int>(
- m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
- getRareCaseProfileBytecodeOffset);
- }
+ RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);
+ unsigned rareCaseProfileCountForBytecodeOffset(int bytecodeOffset);
bool likelyToTakeSlowCase(int bytecodeOffset)
{
if (!hasBaselineJITProfiling())
return false;
- unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset);
return value >= Options::likelyToTakeSlowCaseMinimumCount();
}
@@ -475,60 +428,42 @@ public:
{
if (!hasBaselineJITProfiling())
return false;
- unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset);
return value >= Options::couldTakeSlowCaseMinimumCount();
}
- RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
- {
- m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
- return &m_specialFastCaseProfiles.last();
- }
- unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
- RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
- RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
+ ResultProfile* ensureResultProfile(int bytecodeOffset)
{
- return tryBinarySearch<RareCaseProfile, int>(
- m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
- getRareCaseProfileBytecodeOffset);
+ ResultProfile* profile = resultProfileForBytecodeOffset(bytecodeOffset);
+ if (!profile) {
+ m_resultProfiles.append(ResultProfile(bytecodeOffset));
+ profile = &m_resultProfiles.last();
+ ASSERT(&m_resultProfiles.last() == &m_resultProfiles[m_resultProfiles.size() - 1]);
+ if (!m_bytecodeOffsetToResultProfileIndexMap)
+ m_bytecodeOffsetToResultProfileIndexMap = std::make_unique<BytecodeOffsetToResultProfileIndexMap>();
+ m_bytecodeOffsetToResultProfileIndexMap->add(bytecodeOffset, m_resultProfiles.size() - 1);
+ }
+ return profile;
}
+ unsigned numberOfResultProfiles() { return m_resultProfiles.size(); }
+ ResultProfile* resultProfileForBytecodeOffset(int bytecodeOffset);
- bool likelyToTakeSpecialFastCase(int bytecodeOffset)
+ unsigned specialFastCaseProfileCountForBytecodeOffset(int bytecodeOffset)
{
- if (!hasBaselineJITProfiling())
- return false;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
+ ResultProfile* profile = resultProfileForBytecodeOffset(bytecodeOffset);
+ if (!profile)
+ return 0;
+ return profile->specialFastPathCount();
}
bool couldTakeSpecialFastCase(int bytecodeOffset)
{
if (!hasBaselineJITProfiling())
return false;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned specialFastCaseCount = specialFastCaseProfileCountForBytecodeOffset(bytecodeOffset);
return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
}
- bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
- {
- if (!hasBaselineJITProfiling())
- return false;
- unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned value = slowCaseCount - specialFastCaseCount;
- return value >= Options::likelyToTakeSlowCaseMinimumCount();
- }
-
- bool likelyToTakeAnySlowCase(int bytecodeOffset)
- {
- if (!hasBaselineJITProfiling())
- return false;
- unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned value = slowCaseCount + specialFastCaseCount;
- return value >= Options::likelyToTakeSlowCaseMinimumCount();
- }
-
unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
@@ -547,10 +482,7 @@ public:
bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
#if ENABLE(DFG_JIT)
- Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
- {
- return m_jitCode->dfgCommon()->codeOrigins;
- }
+ Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins();
// Having code origins implies that there has been some inlining.
bool hasCodeOrigins()
@@ -558,16 +490,16 @@ public:
return JITCode::isOptimizingJIT(jitType());
}
- bool canGetCodeOrigin(unsigned index)
+ bool canGetCodeOrigin(CallSiteIndex index)
{
if (!hasCodeOrigins())
return false;
- return index < codeOrigins().size();
+ return index.bits() < codeOrigins().size();
}
- CodeOrigin codeOrigin(unsigned index)
+ CodeOrigin codeOrigin(CallSiteIndex index)
{
- return codeOrigins()[index];
+ return codeOrigins()[index.bits()];
}
bool addFrequentExitSite(const DFG::FrequentExitSite& site)
@@ -576,11 +508,15 @@ public:
ConcurrentJITLocker locker(m_lock);
return m_exitProfile.add(locker, site);
}
-
+
+ bool hasExitSite(const ConcurrentJITLocker& locker, const DFG::FrequentExitSite& site) const
+ {
+ return m_exitProfile.hasExitSite(locker, site);
+ }
bool hasExitSite(const DFG::FrequentExitSite& site) const
{
ConcurrentJITLocker locker(m_lock);
- return m_exitProfile.hasExitSite(locker, site);
+ return hasExitSite(locker, site);
}
DFG::ExitProfile& exitProfile() { return m_exitProfile; }
@@ -589,44 +525,26 @@ public:
{
return m_lazyOperandValueProfiles;
}
-#else // ENABLE(DFG_JIT)
- bool addFrequentExitSite(const DFG::FrequentExitSite&)
- {
- return false;
- }
#endif // ENABLE(DFG_JIT)
// Constant Pool
#if ENABLE(DFG_JIT)
size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
- size_t numberOfDFGIdentifiers() const
- {
- if (!JITCode::isOptimizingJIT(jitType()))
- return 0;
-
- return m_jitCode->dfgCommon()->dfgIdentifiers.size();
- }
-
- const Identifier& identifier(int index) const
- {
- size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
- if (static_cast<unsigned>(index) < unlinkedIdentifiers)
- return m_unlinkedCode->identifier(index);
- ASSERT(JITCode::isOptimizingJIT(jitType()));
- return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
- }
+ size_t numberOfDFGIdentifiers() const;
+ const Identifier& identifier(int index) const;
#else
size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif
Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
- size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
+ Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
unsigned addConstant(JSValue v)
{
unsigned result = m_constantRegisters.size();
m_constantRegisters.append(WriteBarrier<Unknown>());
- m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
+ m_constantRegisters.last().set(m_globalObject->vm(), this, v);
+ m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
return result;
}
@@ -634,19 +552,19 @@ public:
{
unsigned result = m_constantRegisters.size();
m_constantRegisters.append(WriteBarrier<Unknown>());
+ m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
return result;
}
- bool findConstant(JSValue, unsigned& result);
- unsigned addOrFindConstant(JSValue);
WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
+ ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(int index) const { return m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; }
FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
int numberOfFunctionDecls() { return m_functionDecls.size(); }
FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
-
+
RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }
unsigned numberOfConstantBuffers() const
@@ -673,15 +591,26 @@ public:
return constantBufferAsVector(index).data();
}
+ Heap* heap() const { return &m_vm->heap; }
JSGlobalObject* globalObject() { return m_globalObject.get(); }
JSGlobalObject* globalObjectFor(CodeOrigin);
BytecodeLivenessAnalysis& livenessAnalysis()
{
- if (!m_livenessAnalysis)
- m_livenessAnalysis = std::make_unique<BytecodeLivenessAnalysis>(this);
- return *m_livenessAnalysis;
+ {
+ ConcurrentJITLocker locker(m_lock);
+ if (!!m_livenessAnalysis)
+ return *m_livenessAnalysis;
+ }
+ std::unique_ptr<BytecodeLivenessAnalysis> analysis =
+ std::make_unique<BytecodeLivenessAnalysis>(this);
+ {
+ ConcurrentJITLocker locker(m_lock);
+ if (!m_livenessAnalysis)
+ m_livenessAnalysis = WTFMove(analysis);
+ return *m_livenessAnalysis;
+ }
}
void validate();
@@ -702,8 +631,17 @@ public:
StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
-
- SymbolTable* symbolTable() const { return m_symbolTable.get(); }
+ // Live callee registers at yield points.
+ const FastBitVector& liveCalleeLocalsAtYield(unsigned index) const
+ {
+ RELEASE_ASSERT(m_rareData);
+ return m_rareData->m_liveCalleeLocalsAtYield[index];
+ }
+ FastBitVector& liveCalleeLocalsAtYield(unsigned index)
+ {
+ RELEASE_ASSERT(m_rareData);
+ return m_rareData->m_liveCalleeLocalsAtYield[index];
+ }
EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
@@ -741,7 +679,7 @@ public:
m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
}
- const ExecutionCounter& llintExecuteCounter() const
+ const BaselineExecutionCounter& llintExecuteCounter() const
{
return m_llintExecuteCounter;
}
@@ -767,9 +705,13 @@ public:
// When we observe a lot of speculation failures, we trigger a
// reoptimization. But each time, we increase the optimization trigger
// to avoid thrashing.
- unsigned reoptimizationRetryCounter() const;
+ JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
void countReoptimization();
#if ENABLE(JIT)
+ static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return RegisterSet::llintBaselineCalleeSaveRegisters().numberOfSetRegisters(); }
+ static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters();
+ size_t calleeSaveSpaceAsVirtualRegisters();
+
unsigned numberOfDFGCompiles();
int32_t codeTypeThresholdMultiplier() const;
@@ -781,11 +723,11 @@ public:
return &m_jitExecuteCounter.m_counter;
}
- static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
- static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
- static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }
+ static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
+ static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
+ static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }
- const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
+ const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
@@ -855,7 +797,14 @@ public:
uint32_t exitCountThresholdForReoptimizationFromLoop();
bool shouldReoptimizeNow();
bool shouldReoptimizeFromLoopNow();
+
+ void setCalleeSaveRegisters(RegisterSet);
+ void setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList>);
+
+ RegisterAtOffsetList* calleeSaveRegisters() const { return m_calleeSaveRegisters.get(); }
#else // No JIT
+ static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return 0; }
+ static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters() { return 0; };
void optimizeAfterWarmUp() { }
unsigned numberOfDFGCompiles() { return 0; }
#endif
@@ -866,10 +815,11 @@ public:
void updateAllPredictions();
unsigned frameRegisterCount();
+ int stackPointerOffset();
bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);
- int hasDebuggerRequests() const { return !!m_debuggerRequests; }
+ bool hasDebuggerRequests() const { return m_debuggerRequests; }
void* debuggerRequestsAddress() { return &m_debuggerRequests; }
void addBreakpoint(unsigned numBreakpoints);
@@ -885,13 +835,16 @@ public:
};
void setSteppingMode(SteppingMode);
- void clearDebuggerRequests() { m_debuggerRequests = 0; }
-
+ void clearDebuggerRequests()
+ {
+ m_steppingMode = SteppingModeDisabled;
+ m_numBreakpoints = 0;
+ }
+
// FIXME: Make these remaining members private.
- int m_numCalleeRegisters;
+ int m_numCalleeLocals;
int m_numVars;
- bool m_isConstructor;
// This is intentionally public; it's the responsibility of anyone doing any
// of the following to hold the lock:
@@ -910,20 +863,67 @@ public:
// without holding any locks, because the GC is guaranteed to wait until any
// concurrent compilation threads finish what they're doing.
mutable ConcurrentJITLock m_lock;
-
- bool m_shouldAlwaysBeInlined;
- bool m_allTransitionsHaveBeenMarked; // Initialized and used on every GC.
-
- bool m_didFailFTLCompilation;
+
+ Atomic<bool> m_visitWeaklyHasBeenCalled;
+
+ bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
+
+#if ENABLE(JIT)
+ unsigned m_capabilityLevelState : 2; // DFG::CapabilityLevel
+#endif
+
+ bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.
+
+ bool m_didFailFTLCompilation : 1;
+ bool m_hasBeenCompiledWithFTL : 1;
+ bool m_isConstructor : 1;
+ bool m_isStrictMode : 1;
+ unsigned m_codeType : 2; // CodeType
// Internal methods for use by validation code. It would be private if it wasn't
// for the fact that we use it from anonymous namespaces.
void beginValidationDidFail();
NO_RETURN_DUE_TO_CRASH void endValidationDidFail();
+ struct RareData {
+ WTF_MAKE_FAST_ALLOCATED;
+ public:
+ Vector<HandlerInfo> m_exceptionHandlers;
+
+ // Buffers used for large array literals
+ Vector<Vector<JSValue>> m_constantBuffers;
+
+ // Jump Tables
+ Vector<SimpleJumpTable> m_switchJumpTables;
+ Vector<StringJumpTable> m_stringSwitchJumpTables;
+
+ Vector<FastBitVector> m_liveCalleeLocalsAtYield;
+
+ EvalCodeCache m_evalCodeCache;
+ };
+
+ void clearExceptionHandlers()
+ {
+ if (m_rareData)
+ m_rareData->m_exceptionHandlers.clear();
+ }
+
+ void appendExceptionHandler(const HandlerInfo& handler)
+ {
+ createRareDataIfNecessary(); // We may be handling the exception of an inlined call frame.
+ m_rareData->m_exceptionHandlers.append(handler);
+ }
+
+ CallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite);
+
+#if ENABLE(JIT)
+ void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&);
+ Optional<CodeOrigin> findPC(void* pc);
+#endif
+
protected:
- virtual void visitWeakReferences(SlotVisitor&) override;
- virtual void finalizeUnconditionally() override;
+ void finalizeLLIntInlineCaches();
+ void finalizeBaselineJITInlineCaches();
#if ENABLE(DFG_JIT)
void tallyFrequentExitSites();
@@ -940,298 +940,386 @@ private:
double optimizationThresholdScalingFactor();
-#if ENABLE(JIT)
- ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
-#endif
-
void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
- void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants)
+ void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
{
+ ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
size_t count = constants.size();
- m_constantRegisters.resize(count);
+ m_constantRegisters.resizeToFit(count);
for (size_t i = 0; i < count; i++)
- m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
+ m_constantRegisters[i].set(*m_vm, this, constants[i].get());
+ m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
}
- void dumpBytecode(PrintStream&, ExecState*, const Instruction* begin, const Instruction*&, const StubInfoMap& = StubInfoMap());
+ void replaceConstant(int index, JSValue value)
+ {
+ ASSERT(isConstantRegisterIndex(index) && static_cast<size_t>(index - FirstConstantRegisterIndex) < m_constantRegisters.size());
+ m_constantRegisters[index - FirstConstantRegisterIndex].set(m_globalObject->vm(), this, value);
+ }
+
+ void dumpBytecode(
+ PrintStream&, ExecState*, const Instruction* begin, const Instruction*&,
+ const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
CString registerName(int r) const;
+ CString constantName(int index) const;
void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
enum CacheDumpMode { DumpCaches, DontDumpCaches };
- void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling);
+ void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&);
void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
- void printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
- {
- out.printf("[%4d] %-17s ", location, op);
- }
-
- void printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
- {
- printLocationAndOp(out, exec, location, it, op);
- out.printf("%s", registerName(operand).data());
- }
+ void printPutByIdCacheStatus(PrintStream&, int location, const StubInfoMap&);
+ void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+ void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand);
void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
-
-#if ENABLE(DFG_JIT)
- bool shouldImmediatelyAssumeLivenessDuringScan()
- {
- // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
- // their weak references go stale. So if a basline JIT CodeBlock gets
- // scanned, we can assume that this means that it's live.
- if (!JITCode::isOptimizingJIT(jitType()))
- return true;
+ void dumpResultProfile(PrintStream&, ResultProfile*, bool& hasPrintedProfiling);
- // For simplicity, we don't attempt to jettison code blocks during GC if
- // they are executing. Instead we strongly mark their weak references to
- // allow them to continue to execute soundly.
- if (m_mayBeExecuting)
- return true;
-
- if (Options::forceDFGCodeBlockLiveness())
- return true;
-
- return false;
- }
-#else
- bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
-#endif
+ bool shouldVisitStrongly();
+ bool shouldJettisonDueToWeakReference();
+ bool shouldJettisonDueToOldAge();
void propagateTransitions(SlotVisitor&);
void determineLiveness(SlotVisitor&);
void stronglyVisitStrongReferences(SlotVisitor&);
void stronglyVisitWeakReferences(SlotVisitor&);
+ void visitOSRExitTargets(SlotVisitor&);
+
+ std::chrono::milliseconds timeSinceCreation()
+ {
+ return std::chrono::duration_cast<std::chrono::milliseconds>(
+ std::chrono::steady_clock::now() - m_creationTime);
+ }
void createRareDataIfNecessary()
{
if (!m_rareData)
- m_rareData = adoptPtr(new RareData);
+ m_rareData = std::make_unique<RareData>();
}
-
-#if ENABLE(JIT)
- void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
- void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
-#endif
+
+ void insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>&);
+
WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
int m_numParameters;
union {
unsigned m_debuggerRequests;
struct {
+ unsigned m_hasDebuggerStatement : 1;
unsigned m_steppingMode : 1;
- unsigned m_numBreakpoints : 31;
+ unsigned m_numBreakpoints : 30;
};
};
- WriteBarrier<ScriptExecutable> m_ownerExecutable;
+ WriteBarrier<ExecutableBase> m_ownerExecutable;
VM* m_vm;
RefCountedArray<Instruction> m_instructions;
- WriteBarrier<SymbolTable> m_symbolTable;
VirtualRegister m_thisRegister;
- VirtualRegister m_argumentsRegister;
- VirtualRegister m_activationRegister;
-
- bool m_isStrictMode;
- bool m_needsActivation;
- bool m_mayBeExecuting;
- uint8_t m_visitAggregateHasBeenCalled;
+ VirtualRegister m_scopeRegister;
+ mutable CodeBlockHash m_hash;
RefPtr<SourceProvider> m_source;
unsigned m_sourceOffset;
unsigned m_firstLineColumnOffset;
- unsigned m_codeType;
-#if ENABLE(LLINT)
- Vector<LLIntCallLinkInfo> m_llintCallLinkInfos;
+ RefCountedArray<LLIntCallLinkInfo> m_llintCallLinkInfos;
SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
-#endif
RefPtr<JITCode> m_jitCode;
- MacroAssemblerCodePtr m_jitCodeWithArityCheck;
#if ENABLE(JIT)
+ std::unique_ptr<RegisterAtOffsetList> m_calleeSaveRegisters;
Bag<StructureStubInfo> m_stubInfos;
- Vector<ByValInfo> m_byValInfos;
- Vector<CallLinkInfo> m_callLinkInfos;
+ Bag<ByValInfo> m_byValInfos;
+ Bag<CallLinkInfo> m_callLinkInfos;
SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
+ SentinelLinkedList<PolymorphicCallNode, BasicRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
+ std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
#endif
- OwnPtr<CompactJITCodeMap> m_jitCodeMap;
+ std::unique_ptr<CompactJITCodeMap> m_jitCodeMap;
#if ENABLE(DFG_JIT)
// This is relevant to non-DFG code blocks that serve as the profiled code block
// for DFG code blocks.
DFG::ExitProfile m_exitProfile;
CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
- Vector<ValueProfile> m_argumentValueProfiles;
- Vector<ValueProfile> m_valueProfiles;
+ RefCountedArray<ValueProfile> m_argumentValueProfiles;
+ RefCountedArray<ValueProfile> m_valueProfiles;
SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
- SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
- Vector<ArrayAllocationProfile> m_arrayAllocationProfiles;
+ SegmentedVector<ResultProfile, 8> m_resultProfiles;
+ typedef HashMap<unsigned, unsigned, IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> BytecodeOffsetToResultProfileIndexMap;
+ std::unique_ptr<BytecodeOffsetToResultProfileIndexMap> m_bytecodeOffsetToResultProfileIndexMap;
+ RefCountedArray<ArrayAllocationProfile> m_arrayAllocationProfiles;
ArrayProfileVector m_arrayProfiles;
- Vector<ObjectAllocationProfile> m_objectAllocationProfiles;
+ RefCountedArray<ObjectAllocationProfile> m_objectAllocationProfiles;
// Constant Pool
COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
// TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
// it, so we're stuck with it for now.
Vector<WriteBarrier<Unknown>> m_constantRegisters;
- Vector<WriteBarrier<FunctionExecutable>> m_functionDecls;
- Vector<WriteBarrier<FunctionExecutable>> m_functionExprs;
+ Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
+ RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionDecls;
+ RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionExprs;
- RefPtr<CodeBlock> m_alternative;
+ WriteBarrier<CodeBlock> m_alternative;
- ExecutionCounter m_llintExecuteCounter;
+ BaselineExecutionCounter m_llintExecuteCounter;
- ExecutionCounter m_jitExecuteCounter;
- int32_t m_totalJITExecutions;
+ BaselineExecutionCounter m_jitExecuteCounter;
uint32_t m_osrExitCounter;
uint16_t m_optimizationDelayCounter;
uint16_t m_reoptimizationRetryCounter;
-
- mutable CodeBlockHash m_hash;
-
- std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
- struct RareData {
- WTF_MAKE_FAST_ALLOCATED;
- public:
- Vector<HandlerInfo> m_exceptionHandlers;
+ std::chrono::steady_clock::time_point m_creationTime;
- // Buffers used for large array literals
- Vector<Vector<JSValue>> m_constantBuffers;
+ std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
- // Jump Tables
- Vector<SimpleJumpTable> m_switchJumpTables;
- Vector<StringJumpTable> m_stringSwitchJumpTables;
+ std::unique_ptr<RareData> m_rareData;
- EvalCodeCache m_evalCodeCache;
- };
-#if COMPILER(MSVC)
- friend void WTF::deleteOwnedPtr<RareData>(RareData*);
-#endif
- OwnPtr<RareData> m_rareData;
-#if ENABLE(JIT)
- DFG::CapabilityLevel m_capabilityLevelState;
-#endif
+ UnconditionalFinalizer m_unconditionalFinalizer;
+ WeakReferenceHarvester m_weakReferenceHarvester;
};
// Program code is not marked by any function, so we make the global object
// responsible for marking it.
class GlobalCodeBlock : public CodeBlock {
+ typedef CodeBlock Base;
+ DECLARE_INFO;
+
protected:
- GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
- : CodeBlock(CopyParsedBlock, other)
+ GlobalCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, GlobalCodeBlock& other)
+ : CodeBlock(vm, structure, CopyParsedBlock, other)
{
}
-
- GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
- : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
+
+ GlobalCodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+ : CodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
{
}
};
class ProgramCodeBlock : public GlobalCodeBlock {
public:
- ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
- : GlobalCodeBlock(CopyParsedBlock, other)
+ typedef GlobalCodeBlock Base;
+ DECLARE_INFO;
+
+ static ProgramCodeBlock* create(VM* vm, CopyParsedBlockTag, ProgramCodeBlock& other)
{
+ ProgramCodeBlock* instance = new (NotNull, allocateCell<ProgramCodeBlock>(vm->heap))
+ ProgramCodeBlock(vm, vm->programCodeBlockStructure.get(), CopyParsedBlock, other);
+ instance->finishCreation(*vm, CopyParsedBlock, other);
+ return instance;
}
- ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
- : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
+ static ProgramCodeBlock* create(VM* vm, ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
{
+ ProgramCodeBlock* instance = new (NotNull, allocateCell<ProgramCodeBlock>(vm->heap))
+ ProgramCodeBlock(vm, vm->programCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, firstLineColumnOffset);
+ instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+ return instance;
}
-#if ENABLE(JIT)
-protected:
- virtual CodeBlock* replacement() override;
- virtual DFG::CapabilityLevel capabilityLevelInternal() override;
-#endif
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+ {
+ return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+ }
+
+private:
+ ProgramCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, ProgramCodeBlock& other)
+ : GlobalCodeBlock(vm, structure, CopyParsedBlock, other)
+ {
+ }
+
+ ProgramCodeBlock(VM* vm, Structure* structure, ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
+ : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
+ {
+ }
+
+ static void destroy(JSCell*);
+};
+
+class ModuleProgramCodeBlock : public GlobalCodeBlock {
+public:
+ typedef GlobalCodeBlock Base;
+ DECLARE_INFO;
+
+ static ModuleProgramCodeBlock* create(VM* vm, CopyParsedBlockTag, ModuleProgramCodeBlock& other)
+ {
+ ModuleProgramCodeBlock* instance = new (NotNull, allocateCell<ModuleProgramCodeBlock>(vm->heap))
+ ModuleProgramCodeBlock(vm, vm->moduleProgramCodeBlockStructure.get(), CopyParsedBlock, other);
+ instance->finishCreation(*vm, CopyParsedBlock, other);
+ return instance;
+ }
+
+ static ModuleProgramCodeBlock* create(VM* vm, ModuleProgramExecutable* ownerExecutable, UnlinkedModuleProgramCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
+ {
+ ModuleProgramCodeBlock* instance = new (NotNull, allocateCell<ModuleProgramCodeBlock>(vm->heap))
+ ModuleProgramCodeBlock(vm, vm->moduleProgramCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, firstLineColumnOffset);
+ instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+ return instance;
+ }
+
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+ {
+ return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+ }
+
+private:
+ ModuleProgramCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, ModuleProgramCodeBlock& other)
+ : GlobalCodeBlock(vm, structure, CopyParsedBlock, other)
+ {
+ }
+
+ ModuleProgramCodeBlock(VM* vm, Structure* structure, ModuleProgramExecutable* ownerExecutable, UnlinkedModuleProgramCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
+ : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
+ {
+ }
+
+ static void destroy(JSCell*);
};
class EvalCodeBlock : public GlobalCodeBlock {
public:
- EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
- : GlobalCodeBlock(CopyParsedBlock, other)
+ typedef GlobalCodeBlock Base;
+ DECLARE_INFO;
+
+ static EvalCodeBlock* create(VM* vm, CopyParsedBlockTag, EvalCodeBlock& other)
{
+ EvalCodeBlock* instance = new (NotNull, allocateCell<EvalCodeBlock>(vm->heap))
+ EvalCodeBlock(vm, vm->evalCodeBlockStructure.get(), CopyParsedBlock, other);
+ instance->finishCreation(*vm, CopyParsedBlock, other);
+ return instance;
}
-
- EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
- : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1)
+
+ static EvalCodeBlock* create(VM* vm, EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
{
+ EvalCodeBlock* instance = new (NotNull, allocateCell<EvalCodeBlock>(vm->heap))
+ EvalCodeBlock(vm, vm->evalCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, sourceProvider);
+ instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+ return instance;
}
-
+
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+ {
+ return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+ }
+
const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }
-#if ENABLE(JIT)
-protected:
- virtual CodeBlock* replacement() override;
- virtual DFG::CapabilityLevel capabilityLevelInternal() override;
-#endif
+private:
+ EvalCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, EvalCodeBlock& other)
+ : GlobalCodeBlock(vm, structure, CopyParsedBlock, other)
+ {
+ }
+
+ EvalCodeBlock(VM* vm, Structure* structure, EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
+ : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1)
+ {
+ }
+ static void destroy(JSCell*);
+
private:
UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
};
class FunctionCodeBlock : public CodeBlock {
public:
- FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
- : CodeBlock(CopyParsedBlock, other)
+ typedef CodeBlock Base;
+ DECLARE_INFO;
+
+ static FunctionCodeBlock* create(VM* vm, CopyParsedBlockTag, FunctionCodeBlock& other)
+ {
+ FunctionCodeBlock* instance = new (NotNull, allocateCell<FunctionCodeBlock>(vm->heap))
+ FunctionCodeBlock(vm, vm->functionCodeBlockStructure.get(), CopyParsedBlock, other);
+ instance->finishCreation(*vm, CopyParsedBlock, other);
+ return instance;
+ }
+
+ static FunctionCodeBlock* create(VM* vm, FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope,
+ PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+ {
+ FunctionCodeBlock* instance = new (NotNull, allocateCell<FunctionCodeBlock>(vm->heap))
+ FunctionCodeBlock(vm, vm->functionCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset);
+ instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+ return instance;
+ }
+
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+ {
+ return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+ }
+
+private:
+ FunctionCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, FunctionCodeBlock& other)
+ : CodeBlock(vm, structure, CopyParsedBlock, other)
{
}
- FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
- : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
+ FunctionCodeBlock(VM* vm, Structure* structure, FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope,
+ PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+ : CodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
{
}
-#if ENABLE(JIT)
-protected:
- virtual CodeBlock* replacement() override;
- virtual DFG::CapabilityLevel capabilityLevelInternal() override;
-#endif
+ static void destroy(JSCell*);
};
-inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
-{
- RELEASE_ASSERT(inlineCallFrame);
- ExecutableBase* executable = inlineCallFrame->executable.get();
- RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info());
- return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
-}
+#if ENABLE(WEBASSEMBLY)
+class WebAssemblyCodeBlock : public CodeBlock {
+public:
+ typedef CodeBlock Base;
+ DECLARE_INFO;
-inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
-{
- if (codeOrigin.inlineCallFrame)
- return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
- return baselineCodeBlock;
-}
+ static WebAssemblyCodeBlock* create(VM* vm, CopyParsedBlockTag, WebAssemblyCodeBlock& other)
+ {
+ WebAssemblyCodeBlock* instance = new (NotNull, allocateCell<WebAssemblyCodeBlock>(vm->heap))
+ WebAssemblyCodeBlock(vm, vm->webAssemblyCodeBlockStructure.get(), CopyParsedBlock, other);
+ instance->finishCreation(*vm, CopyParsedBlock, other);
+ return instance;
+ }
-inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
-{
- if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
- return CallFrame::argumentOffset(argument);
-
- const SlowArgument* slowArguments = symbolTable()->slowArguments();
- if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
- return CallFrame::argumentOffset(argument);
-
- ASSERT(slowArguments[argument].status == SlowArgument::Captured);
- return slowArguments[argument].index;
-}
+ static WebAssemblyCodeBlock* create(VM* vm, WebAssemblyExecutable* ownerExecutable, JSGlobalObject* globalObject)
+ {
+ WebAssemblyCodeBlock* instance = new (NotNull, allocateCell<WebAssemblyCodeBlock>(vm->heap))
+ WebAssemblyCodeBlock(vm, vm->webAssemblyCodeBlockStructure.get(), ownerExecutable, globalObject);
+ instance->finishCreation(*vm, ownerExecutable, globalObject);
+ return instance;
+ }
-inline bool CodeBlock::hasSlowArguments()
-{
- return !!symbolTable()->slowArguments();
-}
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+ {
+ return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+ }
+
+private:
+ WebAssemblyCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, WebAssemblyCodeBlock& other)
+ : CodeBlock(vm, structure, CopyParsedBlock, other)
+ {
+ }
+
+ WebAssemblyCodeBlock(VM* vm, Structure* structure, WebAssemblyExecutable* ownerExecutable, JSGlobalObject* globalObject)
+ : CodeBlock(vm, structure, ownerExecutable, globalObject)
+ {
+ }
+
+ static void destroy(JSCell*);
+};
+#endif
inline Register& ExecState::r(int index)
{
@@ -1241,25 +1329,30 @@ inline Register& ExecState::r(int index)
return this[index];
}
+inline Register& ExecState::r(VirtualRegister reg)
+{
+ return r(reg.offset());
+}
+
inline Register& ExecState::uncheckedR(int index)
{
RELEASE_ASSERT(index < FirstConstantRegisterIndex);
return this[index];
}
-inline JSValue ExecState::argumentAfterCapture(size_t argument)
+inline Register& ExecState::uncheckedR(VirtualRegister reg)
{
- if (argument >= argumentCount())
- return jsUndefined();
-
- if (!codeBlock())
- return this[argumentOffset(argument)].jsValue();
-
- return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
+ return uncheckedR(reg.offset());
}
-inline void CodeBlockSet::mark(void* candidateCodeBlock)
+inline void CodeBlock::clearVisitWeaklyHasBeenCalled()
{
+ m_visitWeaklyHasBeenCalled.store(false, std::memory_order_relaxed);
+}
+
+inline void CodeBlockSet::mark(const LockHolder& locker, void* candidateCodeBlock)
+{
+ ASSERT(m_lock.isLocked());
// We have to check for 0 and -1 because those are used by the HashMap as markers.
uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
@@ -1268,15 +1361,61 @@ inline void CodeBlockSet::mark(void* candidateCodeBlock)
// -1 + 1 = 0
if (value + 1 <= 1)
return;
-
- HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
- if (iter == m_set.end())
+
+ CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
+ if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock))
return;
-
- (*iter)->m_mayBeExecuting = true;
-#if ENABLE(GGC)
- m_currentlyExecuting.append(static_cast<CodeBlock*>(candidateCodeBlock));
-#endif
+
+ mark(locker, codeBlock);
+}
+
+inline void CodeBlockSet::mark(const LockHolder&, CodeBlock* codeBlock)
+{
+ if (!codeBlock)
+ return;
+
+ // Try to recover gracefully if we forget to execute a barrier for a
+ // CodeBlock that does value profiling. This is probably overkill, but we
+ // have always done it.
+ Heap::heap(codeBlock)->writeBarrier(codeBlock);
+
+ m_currentlyExecuting.add(codeBlock);
+}
+
+template <typename Functor> inline void ScriptExecutable::forEachCodeBlock(Functor&& functor)
+{
+ switch (type()) {
+ case ProgramExecutableType: {
+ if (CodeBlock* codeBlock = static_cast<CodeBlock*>(jsCast<ProgramExecutable*>(this)->m_programCodeBlock.get()))
+ codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
+ break;
+ }
+
+ case EvalExecutableType: {
+ if (CodeBlock* codeBlock = static_cast<CodeBlock*>(jsCast<EvalExecutable*>(this)->m_evalCodeBlock.get()))
+ codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
+ break;
+ }
+
+ case FunctionExecutableType: {
+ Functor f(std::forward<Functor>(functor));
+ FunctionExecutable* executable = jsCast<FunctionExecutable*>(this);
+ if (CodeBlock* codeBlock = static_cast<CodeBlock*>(executable->m_codeBlockForCall.get()))
+ codeBlock->forEachRelatedCodeBlock(f);
+ if (CodeBlock* codeBlock = static_cast<CodeBlock*>(executable->m_codeBlockForConstruct.get()))
+ codeBlock->forEachRelatedCodeBlock(f);
+ break;
+ }
+
+ case ModuleProgramExecutableType: {
+ if (CodeBlock* codeBlock = static_cast<CodeBlock*>(jsCast<ModuleProgramExecutable*>(this)->m_moduleProgramCodeBlock.get()))
+ codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp
index be50c9778..50cf7378d 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,18 +28,16 @@
#include "CodeBlock.h"
#include "DFGCommon.h"
+#include "JSCInlines.h"
namespace JSC {
-void CodeBlockJettisoningWatchpoint::fireInternal()
+void CodeBlockJettisoningWatchpoint::fireInternal(const FireDetail& detail)
{
- if (DFG::shouldShowDisassembly())
+ if (DFG::shouldDumpDisassembly())
dataLog("Firing watchpoint ", RawPointer(this), " on ", *m_codeBlock, "\n");
- m_codeBlock->jettison(CountReoptimization);
-
- if (isOnList())
- remove();
+ m_codeBlock->jettison(Profiler::JettisonDueToUnprofiledWatchpoint, CountReoptimization, &detail);
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h
index 89d87f4d0..b5e6dd330 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h
@@ -34,18 +34,13 @@ class CodeBlock;
class CodeBlockJettisoningWatchpoint : public Watchpoint {
public:
- CodeBlockJettisoningWatchpoint()
- : m_codeBlock(0)
- {
- }
-
CodeBlockJettisoningWatchpoint(CodeBlock* codeBlock)
: m_codeBlock(codeBlock)
{
}
protected:
- virtual void fireInternal() override;
+ virtual void fireInternal(const FireDetail&) override;
private:
CodeBlock* m_codeBlock;
diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.cpp b/Source/JavaScriptCore/bytecode/CodeOrigin.cpp
index 39b83fead..d51695012 100644
--- a/Source/JavaScriptCore/bytecode/CodeOrigin.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeOrigin.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,14 +29,15 @@
#include "CallFrame.h"
#include "CodeBlock.h"
#include "Executable.h"
-#include "Operations.h"
+#include "InlineCallFrame.h"
+#include "JSCInlines.h"
namespace JSC {
unsigned CodeOrigin::inlineDepthForCallFrame(InlineCallFrame* inlineCallFrame)
{
unsigned result = 1;
- for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame)
+ for (InlineCallFrame* current = inlineCallFrame; current; current = current->directCaller.inlineCallFrame)
result++;
return result;
}
@@ -45,18 +46,90 @@ unsigned CodeOrigin::inlineDepth() const
{
return inlineDepthForCallFrame(inlineCallFrame);
}
+
+bool CodeOrigin::isApproximatelyEqualTo(const CodeOrigin& other) const
+{
+ CodeOrigin a = *this;
+ CodeOrigin b = other;
+
+ if (!a.isSet())
+ return !b.isSet();
+ if (!b.isSet())
+ return false;
+
+ if (a.isHashTableDeletedValue())
+ return b.isHashTableDeletedValue();
+ if (b.isHashTableDeletedValue())
+ return false;
+ for (;;) {
+ ASSERT(a.isSet());
+ ASSERT(b.isSet());
+
+ if (a.bytecodeIndex != b.bytecodeIndex)
+ return false;
+
+ if ((!!a.inlineCallFrame) != (!!b.inlineCallFrame))
+ return false;
+
+ if (!a.inlineCallFrame)
+ return true;
+
+ if (a.inlineCallFrame->baselineCodeBlock.get() != b.inlineCallFrame->baselineCodeBlock.get())
+ return false;
+
+ a = a.inlineCallFrame->directCaller;
+ b = b.inlineCallFrame->directCaller;
+ }
+}
+
+unsigned CodeOrigin::approximateHash() const
+{
+ if (!isSet())
+ return 0;
+ if (isHashTableDeletedValue())
+ return 1;
+
+ unsigned result = 2;
+ CodeOrigin codeOrigin = *this;
+ for (;;) {
+ result += codeOrigin.bytecodeIndex;
+
+ if (!codeOrigin.inlineCallFrame)
+ return result;
+
+ result += WTF::PtrHash<JSCell*>::hash(codeOrigin.inlineCallFrame->baselineCodeBlock.get());
+
+ codeOrigin = codeOrigin.inlineCallFrame->directCaller;
+ }
+}
+
Vector<CodeOrigin> CodeOrigin::inlineStack() const
{
Vector<CodeOrigin> result(inlineDepth());
result.last() = *this;
unsigned index = result.size() - 2;
- for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame)
- result[index--] = current->caller;
+ for (InlineCallFrame* current = inlineCallFrame; current; current = current->directCaller.inlineCallFrame)
+ result[index--] = current->directCaller;
RELEASE_ASSERT(!result[0].inlineCallFrame);
return result;
}
+CodeBlock* CodeOrigin::codeOriginOwner() const
+{
+ if (!inlineCallFrame)
+ return 0;
+ return inlineCallFrame->baselineCodeBlock.get();
+}
+
+int CodeOrigin::stackOffset() const
+{
+ if (!inlineCallFrame)
+ return 0;
+
+ return inlineCallFrame->stackOffset;
+}
+
void CodeOrigin::dump(PrintStream& out) const
{
if (!isSet()) {
@@ -70,7 +143,7 @@ void CodeOrigin::dump(PrintStream& out) const
out.print(" --> ");
if (InlineCallFrame* frame = stack[i].inlineCallFrame) {
- out.print(frame->briefFunctionInformation(), ":<", RawPointer(frame->executable.get()), "> ");
+ out.print(frame->briefFunctionInformation(), ":<", RawPointer(frame->baselineCodeBlock.get()), "> ");
if (frame->isClosureCall)
out.print("(closure) ");
}
@@ -84,51 +157,4 @@ void CodeOrigin::dumpInContext(PrintStream& out, DumpContext*) const
dump(out);
}
-JSFunction* InlineCallFrame::calleeForCallFrame(ExecState* exec) const
-{
- return jsCast<JSFunction*>(calleeRecovery.recover(exec));
-}
-
-CodeBlockHash InlineCallFrame::hash() const
-{
- return jsCast<FunctionExecutable*>(executable.get())->codeBlockFor(
- specializationKind())->hash();
-}
-
-CString InlineCallFrame::inferredName() const
-{
- return jsCast<FunctionExecutable*>(executable.get())->inferredName().utf8();
-}
-
-CodeBlock* InlineCallFrame::baselineCodeBlock() const
-{
- return jsCast<FunctionExecutable*>(executable.get())->baselineCodeBlockFor(specializationKind());
-}
-
-void InlineCallFrame::dumpBriefFunctionInformation(PrintStream& out) const
-{
- out.print(inferredName(), "#", hash());
-}
-
-void InlineCallFrame::dumpInContext(PrintStream& out, DumpContext* context) const
-{
- out.print(briefFunctionInformation(), ":<", RawPointer(executable.get()));
- if (executable->isStrictMode())
- out.print(" (StrictMode)");
- out.print(", bc#", caller.bytecodeIndex, ", ", specializationKind());
- if (isClosureCall)
- out.print(", closure call");
- else
- out.print(", known callee: ", inContext(calleeRecovery.constant(), context));
- out.print(", numArgs+this = ", arguments.size());
- out.print(", stack < loc", VirtualRegister(stackOffset).toLocal());
- out.print(">");
-}
-
-void InlineCallFrame::dump(PrintStream& out) const
-{
- dumpInContext(out, 0);
-}
-
} // namespace JSC
-
diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.h b/Source/JavaScriptCore/bytecode/CodeOrigin.h
index ed660c247..66ab42724 100644
--- a/Source/JavaScriptCore/bytecode/CodeOrigin.h
+++ b/Source/JavaScriptCore/bytecode/CodeOrigin.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,10 +26,9 @@
#ifndef CodeOrigin_h
#define CodeOrigin_h
+#include "CallMode.h"
#include "CodeBlockHash.h"
#include "CodeSpecializationKind.h"
-#include "JSFunction.h"
-#include "ValueRecovery.h"
#include "WriteBarrier.h"
#include <wtf/BitVector.h>
#include <wtf/HashMap.h>
@@ -63,7 +62,7 @@ struct CodeOrigin {
CodeOrigin(WTF::HashTableDeletedValueType)
: bytecodeIndex(invalidBytecodeIndex)
- , inlineCallFrame(bitwise_cast<InlineCallFrame*>(static_cast<uintptr_t>(1)))
+ , inlineCallFrame(deletedMarker())
{
}
@@ -75,6 +74,7 @@ struct CodeOrigin {
}
bool isSet() const { return bytecodeIndex != invalidBytecodeIndex; }
+ explicit operator bool() const { return isSet(); }
bool isHashTableDeletedValue() const
{
@@ -87,7 +87,7 @@ struct CodeOrigin {
// If the code origin corresponds to inlined code, gives you the heap object that
// would have owned the code if it had not been inlined. Otherwise returns 0.
- ScriptExecutable* codeOriginOwner() const;
+ CodeBlock* codeOriginOwner() const;
int stackOffset() const;
@@ -97,69 +97,28 @@ struct CodeOrigin {
bool operator==(const CodeOrigin& other) const;
bool operator!=(const CodeOrigin& other) const { return !(*this == other); }
+ // This checks if the two code origins correspond to the same stack trace snippets,
+ // but ignore whether the InlineCallFrame's are identical.
+ bool isApproximatelyEqualTo(const CodeOrigin& other) const;
+
+ unsigned approximateHash() const;
+
+ template <typename Function>
+ void walkUpInlineStack(const Function&);
+
// Get the inline stack. This is slow, and is intended for debugging only.
Vector<CodeOrigin> inlineStack() const;
void dump(PrintStream&) const;
void dumpInContext(PrintStream&, DumpContext*) const;
-};
-struct InlineCallFrame {
- Vector<ValueRecovery> arguments;
- WriteBarrier<ScriptExecutable> executable;
- ValueRecovery calleeRecovery;
- CodeOrigin caller;
- BitVector capturedVars; // Indexed by the machine call frame's variable numbering.
- signed stackOffset : 30;
- bool isCall : 1;
- bool isClosureCall : 1; // If false then we know that callee/scope are constants and the DFG won't treat them as variables, i.e. they have to be recovered manually.
- VirtualRegister argumentsRegister; // This is only set if the code uses arguments. The unmodified arguments register follows the unmodifiedArgumentsRegister() convention (see CodeBlock.h).
-
- // There is really no good notion of a "default" set of values for
- // InlineCallFrame's fields. This constructor is here just to reduce confusion if
- // we forgot to initialize explicitly.
- InlineCallFrame()
- : stackOffset(0)
- , isCall(false)
- , isClosureCall(false)
+private:
+ static InlineCallFrame* deletedMarker()
{
+ return bitwise_cast<InlineCallFrame*>(static_cast<uintptr_t>(1));
}
-
- CodeSpecializationKind specializationKind() const { return specializationFromIsCall(isCall); }
-
- JSFunction* calleeConstant() const
- {
- if (calleeRecovery.isConstant())
- return jsCast<JSFunction*>(calleeRecovery.constant());
- return 0;
- }
-
- // Get the callee given a machine call frame to which this InlineCallFrame belongs.
- JSFunction* calleeForCallFrame(ExecState*) const;
-
- CString inferredName() const;
- CodeBlockHash hash() const;
-
- CodeBlock* baselineCodeBlock() const;
-
- ptrdiff_t callerFrameOffset() const { return stackOffset * sizeof(Register) + CallFrame::callerFrameOffset(); }
- ptrdiff_t returnPCOffset() const { return stackOffset * sizeof(Register) + CallFrame::returnPCOffset(); }
-
- void dumpBriefFunctionInformation(PrintStream&) const;
- void dump(PrintStream&) const;
- void dumpInContext(PrintStream&, DumpContext*) const;
-
- MAKE_PRINT_METHOD(InlineCallFrame, dumpBriefFunctionInformation, briefFunctionInformation);
};
-inline int CodeOrigin::stackOffset() const
-{
- if (!inlineCallFrame)
- return 0;
-
- return inlineCallFrame->stackOffset;
-}
-
inline unsigned CodeOrigin::hash() const
{
return WTF::IntHash<unsigned>::hash(bytecodeIndex) +
@@ -171,13 +130,6 @@ inline bool CodeOrigin::operator==(const CodeOrigin& other) const
return bytecodeIndex == other.bytecodeIndex
&& inlineCallFrame == other.inlineCallFrame;
}
-
-inline ScriptExecutable* CodeOrigin::codeOriginOwner() const
-{
- if (!inlineCallFrame)
- return 0;
- return inlineCallFrame->executable.get();
-}
struct CodeOriginHash {
static unsigned hash(const CodeOrigin& key) { return key.hash(); }
@@ -185,6 +137,12 @@ struct CodeOriginHash {
static const bool safeToCompareToEmptyOrDeleted = true;
};
+struct CodeOriginApproximateHash {
+ static unsigned hash(const CodeOrigin& key) { return key.approximateHash(); }
+ static bool equal(const CodeOrigin& a, const CodeOrigin& b) { return a.isApproximatelyEqualTo(b); }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
} // namespace JSC
namespace WTF {
diff --git a/Source/JavaScriptCore/bytecode/CodeType.cpp b/Source/JavaScriptCore/bytecode/CodeType.cpp
index 8b2cad56a..0c7043dfa 100644
--- a/Source/JavaScriptCore/bytecode/CodeType.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeType.cpp
@@ -42,6 +42,9 @@ void printInternal(PrintStream& out, JSC::CodeType codeType)
case JSC::FunctionCode:
out.print("Function");
return;
+ case JSC::ModuleCode:
+ out.print("Module");
+ return;
default:
CRASH();
return;
diff --git a/Source/JavaScriptCore/bytecode/CodeType.h b/Source/JavaScriptCore/bytecode/CodeType.h
index 04afc1109..9941d514c 100644
--- a/Source/JavaScriptCore/bytecode/CodeType.h
+++ b/Source/JavaScriptCore/bytecode/CodeType.h
@@ -26,11 +26,9 @@
#ifndef CodeType_h
#define CodeType_h
-#include <wtf/Platform.h>
-
namespace JSC {
-enum CodeType { GlobalCode, EvalCode, FunctionCode };
+enum CodeType { GlobalCode, EvalCode, FunctionCode, ModuleCode };
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp b/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp
new file mode 100644
index 000000000..33663d057
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ComplexGetStatus.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+ComplexGetStatus ComplexGetStatus::computeFor(
+ Structure* headStructure, const ObjectPropertyConditionSet& conditionSet, UniquedStringImpl* uid)
+{
+ // FIXME: We should assert that we never see a structure that
+ // getOwnPropertySlotIsImpure() but for which we don't
+ // newImpurePropertyFiresWatchpoints(). We're not at a point where we can do
+ // that, yet.
+ // https://bugs.webkit.org/show_bug.cgi?id=131810
+
+ ASSERT(conditionSet.isValid());
+
+ if (headStructure->takesSlowPathInDFGForImpureProperty())
+ return takesSlowPath();
+
+ ComplexGetStatus result;
+ result.m_kind = Inlineable;
+
+ if (!conditionSet.isEmpty()) {
+ result.m_conditionSet = conditionSet;
+
+ if (!result.m_conditionSet.structuresEnsureValidity())
+ return skip();
+
+ unsigned numberOfSlotBases =
+ result.m_conditionSet.numberOfConditionsWithKind(PropertyCondition::Presence);
+ RELEASE_ASSERT(numberOfSlotBases <= 1);
+ if (!numberOfSlotBases) {
+ // Currently we don't support misses. That's a bummer.
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=133052
+ return takesSlowPath();
+ }
+ ObjectPropertyCondition base = result.m_conditionSet.slotBaseCondition();
+ ASSERT(base.kind() == PropertyCondition::Presence);
+
+ result.m_offset = base.offset();
+ } else
+ result.m_offset = headStructure->getConcurrently(uid);
+
+ if (!isValidOffset(result.m_offset))
+ return takesSlowPath();
+
+ return result;
+}
+
+} // namespace JSC
+
+
diff --git a/Source/JavaScriptCore/bytecode/ComplexGetStatus.h b/Source/JavaScriptCore/bytecode/ComplexGetStatus.h
new file mode 100644
index 000000000..a06e995d5
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ComplexGetStatus.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ComplexGetStatus_h
+#define ComplexGetStatus_h
+
+#include "JSCJSValue.h"
+#include "ObjectPropertyConditionSet.h"
+#include "PropertyOffset.h"
+
+namespace JSC {
+
+class CodeBlock;
+class StructureChain;
+
+// This class is useful for figuring out how to inline a cached get-like access. We
+// say "get-like" because this is appropriate for loading the GetterSetter object in
+// a put_by_id that hits a setter. Notably, this doesn't figure out how to call
+// accessors, or even whether they should be called. What it gives us, is a way of
+// determining how to load the value from the requested property (identified by a
+// StringImpl* uid) from an object of the given structure in the given CodeBlock,
+// assuming that such an access had already been cached by Repatch (and so Repatch had
+// already done a bunch of safety checks). This doesn't reexecute any checks that
+// Repatch would have executed, and for prototype chain accesses, it doesn't ask the
+// objects in the prototype chain whether their getOwnPropertySlot would attempt to
+// intercept the access - so this really is only appropriate if you already know that
+// one of the JITOperations had OK'd this for caching and that Repatch concurred.
+//
+// The typical use pattern is something like:
+//
+// ComplexGetStatus status = ComplexGetStatus::computeFor(...);
+// switch (status.kind()) {
+// case ComplexGetStatus::ShouldSkip:
+// // Handle the case where this kind of access is possibly safe but wouldn't
+// // pass the required safety checks. For example, if an IC gives us a list of
+// // accesses and one of them is ShouldSkip, then we should pretend as if it
+// // wasn't even there.
+// break;
+// case ComplexGetStatus::TakesSlowPath:
+// // This kind of access is not safe to inline. Bail out of any attempst to
+// // inline.
+// break;
+// case ComplexGetStatus::Inlineable:
+// // The good stuff goes here. If it's Inlineable then the other properties of
+// // the 'status' object will tell you everything you need to know about how
+// // to execute the get-like operation.
+// break;
+// }
+
+class ComplexGetStatus {
+public:
+ enum Kind {
+ ShouldSkip,
+ TakesSlowPath,
+ Inlineable
+ };
+
+ ComplexGetStatus()
+ : m_kind(ShouldSkip)
+ , m_offset(invalidOffset)
+ {
+ }
+
+ static ComplexGetStatus skip()
+ {
+ return ComplexGetStatus();
+ }
+
+ static ComplexGetStatus takesSlowPath()
+ {
+ ComplexGetStatus result;
+ result.m_kind = TakesSlowPath;
+ return result;
+ }
+
+ static ComplexGetStatus computeFor(
+ Structure* headStructure, const ObjectPropertyConditionSet&, UniquedStringImpl* uid);
+
+ Kind kind() const { return m_kind; }
+ PropertyOffset offset() const { return m_offset; }
+ const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; }
+
+private:
+ Kind m_kind;
+ PropertyOffset m_offset;
+ ObjectPropertyConditionSet m_conditionSet;
+};
+
+} // namespace JSC
+
+#endif // ComplexGetStatus_h
+
diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
index 5d05bbb2f..40a25ced6 100644
--- a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
@@ -28,8 +28,6 @@
#if ENABLE(DFG_JIT)
-#include <wtf/PassOwnPtr.h>
-
namespace JSC { namespace DFG {
ExitProfile::ExitProfile() { }
@@ -37,10 +35,12 @@ ExitProfile::~ExitProfile() { }
bool ExitProfile::add(const ConcurrentJITLocker&, const FrequentExitSite& site)
{
+ ASSERT(site.jitType() != ExitFromAnything);
+
// If we've never seen any frequent exits then create the list and put this site
// into it.
if (!m_frequentExitSites) {
- m_frequentExitSites = adoptPtr(new Vector<FrequentExitSite>());
+ m_frequentExitSites = std::make_unique<Vector<FrequentExitSite>>();
m_frequentExitSites->append(site);
return true;
}
@@ -78,7 +78,7 @@ bool ExitProfile::hasExitSite(const ConcurrentJITLocker&, const FrequentExitSite
return false;
for (unsigned i = m_frequentExitSites->size(); i--;) {
- if (m_frequentExitSites->at(i) == site)
+ if (site.subsumes(m_frequentExitSites->at(i)))
return true;
}
return false;
diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.h b/Source/JavaScriptCore/bytecode/DFGExitProfile.h
index ab1a60d58..cdecbaf97 100644
--- a/Source/JavaScriptCore/bytecode/DFGExitProfile.h
+++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,10 +26,12 @@
#ifndef DFGExitProfile_h
#define DFGExitProfile_h
+#if ENABLE(DFG_JIT)
+
#include "ConcurrentJITLock.h"
#include "ExitKind.h"
+#include "ExitingJITType.h"
#include <wtf/HashSet.h>
-#include <wtf/OwnPtr.h>
#include <wtf/Vector.h>
namespace JSC { namespace DFG {
@@ -39,18 +41,21 @@ public:
FrequentExitSite()
: m_bytecodeOffset(0) // 0 = empty value
, m_kind(ExitKindUnset)
+ , m_jitType(ExitFromAnything)
{
}
FrequentExitSite(WTF::HashTableDeletedValueType)
: m_bytecodeOffset(1) // 1 = deleted value
, m_kind(ExitKindUnset)
+ , m_jitType(ExitFromAnything)
{
}
- explicit FrequentExitSite(unsigned bytecodeOffset, ExitKind kind)
+ explicit FrequentExitSite(unsigned bytecodeOffset, ExitKind kind, ExitingJITType jitType = ExitFromAnything)
: m_bytecodeOffset(bytecodeOffset)
, m_kind(kind)
+ , m_jitType(jitType)
{
if (m_kind == ArgumentsEscaped) {
// Count this one globally. It doesn't matter where in the code block the arguments excaped;
@@ -61,9 +66,10 @@ public:
// Use this constructor if you wish for the exit site to be counted globally within its
// code block.
- explicit FrequentExitSite(ExitKind kind)
+ explicit FrequentExitSite(ExitKind kind, ExitingJITType jitType = ExitFromAnything)
: m_bytecodeOffset(0)
, m_kind(kind)
+ , m_jitType(jitType)
{
}
@@ -75,16 +81,36 @@ public:
bool operator==(const FrequentExitSite& other) const
{
return m_bytecodeOffset == other.m_bytecodeOffset
- && m_kind == other.m_kind;
+ && m_kind == other.m_kind
+ && m_jitType == other.m_jitType;
+ }
+
+ bool subsumes(const FrequentExitSite& other) const
+ {
+ if (m_bytecodeOffset != other.m_bytecodeOffset)
+ return false;
+ if (m_kind != other.m_kind)
+ return false;
+ if (m_jitType == ExitFromAnything)
+ return true;
+ return m_jitType == other.m_jitType;
}
unsigned hash() const
{
- return WTF::intHash(m_bytecodeOffset) + m_kind;
+ return WTF::intHash(m_bytecodeOffset) + m_kind + m_jitType * 7;
}
unsigned bytecodeOffset() const { return m_bytecodeOffset; }
ExitKind kind() const { return m_kind; }
+ ExitingJITType jitType() const { return m_jitType; }
+
+ FrequentExitSite withJITType(ExitingJITType jitType) const
+ {
+ FrequentExitSite result = *this;
+ result.m_jitType = jitType;
+ return result;
+ }
bool isHashTableDeletedValue() const
{
@@ -94,6 +120,7 @@ public:
private:
unsigned m_bytecodeOffset;
ExitKind m_kind;
+ ExitingJITType m_jitType;
};
struct FrequentExitSiteHash {
@@ -104,6 +131,7 @@ struct FrequentExitSiteHash {
} } // namespace JSC::DFG
+
namespace WTF {
template<typename T> struct DefaultHash;
@@ -154,7 +182,7 @@ public:
private:
friend class QueryableExitProfile;
- OwnPtr<Vector<FrequentExitSite>> m_frequentExitSites;
+ std::unique_ptr<Vector<FrequentExitSite>> m_frequentExitSites;
};
class QueryableExitProfile {
@@ -166,6 +194,10 @@ public:
bool hasExitSite(const FrequentExitSite& site) const
{
+ if (site.jitType() == ExitFromAnything) {
+ return hasExitSite(site.withJITType(ExitFromDFG))
+ || hasExitSite(site.withJITType(ExitFromFTL));
+ }
return m_frequentExitSites.find(site) != m_frequentExitSites.end();
}
@@ -184,4 +216,6 @@ private:
} } // namespace JSC::DFG
+#endif // ENABLE(DFG_JIT)
+
#endif // DFGExitProfile_h
diff --git a/Source/JavaScriptCore/bytecode/DataFormat.cpp b/Source/JavaScriptCore/bytecode/DataFormat.cpp
new file mode 100644
index 000000000..8bd42e100
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DataFormat.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DataFormat.h"
+
+#include <wtf/Assertions.h>
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::DataFormat dataFormat)
+{
+ out.print(dataFormatToString(dataFormat));
+}
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/DataFormat.h b/Source/JavaScriptCore/bytecode/DataFormat.h
index bb9da4c57..81d6831ad 100644
--- a/Source/JavaScriptCore/bytecode/DataFormat.h
+++ b/Source/JavaScriptCore/bytecode/DataFormat.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,7 +56,6 @@ enum DataFormat {
// Special data formats used only for OSR.
DataFormatDead = 33, // Implies jsUndefined().
- DataFormatArguments = 34 // Implies that the arguments object must be reified.
};
inline const char* dataFormatToString(DataFormat dataFormat)
@@ -90,8 +89,6 @@ inline const char* dataFormatToString(DataFormat dataFormat)
return "JSBoolean";
case DataFormatDead:
return "Dead";
- case DataFormatArguments:
- return "Arguments";
default:
RELEASE_ASSERT_NOT_REACHED();
return "Unknown";
@@ -124,6 +121,13 @@ inline bool isJSBoolean(DataFormat format)
return isJSFormat(format, DataFormatJSBoolean);
}
-}
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::DataFormat);
+
+} // namespace WTF
#endif // DataFormat_h
diff --git a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp
index 35af7c7b9..762387caf 100644
--- a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp
+++ b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,10 +26,46 @@
#include "config.h"
#include "DeferredCompilationCallback.h"
+#include "CodeBlock.h"
+
namespace JSC {
DeferredCompilationCallback::DeferredCompilationCallback() { }
DeferredCompilationCallback::~DeferredCompilationCallback() { }
+void DeferredCompilationCallback::compilationDidComplete(CodeBlock*, CodeBlock*, CompilationResult result)
+{
+ dumpCompiledSourcesIfNeeded();
+
+ switch (result) {
+ case CompilationFailed:
+ case CompilationInvalidated:
+ case CompilationSuccessful:
+ break;
+ case CompilationDeferred:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+}
+
+Vector<DeferredSourceDump>& DeferredCompilationCallback::ensureDeferredSourceDump()
+{
+ if (!m_deferredSourceDump)
+ m_deferredSourceDump = std::make_unique<Vector<DeferredSourceDump>>();
+ return *m_deferredSourceDump;
+}
+
+void DeferredCompilationCallback::dumpCompiledSourcesIfNeeded()
+{
+ if (!m_deferredSourceDump)
+ return;
+
+ ASSERT(Options::dumpSourceAtDFGTime());
+ unsigned index = 0;
+ for (auto& info : *m_deferredSourceDump) {
+ dataLog("[", ++index, "] ");
+ info.dump();
+ }
+}
+
} // JSC
diff --git a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h
index 6421e3e25..adecb82bb 100644
--- a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h
+++ b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h
@@ -27,7 +27,9 @@
#define DeferredCompilationCallback_h
#include "CompilationResult.h"
+#include "DeferredSourceDump.h"
#include <wtf/RefCounted.h>
+#include <wtf/Vector.h>
namespace JSC {
@@ -40,8 +42,15 @@ protected:
public:
virtual ~DeferredCompilationCallback();
- virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*) = 0;
- virtual void compilationDidComplete(CodeBlock*, CompilationResult) = 0;
+ virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*, CodeBlock* profiledDFGCodeBlock) = 0;
+ virtual void compilationDidComplete(CodeBlock*, CodeBlock* profiledDFGCodeBlock, CompilationResult);
+
+ Vector<DeferredSourceDump>& ensureDeferredSourceDump();
+
+private:
+ void dumpCompiledSourcesIfNeeded();
+
+ std::unique_ptr<Vector<DeferredSourceDump>> m_deferredSourceDump;
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp b/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp
new file mode 100644
index 000000000..48079db66
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DeferredSourceDump.h"
+
+#include "CodeBlock.h"
+#include "CodeBlockWithJITType.h"
+
+namespace JSC {
+
+DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock)
+ : m_codeBlock(codeBlock)
+ , m_rootCodeBlock(nullptr)
+ , m_rootJITType(JITCode::None)
+{
+}
+
+DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, CodeOrigin callerCodeOrigin)
+ : m_codeBlock(codeBlock)
+ , m_rootCodeBlock(rootCodeBlock)
+ , m_rootJITType(rootJITType)
+ , m_callerCodeOrigin(callerCodeOrigin)
+{
+}
+
+void DeferredSourceDump::dump()
+{
+ bool isInlinedFrame = !!m_rootCodeBlock;
+ if (isInlinedFrame)
+ dataLog("Inlined ");
+ else
+ dataLog("Compiled ");
+ dataLog(*m_codeBlock);
+
+ if (isInlinedFrame)
+ dataLog(" at ", CodeBlockWithJITType(m_rootCodeBlock, m_rootJITType), " ", m_callerCodeOrigin);
+
+ dataLog("\n'''");
+ m_codeBlock->dumpSource();
+ dataLog("'''\n");
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.h b/Source/JavaScriptCore/bytecode/DeferredSourceDump.h
index 108e23a37..72cb6b3b8 100644
--- a/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.h
+++ b/Source/JavaScriptCore/bytecode/DeferredSourceDump.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -20,46 +20,33 @@
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ProfiledCodeBlockJettisoningWatchpoint_h
-#define ProfiledCodeBlockJettisoningWatchpoint_h
+#ifndef DeferredSourceDump_h
+#define DeferredSourceDump_h
#include "CodeOrigin.h"
-#include "ExitKind.h"
-#include "Watchpoint.h"
+#include "JITCode.h"
namespace JSC {
class CodeBlock;
-class ProfiledCodeBlockJettisoningWatchpoint : public Watchpoint {
+class DeferredSourceDump {
public:
- ProfiledCodeBlockJettisoningWatchpoint()
- : m_exitKind(ExitKindUnset)
- , m_codeBlock(0)
- {
- }
-
- ProfiledCodeBlockJettisoningWatchpoint(
- CodeOrigin codeOrigin, ExitKind exitKind, CodeBlock* codeBlock)
- : m_codeOrigin(codeOrigin)
- , m_exitKind(exitKind)
- , m_codeBlock(codeBlock)
- {
- }
-
-protected:
- virtual void fireInternal() override;
+ DeferredSourceDump(CodeBlock*);
+ DeferredSourceDump(CodeBlock*, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, CodeOrigin callerCodeOrigin);
+
+ void dump();
private:
- CodeOrigin m_codeOrigin;
- ExitKind m_exitKind;
CodeBlock* m_codeBlock;
+ CodeBlock* m_rootCodeBlock;
+ JITCode::JITType m_rootJITType;
+ CodeOrigin m_callerCodeOrigin;
};
} // namespace JSC
-#endif // ProfiledCodeBlockJettisoningWatchpoint_h
-
+#endif // DeferredSourceDump_h
diff --git a/Source/JavaScriptCore/bytecode/EvalCodeCache.h b/Source/JavaScriptCore/bytecode/EvalCodeCache.h
index ff5911240..e1c7b2b47 100644
--- a/Source/JavaScriptCore/bytecode/EvalCodeCache.h
+++ b/Source/JavaScriptCore/bytecode/EvalCodeCache.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,6 +31,8 @@
#include "Executable.h"
#include "JSGlobalObject.h"
+#include "JSScope.h"
+#include "Options.h"
#include "SourceCode.h"
#include <wtf/HashMap.h>
#include <wtf/RefPtr.h>
@@ -42,21 +44,74 @@ namespace JSC {
class EvalCodeCache {
public:
- EvalExecutable* tryGet(bool inStrictContext, const String& evalSource, JSScope* scope)
+ class CacheKey {
+ public:
+ CacheKey(const String& source, bool isArrowFunctionContext)
+ : m_source(source.impl())
+ , m_isArrowFunctionContext(isArrowFunctionContext)
+ {
+ }
+
+ CacheKey(WTF::HashTableDeletedValueType)
+ : m_source(WTF::HashTableDeletedValue)
+ {
+ }
+
+ CacheKey() = default;
+
+ unsigned hash() const { return m_source->hash(); }
+
+ bool isEmptyValue() const { return !m_source; }
+
+ bool operator==(const CacheKey& other) const
+ {
+ return m_source == other.m_source && m_isArrowFunctionContext == other.m_isArrowFunctionContext;
+ }
+
+ bool isHashTableDeletedValue() const { return m_source.isHashTableDeletedValue(); }
+
+ struct Hash {
+ static unsigned hash(const CacheKey& key)
+ {
+ return key.hash();
+ }
+ static bool equal(const CacheKey& lhs, const CacheKey& rhs)
+ {
+ return StringHash::equal(lhs.m_source, rhs.m_source) && lhs.m_isArrowFunctionContext == rhs.m_isArrowFunctionContext;
+ }
+ static const bool safeToCompareToEmptyOrDeleted = false;
+ };
+
+ typedef SimpleClassHashTraits<CacheKey> HashTraits;
+
+ private:
+ RefPtr<StringImpl> m_source;
+ bool m_isArrowFunctionContext { false };
+ };
+
+ EvalExecutable* tryGet(bool inStrictContext, const String& evalSource, bool isArrowFunctionContext, JSScope* scope)
{
- if (!inStrictContext && evalSource.length() < maxCacheableSourceLength && scope->begin()->isVariableObject())
- return m_cacheMap.get(evalSource.impl()).get();
- return 0;
+ if (isCacheable(inStrictContext, evalSource, scope)) {
+ ASSERT(!inStrictContext);
+ return m_cacheMap.fastGet(CacheKey(evalSource, isArrowFunctionContext)).get();
+ }
+ return nullptr;
}
- EvalExecutable* getSlow(ExecState* exec, ScriptExecutable* owner, bool inStrictContext, const String& evalSource, JSScope* scope)
+ EvalExecutable* getSlow(ExecState* exec, JSCell* owner, bool inStrictContext, ThisTDZMode thisTDZMode, DerivedContextType derivedContextType, bool isArrowFunctionContext, const String& evalSource, JSScope* scope)
{
- EvalExecutable* evalExecutable = EvalExecutable::create(exec, makeSource(evalSource), inStrictContext);
+ VariableEnvironment variablesUnderTDZ;
+ JSScope::collectVariablesUnderTDZ(scope, variablesUnderTDZ);
+ EvalExecutable* evalExecutable = EvalExecutable::create(exec, makeSource(evalSource), inStrictContext, thisTDZMode, derivedContextType, isArrowFunctionContext, &variablesUnderTDZ);
if (!evalExecutable)
- return 0;
+ return nullptr;
- if (!inStrictContext && evalSource.length() < maxCacheableSourceLength && scope->begin()->isVariableObject() && m_cacheMap.size() < maxCacheEntries)
- m_cacheMap.set(evalSource.impl(), WriteBarrier<EvalExecutable>(exec->vm(), owner, evalExecutable));
+ if (isCacheable(inStrictContext, evalSource, scope) && m_cacheMap.size() < maxCacheEntries) {
+ ASSERT(!inStrictContext);
+ ASSERT_WITH_MESSAGE(thisTDZMode == ThisTDZMode::CheckIfNeeded, "Always CheckIfNeeded because the caching is enabled only in the sloppy mode.");
+ ASSERT_WITH_MESSAGE(derivedContextType == DerivedContextType::None, "derivedContextType is always None because class methods and class constructors are always evaluated as the strict code.");
+ m_cacheMap.set(CacheKey(evalSource, isArrowFunctionContext), WriteBarrier<EvalExecutable>(exec->vm(), owner, evalExecutable));
+ }
return evalExecutable;
}
@@ -71,10 +126,22 @@ namespace JSC {
}
private:
- static const unsigned maxCacheableSourceLength = 256;
+ ALWAYS_INLINE bool isCacheableScope(JSScope* scope)
+ {
+ return scope->isGlobalLexicalEnvironment() || scope->isFunctionNameScopeObject() || scope->isVarScope();
+ }
+
+ ALWAYS_INLINE bool isCacheable(bool inStrictContext, const String& evalSource, JSScope* scope)
+ {
+ // If eval() is called and it has access to a lexical scope, we can't soundly cache it.
+ // If the eval() only has access to the "var" scope, then we can cache it.
+ return !inStrictContext
+ && static_cast<size_t>(evalSource.length()) < Options::maximumEvalCacheableSourceLength()
+ && isCacheableScope(scope);
+ }
static const int maxCacheEntries = 64;
- typedef HashMap<RefPtr<StringImpl>, WriteBarrier<EvalExecutable>> EvalCacheMap;
+ typedef HashMap<CacheKey, WriteBarrier<EvalExecutable>, CacheKey::Hash, CacheKey::HashTraits> EvalCacheMap;
EvalCacheMap m_cacheMap;
};
diff --git a/Source/JavaScriptCore/bytecode/ExecutableInfo.h b/Source/JavaScriptCore/bytecode/ExecutableInfo.h
new file mode 100644
index 000000000..a45d5039d
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ExecutableInfo.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ExecutableInfo_h
+#define ExecutableInfo_h
+
+#include "ParserModes.h"
+
+namespace JSC {
+
+enum class DerivedContextType : uint8_t { None, DerivedConstructorContext, DerivedMethodContext };
+
+// FIXME: These flags, ParserModes and propagation to XXXCodeBlocks should be reorganized.
+// https://bugs.webkit.org/show_bug.cgi?id=151547
+struct ExecutableInfo {
+ ExecutableInfo(bool usesEval, bool isStrictMode, bool isConstructor, bool isBuiltinFunction, ConstructorKind constructorKind, SuperBinding superBinding, SourceParseMode parseMode, DerivedContextType derivedContextType, bool isArrowFunctionContext, bool isClassContext)
+ : m_usesEval(usesEval)
+ , m_isStrictMode(isStrictMode)
+ , m_isConstructor(isConstructor)
+ , m_isBuiltinFunction(isBuiltinFunction)
+ , m_constructorKind(static_cast<unsigned>(constructorKind))
+ , m_superBinding(static_cast<unsigned>(superBinding))
+ , m_parseMode(parseMode)
+ , m_derivedContextType(static_cast<unsigned>(derivedContextType))
+ , m_isArrowFunctionContext(isArrowFunctionContext)
+ , m_isClassContext(isClassContext)
+ {
+ ASSERT(m_constructorKind == static_cast<unsigned>(constructorKind));
+ ASSERT(m_superBinding == static_cast<unsigned>(superBinding));
+ }
+
+ bool usesEval() const { return m_usesEval; }
+ bool isStrictMode() const { return m_isStrictMode; }
+ bool isConstructor() const { return m_isConstructor; }
+ bool isBuiltinFunction() const { return m_isBuiltinFunction; }
+ ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }
+ SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); }
+ SourceParseMode parseMode() const { return m_parseMode; }
+ DerivedContextType derivedContextType() const { return static_cast<DerivedContextType>(m_derivedContextType); }
+ bool isArrowFunctionContext() const { return m_isArrowFunctionContext; }
+ bool isClassContext() const { return m_isClassContext; }
+
+private:
+ unsigned m_usesEval : 1;
+ unsigned m_isStrictMode : 1;
+ unsigned m_isConstructor : 1;
+ unsigned m_isBuiltinFunction : 1;
+ unsigned m_constructorKind : 2;
+ unsigned m_superBinding : 1;
+ SourceParseMode m_parseMode;
+ unsigned m_derivedContextType : 2;
+ unsigned m_isArrowFunctionContext : 1;
+ unsigned m_isClassContext : 1;
+};
+
+} // namespace JSC
+
+#endif // ExecutableInfo_h
diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
index 3a646a86a..fe4e430f1 100644
--- a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
+++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,21 +28,25 @@
#include "CodeBlock.h"
#include "ExecutableAllocator.h"
+#include "JSCInlines.h"
#include <wtf/StringExtras.h>
namespace JSC {
-ExecutionCounter::ExecutionCounter()
+template<CountingVariant countingVariant>
+ExecutionCounter<countingVariant>::ExecutionCounter()
{
reset();
}
-void ExecutionCounter::forceSlowPathConcurrently()
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::forceSlowPathConcurrently()
{
m_counter = 0;
}
-bool ExecutionCounter::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock)
+template<CountingVariant countingVariant>
+bool ExecutionCounter<countingVariant>::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock)
{
if (hasCrossedThreshold(codeBlock))
return true;
@@ -53,21 +57,23 @@ bool ExecutionCounter::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock)
return false;
}
-void ExecutionCounter::setNewThreshold(int32_t threshold, CodeBlock* codeBlock)
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::setNewThreshold(int32_t threshold, CodeBlock* codeBlock)
{
reset();
m_activeThreshold = threshold;
setThreshold(codeBlock);
}
-void ExecutionCounter::deferIndefinitely()
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::deferIndefinitely()
{
m_totalCount = 0;
m_activeThreshold = std::numeric_limits<int32_t>::max();
m_counter = std::numeric_limits<int32_t>::min();
}
-double ExecutionCounter::applyMemoryUsageHeuristics(int32_t value, CodeBlock* codeBlock)
+double applyMemoryUsageHeuristics(int32_t value, CodeBlock* codeBlock)
{
#if ENABLE(JIT)
double multiplier =
@@ -82,8 +88,7 @@ double ExecutionCounter::applyMemoryUsageHeuristics(int32_t value, CodeBlock* co
return multiplier * value;
}
-int32_t ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
- int32_t value, CodeBlock* codeBlock)
+int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock* codeBlock)
{
double doubleResult = applyMemoryUsageHeuristics(value, codeBlock);
@@ -95,7 +100,8 @@ int32_t ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
return static_cast<int32_t>(doubleResult);
}
-bool ExecutionCounter::hasCrossedThreshold(CodeBlock* codeBlock) const
+template<CountingVariant countingVariant>
+bool ExecutionCounter<countingVariant>::hasCrossedThreshold(CodeBlock* codeBlock) const
{
// This checks if the current count rounded up to the threshold we were targeting.
// For example, if we are using half of available executable memory and have
@@ -119,18 +125,17 @@ bool ExecutionCounter::hasCrossedThreshold(CodeBlock* codeBlock) const
return static_cast<double>(m_totalCount) + m_counter >=
modifiedThreshold - static_cast<double>(
- std::min(m_activeThreshold, Options::maximumExecutionCountsBetweenCheckpoints())) / 2;
+ std::min(m_activeThreshold, maximumExecutionCountsBetweenCheckpoints())) / 2;
}
-bool ExecutionCounter::setThreshold(CodeBlock* codeBlock)
+template<CountingVariant countingVariant>
+bool ExecutionCounter<countingVariant>::setThreshold(CodeBlock* codeBlock)
{
if (m_activeThreshold == std::numeric_limits<int32_t>::max()) {
deferIndefinitely();
return false;
}
- ASSERT(!m_activeThreshold || !hasCrossedThreshold(codeBlock));
-
// Compute the true total count.
double trueTotalCount = count();
@@ -159,17 +164,22 @@ bool ExecutionCounter::setThreshold(CodeBlock* codeBlock)
return false;
}
-void ExecutionCounter::reset()
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::reset()
{
m_counter = 0;
m_totalCount = 0;
m_activeThreshold = 0;
}
-void ExecutionCounter::dump(PrintStream& out) const
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::dump(PrintStream& out) const
{
out.printf("%lf/%lf, %d", count(), static_cast<double>(m_activeThreshold), m_counter);
}
+template class ExecutionCounter<CountingForBaseline>;
+template class ExecutionCounter<CountingForUpperTiers>;
+
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.h b/Source/JavaScriptCore/bytecode/ExecutionCounter.h
index a7346691d..5002c6c67 100644
--- a/Source/JavaScriptCore/bytecode/ExecutionCounter.h
+++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,25 @@ namespace JSC {
class CodeBlock;
+enum CountingVariant {
+ CountingForBaseline,
+ CountingForUpperTiers
+};
+
+double applyMemoryUsageHeuristics(int32_t value, CodeBlock*);
+int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock*);
+
+inline int32_t formattedTotalExecutionCount(float value)
+{
+ union {
+ int32_t i;
+ float f;
+ } u;
+ u.f = value;
+ return u.i;
+}
+
+template<CountingVariant countingVariant>
class ExecutionCounter {
public:
ExecutionCounter();
@@ -44,31 +63,33 @@ public:
void deferIndefinitely();
double count() const { return static_cast<double>(m_totalCount) + m_counter; }
void dump(PrintStream&) const;
- static double applyMemoryUsageHeuristics(int32_t value, CodeBlock*);
- static int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock*);
+
+ static int32_t maximumExecutionCountsBetweenCheckpoints()
+ {
+ switch (countingVariant) {
+ case CountingForBaseline:
+ return Options::maximumExecutionCountsBetweenCheckpointsForBaseline();
+ case CountingForUpperTiers:
+ return Options::maximumExecutionCountsBetweenCheckpointsForUpperTiers();
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+ }
+ }
+
template<typename T>
static T clippedThreshold(JSGlobalObject* globalObject, T threshold)
{
int32_t maxThreshold;
if (Options::randomizeExecutionCountsBetweenCheckpoints())
- maxThreshold = globalObject->weakRandomInteger() % Options::maximumExecutionCountsBetweenCheckpoints();
+ maxThreshold = globalObject->weakRandomInteger() % maximumExecutionCountsBetweenCheckpoints();
else
- maxThreshold = Options::maximumExecutionCountsBetweenCheckpoints();
+ maxThreshold = maximumExecutionCountsBetweenCheckpoints();
if (threshold > maxThreshold)
threshold = maxThreshold;
return threshold;
}
- static int32_t formattedTotalCount(float value)
- {
- union {
- int32_t i;
- float f;
- } u;
- u.f = value;
- return u.i;
- }
-
private:
bool hasCrossedThreshold(CodeBlock*) const;
bool setThreshold(CodeBlock*);
@@ -89,11 +110,14 @@ public:
// m_counter.
float m_totalCount;
- // This is the threshold we were originally targetting, without any correction for
+ // This is the threshold we were originally targeting, without any correction for
// the memory usage heuristics.
int32_t m_activeThreshold;
};
+typedef ExecutionCounter<CountingForBaseline> BaselineExecutionCounter;
+typedef ExecutionCounter<CountingForUpperTiers> UpperTierExecutionCounter;
+
} // namespace JSC
#endif // ExecutionCounter_h
diff --git a/Source/JavaScriptCore/bytecode/ExitKind.cpp b/Source/JavaScriptCore/bytecode/ExitKind.cpp
index 350aa5857..84ff57b59 100644
--- a/Source/JavaScriptCore/bytecode/ExitKind.cpp
+++ b/Source/JavaScriptCore/bytecode/ExitKind.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -38,20 +38,20 @@ const char* exitKindToString(ExitKind kind)
return "Unset";
case BadType:
return "BadType";
- case BadFunction:
- return "BadFunction";
+ case BadCell:
+ return "BadCell";
+ case BadIdent:
+ return "BadIdent";
case BadExecutable:
return "BadExecutable";
case BadCache:
return "BadCache";
- case BadCacheWatchpoint:
- return "BadCacheWatchpoint";
- case BadWeakConstantCache:
- return "BadWeakConstantCache";
- case BadWeakConstantCacheWatchpoint:
- return "BadWeakConstantCacheWatchpoint";
+ case BadConstantCache:
+ return "BadConstantCache";
case BadIndexingType:
return "BadIndexingType";
+ case BadTypeInfoFlags:
+ return "BadTypeInfoFlags";
case Overflow:
return "Overflow";
case NegativeZero:
@@ -68,38 +68,43 @@ const char* exitKindToString(ExitKind kind)
return "InadequateCoverage";
case ArgumentsEscaped:
return "ArgumentsEscaped";
+ case ExoticObjectMode:
+ return "ExoticObjectMode";
case NotStringObject:
return "NotStringObject";
+ case VarargsOverflow:
+ return "VarargsOverflow";
+ case TDZFailure:
+ return "TDZFailure";
case Uncountable:
return "Uncountable";
- case UncountableWatchpoint:
- return "UncountableWatchpoint";
case UncountableInvalidation:
return "UncountableInvalidation";
case WatchdogTimerFired:
return "WatchdogTimerFired";
case DebuggerEvent:
return "DebuggerEvent";
+ case ExceptionCheck:
+ return "ExceptionCheck";
+ case GenericUnwind:
+ return "GenericUnwind";
}
RELEASE_ASSERT_NOT_REACHED();
return "Unknown";
}
-bool exitKindIsCountable(ExitKind kind)
+bool exitKindMayJettison(ExitKind kind)
{
switch (kind) {
- case ExitKindUnset:
- RELEASE_ASSERT_NOT_REACHED();
- case BadType:
- case Uncountable:
- case UncountableWatchpoint:
- case LoadFromHole: // Already counted directly by the baseline JIT.
- case StoreToHole: // Already counted directly by the baseline JIT.
- case OutOfBounds: // Already counted directly by the baseline JIT.
+ case ExceptionCheck:
+ case GenericUnwind:
return false;
default:
return true;
}
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ExitKind.h b/Source/JavaScriptCore/bytecode/ExitKind.h
index a9f6df6d4..22a54a1a9 100644
--- a/Source/JavaScriptCore/bytecode/ExitKind.h
+++ b/Source/JavaScriptCore/bytecode/ExitKind.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,16 +28,16 @@
namespace JSC {
-enum ExitKind {
+enum ExitKind : uint8_t {
ExitKindUnset,
BadType, // We exited because a type prediction was wrong.
- BadFunction, // We exited because we made an incorrect assumption about what function we would see.
+ BadCell, // We exited because we made an incorrect assumption about what cell we would see. Usually used for function checks.
+ BadIdent, // We exited because we made an incorrect assumption about what identifier we would see. Usually used for cached Id check in get_by_val.
BadExecutable, // We exited because we made an incorrect assumption about what executable we would see.
BadCache, // We exited because an inline cache was wrong.
- BadWeakConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong.
- BadCacheWatchpoint, // Same as BadCache but from a watchpoint.
- BadWeakConstantCacheWatchpoint, // Same as BadWeakConstantCache but from a watchpoint.
+ BadConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong.
BadIndexingType, // We exited because an indexing type was wrong.
+ BadTypeInfoFlags, // We exited because we made an incorrect assumption about what TypeInfo flags we would see.
Overflow, // We exited because of overflow.
NegativeZero, // We exited because we encountered negative zero.
Int52Overflow, // We exited because of an Int52 overflow.
@@ -46,28 +46,20 @@ enum ExitKind {
OutOfBounds, // We had an out-of-bounds access to an array.
InadequateCoverage, // We exited because we ended up in code that didn't have profiling coverage.
ArgumentsEscaped, // We exited because arguments escaped but we didn't expect them to.
+ ExoticObjectMode, // We exited because some exotic object that we were accessing was in an exotic mode (like Arguments with slow arguments).
NotStringObject, // We exited because we shouldn't have attempted to optimize string object access.
+ VarargsOverflow, // We exited because a varargs call passed more arguments than we expected.
+ TDZFailure, // We exited because we were in the TDZ and accessed the variable.
Uncountable, // We exited for none of the above reasons, and we should not count it. Most uses of this should be viewed as a FIXME.
UncountableInvalidation, // We exited because the code block was invalidated; this means that we've already counted the reasons why the code block was invalidated.
- UncountableWatchpoint, // We exited because of a watchpoint, which isn't counted because watchpoints do tracking themselves.
WatchdogTimerFired, // We exited because we need to service the watchdog timer.
- DebuggerEvent // We exited because we need to service the debugger.
+ DebuggerEvent, // We exited because we need to service the debugger.
+ ExceptionCheck, // We exited because a direct exception check showed that we threw an exception from a C call.
+ GenericUnwind, // We exited because a we arrived at this OSR exit from genericUnwind.
};
const char* exitKindToString(ExitKind);
-bool exitKindIsCountable(ExitKind);
-
-inline bool isWatchpoint(ExitKind kind)
-{
- switch (kind) {
- case BadCacheWatchpoint:
- case BadWeakConstantCacheWatchpoint:
- case UncountableWatchpoint:
- return true;
- default:
- return false;
- }
-}
+bool exitKindMayJettison(ExitKind);
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ExitingJITType.cpp b/Source/JavaScriptCore/bytecode/ExitingJITType.cpp
new file mode 100644
index 000000000..aa8f120b6
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ExitingJITType.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ExitingJITType.h"
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, ExitingJITType type)
+{
+ switch (type) {
+ case ExitFromAnything:
+ out.print("FromAnything");
+ return;
+ case ExitFromDFG:
+ out.print("FromDFG");
+ return;
+ case ExitFromFTL:
+ out.print("FromFTL");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/bytecode/ExitingJITType.h b/Source/JavaScriptCore/bytecode/ExitingJITType.h
new file mode 100644
index 000000000..e8ed03e41
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ExitingJITType.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ExitingJITType_h
+#define ExitingJITType_h
+
+#include "JITCode.h"
+
+namespace JSC {
+
+enum ExitingJITType : uint8_t {
+ ExitFromAnything,
+ ExitFromDFG,
+ ExitFromFTL
+};
+
+inline ExitingJITType exitingJITTypeFor(JITCode::JITType type)
+{
+ switch (type) {
+ case JITCode::DFGJIT:
+ return ExitFromDFG;
+ case JITCode::FTLJIT:
+ return ExitFromFTL;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return ExitFromAnything;
+ }
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::ExitingJITType);
+
+} // namespace WTF
+
+#endif // ExitingJITType_h
+
diff --git a/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h b/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h
index d34392121..b22198a00 100644
--- a/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h
+++ b/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,33 +35,22 @@ class BytecodeLivenessAnalysis;
typedef HashMap<unsigned, FastBitVector, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> BytecodeToBitmapMap;
class FullBytecodeLiveness {
+ WTF_MAKE_FAST_ALLOCATED;
public:
- FullBytecodeLiveness() : m_codeBlock(0) { }
-
- // We say "out" to refer to the bitvector that contains raw results for a bytecode
- // instruction.
- const FastBitVector& getOut(unsigned bytecodeIndex) const
+ const FastBitVector& getLiveness(unsigned bytecodeIndex) const
{
- BytecodeToBitmapMap::const_iterator iter = m_map.find(bytecodeIndex);
- ASSERT(iter != m_map.end());
- return iter->value;
+ return m_map[bytecodeIndex];
}
bool operandIsLive(int operand, unsigned bytecodeIndex) const
{
- return operandIsAlwaysLive(m_codeBlock, operand) || operandThatIsNotAlwaysLiveIsLive(m_codeBlock, getOut(bytecodeIndex), operand);
- }
-
- FastBitVector getLiveness(unsigned bytecodeIndex) const
- {
- return getLivenessInfo(m_codeBlock, getOut(bytecodeIndex));
+ return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(getLiveness(bytecodeIndex), operand);
}
private:
friend class BytecodeLivenessAnalysis;
- CodeBlock* m_codeBlock;
- BytecodeToBitmapMap m_map;
+ Vector<FastBitVector, 0, UnsafeVectorOverflow> m_map;
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
index fbb3da1a5..66a4dd81d 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,279 +27,348 @@
#include "GetByIdStatus.h"
#include "CodeBlock.h"
+#include "ComplexGetStatus.h"
+#include "JSCInlines.h"
#include "JSScope.h"
#include "LLIntData.h"
#include "LowLevelInterpreter.h"
-#include "Operations.h"
+#include "PolymorphicAccess.h"
+#include <wtf/ListDump.h>
namespace JSC {
-GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, StringImpl* uid)
+bool GetByIdStatus::appendVariant(const GetByIdVariant& variant)
+{
+ // Attempt to merge this variant with an already existing variant.
+ for (unsigned i = 0; i < m_variants.size(); ++i) {
+ if (m_variants[i].attemptToMerge(variant))
+ return true;
+ }
+
+ // Make sure there is no overlap. We should have pruned out opportunities for
+ // overlap but it's possible that an inline cache got into a weird state. We are
+ // defensive and bail if we detect crazy.
+ for (unsigned i = 0; i < m_variants.size(); ++i) {
+ if (m_variants[i].structureSet().overlaps(variant.structureSet()))
+ return false;
+ }
+
+ m_variants.append(variant);
+ return true;
+}
+
+#if ENABLE(DFG_JIT)
+bool GetByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+ return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
+ || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
+}
+#endif
+
+GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
UNUSED_PARAM(uid);
-#if ENABLE(LLINT)
+
+ VM& vm = *profiledBlock->vm();
+
Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
- if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_get_array_length))
+ if (instruction[0].u.opcode == LLInt::getOpcode(op_get_array_length))
return GetByIdStatus(NoInformation, false);
- Structure* structure = instruction[4].u.structure.get();
- if (!structure)
+ StructureID structureID = instruction[4].u.structureID;
+ if (!structureID)
return GetByIdStatus(NoInformation, false);
+ Structure* structure = vm.heap.structureIDTable().get(structureID);
+
if (structure->takesSlowPathInDFGForImpureProperty())
return GetByIdStatus(NoInformation, false);
unsigned attributesIgnored;
- JSCell* specificValue;
- PropertyOffset offset = structure->getConcurrently(
- *profiledBlock->vm(), uid, attributesIgnored, specificValue);
- if (structure->isDictionary())
- specificValue = 0;
+ PropertyOffset offset = structure->getConcurrently(uid, attributesIgnored);
if (!isValidOffset(offset))
return GetByIdStatus(NoInformation, false);
- return GetByIdStatus(Simple, false, StructureSet(structure), offset, specificValue);
-#else
- return GetByIdStatus(NoInformation, false);
-#endif
+ return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset));
}
-void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, StringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
-#if ENABLE(JIT)
- // Validate the chain. If the chain is invalid, then currently the best thing
- // we can do is to assume that TakesSlow is true. In the future, it might be
- // worth exploring reifying the structure chain from the structure we've got
- // instead of using the one from the cache, since that will do the right things
- // if the structure chain has changed. But that may be harder, because we may
- // then end up having a different type of access altogether. And it currently
- // does not appear to be worth it to do so -- effectively, the heuristic we
- // have now is that if the structure chain has changed between when it was
- // cached on in the baseline JIT and when the DFG tried to inline the access,
- // then we fall back on a polymorphic access.
- if (!result.m_chain->isStillValid())
- return;
+ ConcurrentJITLocker locker(profiledBlock->m_lock);
- if (result.m_chain->head()->takesSlowPathInDFGForImpureProperty())
- return;
- size_t chainSize = result.m_chain->size();
- for (size_t i = 0; i < chainSize; i++) {
- if (result.m_chain->at(i)->takesSlowPathInDFGForImpureProperty())
- return;
- }
+ GetByIdStatus result;
- JSObject* currentObject = result.m_chain->terminalPrototype();
- Structure* currentStructure = result.m_chain->last();
+#if ENABLE(DFG_JIT)
+ result = computeForStubInfoWithoutExitSiteFeedback(
+ locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid,
+ CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
- ASSERT_UNUSED(currentObject, currentObject);
-
- unsigned attributesIgnored;
- JSCell* specificValue;
-
- result.m_offset = currentStructure->getConcurrently(
- *profiledBlock->vm(), uid, attributesIgnored, specificValue);
- if (currentStructure->isDictionary())
- specificValue = 0;
- if (!isValidOffset(result.m_offset))
- return;
-
- result.m_structureSet.add(result.m_chain->head());
- result.m_specificValue = JSValue(specificValue);
+ if (!result.takesSlowPath()
+ && hasExitSite(locker, profiledBlock, bytecodeIndex))
+ return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true);
#else
- UNUSED_PARAM(result);
- UNUSED_PARAM(profiledBlock);
- UNUSED_PARAM(uid);
- UNREACHABLE_FOR_PLATFORM();
+ UNUSED_PARAM(map);
#endif
+
+ if (!result)
+ return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
+
+ return result;
}
-GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
+#if ENABLE(DFG_JIT)
+GetByIdStatus GetByIdStatus::computeForStubInfo(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
{
- ConcurrentJITLocker locker(profiledBlock->m_lock);
-
- UNUSED_PARAM(profiledBlock);
- UNUSED_PARAM(bytecodeIndex);
- UNUSED_PARAM(uid);
+ GetByIdStatus result = GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
+ locker, profiledBlock, stubInfo, uid,
+ CallLinkStatus::computeExitSiteData(locker, profiledBlock, codeOrigin.bytecodeIndex));
+
+ if (!result.takesSlowPath() && GetByIdStatus::hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
+ return GetByIdStatus(result.makesCalls() ? GetByIdStatus::MakesCalls : GetByIdStatus::TakesSlowPath, true);
+ return result;
+}
+#endif // ENABLE(DFG_JIT)
+
#if ENABLE(JIT)
- StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
- if (!stubInfo || !stubInfo->seen)
- return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
-
- if (stubInfo->resetByGC)
- return GetByIdStatus(TakesSlowPath, true);
+GetByIdStatus GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
+ const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, UniquedStringImpl* uid,
+ CallLinkStatus::ExitSiteData callExitSiteData)
+{
+ if (!stubInfo || !stubInfo->everConsidered)
+ return GetByIdStatus(NoInformation);
- PolymorphicAccessStructureList* list;
- int listSize;
- switch (stubInfo->accessType) {
- case access_get_by_id_self_list:
- list = stubInfo->u.getByIdSelfList.structureList;
- listSize = stubInfo->u.getByIdSelfList.listSize;
- break;
- case access_get_by_id_proto_list:
- list = stubInfo->u.getByIdProtoList.structureList;
- listSize = stubInfo->u.getByIdProtoList.listSize;
- break;
- default:
- list = 0;
- listSize = 0;
- break;
- }
- for (int i = 0; i < listSize; ++i) {
- if (!list->list[i].isDirect)
- return GetByIdStatus(MakesCalls, true);
+ PolymorphicAccess* list = 0;
+ State slowPathState = TakesSlowPath;
+ if (stubInfo->cacheType == CacheType::Stub) {
+ list = stubInfo->u.stub;
+ for (unsigned i = 0; i < list->size(); ++i) {
+ const AccessCase& access = list->at(i);
+ if (access.doesCalls())
+ slowPathState = MakesCalls;
+ }
}
- // Next check if it takes slow case, in which case we want to be kind of careful.
- if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
- return GetByIdStatus(TakesSlowPath, true);
+ if (stubInfo->tookSlowPath)
+ return GetByIdStatus(slowPathState);
// Finally figure out if we can derive an access strategy.
GetByIdStatus result;
+ result.m_state = Simple;
result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
- switch (stubInfo->accessType) {
- case access_unset:
- return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
+ switch (stubInfo->cacheType) {
+ case CacheType::Unset:
+ return GetByIdStatus(NoInformation);
- case access_get_by_id_self: {
- Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
+ case CacheType::GetByIdSelf: {
+ Structure* structure = stubInfo->u.byIdSelf.baseObjectStructure.get();
if (structure->takesSlowPathInDFGForImpureProperty())
- return GetByIdStatus(TakesSlowPath, true);
+ return GetByIdStatus(slowPathState, true);
unsigned attributesIgnored;
- JSCell* specificValue;
- result.m_offset = structure->getConcurrently(
- *profiledBlock->vm(), uid, attributesIgnored, specificValue);
- if (structure->isDictionary())
- specificValue = 0;
-
- if (isValidOffset(result.m_offset)) {
- result.m_structureSet.add(structure);
- result.m_specificValue = JSValue(specificValue);
- }
+ GetByIdVariant variant;
+ variant.m_offset = structure->getConcurrently(uid, attributesIgnored);
+ if (!isValidOffset(variant.m_offset))
+ return GetByIdStatus(slowPathState, true);
- if (isValidOffset(result.m_offset))
- ASSERT(result.m_structureSet.size());
- break;
+ variant.m_structureSet.add(structure);
+ bool didAppend = result.appendVariant(variant);
+ ASSERT_UNUSED(didAppend, didAppend);
+ return result;
}
- case access_get_by_id_self_list: {
- for (int i = 0; i < listSize; ++i) {
- ASSERT(list->list[i].isDirect);
-
- Structure* structure = list->list[i].base.get();
- if (structure->takesSlowPathInDFGForImpureProperty())
- return GetByIdStatus(TakesSlowPath, true);
-
- if (result.m_structureSet.contains(structure))
- continue;
-
- unsigned attributesIgnored;
- JSCell* specificValue;
- PropertyOffset myOffset = structure->getConcurrently(
- *profiledBlock->vm(), uid, attributesIgnored, specificValue);
- if (structure->isDictionary())
- specificValue = 0;
+ case CacheType::Stub: {
+ for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
+ const AccessCase& access = list->at(listIndex);
+ if (access.viaProxy())
+ return GetByIdStatus(slowPathState, true);
- if (!isValidOffset(myOffset)) {
- result.m_offset = invalidOffset;
- break;
+ Structure* structure = access.structure();
+ if (!structure) {
+ // The null structure cases arise due to array.length and string.length. We have no way
+ // of creating a GetByIdVariant for those, and we don't really have to since the DFG
+ // handles those cases in FixupPhase using value profiling. That's a bit awkward - we
+ // shouldn't have to use value profiling to discover something that the AccessCase
+ // could have told us. But, it works well enough. So, our only concern here is to not
+ // crash on null structure.
+ return GetByIdStatus(slowPathState, true);
}
-
- if (!i) {
- result.m_offset = myOffset;
- result.m_specificValue = JSValue(specificValue);
- } else if (result.m_offset != myOffset) {
- result.m_offset = invalidOffset;
- break;
- } else if (result.m_specificValue != JSValue(specificValue))
- result.m_specificValue = JSValue();
- result.m_structureSet.add(structure);
+ ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
+ structure, access.conditionSet(), uid);
+
+ switch (complexGetStatus.kind()) {
+ case ComplexGetStatus::ShouldSkip:
+ continue;
+
+ case ComplexGetStatus::TakesSlowPath:
+ return GetByIdStatus(slowPathState, true);
+
+ case ComplexGetStatus::Inlineable: {
+ std::unique_ptr<CallLinkStatus> callLinkStatus;
+ JSFunction* intrinsicFunction = nullptr;
+
+ switch (access.type()) {
+ case AccessCase::Load: {
+ break;
+ }
+ case AccessCase::IntrinsicGetter: {
+ intrinsicFunction = access.intrinsicFunction();
+ break;
+ }
+ case AccessCase::Getter: {
+ CallLinkInfo* callLinkInfo = access.callLinkInfo();
+ ASSERT(callLinkInfo);
+ callLinkStatus = std::make_unique<CallLinkStatus>(
+ CallLinkStatus::computeFor(
+ locker, profiledBlock, *callLinkInfo, callExitSiteData));
+ break;
+ }
+ default: {
+ // FIXME: It would be totally sweet to support more of these at some point in the
+ // future. https://bugs.webkit.org/show_bug.cgi?id=133052
+ return GetByIdStatus(slowPathState, true);
+ } }
+
+ GetByIdVariant variant(
+ StructureSet(structure), complexGetStatus.offset(),
+ complexGetStatus.conditionSet(), WTFMove(callLinkStatus),
+ intrinsicFunction);
+
+ if (!result.appendVariant(variant))
+ return GetByIdStatus(slowPathState, true);
+ break;
+ } }
}
-
- if (isValidOffset(result.m_offset))
- ASSERT(result.m_structureSet.size());
- break;
- }
- case access_get_by_id_proto: {
- if (!stubInfo->u.getByIdProto.isDirect)
- return GetByIdStatus(MakesCalls, true);
- result.m_chain = adoptRef(new IntendedStructureChain(
- profiledBlock,
- stubInfo->u.getByIdProto.baseObjectStructure.get(),
- stubInfo->u.getByIdProto.prototypeStructure.get()));
- computeForChain(result, profiledBlock, uid);
- break;
- }
-
- case access_get_by_id_chain: {
- if (!stubInfo->u.getByIdChain.isDirect)
- return GetByIdStatus(MakesCalls, true);
- result.m_chain = adoptRef(new IntendedStructureChain(
- profiledBlock,
- stubInfo->u.getByIdChain.baseObjectStructure.get(),
- stubInfo->u.getByIdChain.chain.get(),
- stubInfo->u.getByIdChain.count));
- computeForChain(result, profiledBlock, uid);
- break;
+ return result;
}
default:
- ASSERT(!isValidOffset(result.m_offset));
- break;
+ return GetByIdStatus(slowPathState, true);
}
- if (!isValidOffset(result.m_offset)) {
- result.m_state = TakesSlowPath;
- result.m_structureSet.clear();
- result.m_chain.clear();
- result.m_specificValue = JSValue();
- } else
- result.m_state = Simple;
-
- return result;
-#else // ENABLE(JIT)
- UNUSED_PARAM(map);
- return GetByIdStatus(NoInformation, false);
+ RELEASE_ASSERT_NOT_REACHED();
+ return GetByIdStatus();
+}
#endif // ENABLE(JIT)
+
+GetByIdStatus GetByIdStatus::computeFor(
+ CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap,
+ StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+#if ENABLE(DFG_JIT)
+ if (dfgBlock) {
+ CallLinkStatus::ExitSiteData exitSiteData;
+ {
+ ConcurrentJITLocker locker(profiledBlock->m_lock);
+ exitSiteData = CallLinkStatus::computeExitSiteData(
+ locker, profiledBlock, codeOrigin.bytecodeIndex);
+ }
+
+ GetByIdStatus result;
+ {
+ ConcurrentJITLocker locker(dfgBlock->m_lock);
+ result = computeForStubInfoWithoutExitSiteFeedback(
+ locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+ }
+
+ if (result.takesSlowPath())
+ return result;
+
+ {
+ ConcurrentJITLocker locker(profiledBlock->m_lock);
+ if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
+ return GetByIdStatus(TakesSlowPath, true);
+ }
+
+ if (result.isSet())
+ return result;
+ }
+#else
+ UNUSED_PARAM(dfgBlock);
+ UNUSED_PARAM(dfgMap);
+#endif
+
+ return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
}
-GetByIdStatus GetByIdStatus::computeFor(VM& vm, Structure* structure, StringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, UniquedStringImpl* uid)
{
// For now we only handle the super simple self access case. We could handle the
// prototype case in the future.
- if (!structure)
- return GetByIdStatus(TakesSlowPath);
+ if (set.isEmpty())
+ return GetByIdStatus();
- if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
- return GetByIdStatus(TakesSlowPath);
-
- if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+ if (parseIndex(*uid))
return GetByIdStatus(TakesSlowPath);
- if (!structure->propertyAccessesAreCacheable())
- return GetByIdStatus(TakesSlowPath);
-
GetByIdStatus result;
- result.m_wasSeenInJIT = false; // To my knowledge nobody that uses computeFor(VM&, Structure*, StringImpl*) reads this field, but I might as well be honest: no, it wasn't seen in the JIT, since I computed it statically.
- unsigned attributes;
- JSCell* specificValue;
- result.m_offset = structure->getConcurrently(vm, uid, attributes, specificValue);
- if (!isValidOffset(result.m_offset))
- return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
- if (attributes & Accessor)
- return GetByIdStatus(MakesCalls);
- if (structure->isDictionary())
- specificValue = 0;
- result.m_structureSet.add(structure);
- result.m_specificValue = JSValue(specificValue);
result.m_state = Simple;
+ result.m_wasSeenInJIT = false;
+ for (unsigned i = 0; i < set.size(); ++i) {
+ Structure* structure = set[i];
+ if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+ return GetByIdStatus(TakesSlowPath);
+
+ if (!structure->propertyAccessesAreCacheable())
+ return GetByIdStatus(TakesSlowPath);
+
+ unsigned attributes;
+ PropertyOffset offset = structure->getConcurrently(uid, attributes);
+ if (!isValidOffset(offset))
+ return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
+ if (attributes & Accessor)
+ return GetByIdStatus(MakesCalls); // We could be smarter here, like strength-reducing this to a Call.
+
+ if (!result.appendVariant(GetByIdVariant(structure, offset)))
+ return GetByIdStatus(TakesSlowPath);
+ }
+
return result;
}
+bool GetByIdStatus::makesCalls() const
+{
+ switch (m_state) {
+ case NoInformation:
+ case TakesSlowPath:
+ return false;
+ case Simple:
+ for (unsigned i = m_variants.size(); i--;) {
+ if (m_variants[i].callLinkStatus())
+ return true;
+ }
+ return false;
+ case MakesCalls:
+ return true;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+
+ return false;
+}
+
+void GetByIdStatus::dump(PrintStream& out) const
+{
+ out.print("(");
+ switch (m_state) {
+ case NoInformation:
+ out.print("NoInformation");
+ break;
+ case Simple:
+ out.print("Simple");
+ break;
+ case TakesSlowPath:
+ out.print("TakesSlowPath");
+ break;
+ case MakesCalls:
+ out.print("MakesCalls");
+ break;
+ }
+ out.print(", ", listDump(m_variants), ", seenInJIT = ", m_wasSeenInJIT, ")");
+}
+
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.h b/Source/JavaScriptCore/bytecode/GetByIdStatus.h
index a1e801cca..6afac5400 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.h
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,9 +26,11 @@
#ifndef GetByIdStatus_h
#define GetByIdStatus_h
-#include "IntendedStructureChain.h"
-#include "PropertyOffset.h"
-#include "StructureSet.h"
+#include "CallLinkStatus.h"
+#include "CodeOrigin.h"
+#include "ConcurrentJITLock.h"
+#include "ExitingJITType.h"
+#include "GetByIdVariant.h"
#include "StructureStubInfo.h"
namespace JSC {
@@ -47,57 +49,66 @@ public:
GetByIdStatus()
: m_state(NoInformation)
- , m_offset(invalidOffset)
{
}
explicit GetByIdStatus(State state)
: m_state(state)
- , m_offset(invalidOffset)
{
ASSERT(state == NoInformation || state == TakesSlowPath || state == MakesCalls);
}
GetByIdStatus(
- State state, bool wasSeenInJIT, const StructureSet& structureSet = StructureSet(),
- PropertyOffset offset = invalidOffset, JSValue specificValue = JSValue(), PassRefPtr<IntendedStructureChain> chain = nullptr)
+ State state, bool wasSeenInJIT, const GetByIdVariant& variant = GetByIdVariant())
: m_state(state)
- , m_structureSet(structureSet)
- , m_chain(chain)
- , m_specificValue(specificValue)
- , m_offset(offset)
, m_wasSeenInJIT(wasSeenInJIT)
{
- ASSERT((state == Simple) == (offset != invalidOffset));
+ ASSERT((state == Simple) == variant.isSet());
+ m_variants.append(variant);
}
- static GetByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, StringImpl* uid);
- static GetByIdStatus computeFor(VM&, Structure*, StringImpl* uid);
+ static GetByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, UniquedStringImpl* uid);
+ static GetByIdStatus computeFor(const StructureSet&, UniquedStringImpl* uid);
+ static GetByIdStatus computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin, UniquedStringImpl* uid);
+
+#if ENABLE(DFG_JIT)
+ static GetByIdStatus computeForStubInfo(const ConcurrentJITLocker&, CodeBlock* baselineBlock, StructureStubInfo*, CodeOrigin, UniquedStringImpl* uid);
+#endif
+
State state() const { return m_state; }
bool isSet() const { return m_state != NoInformation; }
bool operator!() const { return !isSet(); }
bool isSimple() const { return m_state == Simple; }
+
+ size_t numVariants() const { return m_variants.size(); }
+ const Vector<GetByIdVariant, 1>& variants() const { return m_variants; }
+ const GetByIdVariant& at(size_t index) const { return m_variants[index]; }
+ const GetByIdVariant& operator[](size_t index) const { return at(index); }
+
bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls; }
- bool makesCalls() const { return m_state == MakesCalls; }
-
- const StructureSet& structureSet() const { return m_structureSet; }
- IntendedStructureChain* chain() const { return const_cast<IntendedStructureChain*>(m_chain.get()); } // Returns null if this is a direct access.
- JSValue specificValue() const { return m_specificValue; } // Returns JSValue() if there is no specific value.
- PropertyOffset offset() const { return m_offset; }
+ bool makesCalls() const;
bool wasSeenInJIT() const { return m_wasSeenInJIT; }
+ void dump(PrintStream&) const;
+
private:
- static void computeForChain(GetByIdStatus& result, CodeBlock*, StringImpl* uid);
- static GetByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, StringImpl* uid);
+#if ENABLE(DFG_JIT)
+ static bool hasExitSite(const ConcurrentJITLocker&, CodeBlock*, unsigned bytecodeIndex);
+#endif
+#if ENABLE(JIT)
+ static GetByIdStatus computeForStubInfoWithoutExitSiteFeedback(
+ const ConcurrentJITLocker&, CodeBlock* profiledBlock, StructureStubInfo*,
+ UniquedStringImpl* uid, CallLinkStatus::ExitSiteData);
+#endif
+ static GetByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid);
+
+ bool appendVariant(const GetByIdVariant&);
State m_state;
- StructureSet m_structureSet;
- RefPtr<IntendedStructureChain> m_chain;
- JSValue m_specificValue;
- PropertyOffset m_offset;
+ Vector<GetByIdVariant, 1> m_variants;
bool m_wasSeenInJIT;
};
diff --git a/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp b/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp
new file mode 100644
index 000000000..a869d4a1c
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GetByIdVariant.h"
+
+#include "CallLinkStatus.h"
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+GetByIdVariant::GetByIdVariant(
+ const StructureSet& structureSet, PropertyOffset offset,
+ const ObjectPropertyConditionSet& conditionSet,
+ std::unique_ptr<CallLinkStatus> callLinkStatus,
+ JSFunction* intrinsicFunction)
+ : m_structureSet(structureSet)
+ , m_conditionSet(conditionSet)
+ , m_offset(offset)
+ , m_callLinkStatus(WTFMove(callLinkStatus))
+ , m_intrinsicFunction(intrinsicFunction)
+{
+ if (!structureSet.size()) {
+ ASSERT(offset == invalidOffset);
+ ASSERT(conditionSet.isEmpty());
+ }
+ if (intrinsicFunction)
+ ASSERT(intrinsic() != NoIntrinsic);
+}
+
+GetByIdVariant::~GetByIdVariant() { }
+
+GetByIdVariant::GetByIdVariant(const GetByIdVariant& other)
+ : GetByIdVariant()
+{
+ *this = other;
+}
+
+GetByIdVariant& GetByIdVariant::operator=(const GetByIdVariant& other)
+{
+ m_structureSet = other.m_structureSet;
+ m_conditionSet = other.m_conditionSet;
+ m_offset = other.m_offset;
+ m_intrinsicFunction = other.m_intrinsicFunction;
+ if (other.m_callLinkStatus)
+ m_callLinkStatus = std::make_unique<CallLinkStatus>(*other.m_callLinkStatus);
+ else
+ m_callLinkStatus = nullptr;
+ return *this;
+}
+
+inline bool GetByIdVariant::canMergeIntrinsicStructures(const GetByIdVariant& other) const
+{
+ if (m_intrinsicFunction != other.m_intrinsicFunction)
+ return false;
+ switch (intrinsic()) {
+ case TypedArrayByteLengthIntrinsic: {
+ // We can merge these sets as long as the element size of the two sets is the same.
+ TypedArrayType thisType = (*m_structureSet.begin())->classInfo()->typedArrayStorageType;
+ TypedArrayType otherType = (*other.m_structureSet.begin())->classInfo()->typedArrayStorageType;
+
+ ASSERT(isTypedView(thisType) && isTypedView(otherType));
+
+ return logElementSize(thisType) == logElementSize(otherType);
+ }
+
+ default:
+ return true;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+bool GetByIdVariant::attemptToMerge(const GetByIdVariant& other)
+{
+ if (m_offset != other.m_offset)
+ return false;
+ if (m_callLinkStatus || other.m_callLinkStatus)
+ return false;
+
+ if (!canMergeIntrinsicStructures(other))
+ return false;
+
+ if (m_conditionSet.isEmpty() != other.m_conditionSet.isEmpty())
+ return false;
+
+ ObjectPropertyConditionSet mergedConditionSet;
+ if (!m_conditionSet.isEmpty()) {
+ mergedConditionSet = m_conditionSet.mergedWith(other.m_conditionSet);
+ if (!mergedConditionSet.isValid() || !mergedConditionSet.hasOneSlotBaseCondition())
+ return false;
+ }
+ m_conditionSet = mergedConditionSet;
+
+ m_structureSet.merge(other.m_structureSet);
+
+ return true;
+}
+
+void GetByIdVariant::dump(PrintStream& out) const
+{
+ dumpInContext(out, 0);
+}
+
+void GetByIdVariant::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ if (!isSet()) {
+ out.print("<empty>");
+ return;
+ }
+
+ out.print(
+ "<", inContext(structureSet(), context), ", ", inContext(m_conditionSet, context));
+ out.print(", offset = ", offset());
+ if (m_callLinkStatus)
+ out.print(", call = ", *m_callLinkStatus);
+ if (m_intrinsicFunction)
+ out.print(", intrinsic = ", *m_intrinsicFunction);
+ out.print(">");
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/GetByIdVariant.h b/Source/JavaScriptCore/bytecode/GetByIdVariant.h
new file mode 100644
index 000000000..03a1e566f
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/GetByIdVariant.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GetByIdVariant_h
+#define GetByIdVariant_h
+
+#include "CallLinkStatus.h"
+#include "JSCJSValue.h"
+#include "ObjectPropertyConditionSet.h"
+#include "PropertyOffset.h"
+#include "StructureSet.h"
+
+namespace JSC {
+
+class CallLinkStatus;
+class GetByIdStatus;
+struct DumpContext;
+
+class GetByIdVariant {
+public:
+ GetByIdVariant(
+ const StructureSet& structureSet = StructureSet(), PropertyOffset offset = invalidOffset,
+ const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(),
+ std::unique_ptr<CallLinkStatus> = nullptr,
+ JSFunction* = nullptr);
+
+ ~GetByIdVariant();
+
+ GetByIdVariant(const GetByIdVariant&);
+ GetByIdVariant& operator=(const GetByIdVariant&);
+
+ bool isSet() const { return !!m_structureSet.size(); }
+ bool operator!() const { return !isSet(); }
+ const StructureSet& structureSet() const { return m_structureSet; }
+ StructureSet& structureSet() { return m_structureSet; }
+
+ // A non-empty condition set means that this is a prototype load.
+ const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; }
+
+ PropertyOffset offset() const { return m_offset; }
+ CallLinkStatus* callLinkStatus() const { return m_callLinkStatus.get(); }
+ JSFunction* intrinsicFunction() const { return m_intrinsicFunction; }
+ Intrinsic intrinsic() const { return m_intrinsicFunction ? m_intrinsicFunction->intrinsic() : NoIntrinsic; }
+
+ bool attemptToMerge(const GetByIdVariant& other);
+
+ void dump(PrintStream&) const;
+ void dumpInContext(PrintStream&, DumpContext*) const;
+
+private:
+ friend class GetByIdStatus;
+
+ bool canMergeIntrinsicStructures(const GetByIdVariant&) const;
+
+ StructureSet m_structureSet;
+ ObjectPropertyConditionSet m_conditionSet;
+ PropertyOffset m_offset;
+ std::unique_ptr<CallLinkStatus> m_callLinkStatus;
+ JSFunction* m_intrinsicFunction;
+};
+
+} // namespace JSC
+
+#endif // GetByIdVariant_h
+
diff --git a/Source/JavaScriptCore/bytecode/HandlerInfo.h b/Source/JavaScriptCore/bytecode/HandlerInfo.h
index 8396c9607..acdda08ed 100644
--- a/Source/JavaScriptCore/bytecode/HandlerInfo.h
+++ b/Source/JavaScriptCore/bytecode/HandlerInfo.h
@@ -27,16 +27,70 @@
#define HandlerInfo_h
#include "CodeLocation.h"
-#include <wtf/Platform.h>
namespace JSC {
-struct HandlerInfo {
+enum class HandlerType {
+ Illegal = 0,
+ Catch = 1,
+ Finally = 2,
+ SynthesizedFinally = 3
+};
+
+struct HandlerInfoBase {
+ HandlerType type() const { return static_cast<HandlerType>(typeBits); }
+ void setType(HandlerType type) { typeBits = static_cast<uint32_t>(type); }
+
+ const char* typeName()
+ {
+ switch (type()) {
+ case HandlerType::Catch:
+ return "catch";
+ case HandlerType::Finally:
+ return "finally";
+ case HandlerType::SynthesizedFinally:
+ return "synthesized finally";
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ return nullptr;
+ }
+
+ bool isCatchHandler() const { return type() == HandlerType::Catch; }
+
uint32_t start;
uint32_t end;
uint32_t target;
- uint32_t scopeDepth;
+ uint32_t typeBits : 2; // HandlerType
+};
+
+struct UnlinkedHandlerInfo : public HandlerInfoBase {
+ UnlinkedHandlerInfo(uint32_t start, uint32_t end, uint32_t target, HandlerType handlerType)
+ {
+ this->start = start;
+ this->end = end;
+ this->target = target;
+ setType(handlerType);
+ ASSERT(type() == handlerType);
+ }
+};
+
+struct HandlerInfo : public HandlerInfoBase {
+ void initialize(const UnlinkedHandlerInfo& unlinkedInfo)
+ {
+ start = unlinkedInfo.start;
+ end = unlinkedInfo.end;
+ target = unlinkedInfo.target;
+ typeBits = unlinkedInfo.typeBits;
+ }
+
#if ENABLE(JIT)
+ void initialize(const UnlinkedHandlerInfo& unlinkedInfo, CodeLocationLabel label)
+ {
+ initialize(unlinkedInfo);
+ nativeCode = label;
+ }
+
CodeLocationLabel nativeCode;
#endif
};
diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp b/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp
new file mode 100644
index 000000000..447bc7e73
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "InlineCallFrame.h"
+
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "Executable.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+JSFunction* InlineCallFrame::calleeConstant() const
+{
+ if (calleeRecovery.isConstant())
+ return jsCast<JSFunction*>(calleeRecovery.constant());
+ return nullptr;
+}
+
+JSFunction* InlineCallFrame::calleeForCallFrame(ExecState* exec) const
+{
+ return jsCast<JSFunction*>(calleeRecovery.recover(exec));
+}
+
+CodeBlockHash InlineCallFrame::hash() const
+{
+ return baselineCodeBlock->hash();
+}
+
+CString InlineCallFrame::hashAsStringIfPossible() const
+{
+ return baselineCodeBlock->hashAsStringIfPossible();
+}
+
+CString InlineCallFrame::inferredName() const
+{
+ return jsCast<FunctionExecutable*>(baselineCodeBlock->ownerExecutable())->inferredName().utf8();
+}
+
+void InlineCallFrame::dumpBriefFunctionInformation(PrintStream& out) const
+{
+ out.print(inferredName(), "#", hashAsStringIfPossible());
+}
+
+void InlineCallFrame::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ out.print(briefFunctionInformation(), ":<", RawPointer(baselineCodeBlock.get()));
+ if (isStrictMode())
+ out.print(" (StrictMode)");
+ out.print(", bc#", directCaller.bytecodeIndex, ", ", static_cast<Kind>(kind));
+ if (isClosureCall)
+ out.print(", closure call");
+ else
+ out.print(", known callee: ", inContext(calleeRecovery.constant(), context));
+ out.print(", numArgs+this = ", arguments.size());
+ out.print(", stackOffset = ", stackOffset);
+ out.print(" (", virtualRegisterForLocal(0), " maps to ", virtualRegisterForLocal(0) + stackOffset, ")>");
+}
+
+void InlineCallFrame::dump(PrintStream& out) const
+{
+ dumpInContext(out, 0);
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::InlineCallFrame::Kind kind)
+{
+ switch (kind) {
+ case JSC::InlineCallFrame::Call:
+ out.print("Call");
+ return;
+ case JSC::InlineCallFrame::Construct:
+ out.print("Construct");
+ return;
+ case JSC::InlineCallFrame::TailCall:
+ out.print("TailCall");
+ return;
+ case JSC::InlineCallFrame::CallVarargs:
+ out.print("CallVarargs");
+ return;
+ case JSC::InlineCallFrame::ConstructVarargs:
+ out.print("ConstructVarargs");
+ return;
+ case JSC::InlineCallFrame::TailCallVarargs:
+ out.print("TailCallVarargs");
+ return;
+ case JSC::InlineCallFrame::GetterCall:
+ out.print("GetterCall");
+ return;
+ case JSC::InlineCallFrame::SetterCall:
+ out.print("SetterCall");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrame.h b/Source/JavaScriptCore/bytecode/InlineCallFrame.h
new file mode 100644
index 000000000..eaa943cfe
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/InlineCallFrame.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef InlineCallFrame_h
+#define InlineCallFrame_h
+
+#include "CodeBlock.h"
+#include "CodeBlockHash.h"
+#include "CodeOrigin.h"
+#include "ValueRecovery.h"
+#include "WriteBarrier.h"
+#include <wtf/BitVector.h>
+#include <wtf/HashMap.h>
+#include <wtf/PrintStream.h>
+#include <wtf/StdLibExtras.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+struct InlineCallFrame;
+class ExecState;
+class JSFunction;
+
+struct InlineCallFrame {
+ enum Kind {
+ Call,
+ Construct,
+ TailCall,
+ CallVarargs,
+ ConstructVarargs,
+ TailCallVarargs,
+
+ // For these, the stackOffset incorporates the argument count plus the true return PC
+ // slot.
+ GetterCall,
+ SetterCall
+ };
+
+ static CallMode callModeFor(Kind kind)
+ {
+ switch (kind) {
+ case Call:
+ case CallVarargs:
+ case GetterCall:
+ case SetterCall:
+ return CallMode::Regular;
+ case TailCall:
+ case TailCallVarargs:
+ return CallMode::Tail;
+ case Construct:
+ case ConstructVarargs:
+ return CallMode::Construct;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ static Kind kindFor(CallMode callMode)
+ {
+ switch (callMode) {
+ case CallMode::Regular:
+ return Call;
+ case CallMode::Construct:
+ return Construct;
+ case CallMode::Tail:
+ return TailCall;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ static Kind varargsKindFor(CallMode callMode)
+ {
+ switch (callMode) {
+ case CallMode::Regular:
+ return CallVarargs;
+ case CallMode::Construct:
+ return ConstructVarargs;
+ case CallMode::Tail:
+ return TailCallVarargs;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ static CodeSpecializationKind specializationKindFor(Kind kind)
+ {
+ switch (kind) {
+ case Call:
+ case CallVarargs:
+ case TailCall:
+ case TailCallVarargs:
+ case GetterCall:
+ case SetterCall:
+ return CodeForCall;
+ case Construct:
+ case ConstructVarargs:
+ return CodeForConstruct;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ static bool isVarargs(Kind kind)
+ {
+ switch (kind) {
+ case CallVarargs:
+ case TailCallVarargs:
+ case ConstructVarargs:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static bool isTail(Kind kind)
+ {
+ switch (kind) {
+ case TailCall:
+ case TailCallVarargs:
+ return true;
+ default:
+ return false;
+ }
+ }
+ bool isTail() const
+ {
+ return isTail(static_cast<Kind>(kind));
+ }
+
+ static CodeOrigin* computeCallerSkippingTailCalls(InlineCallFrame* inlineCallFrame, Kind* callerCallKind = nullptr)
+ {
+ CodeOrigin* codeOrigin;
+ bool tailCallee;
+ int callKind;
+ do {
+ tailCallee = inlineCallFrame->isTail();
+ callKind = inlineCallFrame->kind;
+ codeOrigin = &inlineCallFrame->directCaller;
+ inlineCallFrame = codeOrigin->inlineCallFrame;
+ } while (inlineCallFrame && tailCallee);
+
+ if (tailCallee)
+ return nullptr;
+
+ if (callerCallKind)
+ *callerCallKind = static_cast<Kind>(callKind);
+
+ return codeOrigin;
+ }
+
+ CodeOrigin* getCallerSkippingTailCalls(Kind* callerCallKind = nullptr)
+ {
+ return computeCallerSkippingTailCalls(this, callerCallKind);
+ }
+
+ InlineCallFrame* getCallerInlineFrameSkippingTailCalls()
+ {
+ CodeOrigin* caller = getCallerSkippingTailCalls();
+ return caller ? caller->inlineCallFrame : nullptr;
+ }
+
+ Vector<ValueRecovery> arguments; // Includes 'this'.
+ WriteBarrier<CodeBlock> baselineCodeBlock;
+ ValueRecovery calleeRecovery;
+ CodeOrigin directCaller;
+
+ signed stackOffset : 28;
+ unsigned kind : 3; // real type is Kind
+ bool isClosureCall : 1; // If false then we know that callee/scope are constants and the DFG won't treat them as variables, i.e. they have to be recovered manually.
+ VirtualRegister argumentCountRegister; // Only set when we inline a varargs call.
+
+ // There is really no good notion of a "default" set of values for
+ // InlineCallFrame's fields. This constructor is here just to reduce confusion if
+ // we forgot to initialize explicitly.
+ InlineCallFrame()
+ : stackOffset(0)
+ , kind(Call)
+ , isClosureCall(false)
+ {
+ }
+
+ bool isVarargs() const
+ {
+ return isVarargs(static_cast<Kind>(kind));
+ }
+
+ CodeSpecializationKind specializationKind() const { return specializationKindFor(static_cast<Kind>(kind)); }
+
+ JSFunction* calleeConstant() const;
+
+ // Get the callee given a machine call frame to which this InlineCallFrame belongs.
+ JSFunction* calleeForCallFrame(ExecState*) const;
+
+ CString inferredName() const;
+ CodeBlockHash hash() const;
+ CString hashAsStringIfPossible() const;
+
+ void setStackOffset(signed offset)
+ {
+ stackOffset = offset;
+ RELEASE_ASSERT(static_cast<signed>(stackOffset) == offset);
+ }
+
+ ptrdiff_t callerFrameOffset() const { return stackOffset * sizeof(Register) + CallFrame::callerFrameOffset(); }
+ ptrdiff_t returnPCOffset() const { return stackOffset * sizeof(Register) + CallFrame::returnPCOffset(); }
+
+ bool isStrictMode() const { return baselineCodeBlock->isStrictMode(); }
+
+ void dumpBriefFunctionInformation(PrintStream&) const;
+ void dump(PrintStream&) const;
+ void dumpInContext(PrintStream&, DumpContext*) const;
+
+ MAKE_PRINT_METHOD(InlineCallFrame, dumpBriefFunctionInformation, briefFunctionInformation);
+
+};
+
+inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
+{
+ RELEASE_ASSERT(inlineCallFrame);
+ return inlineCallFrame->baselineCodeBlock.get();
+}
+
+inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
+{
+ if (codeOrigin.inlineCallFrame)
+ return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
+ return baselineCodeBlock;
+}
+
+template <typename Function>
+inline void CodeOrigin::walkUpInlineStack(const Function& function)
+{
+ CodeOrigin codeOrigin = *this;
+ while (true) {
+ function(codeOrigin);
+ if (!codeOrigin.inlineCallFrame)
+ break;
+ codeOrigin = codeOrigin.inlineCallFrame->directCaller;
+ }
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::InlineCallFrame::Kind);
+
+} // namespace WTF
+
+#endif // InlineCallFrame_h
diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp
index be5edb34c..402cfd06d 100644
--- a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp
+++ b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp
@@ -26,6 +26,9 @@
#include "config.h"
#include "InlineCallFrameSet.h"
+#include "InlineCallFrame.h"
+#include "JSCInlines.h"
+
namespace JSC {
InlineCallFrameSet::InlineCallFrameSet() { }
diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h
index 0a8b2e79c..6dae56db9 100644
--- a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h
+++ b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h
@@ -28,12 +28,11 @@
#include "CodeOrigin.h"
#include <wtf/Bag.h>
-#include <wtf/Noncopyable.h>
+#include <wtf/RefCounted.h>
namespace JSC {
-class InlineCallFrameSet {
- WTF_MAKE_NONCOPYABLE(InlineCallFrameSet);
+class InlineCallFrameSet : public RefCounted<InlineCallFrameSet> {
public:
InlineCallFrameSet();
~InlineCallFrameSet();
@@ -45,7 +44,7 @@ public:
typedef Bag<InlineCallFrame>::iterator iterator;
iterator begin() { return m_frames.begin(); }
iterator end() { return m_frames.end(); }
-
+
private:
Bag<InlineCallFrame> m_frames;
};
diff --git a/Source/JavaScriptCore/bytecode/Instruction.h b/Source/JavaScriptCore/bytecode/Instruction.h
index 00bd8155b..494b00044 100644
--- a/Source/JavaScriptCore/bytecode/Instruction.h
+++ b/Source/JavaScriptCore/bytecode/Instruction.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -29,12 +29,17 @@
#ifndef Instruction_h
#define Instruction_h
+#include "BasicBlockLocation.h"
#include "MacroAssembler.h"
#include "Opcode.h"
+#include "PutByIdFlags.h"
+#include "SymbolTable.h"
+#include "TypeLocation.h"
#include "PropertySlot.h"
#include "SpecialPointer.h"
#include "Structure.h"
#include "StructureChain.h"
+#include "ToThisStatus.h"
#include "VirtualRegister.h"
#include <wtf/VectorTraits.h>
@@ -43,7 +48,7 @@ namespace JSC {
class ArrayAllocationProfile;
class ArrayProfile;
class ObjectAllocationProfile;
-class VariableWatchpointSet;
+class WatchpointSet;
struct LLIntCallLinkInfo;
struct ValueProfile;
@@ -70,6 +75,18 @@ struct Instruction {
u.jsCell.clear();
u.operand = operand;
}
+ Instruction(unsigned unsignedValue)
+ {
+ // We have to initialize one of the pointer members to ensure that
+ // the entire struct is initialized in 64-bit.
+ u.jsCell.clear();
+ u.unsignedValue = unsignedValue;
+ }
+
+ Instruction(PutByIdFlags flags)
+ {
+ u.putByIdFlags = flags;
+ }
Instruction(VM& vm, JSCell* owner, Structure* structure)
{
@@ -94,30 +111,36 @@ struct Instruction {
Instruction(ArrayProfile* profile) { u.arrayProfile = profile; }
Instruction(ArrayAllocationProfile* profile) { u.arrayAllocationProfile = profile; }
Instruction(ObjectAllocationProfile* profile) { u.objectAllocationProfile = profile; }
- Instruction(WriteBarrier<Unknown>* registerPointer) { u.registerPointer = registerPointer; }
+ Instruction(WriteBarrier<Unknown>* variablePointer) { u.variablePointer = variablePointer; }
Instruction(Special::Pointer pointer) { u.specialPointer = pointer; }
- Instruction(StringImpl* uid) { u.uid = uid; }
+ Instruction(UniquedStringImpl* uid) { u.uid = uid; }
Instruction(bool* predicatePointer) { u.predicatePointer = predicatePointer; }
union {
Opcode opcode;
int operand;
+ unsigned unsignedValue;
WriteBarrierBase<Structure> structure;
+ StructureID structureID;
+ WriteBarrierBase<SymbolTable> symbolTable;
WriteBarrierBase<StructureChain> structureChain;
WriteBarrierBase<JSCell> jsCell;
- WriteBarrier<Unknown>* registerPointer;
+ WriteBarrier<Unknown>* variablePointer;
Special::Pointer specialPointer;
PropertySlot::GetValueFunc getterFunc;
LLIntCallLinkInfo* callLinkInfo;
- StringImpl* uid;
+ UniquedStringImpl* uid;
ValueProfile* profile;
ArrayProfile* arrayProfile;
ArrayAllocationProfile* arrayAllocationProfile;
ObjectAllocationProfile* objectAllocationProfile;
- VariableWatchpointSet* watchpointSet;
- WriteBarrierBase<JSActivation> activation;
+ WatchpointSet* watchpointSet;
void* pointer;
bool* predicatePointer;
+ ToThisStatus toThisStatus;
+ TypeLocation* location;
+ BasicBlockLocation* basicBlockLocation;
+ PutByIdFlags putByIdFlags;
} u;
private:
diff --git a/Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h b/Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h
new file mode 100644
index 000000000..1926e93f2
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef InternalFunctionAllocationProfile_h
+#define InternalFunctionAllocationProfile_h
+
+#include "JSGlobalObject.h"
+#include "ObjectPrototype.h"
+#include "SlotVisitor.h"
+#include "WriteBarrier.h"
+
+namespace JSC {
+
+class InternalFunctionAllocationProfile {
+public:
+ Structure* structure() { return m_structure.get(); }
+ Structure* createAllocationStructureFromBase(VM&, JSCell* owner, JSObject* prototype, Structure* base);
+
+ void clear() { m_structure.clear(); }
+ void visitAggregate(SlotVisitor& visitor) { visitor.append(&m_structure); }
+
+private:
+ WriteBarrier<Structure> m_structure;
+};
+
+inline Structure* InternalFunctionAllocationProfile::createAllocationStructureFromBase(VM& vm, JSCell* owner, JSObject* prototype, Structure* baseStructure)
+{
+ ASSERT(prototype != baseStructure->storedPrototype());
+ ASSERT(!m_structure || m_structure.get()->classInfo() != baseStructure->classInfo());
+
+ Structure* structure = vm.prototypeMap.emptyStructureForPrototypeFromBaseStructure(prototype, baseStructure);
+
+ // Ensure that if another thread sees the structure, it will see it properly created.
+ WTF::storeStoreFence();
+
+ m_structure.set(vm, owner, structure);
+ return m_structure.get();
+}
+
+} // namespace JSC
+
+#endif /* InternalFunctionAllocationProfile_h */
diff --git a/Source/JavaScriptCore/bytecode/JumpTable.cpp b/Source/JavaScriptCore/bytecode/JumpTable.cpp
index ef7098b65..e22ad03c9 100644
--- a/Source/JavaScriptCore/bytecode/JumpTable.cpp
+++ b/Source/JavaScriptCore/bytecode/JumpTable.cpp
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/JavaScriptCore/bytecode/JumpTable.h b/Source/JavaScriptCore/bytecode/JumpTable.h
index 55d6855a5..b83e842cb 100644
--- a/Source/JavaScriptCore/bytecode/JumpTable.h
+++ b/Source/JavaScriptCore/bytecode/JumpTable.h
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -94,6 +94,12 @@ namespace JSC {
}
#if ENABLE(JIT)
+ void ensureCTITable()
+ {
+ ASSERT(ctiOffsets.isEmpty() || ctiOffsets.size() == branchOffsets.size());
+ ctiOffsets.grow(branchOffsets.size());
+ }
+
inline CodeLocationLabel ctiForValue(int32_t value)
{
if (value >= min && static_cast<uint32_t>(value - min) < ctiOffsets.size())
diff --git a/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h b/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h
index bfb951018..2645dd5be 100644
--- a/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h
+++ b/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h
@@ -45,7 +45,7 @@ struct LLIntCallLinkInfo : public BasicRawSentinelNode<LLIntCallLinkInfo> {
remove();
}
- bool isLinked() { return callee; }
+ bool isLinked() { return !!callee; }
void unlink()
{
diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
index a8ad779ac..de654db68 100644
--- a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
@@ -26,7 +26,7 @@
#include "config.h"
#include "LazyOperandValueProfile.h"
-#include "Operations.h"
+#include "JSCInlines.h"
namespace JSC {
@@ -46,7 +46,7 @@ LazyOperandValueProfile* CompressedLazyOperandValueProfileHolder::add(
const ConcurrentJITLocker&, const LazyOperandValueProfileKey& key)
{
if (!m_data)
- m_data = adoptPtr(new LazyOperandValueProfile::List());
+ m_data = std::make_unique<LazyOperandValueProfile::List>();
else {
for (unsigned i = 0; i < m_data->size(); ++i) {
if (m_data->at(i).key() == key)
diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
index 95ef941cd..74e4f3318 100644
--- a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
@@ -31,7 +31,6 @@
#include "VirtualRegister.h"
#include <wtf/HashMap.h>
#include <wtf/Noncopyable.h>
-#include <wtf/OwnPtr.h>
#include <wtf/SegmentedVector.h>
namespace JSC {
@@ -161,7 +160,7 @@ public:
private:
friend class LazyOperandValueProfileParser;
- OwnPtr<LazyOperandValueProfile::List> m_data;
+ std::unique_ptr<LazyOperandValueProfile::List> m_data;
};
class LazyOperandValueProfileParser {
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
index 1ac5bb5a0..bec692ef7 100644
--- a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
@@ -29,6 +29,7 @@
#if ENABLE(DFG_JIT)
#include "CodeBlock.h"
+#include "JSCInlines.h"
namespace JSC {
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
index c6fe6c5f0..846f8cf7a 100644
--- a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
@@ -26,8 +26,6 @@
#ifndef MethodOfGettingAValueProfile_h
#define MethodOfGettingAValueProfile_h
-#include <wtf/Platform.h>
-
// This is guarded by ENABLE_DFG_JIT only because it uses some value profiles
// that are currently only used if the DFG is enabled (i.e. they are not
// available in the profile-only configuration). Hopefully someday all of
diff --git a/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h b/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h
index 9a9db0bc7..5fa706d25 100644
--- a/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h
+++ b/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h
@@ -89,13 +89,23 @@ public:
if (inlineCapacity > JSFinalObject::maxInlineCapacity())
inlineCapacity = JSFinalObject::maxInlineCapacity();
+ Structure* structure = vm.prototypeMap.emptyObjectStructureForPrototype(prototype, inlineCapacity);
+
+ // Ensure that if another thread sees the structure, it will see it properly created
+ WTF::storeStoreFence();
+
m_allocator = allocator;
- m_structure.set(vm, owner,
- vm.prototypeMap.emptyObjectStructureForPrototype(prototype, inlineCapacity));
+ m_structure.set(vm, owner, structure);
}
- Structure* structure() { return m_structure.get(); }
- unsigned inlineCapacity() { return m_structure->inlineCapacity(); }
+ Structure* structure()
+ {
+ Structure* structure = m_structure.get();
+ // Ensure that if we see the structure, it has been properly created
+ WTF::loadLoadFence();
+ return structure;
+ }
+ unsigned inlineCapacity() { return structure()->inlineCapacity(); }
void clear()
{
@@ -117,8 +127,8 @@ private:
return 0;
size_t count = 0;
- PropertyNameArray propertyNameArray(&vm);
- prototype->structure()->getPropertyNamesFromStructure(vm, propertyNameArray, ExcludeDontEnumProperties);
+ PropertyNameArray propertyNameArray(&vm, PropertyNameMode::StringsAndSymbols);
+ prototype->structure()->getPropertyNamesFromStructure(vm, propertyNameArray, EnumerationMode());
PropertyNameArrayData::PropertyNameVector& propertyNameVector = propertyNameArray.data()->propertyNameVector();
for (size_t i = 0; i < propertyNameVector.size(); ++i) {
JSValue value = prototype->getDirect(vm, propertyNameVector[i]);
diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp
new file mode 100644
index 000000000..1f153b956
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ObjectPropertyCondition.h"
+
+#include "JSCInlines.h"
+#include "TrackedReferences.h"
+
+namespace JSC {
+
+void ObjectPropertyCondition::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ if (!*this) {
+ out.print("<invalid>");
+ return;
+ }
+
+ out.print("<", inContext(JSValue(m_object), context), ": ", inContext(m_condition, context), ">");
+}
+
+void ObjectPropertyCondition::dump(PrintStream& out) const
+{
+ dumpInContext(out, nullptr);
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidityAssumingImpurePropertyWatchpoint(
+ Structure* structure) const
+{
+ return m_condition.isStillValidAssumingImpurePropertyWatchpoint(structure);
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidityAssumingImpurePropertyWatchpoint() const
+{
+ if (!*this)
+ return false;
+
+ return structureEnsuresValidityAssumingImpurePropertyWatchpoint(m_object->structure());
+}
+
+bool ObjectPropertyCondition::validityRequiresImpurePropertyWatchpoint(Structure* structure) const
+{
+ return m_condition.validityRequiresImpurePropertyWatchpoint(structure);
+}
+
+bool ObjectPropertyCondition::validityRequiresImpurePropertyWatchpoint() const
+{
+ if (!*this)
+ return false;
+
+ return validityRequiresImpurePropertyWatchpoint(m_object->structure());
+}
+
+bool ObjectPropertyCondition::isStillValid(Structure* structure) const
+{
+ return m_condition.isStillValid(structure, m_object);
+}
+
+bool ObjectPropertyCondition::isStillValid() const
+{
+ if (!*this)
+ return false;
+
+ return isStillValid(m_object->structure());
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidity(Structure* structure) const
+{
+ return m_condition.isStillValid(structure);
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidity() const
+{
+ if (!*this)
+ return false;
+
+ return structureEnsuresValidity(m_object->structure());
+}
+
+bool ObjectPropertyCondition::isWatchableAssumingImpurePropertyWatchpoint(
+ Structure* structure, PropertyCondition::WatchabilityEffort effort) const
+{
+ return m_condition.isWatchableAssumingImpurePropertyWatchpoint(structure, m_object, effort);
+}
+
+bool ObjectPropertyCondition::isWatchableAssumingImpurePropertyWatchpoint(
+ PropertyCondition::WatchabilityEffort effort) const
+{
+ if (!*this)
+ return false;
+
+ return isWatchableAssumingImpurePropertyWatchpoint(m_object->structure(), effort);
+}
+
+bool ObjectPropertyCondition::isWatchable(
+ Structure* structure, PropertyCondition::WatchabilityEffort effort) const
+{
+ return m_condition.isWatchable(structure, m_object, effort);
+}
+
+bool ObjectPropertyCondition::isWatchable(PropertyCondition::WatchabilityEffort effort) const
+{
+ if (!*this)
+ return false;
+
+ return isWatchable(m_object->structure(), effort);
+}
+
+bool ObjectPropertyCondition::isStillLive() const
+{
+ if (!*this)
+ return false;
+
+ if (!Heap::isMarked(m_object))
+ return false;
+
+ return m_condition.isStillLive();
+}
+
+void ObjectPropertyCondition::validateReferences(const TrackedReferences& tracked) const
+{
+ if (!*this)
+ return;
+
+ tracked.check(m_object);
+ m_condition.validateReferences(tracked);
+}
+
+ObjectPropertyCondition ObjectPropertyCondition::attemptToMakeEquivalenceWithoutBarrier() const
+{
+ PropertyCondition result = condition().attemptToMakeEquivalenceWithoutBarrier(object());
+ if (!result)
+ return ObjectPropertyCondition();
+ return ObjectPropertyCondition(object(), result);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h
new file mode 100644
index 000000000..4c2a9bd1f
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ObjectPropertyCondition_h
+#define ObjectPropertyCondition_h
+
+#include "JSObject.h"
+#include "PropertyCondition.h"
+#include <wtf/HashMap.h>
+
+namespace JSC {
+
+class TrackedReferences;
+
+class ObjectPropertyCondition {
+public:
+ ObjectPropertyCondition()
+ : m_object(nullptr)
+ {
+ }
+
+ ObjectPropertyCondition(WTF::HashTableDeletedValueType token)
+ : m_object(nullptr)
+ , m_condition(token)
+ {
+ }
+
+ ObjectPropertyCondition(JSObject* object, const PropertyCondition& condition)
+ : m_object(object)
+ , m_condition(condition)
+ {
+ }
+
+ static ObjectPropertyCondition presenceWithoutBarrier(
+ JSObject* object, UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes)
+ {
+ ObjectPropertyCondition result;
+ result.m_object = object;
+ result.m_condition = PropertyCondition::presenceWithoutBarrier(uid, offset, attributes);
+ return result;
+ }
+
+ static ObjectPropertyCondition presence(
+ VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, PropertyOffset offset,
+ unsigned attributes)
+ {
+ if (owner)
+ vm.heap.writeBarrier(owner);
+ return presenceWithoutBarrier(object, uid, offset, attributes);
+ }
+
+ // NOTE: The prototype is the storedPrototype, not the prototypeForLookup.
+ static ObjectPropertyCondition absenceWithoutBarrier(
+ JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+ {
+ ObjectPropertyCondition result;
+ result.m_object = object;
+ result.m_condition = PropertyCondition::absenceWithoutBarrier(uid, prototype);
+ return result;
+ }
+
+ static ObjectPropertyCondition absence(
+ VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+ {
+ if (owner)
+ vm.heap.writeBarrier(owner);
+ return absenceWithoutBarrier(object, uid, prototype);
+ }
+
+ static ObjectPropertyCondition absenceOfSetterWithoutBarrier(
+ JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+ {
+ ObjectPropertyCondition result;
+ result.m_object = object;
+ result.m_condition = PropertyCondition::absenceOfSetterWithoutBarrier(uid, prototype);
+ return result;
+ }
+
+ static ObjectPropertyCondition absenceOfSetter(
+ VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+ {
+ if (owner)
+ vm.heap.writeBarrier(owner);
+ return absenceOfSetterWithoutBarrier(object, uid, prototype);
+ }
+
+ static ObjectPropertyCondition equivalenceWithoutBarrier(
+ JSObject* object, UniquedStringImpl* uid, JSValue value)
+ {
+ ObjectPropertyCondition result;
+ result.m_object = object;
+ result.m_condition = PropertyCondition::equivalenceWithoutBarrier(uid, value);
+ return result;
+ }
+
+ static ObjectPropertyCondition equivalence(
+ VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSValue value)
+ {
+ if (owner)
+ vm.heap.writeBarrier(owner);
+ return equivalenceWithoutBarrier(object, uid, value);
+ }
+
+ explicit operator bool() const { return !!m_condition; }
+
+ JSObject* object() const { return m_object; }
+ PropertyCondition condition() const { return m_condition; }
+
+ PropertyCondition::Kind kind() const { return condition().kind(); }
+ UniquedStringImpl* uid() const { return condition().uid(); }
+ bool hasOffset() const { return condition().hasOffset(); }
+ PropertyOffset offset() const { return condition().offset(); }
+ unsigned hasAttributes() const { return condition().hasAttributes(); }
+ unsigned attributes() const { return condition().attributes(); }
+ bool hasPrototype() const { return condition().hasPrototype(); }
+ JSObject* prototype() const { return condition().prototype(); }
+ bool hasRequiredValue() const { return condition().hasRequiredValue(); }
+ JSValue requiredValue() const { return condition().requiredValue(); }
+
+ void dumpInContext(PrintStream&, DumpContext*) const;
+ void dump(PrintStream&) const;
+
+ unsigned hash() const
+ {
+ return WTF::PtrHash<JSObject*>::hash(m_object) ^ m_condition.hash();
+ }
+
+ bool operator==(const ObjectPropertyCondition& other) const
+ {
+ return m_object == other.m_object
+ && m_condition == other.m_condition;
+ }
+
+ bool isHashTableDeletedValue() const
+ {
+ return !m_object && m_condition.isHashTableDeletedValue();
+ }
+
+ // Two conditions are compatible if they are identical or if they speak of different uids or
+ // different objects. If false is returned, you have to decide how to resolve the conflict -
+ // for example if there is a Presence and an Equivalence then in some cases you'll want the
+ // more general of the two while in other cases you'll want the more specific of the two. This
+ // will also return false for contradictions, like Presence and Absence on the same
+ // object/uid. By convention, invalid conditions aren't compatible with anything.
+ bool isCompatibleWith(const ObjectPropertyCondition& other) const
+ {
+ if (!*this || !other)
+ return false;
+ return *this == other || uid() != other.uid() || object() != other.object();
+ }
+
+ // These validity-checking methods can optionally take a Struture* instead of loading the
+ // Structure* from the object. If you're in the concurrent JIT, then you must use the forms
+ // that take an explicit Structure* because you want the compiler to optimize for the same
+ // structure that you validated (i.e. avoid a TOCTOU race).
+
+ // Checks if the object's structure claims that the property won't be intercepted. Validity
+ // does not require watchpoints on the object.
+ bool structureEnsuresValidityAssumingImpurePropertyWatchpoint(Structure*) const;
+ bool structureEnsuresValidityAssumingImpurePropertyWatchpoint() const;
+
+ // Returns true if we need an impure property watchpoint to ensure validity even if
+ // isStillValidAccordingToStructure() returned true.
+ bool validityRequiresImpurePropertyWatchpoint(Structure*) const;
+ bool validityRequiresImpurePropertyWatchpoint() const;
+
+ // Checks if the condition still holds. May conservatively return false, if the object and
+ // structure alone don't guarantee the condition. Note that this may return true if the
+ // condition still requires some watchpoints on the object in addition to checking the
+ // structure. If you want to check if the condition holds by using the structure alone,
+ // use structureEnsuresValidity().
+ bool isStillValid(Structure*) const;
+ bool isStillValid() const;
+
+ // Shorthand for condition().isStillValid(structure).
+ bool structureEnsuresValidity(Structure*) const;
+ bool structureEnsuresValidity() const;
+
+ // This means that it's still valid and we could enforce validity by setting a transition
+ // watchpoint on the structure and possibly an impure property watchpoint.
+ bool isWatchableAssumingImpurePropertyWatchpoint(
+ Structure*,
+ PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+ bool isWatchableAssumingImpurePropertyWatchpoint(
+ PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+
+ // This means that it's still valid and we could enforce validity by setting a transition
+ // watchpoint on the structure.
+ bool isWatchable(
+ Structure*,
+ PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+ bool isWatchable(
+ PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+
+ bool watchingRequiresStructureTransitionWatchpoint() const
+ {
+ return condition().watchingRequiresStructureTransitionWatchpoint();
+ }
+ bool watchingRequiresReplacementWatchpoint() const
+ {
+ return condition().watchingRequiresReplacementWatchpoint();
+ }
+
+ // This means that the objects involved in this are still live.
+ bool isStillLive() const;
+
+ void validateReferences(const TrackedReferences&) const;
+
+ bool isValidValueForPresence(JSValue value) const
+ {
+ return condition().isValidValueForPresence(value);
+ }
+
+ ObjectPropertyCondition attemptToMakeEquivalenceWithoutBarrier() const;
+
+private:
+ JSObject* m_object;
+ PropertyCondition m_condition;
+};
+
+struct ObjectPropertyConditionHash {
+ static unsigned hash(const ObjectPropertyCondition& key) { return key.hash(); }
+ static bool equal(
+ const ObjectPropertyCondition& a, const ObjectPropertyCondition& b)
+ {
+ return a == b;
+ }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::ObjectPropertyCondition> {
+ typedef JSC::ObjectPropertyConditionHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::ObjectPropertyCondition> : SimpleClassHashTraits<JSC::ObjectPropertyCondition> { };
+
+} // namespace WTF
+
+#endif // ObjectPropertyCondition_h
+
diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp
new file mode 100644
index 000000000..1b92412af
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ObjectPropertyConditionSet.h"
+
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+ObjectPropertyCondition ObjectPropertyConditionSet::forObject(JSObject* object) const
+{
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (condition.object() == object)
+ return condition;
+ }
+ return ObjectPropertyCondition();
+}
+
+ObjectPropertyCondition ObjectPropertyConditionSet::forConditionKind(
+ PropertyCondition::Kind kind) const
+{
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (condition.kind() == kind)
+ return condition;
+ }
+ return ObjectPropertyCondition();
+}
+
+unsigned ObjectPropertyConditionSet::numberOfConditionsWithKind(PropertyCondition::Kind kind) const
+{
+ unsigned result = 0;
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (condition.kind() == kind)
+ result++;
+ }
+ return result;
+}
+
+bool ObjectPropertyConditionSet::hasOneSlotBaseCondition() const
+{
+ return numberOfConditionsWithKind(PropertyCondition::Presence) == 1;
+}
+
+ObjectPropertyCondition ObjectPropertyConditionSet::slotBaseCondition() const
+{
+ ObjectPropertyCondition result;
+ unsigned numFound = 0;
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (condition.kind() == PropertyCondition::Presence) {
+ result = condition;
+ numFound++;
+ }
+ }
+ RELEASE_ASSERT(numFound == 1);
+ return result;
+}
+
+ObjectPropertyConditionSet ObjectPropertyConditionSet::mergedWith(
+ const ObjectPropertyConditionSet& other) const
+{
+ if (!isValid() || !other.isValid())
+ return invalid();
+
+ Vector<ObjectPropertyCondition> result;
+
+ if (!isEmpty())
+ result.appendVector(m_data->vector);
+
+ for (const ObjectPropertyCondition& newCondition : other) {
+ bool foundMatch = false;
+ for (const ObjectPropertyCondition& existingCondition : *this) {
+ if (newCondition == existingCondition) {
+ foundMatch = true;
+ continue;
+ }
+ if (!newCondition.isCompatibleWith(existingCondition))
+ return invalid();
+ }
+ if (!foundMatch)
+ result.append(newCondition);
+ }
+
+ return create(result);
+}
+
+bool ObjectPropertyConditionSet::structuresEnsureValidity() const
+{
+ if (!isValid())
+ return false;
+
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (!condition.structureEnsuresValidity())
+ return false;
+ }
+ return true;
+}
+
+bool ObjectPropertyConditionSet::structuresEnsureValidityAssumingImpurePropertyWatchpoint() const
+{
+ if (!isValid())
+ return false;
+
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint())
+ return false;
+ }
+ return true;
+}
+
+bool ObjectPropertyConditionSet::needImpurePropertyWatchpoint() const
+{
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (condition.validityRequiresImpurePropertyWatchpoint())
+ return true;
+ }
+ return false;
+}
+
+bool ObjectPropertyConditionSet::areStillLive() const
+{
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (!condition.isStillLive())
+ return false;
+ }
+ return true;
+}
+
+void ObjectPropertyConditionSet::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ if (!isValid()) {
+ out.print("<invalid>");
+ return;
+ }
+
+ out.print("[");
+ if (m_data)
+ out.print(listDumpInContext(m_data->vector, context));
+ out.print("]");
+}
+
+void ObjectPropertyConditionSet::dump(PrintStream& out) const
+{
+ dumpInContext(out, nullptr);
+}
+
+namespace {
+
+bool verbose = false;
+
+ObjectPropertyCondition generateCondition(
+ VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, PropertyCondition::Kind conditionKind)
+{
+ Structure* structure = object->structure();
+ if (verbose)
+ dataLog("Creating condition ", conditionKind, " for ", pointerDump(structure), "\n");
+
+ ObjectPropertyCondition result;
+ switch (conditionKind) {
+ case PropertyCondition::Presence: {
+ unsigned attributes;
+ PropertyOffset offset = structure->getConcurrently(uid, attributes);
+ if (offset == invalidOffset)
+ return ObjectPropertyCondition();
+ result = ObjectPropertyCondition::presence(vm, owner, object, uid, offset, attributes);
+ break;
+ }
+ case PropertyCondition::Absence: {
+ result = ObjectPropertyCondition::absence(
+ vm, owner, object, uid, object->structure()->storedPrototypeObject());
+ break;
+ }
+ case PropertyCondition::AbsenceOfSetter: {
+ result = ObjectPropertyCondition::absenceOfSetter(
+ vm, owner, object, uid, object->structure()->storedPrototypeObject());
+ break;
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return ObjectPropertyCondition();
+ }
+
+ if (!result.structureEnsuresValidityAssumingImpurePropertyWatchpoint()) {
+ if (verbose)
+ dataLog("Failed to create condition: ", result, "\n");
+ return ObjectPropertyCondition();
+ }
+
+ if (verbose)
+ dataLog("New condition: ", result, "\n");
+ return result;
+}
+
+enum Concurrency {
+ MainThread,
+ Concurrent
+};
+template<typename Functor>
+ObjectPropertyConditionSet generateConditions(
+ VM& vm, JSGlobalObject* globalObject, Structure* structure, JSObject* prototype, const Functor& functor,
+ Concurrency concurrency = MainThread)
+{
+ Vector<ObjectPropertyCondition> conditions;
+
+ for (;;) {
+ if (verbose)
+ dataLog("Considering structure: ", pointerDump(structure), "\n");
+
+ if (structure->isProxy()) {
+ if (verbose)
+ dataLog("It's a proxy, so invalid.\n");
+ return ObjectPropertyConditionSet::invalid();
+ }
+
+ JSValue value = structure->prototypeForLookup(globalObject);
+
+ if (value.isNull()) {
+ if (!prototype) {
+ if (verbose)
+ dataLog("Reached end up prototype chain as expected, done.\n");
+ break;
+ }
+ if (verbose)
+ dataLog("Unexpectedly reached end of prototype chain, so invalid.\n");
+ return ObjectPropertyConditionSet::invalid();
+ }
+
+ JSObject* object = jsCast<JSObject*>(value);
+ structure = object->structure(vm);
+
+ // Since we're accessing a prototype repeatedly, it's a good bet that it should not be
+ // treated as a dictionary.
+ if (structure->isDictionary()) {
+ if (concurrency == MainThread)
+ structure->flattenDictionaryStructure(vm, object);
+ else {
+ if (verbose)
+ dataLog("Cannot flatten dictionary when not on main thread, so invalid.\n");
+ return ObjectPropertyConditionSet::invalid();
+ }
+ }
+
+ if (!functor(conditions, object)) {
+ if (verbose)
+ dataLog("Functor failed, invalid.\n");
+ return ObjectPropertyConditionSet::invalid();
+ }
+
+ if (object == prototype) {
+ if (verbose)
+ dataLog("Reached desired prototype, done.\n");
+ break;
+ }
+ }
+
+ if (verbose)
+ dataLog("Returning conditions: ", listDump(conditions), "\n");
+ return ObjectPropertyConditionSet::create(conditions);
+}
+
+} // anonymous namespace
+
+ObjectPropertyConditionSet generateConditionsForPropertyMiss(
+ VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, UniquedStringImpl* uid)
+{
+ return generateConditions(
+ vm, exec->lexicalGlobalObject(), headStructure, nullptr,
+ [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+ ObjectPropertyCondition result =
+ generateCondition(vm, owner, object, uid, PropertyCondition::Absence);
+ if (!result)
+ return false;
+ conditions.append(result);
+ return true;
+ });
+}
+
+ObjectPropertyConditionSet generateConditionsForPropertySetterMiss(
+ VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, UniquedStringImpl* uid)
+{
+ return generateConditions(
+ vm, exec->lexicalGlobalObject(), headStructure, nullptr,
+ [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+ ObjectPropertyCondition result =
+ generateCondition(vm, owner, object, uid, PropertyCondition::AbsenceOfSetter);
+ if (!result)
+ return false;
+ conditions.append(result);
+ return true;
+ });
+}
+
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHit(
+ VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, JSObject* prototype,
+ UniquedStringImpl* uid)
+{
+ return generateConditions(
+ vm, exec->lexicalGlobalObject(), headStructure, prototype,
+ [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+ PropertyCondition::Kind kind =
+ object == prototype ? PropertyCondition::Presence : PropertyCondition::Absence;
+ ObjectPropertyCondition result =
+ generateCondition(vm, owner, object, uid, kind);
+ if (!result)
+ return false;
+ conditions.append(result);
+ return true;
+ });
+}
+
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHitCustom(
+ VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, JSObject* prototype,
+ UniquedStringImpl* uid)
+{
+ return generateConditions(
+ vm, exec->lexicalGlobalObject(), headStructure, prototype,
+ [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+ if (object == prototype)
+ return true;
+ ObjectPropertyCondition result =
+ generateCondition(vm, owner, object, uid, PropertyCondition::Absence);
+ if (!result)
+ return false;
+ conditions.append(result);
+ return true;
+ });
+}
+
+ObjectPropertyConditionSet generateConditionsForPropertySetterMissConcurrently(
+ VM& vm, JSGlobalObject* globalObject, Structure* headStructure, UniquedStringImpl* uid)
+{
+ return generateConditions(
+ vm, globalObject, headStructure, nullptr,
+ [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+ ObjectPropertyCondition result =
+ generateCondition(vm, nullptr, object, uid, PropertyCondition::AbsenceOfSetter);
+ if (!result)
+ return false;
+ conditions.append(result);
+ return true;
+ }, Concurrent);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h
new file mode 100644
index 000000000..957eaac25
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ObjectPropertyConditionSet_h
+#define ObjectPropertyConditionSet_h
+
+#include "ObjectPropertyCondition.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/RefCounted.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+// An object property condition set is used to represent the set of additional conditions
+// that need to be met for some heap access to be valid. The set can have the following
+// interesting states:
+//
+// Empty: There are no special conditions that need to be met.
+// Invalid: The heap access is never valid.
+// Non-empty: The heap access is valid if all the ObjectPropertyConditions in the set are valid.
+
+class ObjectPropertyConditionSet {
+public:
+ ObjectPropertyConditionSet() { }
+
+ static ObjectPropertyConditionSet invalid()
+ {
+ ObjectPropertyConditionSet result;
+ result.m_data = adoptRef(new Data());
+ return result;
+ }
+
+ static ObjectPropertyConditionSet create(const Vector<ObjectPropertyCondition>& vector)
+ {
+ if (vector.isEmpty())
+ return ObjectPropertyConditionSet();
+
+ ObjectPropertyConditionSet result;
+ result.m_data = adoptRef(new Data());
+ result.m_data->vector = vector;
+ return result;
+ }
+
+ bool isValid() const
+ {
+ return !m_data || !m_data->vector.isEmpty();
+ }
+
+ bool isEmpty() const
+ {
+ return !m_data;
+ }
+
+ typedef const ObjectPropertyCondition* iterator;
+
+ iterator begin() const
+ {
+ if (!m_data)
+ return nullptr;
+ return m_data->vector.begin();
+ }
+ iterator end() const
+ {
+ if (!m_data)
+ return nullptr;
+ return m_data->vector.end();
+ }
+
+ ObjectPropertyCondition forObject(JSObject*) const;
+ ObjectPropertyCondition forConditionKind(PropertyCondition::Kind) const;
+
+ unsigned numberOfConditionsWithKind(PropertyCondition::Kind) const;
+
+ bool hasOneSlotBaseCondition() const;
+
+ // If this is a condition set for a prototype hit, then this is guaranteed to return the
+ // condition on the prototype itself. This allows you to get the object, offset, and
+ // attributes for the prototype. This will RELEASE_ASSERT that there is exactly one Presence
+ // in the set, and it will return that presence.
+ ObjectPropertyCondition slotBaseCondition() const;
+
+ // Attempt to create a new condition set by merging this one with the other one. This will
+ // fail if any of the conditions are incompatible with each other. When if fails, it returns
+ // invalid().
+ ObjectPropertyConditionSet mergedWith(const ObjectPropertyConditionSet& other) const;
+
+ bool structuresEnsureValidity() const;
+ bool structuresEnsureValidityAssumingImpurePropertyWatchpoint() const;
+
+ bool needImpurePropertyWatchpoint() const;
+ bool areStillLive() const;
+
+ void dumpInContext(PrintStream&, DumpContext*) const;
+ void dump(PrintStream&) const;
+
+ // Helpers for using this in a union.
+ void* releaseRawPointer()
+ {
+ return static_cast<void*>(m_data.leakRef());
+ }
+ static ObjectPropertyConditionSet adoptRawPointer(void* rawPointer)
+ {
+ ObjectPropertyConditionSet result;
+ result.m_data = adoptRef(static_cast<Data*>(rawPointer));
+ return result;
+ }
+ static ObjectPropertyConditionSet fromRawPointer(void* rawPointer)
+ {
+ ObjectPropertyConditionSet result;
+ result.m_data = static_cast<Data*>(rawPointer);
+ return result;
+ }
+
+ // FIXME: Everything below here should be private, but cannot be because of a bug in VS.
+
+ // Internally, this represents Invalid using a pointer to a Data that has an empty vector.
+
+ // FIXME: This could be made more compact by having it internally use a vector that just has
+ // the non-uid portion of ObjectPropertyCondition, and then requiring that the callers of all
+ // of the APIs supply the uid.
+
+ class Data : public ThreadSafeRefCounted<Data> {
+ WTF_MAKE_NONCOPYABLE(Data);
+ WTF_MAKE_FAST_ALLOCATED;
+
+ public:
+ Data() { }
+
+ Vector<ObjectPropertyCondition> vector;
+ };
+
+private:
+ RefPtr<Data> m_data;
+};
+
+ObjectPropertyConditionSet generateConditionsForPropertyMiss(
+ VM&, JSCell* owner, ExecState*, Structure* headStructure, UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPropertySetterMiss(
+ VM&, JSCell* owner, ExecState*, Structure* headStructure, UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHit(
+ VM&, JSCell* owner, ExecState*, Structure* headStructure, JSObject* prototype,
+ UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHitCustom(
+ VM&, JSCell* owner, ExecState*, Structure* headStructure, JSObject* prototype,
+ UniquedStringImpl* uid);
+
+ObjectPropertyConditionSet generateConditionsForPropertySetterMissConcurrently(
+ VM&, JSGlobalObject*, Structure* headStructure, UniquedStringImpl* uid);
+
+} // namespace JSC
+
+#endif // ObjectPropertyConditionSet_h
+
diff --git a/Source/JavaScriptCore/bytecode/Opcode.cpp b/Source/JavaScriptCore/bytecode/Opcode.cpp
index 26f53511a..0d16dfc2f 100644
--- a/Source/JavaScriptCore/bytecode/Opcode.cpp
+++ b/Source/JavaScriptCore/bytecode/Opcode.cpp
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -30,6 +30,8 @@
#include "config.h"
#include "Opcode.h"
+#include <wtf/PrintStream.h>
+
#if ENABLE(OPCODE_STATS)
#include <array>
#include <wtf/DataLog.h>
@@ -185,3 +187,14 @@ void OpcodeStats::resetLastInstruction()
#endif
} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, OpcodeID opcode)
+{
+ out.print(opcodeNames[opcode]);
+}
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h
index e8636e785..ee667c84f 100644
--- a/Source/JavaScriptCore/bytecode/Opcode.h
+++ b/Source/JavaScriptCore/bytecode/Opcode.h
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -30,6 +30,7 @@
#ifndef Opcode_h
#define Opcode_h
+#include "Bytecodes.h"
#include "LLIntOpcode.h"
#include <algorithm>
@@ -40,158 +41,8 @@
namespace JSC {
#define FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, extension__) \
- macro(op_enter, 1) \
- macro(op_create_activation, 2) \
- macro(op_touch_entry, 1) \
- macro(op_init_lazy_reg, 2) \
- macro(op_create_arguments, 2) \
- macro(op_create_this, 4) \
- macro(op_get_callee, 3) \
- macro(op_to_this, 3) \
- \
- macro(op_new_object, 4) \
- macro(op_new_array, 5) \
- macro(op_new_array_with_size, 4) \
- macro(op_new_array_buffer, 5) \
- macro(op_new_regexp, 3) \
- macro(op_mov, 3) \
- macro(op_captured_mov, 4) \
- \
- macro(op_not, 3) \
- macro(op_eq, 4) \
- macro(op_eq_null, 3) \
- macro(op_neq, 4) \
- macro(op_neq_null, 3) \
- macro(op_stricteq, 4) \
- macro(op_nstricteq, 4) \
- macro(op_less, 4) \
- macro(op_lesseq, 4) \
- macro(op_greater, 4) \
- macro(op_greatereq, 4) \
- \
- macro(op_inc, 2) \
- macro(op_dec, 2) \
- macro(op_to_number, 3) \
- macro(op_negate, 3) \
- macro(op_add, 5) \
- macro(op_mul, 5) \
- macro(op_div, 5) \
- macro(op_mod, 4) \
- macro(op_sub, 5) \
- \
- macro(op_lshift, 4) \
- macro(op_rshift, 4) \
- macro(op_urshift, 4) \
- macro(op_unsigned, 3) \
- macro(op_bitand, 5) \
- macro(op_bitxor, 5) \
- macro(op_bitor, 5) \
- \
- macro(op_check_has_instance, 5) \
- macro(op_instanceof, 4) \
- macro(op_typeof, 3) \
- macro(op_is_undefined, 3) \
- macro(op_is_boolean, 3) \
- macro(op_is_number, 3) \
- macro(op_is_string, 3) \
- macro(op_is_object, 3) \
- macro(op_is_function, 3) \
- macro(op_in, 4) \
- \
- macro(op_init_global_const_nop, 5) \
- macro(op_init_global_const, 5) \
- macro(op_get_by_id, 9) /* has value profiling */ \
- macro(op_get_by_id_out_of_line, 9) /* has value profiling */ \
- macro(op_get_by_id_self, 9) /* has value profiling */ \
- macro(op_get_by_id_proto, 9) /* has value profiling */ \
- macro(op_get_by_id_chain, 9) /* has value profiling */ \
- macro(op_get_by_id_getter_self, 9) /* has value profiling */ \
- macro(op_get_by_id_getter_proto, 9) /* has value profiling */ \
- macro(op_get_by_id_getter_chain, 9) /* has value profiling */ \
- macro(op_get_by_id_custom_self, 9) /* has value profiling */ \
- macro(op_get_by_id_custom_proto, 9) /* has value profiling */ \
- macro(op_get_by_id_custom_chain, 9) /* has value profiling */ \
- macro(op_get_by_id_generic, 9) /* has value profiling */ \
- macro(op_get_array_length, 9) /* has value profiling */ \
- macro(op_get_string_length, 9) /* has value profiling */ \
- macro(op_get_arguments_length, 4) \
- macro(op_put_by_id, 9) \
- macro(op_put_by_id_out_of_line, 9) \
- macro(op_put_by_id_transition, 9) \
- macro(op_put_by_id_transition_direct, 9) \
- macro(op_put_by_id_transition_direct_out_of_line, 9) \
- macro(op_put_by_id_transition_normal, 9) \
- macro(op_put_by_id_transition_normal_out_of_line, 9) \
- macro(op_put_by_id_replace, 9) \
- macro(op_put_by_id_generic, 9) \
- macro(op_del_by_id, 4) \
- macro(op_get_by_val, 6) /* has value profiling */ \
- macro(op_get_argument_by_val, 6) /* must be the same size as op_get_by_val */ \
- macro(op_get_by_pname, 7) \
- macro(op_put_by_val, 5) \
- macro(op_put_by_val_direct, 5) \
- macro(op_del_by_val, 4) \
- macro(op_put_by_index, 4) \
- macro(op_put_getter_setter, 5) \
- \
- macro(op_jmp, 2) \
- macro(op_jtrue, 3) \
- macro(op_jfalse, 3) \
- macro(op_jeq_null, 3) \
- macro(op_jneq_null, 3) \
- macro(op_jneq_ptr, 4) \
- macro(op_jless, 4) \
- macro(op_jlesseq, 4) \
- macro(op_jgreater, 4) \
- macro(op_jgreatereq, 4) \
- macro(op_jnless, 4) \
- macro(op_jnlesseq, 4) \
- macro(op_jngreater, 4) \
- macro(op_jngreatereq, 4) \
- \
- macro(op_loop_hint, 1) \
- \
- macro(op_switch_imm, 4) \
- macro(op_switch_char, 4) \
- macro(op_switch_string, 4) \
- \
- macro(op_new_func, 4) \
- macro(op_new_captured_func, 4) \
- macro(op_new_func_exp, 3) \
- macro(op_call, 8) /* has value profiling */ \
- macro(op_call_eval, 8) /* has value profiling */ \
- macro(op_call_varargs, 8) /* has value profiling */ \
- macro(op_tear_off_activation, 2) \
- macro(op_tear_off_arguments, 3) \
- macro(op_ret, 2) \
- macro(op_ret_object_or_this, 3) \
- \
- macro(op_construct, 8) \
- macro(op_strcat, 4) \
- macro(op_to_primitive, 3) \
- \
- macro(op_get_pnames, 6) \
- macro(op_next_pname, 7) \
- \
- macro(op_resolve_scope, 6) \
- macro(op_get_from_scope, 8) /* has value profiling */ \
- macro(op_put_to_scope, 7) \
- \
- macro(op_push_with_scope, 2) \
- macro(op_pop_scope, 1) \
- macro(op_push_name_scope, 4) \
- \
- macro(op_catch, 2) \
- macro(op_throw, 2) \
- macro(op_throw_static_error, 3) \
- \
- macro(op_debug, 3) \
- macro(op_profile_will_call, 2) \
- macro(op_profile_did_call, 2) \
- \
- extension__ \
- \
- macro(op_end, 2) // end must be the last opcode in the list
+ FOR_EACH_BYTECODE_ID(macro) \
+ extension__
#define FOR_EACH_CORE_OPCODE_ID(macro) \
FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, /* No extension */ )
@@ -208,7 +59,11 @@ namespace JSC {
#undef OPCODE_ID_ENUM
const int maxOpcodeLength = 9;
-const int numOpcodeIDs = op_end + 1;
+#if !ENABLE(JIT)
+const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_CLOOP_BYTECODE_HELPER_IDS + NUMBER_OF_BYTECODE_HELPER_IDS;
+#else
+const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_BYTECODE_HELPER_IDS;
+#endif
#define OPCODE_ID_LENGTHS(id, length) const int id##_length = length;
FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS);
@@ -220,7 +75,7 @@ const int numOpcodeIDs = op_end + 1;
const int opcodeLengths[numOpcodeIDs] = { FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTH_MAP) };
#undef OPCODE_ID_LENGTH_MAP
-#define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= op_end, ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID);
+#define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= numOpcodeIDs, ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID);
FOR_EACH_OPCODE_ID(VERIFY_OPCODE_ID);
#undef VERIFY_OPCODE_ID
@@ -273,4 +128,12 @@ inline size_t opcodeLength(OpcodeID opcode)
} // namespace JSC
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::OpcodeID);
+
+} // namespace WTF
+
#endif // Opcode_h
diff --git a/Source/JavaScriptCore/bytecode/Operands.h b/Source/JavaScriptCore/bytecode/Operands.h
index f21e05f5f..78ddaa525 100644
--- a/Source/JavaScriptCore/bytecode/Operands.h
+++ b/Source/JavaScriptCore/bytecode/Operands.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -52,10 +52,10 @@ class Operands {
public:
Operands() { }
- explicit Operands(size_t numArguments, size_t numLocals)
+ explicit Operands(size_t numArguments, size_t numLocals, const T& initialValue = Traits::defaultValue())
{
- m_arguments.fill(Traits::defaultValue(), numArguments);
- m_locals.fill(Traits::defaultValue(), numLocals);
+ m_arguments.fill(initialValue, numArguments);
+ m_locals.fill(initialValue, numLocals);
}
template<typename U, typename OtherTraits>
@@ -96,7 +96,7 @@ public:
return local(idx);
}
- void ensureLocals(size_t size)
+ void ensureLocals(size_t size, const T& ensuredValue = Traits::defaultValue())
{
if (size <= m_locals.size())
return;
@@ -104,7 +104,7 @@ public:
size_t oldSize = m_locals.size();
m_locals.resize(size);
for (size_t i = oldSize; i < m_locals.size(); ++i)
- m_locals[i] = Traits::defaultValue();
+ m_locals[i] = ensuredValue;
}
void setLocal(size_t idx, const T& value)
@@ -149,6 +149,7 @@ public:
}
const T& operand(int operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); }
+ const T& operand(VirtualRegister operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); }
bool hasOperand(int operand) const
{
@@ -209,6 +210,10 @@ public:
return virtualRegisterForArgument(index).offset();
return virtualRegisterForLocal(index - numberOfArguments()).offset();
}
+ VirtualRegister virtualRegisterForIndex(size_t index) const
+ {
+ return VirtualRegister(operandForIndex(index));
+ }
size_t indexForOperand(int operand) const
{
if (operandIsArgument(operand))
@@ -252,11 +257,7 @@ public:
}
void dumpInContext(PrintStream& out, DumpContext* context) const;
-
- void dump(PrintStream& out) const
- {
- dumpInContext(out, 0);
- }
+ void dump(PrintStream& out) const;
private:
Vector<T, 8> m_arguments;
diff --git a/Source/JavaScriptCore/bytecode/OperandsInlines.h b/Source/JavaScriptCore/bytecode/OperandsInlines.h
index 74ad60bc1..c9dee88c7 100644
--- a/Source/JavaScriptCore/bytecode/OperandsInlines.h
+++ b/Source/JavaScriptCore/bytecode/OperandsInlines.h
@@ -47,6 +47,22 @@ void Operands<T, Traits>::dumpInContext(PrintStream& out, DumpContext* context)
}
}
+template<typename T, typename Traits>
+void Operands<T, Traits>::dump(PrintStream& out) const
+{
+ CommaPrinter comma(" ");
+ for (size_t argumentIndex = numberOfArguments(); argumentIndex--;) {
+ if (Traits::isEmptyForDump(argument(argumentIndex)))
+ continue;
+ out.print(comma, "arg", argumentIndex, ":", argument(argumentIndex));
+ }
+ for (size_t localIndex = 0; localIndex < numberOfLocals(); ++localIndex) {
+ if (Traits::isEmptyForDump(local(localIndex)))
+ continue;
+ out.print(comma, "loc", localIndex, ":", local(localIndex));
+ }
+}
+
} // namespace JSC
#endif // OperandsInlines_h
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp b/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
new file mode 100644
index 000000000..3a59f8db4
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
@@ -0,0 +1,1469 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PolymorphicAccess.h"
+
+#if ENABLE(JIT)
+
+#include "BinarySwitch.h"
+#include "CCallHelpers.h"
+#include "CodeBlock.h"
+#include "GetterSetter.h"
+#include "Heap.h"
+#include "JITOperations.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+#include "ScratchRegisterAllocator.h"
+#include "StructureStubClearingWatchpoint.h"
+#include "StructureStubInfo.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+static const bool verbose = false;
+
+Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
+{
+ return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
+ watchpoints, jit->codeBlock(), stubInfo, condition);
+}
+
+void AccessGenerationState::restoreScratch()
+{
+ allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
+}
+
+void AccessGenerationState::succeed()
+{
+ restoreScratch();
+ success.append(jit->jump());
+}
+
+void AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling()
+{
+ if (!m_calculatedRegistersForCallAndExceptionHandling) {
+ m_calculatedRegistersForCallAndExceptionHandling = true;
+
+ m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
+ m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
+ if (m_needsToRestoreRegistersIfException)
+ RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
+
+ m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
+ m_liveRegistersForCall.exclude(RegisterSet::registersToNotSaveForJSCall());
+ }
+}
+
+void AccessGenerationState::preserveLiveRegistersToStackForCall()
+{
+ unsigned extraStackPadding = 0;
+ unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegistersForCall(), extraStackPadding);
+ if (m_numberOfStackBytesUsedForRegisterPreservation != std::numeric_limits<unsigned>::max())
+ RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == m_numberOfStackBytesUsedForRegisterPreservation);
+ m_numberOfStackBytesUsedForRegisterPreservation = numberOfStackBytesUsedForRegisterPreservation;
+}
+
+void AccessGenerationState::restoreLiveRegistersFromStackForCall(bool isGetter)
+{
+ RegisterSet dontRestore;
+ if (isGetter) {
+ // This is the result value. We don't want to overwrite the result with what we stored to the stack.
+ // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
+ dontRestore.set(valueRegs);
+ }
+ restoreLiveRegistersFromStackForCall(dontRestore);
+}
+
+void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException()
+{
+ // Even if we're a getter, we don't want to ignore the result value like we normally do
+ // because the getter threw, and therefore, didn't return a value that means anything.
+ // Instead, we want to restore that register to what it was upon entering the getter
+ // inline cache. The subtlety here is if the base and the result are the same register,
+ // and the getter threw, we want OSR exit to see the original base value, not the result
+ // of the getter call.
+ RegisterSet dontRestore = liveRegistersForCall();
+ // As an optimization here, we only need to restore what is live for exception handling.
+ // We can construct the dontRestore set to accomplish this goal by having it contain only
+ // what is live for call but not live for exception handling. By ignoring things that are
+ // only live at the call but not the exception handler, we will only restore things live
+ // at the exception handler.
+ dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
+ restoreLiveRegistersFromStackForCall(dontRestore);
+}
+
+void AccessGenerationState::restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore)
+{
+ unsigned extraStackPadding = 0;
+ ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, liveRegistersForCall(), dontRestore, m_numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
+}
+
+CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
+{
+ RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
+
+ if (!m_calculatedCallSiteIndex) {
+ m_calculatedCallSiteIndex = true;
+
+ if (m_needsToRestoreRegistersIfException)
+ m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
+ else
+ m_callSiteIndex = originalCallSiteIndex();
+ }
+
+ return m_callSiteIndex;
+}
+
+const HandlerInfo& AccessGenerationState::originalExceptionHandler() const
+{
+ RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
+ HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
+ RELEASE_ASSERT(exceptionHandler);
+ return *exceptionHandler;
+}
+
+CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
+
+AccessCase::AccessCase()
+{
+}
+
+std::unique_ptr<AccessCase> AccessCase::get(
+ VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
+ const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
+ PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase)
+{
+ std::unique_ptr<AccessCase> result(new AccessCase());
+
+ result->m_type = type;
+ result->m_offset = offset;
+ result->m_structure.set(vm, owner, structure);
+ result->m_conditionSet = conditionSet;
+
+ if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase) {
+ result->m_rareData = std::make_unique<RareData>();
+ result->m_rareData->viaProxy = viaProxy;
+ result->m_rareData->additionalSet = additionalSet;
+ result->m_rareData->customAccessor.getter = customGetter;
+ result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
+ }
+
+ return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::replace(
+ VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset)
+{
+ std::unique_ptr<AccessCase> result(new AccessCase());
+
+ result->m_type = Replace;
+ result->m_offset = offset;
+ result->m_structure.set(vm, owner, structure);
+
+ return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::transition(
+ VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset,
+ const ObjectPropertyConditionSet& conditionSet)
+{
+ RELEASE_ASSERT(oldStructure == newStructure->previousID());
+
+ // Skip optimizing the case where we need a realloc, if we don't have
+ // enough registers to make it happen.
+ if (GPRInfo::numberOfRegisters < 6
+ && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity()) {
+ return nullptr;
+ }
+
+ // Skip optimizing the case where we need realloc, and the structure has
+ // indexing storage.
+ // FIXME: We shouldn't skip this! Implement it!
+ // https://bugs.webkit.org/show_bug.cgi?id=130914
+ if (oldStructure->couldHaveIndexingHeader())
+ return nullptr;
+
+ std::unique_ptr<AccessCase> result(new AccessCase());
+
+ result->m_type = Transition;
+ result->m_offset = offset;
+ result->m_structure.set(vm, owner, newStructure);
+ result->m_conditionSet = conditionSet;
+
+ return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::setter(
+ VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
+ const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
+ JSObject* customSlotBase)
+{
+ std::unique_ptr<AccessCase> result(new AccessCase());
+
+ result->m_type = type;
+ result->m_offset = offset;
+ result->m_structure.set(vm, owner, structure);
+ result->m_conditionSet = conditionSet;
+ result->m_rareData = std::make_unique<RareData>();
+ result->m_rareData->customAccessor.setter = customSetter;
+ result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
+
+ return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::in(
+ VM& vm, JSCell* owner, AccessType type, Structure* structure,
+ const ObjectPropertyConditionSet& conditionSet)
+{
+ std::unique_ptr<AccessCase> result(new AccessCase());
+
+ result->m_type = type;
+ result->m_structure.set(vm, owner, structure);
+ result->m_conditionSet = conditionSet;
+
+ return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type)
+{
+ std::unique_ptr<AccessCase> result(new AccessCase());
+
+ result->m_type = type;
+
+ return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::getIntrinsic(
+ VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset,
+ Structure* structure, const ObjectPropertyConditionSet& conditionSet)
+{
+ std::unique_ptr<AccessCase> result(new AccessCase());
+
+ result->m_type = IntrinsicGetter;
+ result->m_structure.set(vm, owner, structure);
+ result->m_conditionSet = conditionSet;
+ result->m_offset = offset;
+
+ result->m_rareData = std::make_unique<RareData>();
+ result->m_rareData->intrinsicFunction.set(vm, owner, getter);
+
+ return result;
+}
+
+AccessCase::~AccessCase()
+{
+}
+
+std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
+ VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
+{
+ switch (stubInfo.cacheType) {
+ case CacheType::GetByIdSelf:
+ return get(
+ vm, owner, Load, stubInfo.u.byIdSelf.offset,
+ stubInfo.u.byIdSelf.baseObjectStructure.get());
+
+ case CacheType::PutByIdReplace:
+ return replace(
+ vm, owner, stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset);
+
+ default:
+ return nullptr;
+ }
+}
+
+std::unique_ptr<AccessCase> AccessCase::clone() const
+{
+ std::unique_ptr<AccessCase> result(new AccessCase());
+ result->m_type = m_type;
+ result->m_offset = m_offset;
+ result->m_structure = m_structure;
+ result->m_conditionSet = m_conditionSet;
+ if (RareData* rareData = m_rareData.get()) {
+ result->m_rareData = std::make_unique<RareData>();
+ result->m_rareData->viaProxy = rareData->viaProxy;
+ result->m_rareData->additionalSet = rareData->additionalSet;
+ // NOTE: We don't copy the callLinkInfo, since that's created during code generation.
+ result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque;
+ result->m_rareData->customSlotBase = rareData->customSlotBase;
+ result->m_rareData->intrinsicFunction = rareData->intrinsicFunction;
+ }
+ return result;
+}
+
+bool AccessCase::guardedByStructureCheck() const
+{
+ if (viaProxy())
+ return false;
+
+ switch (m_type) {
+ case ArrayLength:
+ case StringLength:
+ return false;
+ default:
+ return true;
+ }
+}
+
+JSObject* AccessCase::alternateBase() const
+{
+ if (customSlotBase())
+ return customSlotBase();
+ return conditionSet().slotBaseCondition().object();
+}
+
+bool AccessCase::couldStillSucceed() const
+{
+ return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
+}
+
+bool AccessCase::canReplace(const AccessCase& other)
+{
+ // We could do a lot better here, but for now we just do something obvious.
+
+ if (!guardedByStructureCheck() || !other.guardedByStructureCheck()) {
+ // FIXME: Implement this!
+ return false;
+ }
+
+ return structure() == other.structure();
+}
+
+void AccessCase::dump(PrintStream& out) const
+{
+ out.print(m_type, ":(");
+
+ CommaPrinter comma;
+
+ if (m_type == Transition)
+ out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
+ else if (m_structure)
+ out.print(comma, "structure = ", pointerDump(m_structure.get()));
+
+ if (isValidOffset(m_offset))
+ out.print(comma, "offset = ", m_offset);
+ if (!m_conditionSet.isEmpty())
+ out.print(comma, "conditions = ", m_conditionSet);
+
+ if (RareData* rareData = m_rareData.get()) {
+ if (rareData->viaProxy)
+ out.print(comma, "viaProxy = ", rareData->viaProxy);
+ if (rareData->additionalSet)
+ out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get()));
+ if (rareData->callLinkInfo)
+ out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get()));
+ if (rareData->customAccessor.opaque)
+ out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque));
+ if (rareData->customSlotBase)
+ out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get()));
+ }
+
+ out.print(")");
+}
+
+bool AccessCase::visitWeak(VM& vm) const
+{
+ if (m_structure && !Heap::isMarked(m_structure.get()))
+ return false;
+ if (!m_conditionSet.areStillLive())
+ return false;
+ if (m_rareData) {
+ if (m_rareData->callLinkInfo)
+ m_rareData->callLinkInfo->visitWeak(vm);
+ if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get()))
+ return false;
+ if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get()))
+ return false;
+ }
+ return true;
+}
+
// Emits the type/structure guard for this case and then defers to generate()
// for the body. Any branch that fails the guard is appended to fallThrough so
// that the next case in the cascade (or the slow path) can be tried.
void AccessCase::generateWithGuard(
    AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
{
    CCallHelpers& jit = *state.jit;

    switch (m_type) {
    case ArrayLength: {
        // Arrays are recognized by their indexing type byte rather than by
        // structure: the cell must have the IsArray bit and a non-zero
        // indexing shape.
        ASSERT(!viaProxy());
        jit.load8(CCallHelpers::Address(state.baseGPR, JSCell::indexingTypeOffset()), state.scratchGPR);
        fallThrough.append(
            jit.branchTest32(
                CCallHelpers::Zero, state.scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
        fallThrough.append(
            jit.branchTest32(
                CCallHelpers::Zero, state.scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
        break;
    }

    case StringLength: {
        // Strings are recognized by JSType, not structure.
        ASSERT(!viaProxy());
        fallThrough.append(
            jit.branch8(
                CCallHelpers::NotEqual,
                CCallHelpers::Address(state.baseGPR, JSCell::typeInfoTypeOffset()),
                CCallHelpers::TrustedImm32(StringType)));
        break;
    }

    default: {
        // All other cases are guarded by a structure check, applied either to
        // the base itself or, for proxied accesses, to the proxy's target.
        if (viaProxy()) {
            fallThrough.append(
                jit.branch8(
                    CCallHelpers::NotEqual,
                    CCallHelpers::Address(state.baseGPR, JSCell::typeInfoTypeOffset()),
                    CCallHelpers::TrustedImm32(PureForwardingProxyType)));

            jit.loadPtr(
                CCallHelpers::Address(state.baseGPR, JSProxy::targetOffset()),
                state.scratchGPR);

            fallThrough.append(
                jit.branchStructure(
                    CCallHelpers::NotEqual,
                    CCallHelpers::Address(state.scratchGPR, JSCell::structureIDOffset()),
                    structure()));
        } else {
            fallThrough.append(
                jit.branchStructure(
                    CCallHelpers::NotEqual,
                    CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
                    structure()));
        }
        break;
    } };

    generate(state);
}
+
// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned on an even-numbered register (r0, r2 or [sp]).
// To prevent the assembler from using wrong registers, let's occupy r1 or r3 with a dummy argument when necessary.
// Used by the custom getter/setter argument marshalling in AccessCase::generate() below.
#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
#define EABI_32BIT_DUMMY_ARG CCallHelpers::TrustedImm32(0),
#else
#define EABI_32BIT_DUMMY_ARG
#endif
+
// Emits the body of this access case into the state's assembler, assuming the
// guard (see generateWithGuard()) has already passed. Successful paths jump via
// state.succeed(); failures are appended to state.failAndRepatch (repatchable)
// or state.failAndIgnore (take the slow path without repatching). Also installs
// the watchpoints that keep the generated code valid.
void AccessCase::generate(AccessGenerationState& state)
{
    if (verbose)
        dataLog("Generating code for: ", *this, "\n");

    CCallHelpers& jit = *state.jit;
    VM& vm = *jit.vm();
    CodeBlock* codeBlock = jit.codeBlock();
    StructureStubInfo& stubInfo = *state.stubInfo;
    const Identifier& ident = *state.ident;
    JSValueRegs valueRegs = state.valueRegs;
    GPRReg baseGPR = state.baseGPR;
    GPRReg scratchGPR = state.scratchGPR;

    ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());

    // Impure properties need a watchpoint so the stub is discarded if the
    // property's lookup behavior changes.
    if ((structure() && structure()->needImpurePropertyWatchpoint())
        || m_conditionSet.needImpurePropertyWatchpoint())
        vm.registerWatchpointForImpureProperty(ident, state.addWatchpoint());

    if (additionalSet())
        additionalSet()->add(state.addWatchpoint());

    // Each property condition is enforced either by a transition watchpoint
    // (when watchable) or by an explicit structure check emitted inline.
    for (const ObjectPropertyCondition& condition : m_conditionSet) {
        Structure* structure = condition.object()->structure();

        if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
            structure->addTransitionWatchpoint(state.addWatchpoint(condition));
            continue;
        }

        if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
            dataLog("This condition is no longer met: ", condition, "\n");
            RELEASE_ASSERT_NOT_REACHED();
        }

        // We will emit code that has a weak reference that isn't otherwise listed anywhere.
        state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));

        jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
        state.failAndRepatch.append(
            jit.branchStructure(
                CCallHelpers::NotEqual,
                CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
                structure));
    }

    switch (m_type) {
    case InHit:
    case InMiss:
        // "in" just answers a boolean once the structure check has passed.
        jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
        state.succeed();
        return;

    case Miss:
        jit.moveTrustedValue(jsUndefined(), valueRegs);
        state.succeed();
        return;

    case Load:
    case Getter:
    case Setter:
    case CustomValueGetter:
    case CustomAccessorGetter:
    case CustomValueSetter:
    case CustomAccessorSetter: {
        // All of these share the property-load prologue; plain loads then
        // succeed immediately while getters/setters go on to make a call.
        if (isValidOffset(m_offset)) {
            Structure* currStructure;
            if (m_conditionSet.isEmpty())
                currStructure = structure();
            else
                currStructure = m_conditionSet.slotBaseCondition().object()->structure();
            currStructure->startWatchingPropertyForReplacements(vm, offset());
        }

        GPRReg baseForGetGPR;
        if (viaProxy()) {
            baseForGetGPR = valueRegs.payloadGPR();
            jit.loadPtr(
                CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
                baseForGetGPR);
        } else
            baseForGetGPR = baseGPR;

        // If there are conditions, the property lives on the slot base (e.g. a
        // prototype), not on the receiver.
        GPRReg baseForAccessGPR;
        if (!m_conditionSet.isEmpty()) {
            jit.move(
                CCallHelpers::TrustedImmPtr(alternateBase()),
                scratchGPR);
            baseForAccessGPR = scratchGPR;
        } else
            baseForAccessGPR = baseForGetGPR;

        GPRReg loadedValueGPR = InvalidGPRReg;
        if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
            if (m_type == Load)
                loadedValueGPR = valueRegs.payloadGPR();
            else
                loadedValueGPR = scratchGPR;

            GPRReg storageGPR;
            if (isInlineOffset(m_offset))
                storageGPR = baseForAccessGPR;
            else {
                jit.loadPtr(
                    CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
                    loadedValueGPR);
                jit.removeSpaceBits(loadedValueGPR);
                storageGPR = loadedValueGPR;
            }

#if USE(JSVALUE64)
            jit.load64(
                CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
#else
            if (m_type == Load) {
                jit.load32(
                    CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
                    valueRegs.tagGPR());
            }
            jit.load32(
                CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
                loadedValueGPR);
#endif
        }

        if (m_type == Load) {
            state.succeed();
            return;
        }

        // Stuff for custom getters/setters.
        CCallHelpers::Call operationCall;
        CCallHelpers::Call lookupExceptionHandlerCall;

        // Stuff for JS getters/setters.
        CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
        CCallHelpers::Call fastPathCall;
        CCallHelpers::Call slowPathCall;

        CCallHelpers::Jump success;
        CCallHelpers::Jump fail;

        // This also does the necessary calculations of whether or not we're an
        // exception handling call site.
        state.calculateLiveRegistersForCallAndExceptionHandling();
        state.preserveLiveRegistersToStackForCall();

        // Need to make sure that whenever this call is made in the future, we remember the
        // place that we made it from.
        jit.store32(
            CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
            CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));

        if (m_type == Getter || m_type == Setter) {
            // Create a JS call using a JS call inline cache. Assume that:
            //
            // - SP is aligned and represents the extent of the calling compiler's stack usage.
            //
            // - FP is set correctly (i.e. it points to the caller's call frame header).
            //
            // - SP - FP is an aligned difference.
            //
            // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
            //   code.
            //
            // Therefore, we temporarily grow the stack for the purpose of the call and then
            // shrink it after.

            RELEASE_ASSERT(!m_rareData->callLinkInfo);
            m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();

            // FIXME: If we generated a polymorphic call stub that jumped back to the getter
            // stub, which then jumped back to the main code, then we'd have a reachability
            // situation that the GC doesn't know about. The GC would ensure that the polymorphic
            // call stub stayed alive, and it would ensure that the main code stayed alive, but
            // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
            // be GC objects, and then we'd be able to say that the polymorphic call stub has a
            // reference to the getter stub.
            // https://bugs.webkit.org/show_bug.cgi?id=148914
            m_rareData->callLinkInfo->disallowStubs();

            m_rareData->callLinkInfo->setUpCall(
                CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);

            CCallHelpers::JumpList done;

            // There is a "this" argument.
            unsigned numberOfParameters = 1;
            // ... and a value argument if we're calling a setter.
            if (m_type == Setter)
                numberOfParameters++;

            // Get the accessor; if there ain't one then the result is jsUndefined().
            if (m_type == Setter) {
                jit.loadPtr(
                    CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
                    loadedValueGPR);
            } else {
                jit.loadPtr(
                    CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
                    loadedValueGPR);
            }

            CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
                CCallHelpers::Zero, loadedValueGPR);

            unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters;

            unsigned numberOfBytesForCall =
                numberOfRegsForCall * sizeof(Register) + sizeof(CallerFrameAndPC);

            unsigned alignedNumberOfBytesForCall =
                WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

            jit.subPtr(
                CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
                CCallHelpers::stackPointerRegister);

            // Build the callee frame just below the stack pointer.
            CCallHelpers::Address calleeFrame = CCallHelpers::Address(
                CCallHelpers::stackPointerRegister,
                -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));

            jit.store32(
                CCallHelpers::TrustedImm32(numberOfParameters),
                calleeFrame.withOffset(JSStack::ArgumentCount * sizeof(Register) + PayloadOffset));

            jit.storeCell(
                loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register)));

            jit.storeCell(
                baseForGetGPR,
                calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));

            if (m_type == Setter) {
                jit.storeValue(
                    valueRegs,
                    calleeFrame.withOffset(
                        virtualRegisterForArgument(1).offset() * sizeof(Register)));
            }

            // Inline call cache: fast path if the callee matches the patched
            // expectation, otherwise the link-call thunk.
            CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
                CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
                CCallHelpers::TrustedImmPtr(0));

            fastPathCall = jit.nearCall();
            if (m_type == Getter)
                jit.setupResults(valueRegs);
            done.append(jit.jump());

            slowCase.link(&jit);
            jit.move(loadedValueGPR, GPRInfo::regT0);
#if USE(JSVALUE32_64)
            // We *always* know that the getter/setter, if non-null, is a cell.
            jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
#endif
            jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
            slowPathCall = jit.nearCall();
            if (m_type == Getter)
                jit.setupResults(valueRegs);
            done.append(jit.jump());

            returnUndefined.link(&jit);
            if (m_type == Getter)
                jit.moveTrustedValue(jsUndefined(), valueRegs);

            done.link(&jit);

            // Restore the stack pointer to the caller's expectation.
            jit.addPtr(CCallHelpers::TrustedImm32((jit.codeBlock()->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - state.numberOfStackBytesUsedForRegisterPreservation()),
                GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
            state.restoreLiveRegistersFromStackForCall(isGetter());

            state.callbacks.append(
                [=, &vm] (LinkBuffer& linkBuffer) {
                    m_rareData->callLinkInfo->setCallLocations(
                        linkBuffer.locationOfNearCall(slowPathCall),
                        linkBuffer.locationOf(addressOfLinkFunctionCheck),
                        linkBuffer.locationOfNearCall(fastPathCall));

                    linkBuffer.link(
                        slowPathCall,
                        CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
                });
        } else {
            // Need to make room for the C call so any of our stack spillage isn't overwritten.
            // We also need to make room because we may be an inline cache in the FTL and not
            // have a JIT call frame.
            bool needsToMakeRoomOnStackForCCall = state.numberOfStackBytesUsedForRegisterPreservation() || codeBlock->jitType() == JITCode::FTLJIT;
            if (needsToMakeRoomOnStackForCCall)
                jit.makeSpaceOnStackForCCall();

            // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
            // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
            // Custom values are passed the slotBase (the property holder), custom accessors are passed the thisValue (receiver).
            GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
#if USE(JSVALUE64)
            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
                jit.setupArgumentsWithExecState(
                    baseForCustomValue,
                    CCallHelpers::TrustedImmPtr(ident.impl()));
            } else
                jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
#else
            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
                jit.setupArgumentsWithExecState(
                    EABI_32BIT_DUMMY_ARG baseForCustomValue,
                    CCallHelpers::TrustedImm32(JSValue::CellTag),
                    CCallHelpers::TrustedImmPtr(ident.impl()));
            } else {
                jit.setupArgumentsWithExecState(
                    EABI_32BIT_DUMMY_ARG baseForCustomValue,
                    CCallHelpers::TrustedImm32(JSValue::CellTag),
                    valueRegs.payloadGPR(), valueRegs.tagGPR());
            }
#endif
            jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);

            operationCall = jit.call();
            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
                jit.setupResults(valueRegs);
            if (needsToMakeRoomOnStackForCCall)
                jit.reclaimSpaceOnStackForCCall();

            // Custom accessors can throw; check for an exception manually
            // since this call doesn't go through genericUnwind.
            CCallHelpers::Jump noException =
                jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);

            bool didSetLookupExceptionHandler = false;
            state.restoreLiveRegistersFromStackForCallWithThrownException();
            state.restoreScratch();
            jit.copyCalleeSavesToVMCalleeSavesBuffer();
            if (state.needsToRestoreRegistersIfException()) {
                // To the JIT that produces the original exception handling
                // call site, they will expect the OSR exit to be arrived
                // at from genericUnwind. Therefore we must model what genericUnwind
                // does here. I.e, set callFrameForCatch and copy callee saves.

                jit.storePtr(GPRInfo::callFrameRegister, vm.addressOfCallFrameForCatch());
                CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();

                // We don't need to insert a new exception handler in the table
                // because we're doing a manual exception check here. i.e, we'll
                // never arrive here from genericUnwind().
                HandlerInfo originalHandler = state.originalExceptionHandler();
                state.callbacks.append(
                    [=] (LinkBuffer& linkBuffer) {
                        linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
                    });
            } else {
                jit.setupArguments(CCallHelpers::TrustedImmPtr(&vm), GPRInfo::callFrameRegister);
                lookupExceptionHandlerCall = jit.call();
                didSetLookupExceptionHandler = true;
                jit.jumpToExceptionHandler();
            }

            noException.link(&jit);
            state.restoreLiveRegistersFromStackForCall(isGetter());

            state.callbacks.append(
                [=] (LinkBuffer& linkBuffer) {
                    linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
                    if (didSetLookupExceptionHandler)
                        linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
                });
        }
        state.succeed();
        return;
    }

    case Replace: {
        // If the property has an inferred type, values that violate it must
        // take the slow path (and invalidate the type).
        if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
            if (verbose)
                dataLog("Have type: ", type->descriptor(), "\n");
            state.failAndRepatch.append(
                jit.branchIfNotType(
                    valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
        } else if (verbose)
            dataLog("Don't have type.\n");

        if (isInlineOffset(m_offset)) {
            jit.storeValue(
                valueRegs,
                CCallHelpers::Address(
                    baseGPR,
                    JSObject::offsetOfInlineStorage() +
                    offsetInInlineStorage(m_offset) * sizeof(JSValue)));
        } else {
            jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
            state.failAndIgnore.append(jit.branchIfNotToSpace(scratchGPR));
            jit.storeValue(
                valueRegs,
                CCallHelpers::Address(
                    scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
        }
        state.succeed();
        return;
    }

    case Transition: {
        // AccessCase::transition() should have returned null.
        RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());
        RELEASE_ASSERT(!structure()->couldHaveIndexingHeader());

        if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
            if (verbose)
                dataLog("Have type: ", type->descriptor(), "\n");
            state.failAndRepatch.append(
                jit.branchIfNotType(
                    valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
        } else if (verbose)
            dataLog("Don't have type.\n");

        CCallHelpers::JumpList slowPath;

        ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
        allocator.lock(baseGPR);
#if USE(JSVALUE32_64)
        allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
#endif
        allocator.lock(valueRegs);
        allocator.lock(scratchGPR);

        GPRReg scratchGPR2 = allocator.allocateScratchGPR();
        GPRReg scratchGPR3;
        // A third scratch register is only needed when we have to copy an
        // existing out-of-line backing store into a bigger one.
        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
            && structure()->outOfLineCapacity())
            scratchGPR3 = allocator.allocateScratchGPR();
        else
            scratchGPR3 = InvalidGPRReg;

        ScratchRegisterAllocator::PreservedState preservedState =
            allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);

        ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());

        bool scratchGPRHasStorage = false;
        bool needsToMakeRoomOnStackForCCall = !preservedState.numberOfBytesPreserved && codeBlock->jitType() == JITCode::FTLJIT;

        // Allocate (or reallocate) the out-of-line backing store inline,
        // falling back to the C call on allocation failure.
        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) {
            size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
            CopiedAllocator* copiedAllocator = &vm.heap.storageAllocator();

            if (!structure()->outOfLineCapacity()) {
                // Fresh allocation: bump-allocate from the copied space.
                jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
                slowPath.append(
                    jit.branchSubPtr(
                        CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
                jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
                jit.negPtr(scratchGPR);
                jit.addPtr(
                    CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
                jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
            } else {
                size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
                ASSERT(newSize > oldSize);

                jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
                slowPath.append(jit.branchIfNotToSpace(scratchGPR3));
                jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
                slowPath.append(
                    jit.branchSubPtr(
                        CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
                jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
                jit.negPtr(scratchGPR);
                jit.addPtr(
                    CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
                jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
                // We have scratchGPR = new storage, scratchGPR3 = old storage,
                // scratchGPR2 = available
                for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
                    jit.loadPtr(
                        CCallHelpers::Address(
                            scratchGPR3,
                            -static_cast<ptrdiff_t>(
                                offset + sizeof(JSValue) + sizeof(void*))),
                        scratchGPR2);
                    jit.storePtr(
                        scratchGPR2,
                        CCallHelpers::Address(
                            scratchGPR,
                            -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
                }
            }

            jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()));
            scratchGPRHasStorage = true;
        }

        // Commit the transition by storing the new structure ID, then store
        // the value at its new offset.
        uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
        jit.store32(
            CCallHelpers::TrustedImm32(structureBits),
            CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));

        if (isInlineOffset(m_offset)) {
            jit.storeValue(
                valueRegs,
                CCallHelpers::Address(
                    baseGPR,
                    JSObject::offsetOfInlineStorage() +
                    offsetInInlineStorage(m_offset) * sizeof(JSValue)));
        } else {
            if (!scratchGPRHasStorage) {
                jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
                state.failAndIgnore.append(jit.branchIfNotToSpace(scratchGPR));
            }
            jit.storeValue(
                valueRegs,
                CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
        }

        ScratchBuffer* scratchBuffer = nullptr;
        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity())
            scratchBuffer = vm.scratchBufferForSize(allocator.desiredScratchBufferSizeForCall());

        // A reallocation published a new pointer into the object, so we need a
        // write barrier (buffered, flushed via a C call when full).
        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) {
            CCallHelpers::Call callFlushWriteBarrierBuffer;
            CCallHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(baseGPR);
            WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer();
            jit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2);
            CCallHelpers::Jump needToFlush =
                jit.branch32(
                    CCallHelpers::AboveOrEqual, scratchGPR2,
                    CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity()));

            jit.add32(CCallHelpers::TrustedImm32(1), scratchGPR2);
            jit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress());

            jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR);
            // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2.
            jit.storePtr(
                baseGPR,
                CCallHelpers::BaseIndex(
                    scratchGPR, scratchGPR2, CCallHelpers::ScalePtr,
                    static_cast<int32_t>(-sizeof(void*))));

            CCallHelpers::Jump doneWithBarrier = jit.jump();
            needToFlush.link(&jit);

            // FIXME: We should restoreReusedRegistersByPopping() before this. Then, we wouldn't need
            // padding in preserveReusedRegistersByPushing(). Or, maybe it would be even better if the
            // barrier slow path was just the normal slow path, below.
            // https://bugs.webkit.org/show_bug.cgi?id=149030
            allocator.preserveUsedRegistersToScratchBufferForCall(jit, scratchBuffer, scratchGPR2);
            if (needsToMakeRoomOnStackForCCall)
                jit.makeSpaceOnStackForCCall();
            jit.setupArgumentsWithExecState(baseGPR);
            callFlushWriteBarrierBuffer = jit.call();
            if (needsToMakeRoomOnStackForCCall)
                jit.reclaimSpaceOnStackForCCall();
            allocator.restoreUsedRegistersFromScratchBufferForCall(
                jit, scratchBuffer, scratchGPR2);

            doneWithBarrier.link(&jit);
            ownerIsRememberedOrInEden.link(&jit);

            state.callbacks.append(
                [=] (LinkBuffer& linkBuffer) {
                    linkBuffer.link(callFlushWriteBarrierBuffer, operationFlushWriteBarrierBuffer);
                });
        }

        allocator.restoreReusedRegistersByPopping(jit, preservedState);
        state.succeed();

        // Allocation-failure slow path: do the reallocation and store in C++.
        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) {
            slowPath.link(&jit);
            allocator.restoreReusedRegistersByPopping(jit, preservedState);
            allocator.preserveUsedRegistersToScratchBufferForCall(jit, scratchBuffer, scratchGPR);
            if (needsToMakeRoomOnStackForCCall)
                jit.makeSpaceOnStackForCCall();
#if USE(JSVALUE64)
            jit.setupArgumentsWithExecState(
                baseGPR,
                CCallHelpers::TrustedImmPtr(newStructure()),
                CCallHelpers::TrustedImm32(m_offset),
                valueRegs.gpr());
#else
            jit.setupArgumentsWithExecState(
                baseGPR,
                CCallHelpers::TrustedImmPtr(newStructure()),
                CCallHelpers::TrustedImm32(m_offset),
                valueRegs.payloadGPR(), valueRegs.tagGPR());
#endif
            CCallHelpers::Call operationCall = jit.call();
            if (needsToMakeRoomOnStackForCCall)
                jit.reclaimSpaceOnStackForCCall();
            allocator.restoreUsedRegistersFromScratchBufferForCall(jit, scratchBuffer, scratchGPR);
            state.succeed();

            state.callbacks.append(
                [=] (LinkBuffer& linkBuffer) {
                    linkBuffer.link(operationCall, operationReallocateStorageAndFinishPut);
                });
        }
        return;
    }

    case ArrayLength: {
        // Load length from the butterfly; negative lengths (too big for int32)
        // take the slow path without repatching.
        jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
        jit.removeSpaceBits(scratchGPR);
        jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
        state.failAndIgnore.append(
            jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
        jit.boxInt32(scratchGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
        state.succeed();
        return;
    }

    case StringLength: {
        jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
        jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
        state.succeed();
        return;
    }

    case IntrinsicGetter: {
        RELEASE_ASSERT(isValidOffset(offset()));

        // We need to ensure the getter value does not move from under us. Note that GetterSetters
        // are immutable so we just need to watch the property not any value inside it.
        Structure* currStructure;
        if (m_conditionSet.isEmpty())
            currStructure = structure();
        else
            currStructure = m_conditionSet.slotBaseCondition().object()->structure();
        currStructure->startWatchingPropertyForReplacements(vm, offset());

        emitIntrinsicGetter(state);
        return;
    } }

    RELEASE_ASSERT_NOT_REACHED();
}
+
+PolymorphicAccess::PolymorphicAccess() { }
+PolymorphicAccess::~PolymorphicAccess() { }
+
+MacroAssemblerCodePtr PolymorphicAccess::regenerateWithCases(
+ VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
+ Vector<std::unique_ptr<AccessCase>> originalCasesToAdd)
+{
+ // This method will add the originalCasesToAdd to the list one at a time while preserving the
+ // invariants:
+ // - If a newly added case canReplace() any existing case, then the existing case is removed before
+ // the new case is added. Removal doesn't change order of the list. Any number of existing cases
+ // can be removed via the canReplace() rule.
+ // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
+ // cascade through the cases in reverse order, you will get the most recent cases first.
+ // - If this method fails (returns null, doesn't add the cases), then both the previous case list
+ // and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to
+ // add more things after failure.
+
+ // First, verify that we can generate code for all of the new cases while eliminating any of the
+ // new cases that replace each other.
+ Vector<std::unique_ptr<AccessCase>> casesToAdd;
+ for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
+ std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
+
+ // Add it only if it is not replaced by the subsequent cases in the list.
+ bool found = false;
+ for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
+ if (originalCasesToAdd[j]->canReplace(*myCase)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ casesToAdd.append(WTFMove(myCase));
+ }
+
+ if (verbose)
+ dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
+
+ // If there aren't any cases to add, then fail on the grounds that there's no point to generating a
+ // new stub that will be identical to the old one. Returning null should tell the caller to just
+ // keep doing what they were doing before.
+ if (casesToAdd.isEmpty())
+ return MacroAssemblerCodePtr();
+
+ // Now construct the list of cases as they should appear if we are successful. This means putting
+ // all of the previous cases in this list in order but excluding those that can be replaced, and
+ // then adding the new cases.
+ ListType newCases;
+ for (auto& oldCase : m_list) {
+ // Ignore old cases that cannot possibly succeed anymore.
+ if (!oldCase->couldStillSucceed())
+ continue;
+
+ // Figure out if this is replaced by any new cases.
+ bool found = false;
+ for (auto& caseToAdd : casesToAdd) {
+ if (caseToAdd->canReplace(*oldCase)) {
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ continue;
+
+ newCases.append(oldCase->clone());
+ }
+ for (auto& caseToAdd : casesToAdd)
+ newCases.append(WTFMove(caseToAdd));
+
+ if (verbose)
+ dataLog("newCases: ", listDump(newCases), "\n");
+
+ if (newCases.size() > Options::maxAccessVariantListSize()) {
+ if (verbose)
+ dataLog("Too many cases.\n");
+ return MacroAssemblerCodePtr();
+ }
+
+ MacroAssemblerCodePtr result = regenerate(vm, codeBlock, stubInfo, ident, newCases);
+ if (!result)
+ return MacroAssemblerCodePtr();
+
+ m_list = WTFMove(newCases);
+ return result;
+}
+
+MacroAssemblerCodePtr PolymorphicAccess::regenerateWithCase(
+ VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
+ std::unique_ptr<AccessCase> newAccess)
+{
+ Vector<std::unique_ptr<AccessCase>> newAccesses;
+ newAccesses.append(WTFMove(newAccess));
+ return regenerateWithCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
+}
+
+bool PolymorphicAccess::visitWeak(VM& vm) const
+{
+ for (unsigned i = 0; i < size(); ++i) {
+ if (!at(i).visitWeak(vm))
+ return false;
+ }
+ if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
+ for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
+ if (!Heap::isMarked(weakReference.get()))
+ return false;
+ }
+ }
+ return true;
+}
+
+void PolymorphicAccess::dump(PrintStream& out) const
+{
+ out.print(RawPointer(this), ":[");
+ CommaPrinter comma;
+ for (auto& entry : m_list)
+ out.print(comma, *entry);
+ out.print("]");
+}
+
+MacroAssemblerCodePtr PolymorphicAccess::regenerate(
+ VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
+ PolymorphicAccess::ListType& cases)
+{
+ if (verbose)
+ dataLog("Generating code for cases: ", listDump(cases), "\n");
+
+ AccessGenerationState state;
+
+ state.access = this;
+ state.stubInfo = &stubInfo;
+ state.ident = &ident;
+
+ state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+ state.valueRegs = JSValueRegs(
+#if USE(JSVALUE32_64)
+ static_cast<GPRReg>(stubInfo.patch.valueTagGPR),
+#endif
+ static_cast<GPRReg>(stubInfo.patch.valueGPR));
+
+ ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+ state.allocator = &allocator;
+ allocator.lock(state.baseGPR);
+ allocator.lock(state.valueRegs);
+#if USE(JSVALUE32_64)
+ allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+#endif
+
+ state.scratchGPR = allocator.allocateScratchGPR();
+
+ CCallHelpers jit(&vm, codeBlock);
+ state.jit = &jit;
+
+ state.preservedReusedRegisterState =
+ allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
+
+ bool allGuardedByStructureCheck = true;
+ bool hasJSGetterSetterCall = false;
+ for (auto& entry : cases) {
+ allGuardedByStructureCheck &= entry->guardedByStructureCheck();
+ if (entry->type() == AccessCase::Getter || entry->type() == AccessCase::Setter)
+ hasJSGetterSetterCall = true;
+ }
+
+ if (cases.isEmpty()) {
+ // This is super unlikely, but we make it legal anyway.
+ state.failAndRepatch.append(jit.jump());
+ } else if (!allGuardedByStructureCheck || cases.size() == 1) {
+ // If there are any proxies in the list, we cannot just use a binary switch over the structure.
+ // We need to resort to a cascade. A cascade also happens to be optimal if we only have just
+ // one case.
+ CCallHelpers::JumpList fallThrough;
+
+ // Cascade through the list, preferring newer entries.
+ for (unsigned i = cases.size(); i--;) {
+ fallThrough.link(&jit);
+ cases[i]->generateWithGuard(state, fallThrough);
+ }
+ state.failAndRepatch.append(fallThrough);
+ } else {
+ jit.load32(
+ CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
+ state.scratchGPR);
+
+ Vector<int64_t> caseValues(cases.size());
+ for (unsigned i = 0; i < cases.size(); ++i)
+ caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
+
+ BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
+ while (binarySwitch.advance(jit))
+ cases[binarySwitch.caseIndex()]->generate(state);
+ state.failAndRepatch.append(binarySwitch.fallThrough());
+ }
+
+ if (!state.failAndIgnore.empty()) {
+ state.failAndIgnore.link(&jit);
+
+ // Make sure that the inline cache optimization code knows that we are taking slow path because
+ // of something that isn't patchable. The slow path will decrement "countdown" and will only
+ // patch things if the countdown reaches zero. We increment the slow path count here to ensure
+ // that the slow path does not try to patch.
+ jit.load8(&stubInfo.countdown, state.scratchGPR);
+ jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
+ jit.store8(state.scratchGPR, &stubInfo.countdown);
+ }
+
+ CCallHelpers::JumpList failure;
+ if (allocator.didReuseRegisters()) {
+ state.failAndRepatch.link(&jit);
+ state.restoreScratch();
+ } else
+ failure = state.failAndRepatch;
+ failure.append(jit.jump());
+
+ CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
+ CallSiteIndex callSiteIndexForExceptionHandling;
+ if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
+ // Emit the exception handler.
+        // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
+ // Note also that this is not reachable from custom getter/setter. Custom getter/setters will have
+ // their own exception handling logic that doesn't go through genericUnwind.
+ MacroAssembler::Label makeshiftCatchHandler = jit.label();
+
+ int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
+ stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
+ stackPointerOffset -= state.numberOfStackBytesUsedForRegisterPreservation();
+
+ jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
+ jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+
+ state.restoreLiveRegistersFromStackForCallWithThrownException();
+ state.restoreScratch();
+ CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
+
+ HandlerInfo oldHandler = state.originalExceptionHandler();
+ CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
+ state.callbacks.append(
+ [=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
+
+ HandlerInfo handlerToRegister = oldHandler;
+ handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
+ handlerToRegister.start = newExceptionHandlingCallSite.bits();
+ handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
+ codeBlock->appendExceptionHandler(handlerToRegister);
+ });
+
+ // We set these to indicate to the stub to remove itself from the CodeBlock's
+ // exception handler table when it is deallocated.
+ codeBlockThatOwnsExceptionHandlers = codeBlock;
+ ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
+ callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
+ }
+
+ LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
+ if (linkBuffer.didFailToAllocate()) {
+ if (verbose)
+ dataLog("Did fail to allocate.\n");
+ return MacroAssemblerCodePtr();
+ }
+
+ CodeLocationLabel successLabel =
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
+
+ linkBuffer.link(state.success, successLabel);
+
+ linkBuffer.link(
+ failure,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+
+ for (auto callback : state.callbacks)
+ callback(linkBuffer);
+
+ if (verbose)
+ dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
+
+ MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
+ codeBlock, linkBuffer,
+ ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
+
+ bool doesCalls = false;
+ for (auto& entry : cases)
+ doesCalls |= entry->doesCalls();
+
+ m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, nullptr, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
+ m_watchpoints = WTFMove(state.watchpoints);
+ if (!state.weakReferences.isEmpty())
+ m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
+ if (verbose)
+ dataLog("Returning: ", code.code(), "\n");
+ return code.code();
+}
+
+void PolymorphicAccess::aboutToDie()
+{
+ m_stubRoutine->aboutToDie();
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, AccessCase::AccessType type)
+{
+ switch (type) {
+ case AccessCase::Load:
+ out.print("Load");
+ return;
+ case AccessCase::Transition:
+ out.print("Transition");
+ return;
+ case AccessCase::Replace:
+ out.print("Replace");
+ return;
+ case AccessCase::Miss:
+ out.print("Miss");
+ return;
+ case AccessCase::Getter:
+ out.print("Getter");
+ return;
+ case AccessCase::Setter:
+ out.print("Setter");
+ return;
+ case AccessCase::CustomValueGetter:
+ out.print("CustomValueGetter");
+ return;
+ case AccessCase::CustomAccessorGetter:
+ out.print("CustomAccessorGetter");
+ return;
+ case AccessCase::CustomValueSetter:
+ out.print("CustomValueSetter");
+ return;
+ case AccessCase::CustomAccessorSetter:
+ out.print("CustomAccessorSetter");
+ return;
+ case AccessCase::IntrinsicGetter:
+ out.print("IntrinsicGetter");
+ return;
+ case AccessCase::InHit:
+ out.print("InHit");
+ return;
+ case AccessCase::InMiss:
+ out.print("InMiss");
+ return;
+ case AccessCase::ArrayLength:
+ out.print("ArrayLength");
+ return;
+ case AccessCase::StringLength:
+ out.print("StringLength");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(JIT)
+
+
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccess.h b/Source/JavaScriptCore/bytecode/PolymorphicAccess.h
new file mode 100644
index 000000000..bb1ea0a4e
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PolymorphicAccess.h
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PolymorphicAccess_h
+#define PolymorphicAccess_h
+
+#if ENABLE(JIT)
+
+#include "CodeOrigin.h"
+#include "JSFunctionInlines.h"
+#include "MacroAssembler.h"
+#include "ObjectPropertyConditionSet.h"
+#include "Opcode.h"
+#include "ScratchRegisterAllocator.h"
+#include "Structure.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class CodeBlock;
+class PolymorphicAccess;
+class StructureStubInfo;
+class WatchpointsOnStructureStubInfo;
+class ScratchRegisterAllocator;
+
+struct AccessGenerationState;
+
+class AccessCase {
+ WTF_MAKE_NONCOPYABLE(AccessCase);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ enum AccessType {
+ Load,
+ Transition,
+ Replace,
+ Miss,
+ Getter,
+ Setter,
+ CustomValueGetter,
+ CustomAccessorGetter,
+ CustomValueSetter,
+ CustomAccessorSetter,
+ IntrinsicGetter,
+ InHit,
+ InMiss,
+ ArrayLength,
+ StringLength
+ };
+
+ static bool isGet(AccessType type)
+ {
+ switch (type) {
+ case Transition:
+ case Replace:
+ case Setter:
+ case CustomValueSetter:
+ case CustomAccessorSetter:
+ case InHit:
+ case InMiss:
+ return false;
+ case Load:
+ case Miss:
+ case Getter:
+ case CustomValueGetter:
+ case CustomAccessorGetter:
+ case IntrinsicGetter:
+ case ArrayLength:
+ case StringLength:
+ return true;
+ }
+ }
+
+ static bool isPut(AccessType type)
+ {
+ switch (type) {
+ case Load:
+ case Miss:
+ case Getter:
+ case CustomValueGetter:
+ case CustomAccessorGetter:
+ case IntrinsicGetter:
+ case InHit:
+ case InMiss:
+ case ArrayLength:
+ case StringLength:
+ return false;
+ case Transition:
+ case Replace:
+ case Setter:
+ case CustomValueSetter:
+ case CustomAccessorSetter:
+ return true;
+ }
+ }
+
+ static bool isIn(AccessType type)
+ {
+ switch (type) {
+ case Load:
+ case Miss:
+ case Getter:
+ case CustomValueGetter:
+ case CustomAccessorGetter:
+ case IntrinsicGetter:
+ case Transition:
+ case Replace:
+ case Setter:
+ case CustomValueSetter:
+ case CustomAccessorSetter:
+ case ArrayLength:
+ case StringLength:
+ return false;
+ case InHit:
+ case InMiss:
+ return true;
+ }
+ }
+
+ static std::unique_ptr<AccessCase> get(
+ VM&, JSCell* owner, AccessType, PropertyOffset, Structure*,
+ const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(),
+ bool viaProxy = false,
+ WatchpointSet* additionalSet = nullptr,
+ PropertySlot::GetValueFunc = nullptr,
+ JSObject* customSlotBase = nullptr);
+
+ static std::unique_ptr<AccessCase> replace(VM&, JSCell* owner, Structure*, PropertyOffset);
+
+ static std::unique_ptr<AccessCase> transition(
+ VM&, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset,
+ const ObjectPropertyConditionSet& = ObjectPropertyConditionSet());
+
+ static std::unique_ptr<AccessCase> setter(
+ VM&, JSCell* owner, AccessType, Structure*, PropertyOffset,
+ const ObjectPropertyConditionSet&, PutPropertySlot::PutValueFunc = nullptr,
+ JSObject* customSlotBase = nullptr);
+
+ static std::unique_ptr<AccessCase> in(
+ VM&, JSCell* owner, AccessType, Structure*,
+ const ObjectPropertyConditionSet& = ObjectPropertyConditionSet());
+
+ static std::unique_ptr<AccessCase> getLength(VM&, JSCell* owner, AccessType);
+ static std::unique_ptr<AccessCase> getIntrinsic(VM&, JSCell* owner, JSFunction* intrinsic, PropertyOffset, Structure*, const ObjectPropertyConditionSet&);
+
+ static std::unique_ptr<AccessCase> fromStructureStubInfo(VM&, JSCell* owner, StructureStubInfo&);
+
+ ~AccessCase();
+
+ std::unique_ptr<AccessCase> clone() const;
+
+ AccessType type() const { return m_type; }
+ PropertyOffset offset() const { return m_offset; }
+ bool viaProxy() const { return m_rareData ? m_rareData->viaProxy : false; }
+
+ Structure* structure() const
+ {
+ if (m_type == Transition)
+ return m_structure->previousID();
+ return m_structure.get();
+ }
+ bool guardedByStructureCheck() const;
+
+ Structure* newStructure() const
+ {
+ ASSERT(m_type == Transition);
+ return m_structure.get();
+ }
+
+ ObjectPropertyConditionSet conditionSet() const { return m_conditionSet; }
+ JSFunction* intrinsicFunction() const
+ {
+ ASSERT(type() == IntrinsicGetter && m_rareData);
+ return m_rareData->intrinsicFunction.get();
+ }
+ Intrinsic intrinsic() const
+ {
+ return intrinsicFunction()->intrinsic();
+ }
+
+ WatchpointSet* additionalSet() const
+ {
+ return m_rareData ? m_rareData->additionalSet.get() : nullptr;
+ }
+
+ JSObject* customSlotBase() const
+ {
+ return m_rareData ? m_rareData->customSlotBase.get() : nullptr;
+ }
+
+ JSObject* alternateBase() const;
+
+ bool doesCalls() const
+ {
+ switch (type()) {
+ case Getter:
+ case Setter:
+ case CustomValueGetter:
+ case CustomAccessorGetter:
+ case CustomValueSetter:
+ case CustomAccessorSetter:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isGetter() const
+ {
+ switch (type()) {
+ case Getter:
+ case CustomValueGetter:
+ case CustomAccessorGetter:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ CallLinkInfo* callLinkInfo() const
+ {
+ if (!m_rareData)
+ return nullptr;
+ return m_rareData->callLinkInfo.get();
+ }
+
+ // Is it still possible for this case to ever be taken?
+ bool couldStillSucceed() const;
+
+ static bool canEmitIntrinsicGetter(JSFunction*, Structure*);
+
+ // If this method returns true, then it's a good idea to remove 'other' from the access once 'this'
+ // is added. This method assumes that in case of contradictions, 'this' represents a newer, and so
+    // more useful, truth. This method can be conservative; it will return false when in doubt.
+ bool canReplace(const AccessCase& other);
+
+ void dump(PrintStream& out) const;
+
+private:
+ friend class CodeBlock;
+ friend class PolymorphicAccess;
+
+ AccessCase();
+
+ bool visitWeak(VM&) const;
+
+ // Fall through on success. Two kinds of failures are supported: fall-through, which means that we
+ // should try a different case; and failure, which means that this was the right case but it needs
+ // help from the slow path.
+ void generateWithGuard(AccessGenerationState&, MacroAssembler::JumpList& fallThrough);
+
+ // Fall through on success, add a jump to the failure list on failure.
+ void generate(AccessGenerationState&);
+ void emitIntrinsicGetter(AccessGenerationState&);
+
+ AccessType m_type { Load };
+ PropertyOffset m_offset { invalidOffset };
+
+ // Usually this is the structure that we expect the base object to have. But, this is the *new*
+ // structure for a transition and we rely on the fact that it has a strong reference to the old
+ // structure. For proxies, this is the structure of the object behind the proxy.
+ WriteBarrier<Structure> m_structure;
+
+ ObjectPropertyConditionSet m_conditionSet;
+
+ class RareData {
+ WTF_MAKE_FAST_ALLOCATED;
+ public:
+ RareData()
+ : viaProxy(false)
+ {
+ customAccessor.opaque = nullptr;
+ }
+
+ bool viaProxy;
+ RefPtr<WatchpointSet> additionalSet;
+ std::unique_ptr<CallLinkInfo> callLinkInfo;
+ union {
+ PropertySlot::GetValueFunc getter;
+ PutPropertySlot::PutValueFunc setter;
+ void* opaque;
+ } customAccessor;
+ WriteBarrier<JSObject> customSlotBase;
+ WriteBarrier<JSFunction> intrinsicFunction;
+ };
+
+ std::unique_ptr<RareData> m_rareData;
+};
+
+class PolymorphicAccess {
+ WTF_MAKE_NONCOPYABLE(PolymorphicAccess);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ PolymorphicAccess();
+ ~PolymorphicAccess();
+
+ // This may return null, in which case the old stub routine is left intact. You are required to
+ // pass a vector of non-null access cases. This will prune the access cases by rejecting any case
+ // in the list that is subsumed by a later case in the list.
+ MacroAssemblerCodePtr regenerateWithCases(
+ VM&, CodeBlock*, StructureStubInfo&, const Identifier&, Vector<std::unique_ptr<AccessCase>>);
+
+ MacroAssemblerCodePtr regenerateWithCase(
+ VM&, CodeBlock*, StructureStubInfo&, const Identifier&, std::unique_ptr<AccessCase>);
+
+ bool isEmpty() const { return m_list.isEmpty(); }
+ unsigned size() const { return m_list.size(); }
+ const AccessCase& at(unsigned i) const { return *m_list[i]; }
+ const AccessCase& operator[](unsigned i) const { return *m_list[i]; }
+
+ // If this returns false then we are requesting a reset of the owning StructureStubInfo.
+ bool visitWeak(VM&) const;
+
+ void aboutToDie();
+
+ void dump(PrintStream& out) const;
+ bool containsPC(void* pc) const
+ {
+ if (!m_stubRoutine)
+ return false;
+
+ uintptr_t pcAsInt = bitwise_cast<uintptr_t>(pc);
+ return m_stubRoutine->startAddress() <= pcAsInt && pcAsInt <= m_stubRoutine->endAddress();
+ }
+
+private:
+ friend class AccessCase;
+ friend class CodeBlock;
+ friend struct AccessGenerationState;
+
+ typedef Vector<std::unique_ptr<AccessCase>, 2> ListType;
+
+ MacroAssemblerCodePtr regenerate(
+ VM&, CodeBlock*, StructureStubInfo&, const Identifier&, ListType& cases);
+
+ ListType m_list;
+ RefPtr<JITStubRoutine> m_stubRoutine;
+ std::unique_ptr<WatchpointsOnStructureStubInfo> m_watchpoints;
+ std::unique_ptr<Vector<WriteBarrier<JSCell>>> m_weakReferences;
+};
+
+struct AccessGenerationState {
+ AccessGenerationState()
+ : m_calculatedRegistersForCallAndExceptionHandling(false)
+ , m_needsToRestoreRegistersIfException(false)
+ , m_calculatedCallSiteIndex(false)
+ {
+ }
+ CCallHelpers* jit { nullptr };
+ ScratchRegisterAllocator* allocator;
+ ScratchRegisterAllocator::PreservedState preservedReusedRegisterState;
+ PolymorphicAccess* access { nullptr };
+ StructureStubInfo* stubInfo { nullptr };
+ MacroAssembler::JumpList success;
+ MacroAssembler::JumpList failAndRepatch;
+ MacroAssembler::JumpList failAndIgnore;
+ GPRReg baseGPR { InvalidGPRReg };
+ JSValueRegs valueRegs;
+ GPRReg scratchGPR { InvalidGPRReg };
+ Vector<std::function<void(LinkBuffer&)>> callbacks;
+ const Identifier* ident;
+ std::unique_ptr<WatchpointsOnStructureStubInfo> watchpoints;
+ Vector<WriteBarrier<JSCell>> weakReferences;
+
+ Watchpoint* addWatchpoint(const ObjectPropertyCondition& = ObjectPropertyCondition());
+
+ void restoreScratch();
+ void succeed();
+
+ void calculateLiveRegistersForCallAndExceptionHandling();
+
+ void preserveLiveRegistersToStackForCall();
+
+ void restoreLiveRegistersFromStackForCall(bool isGetter);
+ void restoreLiveRegistersFromStackForCallWithThrownException();
+ void restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore);
+
+ const RegisterSet& liveRegistersForCall()
+ {
+ RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
+ return m_liveRegistersForCall;
+ }
+
+ CallSiteIndex callSiteIndexForExceptionHandlingOrOriginal();
+ CallSiteIndex callSiteIndexForExceptionHandling()
+ {
+ RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
+ RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
+ RELEASE_ASSERT(m_calculatedCallSiteIndex);
+ return m_callSiteIndex;
+ }
+
+ const HandlerInfo& originalExceptionHandler() const;
+ unsigned numberOfStackBytesUsedForRegisterPreservation() const
+ {
+ RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
+ return m_numberOfStackBytesUsedForRegisterPreservation;
+ }
+
+ bool needsToRestoreRegistersIfException() const { return m_needsToRestoreRegistersIfException; }
+ CallSiteIndex originalCallSiteIndex() const;
+
+private:
+ const RegisterSet& liveRegistersToPreserveAtExceptionHandlingCallSite()
+ {
+ RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
+ return m_liveRegistersToPreserveAtExceptionHandlingCallSite;
+ }
+
+ RegisterSet m_liveRegistersToPreserveAtExceptionHandlingCallSite;
+ RegisterSet m_liveRegistersForCall;
+ CallSiteIndex m_callSiteIndex { CallSiteIndex(std::numeric_limits<unsigned>::max()) };
+ unsigned m_numberOfStackBytesUsedForRegisterPreservation { std::numeric_limits<unsigned>::max() };
+ bool m_calculatedRegistersForCallAndExceptionHandling : 1;
+ bool m_needsToRestoreRegistersIfException : 1;
+ bool m_calculatedCallSiteIndex : 1;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::AccessCase::AccessType);
+
+} // namespace WTF
+
+#endif // ENABLE(JIT)
+
+#endif // PolymorphicAccess_h
+
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h b/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h
deleted file mode 100644
index 61d97354f..000000000
--- a/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PolymorphicAccessStructureList_h
-#define PolymorphicAccessStructureList_h
-
-#include "JITStubRoutine.h"
-#include "Structure.h"
-#include "StructureChain.h"
-#include <wtf/Platform.h>
-
-#define POLYMORPHIC_LIST_CACHE_SIZE 8
-
-namespace JSC {
-
-// *Sigh*, If the JIT is enabled we need to track the stubRountine (of type CodeLocationLabel),
-// If the JIT is not in use we don't actually need the variable (that said, if the JIT is not in use we don't
-// curently actually use PolymorphicAccessStructureLists, which we should). Anyway, this seems like the best
-// solution for now - will need to something smarter if/when we actually want mixed-mode operation.
-
-#if ENABLE(JIT)
-// Structure used by op_get_by_id_self_list and op_get_by_id_proto_list instruction to hold data off the main opcode stream.
-struct PolymorphicAccessStructureList {
- WTF_MAKE_FAST_ALLOCATED;
-public:
- struct PolymorphicStubInfo {
- bool isChain;
- bool isDirect;
- RefPtr<JITStubRoutine> stubRoutine;
- WriteBarrier<Structure> base;
- union {
- WriteBarrierBase<Structure> proto;
- WriteBarrierBase<StructureChain> chain;
- } u;
-
- PolymorphicStubInfo()
- {
- u.proto.clear();
- }
-
- void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, bool _isDirect)
- {
- stubRoutine = _stubRoutine;
- base.set(vm, owner, _base);
- u.proto.clear();
- isChain = false;
- isDirect = _isDirect;
- }
-
- void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, Structure* _proto, bool _isDirect)
- {
- stubRoutine = _stubRoutine;
- base.set(vm, owner, _base);
- u.proto.set(vm, owner, _proto);
- isChain = false;
- isDirect = _isDirect;
- }
-
- void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, StructureChain* _chain, bool _isDirect)
- {
- stubRoutine = _stubRoutine;
- base.set(vm, owner, _base);
- u.chain.set(vm, owner, _chain);
- isChain = true;
- isDirect = _isDirect;
- }
- } list[POLYMORPHIC_LIST_CACHE_SIZE];
-
- PolymorphicAccessStructureList()
- {
- }
-
- PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, bool isDirect)
- {
- list[0].set(vm, owner, stubRoutine, firstBase, isDirect);
- }
-
- PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, Structure* firstProto, bool isDirect)
- {
- list[0].set(vm, owner, stubRoutine, firstBase, firstProto, isDirect);
- }
-
- PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, StructureChain* firstChain, bool isDirect)
- {
- list[0].set(vm, owner, stubRoutine, firstBase, firstChain, isDirect);
- }
-
- bool visitWeak(int count)
- {
- for (int i = 0; i < count; ++i) {
- PolymorphicStubInfo& info = list[i];
- if (!info.base) {
- // We're being marked during initialisation of an entry
- ASSERT(!info.u.proto);
- continue;
- }
-
- if (!Heap::isMarked(info.base.get()))
- return false;
- if (info.u.proto && !info.isChain
- && !Heap::isMarked(info.u.proto.get()))
- return false;
- if (info.u.chain && info.isChain
- && !Heap::isMarked(info.u.chain.get()))
- return false;
- }
-
- return true;
- }
-};
-
-#endif // ENABLE(JIT)
-
-} // namespace JSC
-
-#endif // PolymorphicAccessStructureList_h
-
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp
deleted file mode 100644
index 6a6ec8141..000000000
--- a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "PolymorphicPutByIdList.h"
-
-#if ENABLE(JIT)
-
-#include "StructureStubInfo.h"
-
-namespace JSC {
-
-PutByIdAccess PutByIdAccess::fromStructureStubInfo(
- StructureStubInfo& stubInfo,
- MacroAssemblerCodePtr initialSlowPath)
-{
- PutByIdAccess result;
-
- switch (stubInfo.accessType) {
- case access_put_by_id_replace:
- result.m_type = Replace;
- result.m_oldStructure.copyFrom(stubInfo.u.putByIdReplace.baseObjectStructure);
- result.m_stubRoutine = JITStubRoutine::createSelfManagedRoutine(initialSlowPath);
- break;
-
- case access_put_by_id_transition_direct:
- case access_put_by_id_transition_normal:
- result.m_type = Transition;
- result.m_oldStructure.copyFrom(stubInfo.u.putByIdTransition.previousStructure);
- result.m_newStructure.copyFrom(stubInfo.u.putByIdTransition.structure);
- result.m_chain.copyFrom(stubInfo.u.putByIdTransition.chain);
- result.m_stubRoutine = stubInfo.stubRoutine;
- break;
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- return result;
-}
-
-bool PutByIdAccess::visitWeak() const
-{
- switch (m_type) {
- case Replace:
- if (!Heap::isMarked(m_oldStructure.get()))
- return false;
- break;
- case Transition:
- if (!Heap::isMarked(m_oldStructure.get()))
- return false;
- if (!Heap::isMarked(m_newStructure.get()))
- return false;
- if (!Heap::isMarked(m_chain.get()))
- return false;
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return false;
- }
- return true;
-}
-
-PolymorphicPutByIdList::PolymorphicPutByIdList(
- PutKind putKind,
- StructureStubInfo& stubInfo,
- MacroAssemblerCodePtr initialSlowPath)
- : m_kind(putKind)
-{
- m_list.append(PutByIdAccess::fromStructureStubInfo(stubInfo, initialSlowPath));
-}
-
-PolymorphicPutByIdList* PolymorphicPutByIdList::from(
- PutKind putKind,
- StructureStubInfo& stubInfo,
- MacroAssemblerCodePtr initialSlowPath)
-{
- if (stubInfo.accessType == access_put_by_id_list)
- return stubInfo.u.putByIdList.list;
-
- ASSERT(stubInfo.accessType == access_put_by_id_replace
- || stubInfo.accessType == access_put_by_id_transition_normal
- || stubInfo.accessType == access_put_by_id_transition_direct);
-
- PolymorphicPutByIdList* result =
- new PolymorphicPutByIdList(putKind, stubInfo, initialSlowPath);
-
- stubInfo.initPutByIdList(result);
-
- return result;
-}
-
-PolymorphicPutByIdList::~PolymorphicPutByIdList() { }
-
-bool PolymorphicPutByIdList::isFull() const
-{
- ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE);
- return size() == POLYMORPHIC_LIST_CACHE_SIZE;
-}
-
-bool PolymorphicPutByIdList::isAlmostFull() const
-{
- ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE);
- return size() >= POLYMORPHIC_LIST_CACHE_SIZE - 1;
-}
-
-void PolymorphicPutByIdList::addAccess(const PutByIdAccess& putByIdAccess)
-{
- ASSERT(!isFull());
- // Make sure that the resizing optimizes for space, not time.
- m_list.resize(m_list.size() + 1);
- m_list.last() = putByIdAccess;
-}
-
-bool PolymorphicPutByIdList::visitWeak() const
-{
- for (unsigned i = 0; i < size(); ++i) {
- if (!at(i).visitWeak())
- return false;
- }
- return true;
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h
deleted file mode 100644
index d9fe2e7cf..000000000
--- a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PolymorphicPutByIdList_h
-#define PolymorphicPutByIdList_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(JIT)
-
-#include "CodeOrigin.h"
-#include "MacroAssembler.h"
-#include "Opcode.h"
-#include "PutKind.h"
-#include "Structure.h"
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-class CodeBlock;
-struct StructureStubInfo;
-
-class PutByIdAccess {
-public:
- enum AccessType {
- Invalid,
- Transition,
- Replace
- };
-
- PutByIdAccess()
- : m_type(Invalid)
- {
- }
-
- static PutByIdAccess transition(
- VM& vm,
- JSCell* owner,
- Structure* oldStructure,
- Structure* newStructure,
- StructureChain* chain,
- PassRefPtr<JITStubRoutine> stubRoutine)
- {
- PutByIdAccess result;
- result.m_type = Transition;
- result.m_oldStructure.set(vm, owner, oldStructure);
- result.m_newStructure.set(vm, owner, newStructure);
- result.m_chain.set(vm, owner, chain);
- result.m_stubRoutine = stubRoutine;
- return result;
- }
-
- static PutByIdAccess replace(
- VM& vm,
- JSCell* owner,
- Structure* structure,
- PassRefPtr<JITStubRoutine> stubRoutine)
- {
- PutByIdAccess result;
- result.m_type = Replace;
- result.m_oldStructure.set(vm, owner, structure);
- result.m_stubRoutine = stubRoutine;
- return result;
- }
-
- static PutByIdAccess fromStructureStubInfo(
- StructureStubInfo&,
- MacroAssemblerCodePtr initialSlowPath);
-
- bool isSet() const { return m_type != Invalid; }
- bool operator!() const { return !isSet(); }
-
- AccessType type() const { return m_type; }
-
- bool isTransition() const { return m_type == Transition; }
- bool isReplace() const { return m_type == Replace; }
-
- Structure* oldStructure() const
- {
- // Using this instead of isSet() to make this assertion robust against the possibility
- // of additional access types being added.
- ASSERT(isTransition() || isReplace());
-
- return m_oldStructure.get();
- }
-
- Structure* structure() const
- {
- ASSERT(isReplace());
- return m_oldStructure.get();
- }
-
- Structure* newStructure() const
- {
- ASSERT(isTransition());
- return m_newStructure.get();
- }
-
- StructureChain* chain() const
- {
- ASSERT(isTransition());
- return m_chain.get();
- }
-
- PassRefPtr<JITStubRoutine> stubRoutine() const
- {
- ASSERT(isTransition() || isReplace());
- return m_stubRoutine;
- }
-
- bool visitWeak() const;
-
-private:
- friend class CodeBlock;
-
- AccessType m_type;
- WriteBarrier<Structure> m_oldStructure;
- WriteBarrier<Structure> m_newStructure;
- WriteBarrier<StructureChain> m_chain;
- RefPtr<JITStubRoutine> m_stubRoutine;
-};
-
-class PolymorphicPutByIdList {
- WTF_MAKE_FAST_ALLOCATED;
-public:
- // Initialize from a stub info; this will place one element in the list and it will
- // be created by converting the stub info's put by id access information into our
- // PutByIdAccess.
- PolymorphicPutByIdList(
- PutKind,
- StructureStubInfo&,
- MacroAssemblerCodePtr initialSlowPath);
-
- // Either creates a new polymorphic put list, or returns the one that is already
- // in place.
- static PolymorphicPutByIdList* from(
- PutKind,
- StructureStubInfo&,
- MacroAssemblerCodePtr initialSlowPath);
-
- ~PolymorphicPutByIdList();
-
- MacroAssemblerCodePtr currentSlowPathTarget() const
- {
- return m_list.last().stubRoutine()->code().code();
- }
-
- void addAccess(const PutByIdAccess&);
-
- bool isEmpty() const { return m_list.isEmpty(); }
- unsigned size() const { return m_list.size(); }
- bool isFull() const;
- bool isAlmostFull() const; // True if adding an element would make isFull() true.
- const PutByIdAccess& at(unsigned i) const { return m_list[i]; }
- const PutByIdAccess& operator[](unsigned i) const { return m_list[i]; }
-
- PutKind kind() const { return m_kind; }
-
- bool visitWeak() const;
-
-private:
- friend class CodeBlock;
-
- Vector<PutByIdAccess, 2> m_list;
- PutKind m_kind;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // PolymorphicPutByIdList_h
-
diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp
index ede8a3643..e2cb00048 100644
--- a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp
+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp
@@ -26,6 +26,8 @@
#include "config.h"
#include "PreciseJumpTargets.h"
+#include "JSCInlines.h"
+
namespace JSC {
template <size_t vectorSize>
@@ -52,6 +54,7 @@ static void getJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, Interpreter* i
case op_jnlesseq:
case op_jngreater:
case op_jngreatereq:
+ case op_save: // The jump of op_save is purely for calculating liveness.
out.append(bytecodeOffset + current[3].u.operand);
break;
case op_switch_imm:
@@ -71,15 +74,6 @@ static void getJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, Interpreter* i
out.append(bytecodeOffset + current[2].u.operand);
break;
}
- case op_get_pnames:
- out.append(bytecodeOffset + current[5].u.operand);
- break;
- case op_next_pname:
- out.append(bytecodeOffset + current[6].u.operand);
- break;
- case op_check_has_instance:
- out.append(bytecodeOffset + current[4].u.operand);
- break;
case op_loop_hint:
out.append(bytecodeOffset);
break;
@@ -97,9 +91,12 @@ void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out)
if (!codeBlock->numberOfJumpTargets())
return;
- for (unsigned i = codeBlock->numberOfExceptionHandlers(); i--;)
+ for (unsigned i = codeBlock->numberOfExceptionHandlers(); i--;) {
out.append(codeBlock->exceptionHandler(i).target);
-
+ out.append(codeBlock->exceptionHandler(i).start);
+ out.append(codeBlock->exceptionHandler(i).end);
+ }
+
Interpreter* interpreter = codeBlock->vm()->interpreter;
Instruction* instructionsBegin = codeBlock->instructions().begin();
unsigned instructionCount = codeBlock->instructions().size();
@@ -123,6 +120,7 @@ void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out)
lastValue = value;
}
out.resize(toIndex);
+ out.shrinkToFit();
}
void findJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset, Vector<unsigned, 1>& out)
diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
index fb60f9b9b..852413d77 100644
--- a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
@@ -30,7 +30,9 @@
namespace JSC {
+// Return a sorted list of bytecode index that are the destination of a jump.
void computePreciseJumpTargets(CodeBlock*, Vector<unsigned, 32>& out);
+
void findJumpTargetsForBytecodeOffset(CodeBlock*, unsigned bytecodeOffset, Vector<unsigned, 1>& out);
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/PropertyCondition.cpp b/Source/JavaScriptCore/bytecode/PropertyCondition.cpp
new file mode 100644
index 000000000..347b86f8b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PropertyCondition.cpp
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PropertyCondition.h"
+
+#include "GetterSetter.h"
+#include "JSCInlines.h"
+#include "TrackedReferences.h"
+
+namespace JSC {
+
+static bool verbose = false;
+
+void PropertyCondition::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ if (!*this) {
+ out.print("<invalid>");
+ return;
+ }
+
+ out.print(m_kind, " of ", m_uid);
+ switch (m_kind) {
+ case Presence:
+ out.print(" at ", offset(), " with attributes ", attributes());
+ return;
+ case Absence:
+ case AbsenceOfSetter:
+ out.print(" with prototype ", inContext(JSValue(prototype()), context));
+ return;
+ case Equivalence:
+ out.print(" with ", inContext(requiredValue(), context));
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void PropertyCondition::dump(PrintStream& out) const
+{
+ dumpInContext(out, nullptr);
+}
+
+bool PropertyCondition::isStillValidAssumingImpurePropertyWatchpoint(
+ Structure* structure, JSObject* base) const
+{
+ if (verbose) {
+ dataLog(
+ "Determining validity of ", *this, " with structure ", pointerDump(structure), " and base ",
+ JSValue(base), " assuming impure property watchpoints are set.\n");
+ }
+
+ if (!*this) {
+ if (verbose)
+ dataLog("Invalid because unset.\n");
+ return false;
+ }
+
+ if (!structure->propertyAccessesAreCacheable()) {
+ if (verbose)
+ dataLog("Invalid because accesses are not cacheable.\n");
+ return false;
+ }
+
+ switch (m_kind) {
+ case Presence: {
+ unsigned currentAttributes;
+ PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes);
+ if (currentOffset != offset() || currentAttributes != attributes()) {
+ if (verbose) {
+ dataLog(
+ "Invalid because we need offset, attributes to be ", offset(), ", ", attributes(),
+ " but they are ", currentOffset, ", ", currentAttributes, "\n");
+ }
+ return false;
+ }
+ return true;
+ }
+
+ case Absence: {
+ if (structure->isDictionary()) {
+ if (verbose)
+ dataLog("Invalid because it's a dictionary.\n");
+ return false;
+ }
+
+ PropertyOffset currentOffset = structure->getConcurrently(uid());
+ if (currentOffset != invalidOffset) {
+ if (verbose)
+ dataLog("Invalid because the property exists at offset: ", currentOffset, "\n");
+ return false;
+ }
+
+ if (structure->storedPrototypeObject() != prototype()) {
+ if (verbose) {
+ dataLog(
+ "Invalid because the prototype is ", structure->storedPrototype(), " even though "
+ "it should have been ", JSValue(prototype()), "\n");
+ }
+ return false;
+ }
+
+ return true;
+ }
+
+ case AbsenceOfSetter: {
+ if (structure->isDictionary()) {
+ if (verbose)
+ dataLog("Invalid because it's a dictionary.\n");
+ return false;
+ }
+
+ unsigned currentAttributes;
+ PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes);
+ if (currentOffset != invalidOffset) {
+ if (currentAttributes & (Accessor | CustomAccessor)) {
+ if (verbose) {
+ dataLog(
+ "Invalid because we expected not to have a setter, but we have one at offset ",
+ currentOffset, " with attributes ", currentAttributes, "\n");
+ }
+ return false;
+ }
+ }
+
+ if (structure->storedPrototypeObject() != prototype()) {
+ if (verbose) {
+ dataLog(
+ "Invalid because the prototype is ", structure->storedPrototype(), " even though "
+ "it should have been ", JSValue(prototype()), "\n");
+ }
+ return false;
+ }
+
+ return true;
+ }
+
+ case Equivalence: {
+ if (!base || base->structure() != structure) {
+ // Conservatively return false, since we cannot verify this one without having the
+ // object.
+ if (verbose) {
+ dataLog(
+ "Invalid because we don't have a base or the base has the wrong structure: ",
+ RawPointer(base), "\n");
+ }
+ return false;
+ }
+
+ // FIXME: This is somewhat racy, and maybe more risky than we want.
+ // https://bugs.webkit.org/show_bug.cgi?id=134641
+
+ PropertyOffset currentOffset = structure->getConcurrently(uid());
+ if (currentOffset == invalidOffset) {
+ if (verbose) {
+ dataLog(
+                "Invalid because the base no longer appears to have ", uid(), " on its structure: ",
+ RawPointer(base), "\n");
+ }
+ return false;
+ }
+
+ JSValue currentValue = base->getDirect(currentOffset);
+ if (currentValue != requiredValue()) {
+ if (verbose) {
+ dataLog(
+ "Invalid because the value is ", currentValue, " but we require ", requiredValue(),
+ "\n");
+ }
+ return false;
+ }
+
+ return true;
+ } }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+}
+
+bool PropertyCondition::validityRequiresImpurePropertyWatchpoint(Structure* structure) const
+{
+ if (!*this)
+ return false;
+
+ switch (m_kind) {
+ case Presence:
+ case Absence:
+ case Equivalence:
+ return structure->needImpurePropertyWatchpoint();
+ default:
+ return false;
+ }
+}
+
+bool PropertyCondition::isStillValid(Structure* structure, JSObject* base) const
+{
+ if (!isStillValidAssumingImpurePropertyWatchpoint(structure, base))
+ return false;
+
+ // Currently we assume that an impure property can cause a property to appear, and can also
+ // "shadow" an existing JS property on the same object. Hence it affects both presence and
+ // absence. It doesn't affect AbsenceOfSetter because impure properties aren't ever setters.
+ switch (m_kind) {
+ case Absence:
+ if (structure->typeInfo().getOwnPropertySlotIsImpure() || structure->typeInfo().getOwnPropertySlotIsImpureForPropertyAbsence())
+ return false;
+ break;
+ case Presence:
+ case Equivalence:
+ if (structure->typeInfo().getOwnPropertySlotIsImpure())
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+bool PropertyCondition::isWatchableWhenValid(
+ Structure* structure, WatchabilityEffort effort) const
+{
+ if (structure->transitionWatchpointSetHasBeenInvalidated())
+ return false;
+
+ switch (m_kind) {
+ case Equivalence: {
+ PropertyOffset offset = structure->getConcurrently(uid());
+
+ // This method should only be called when some variant of isValid returned true, which
+ // implies that we already confirmed that the structure knows of the property. We should
+ // also have verified that the Structure is a cacheable dictionary, which means we
+ // shouldn't have a TOCTOU race either.
+ RELEASE_ASSERT(offset != invalidOffset);
+
+ WatchpointSet* set;
+ switch (effort) {
+ case MakeNoChanges:
+ set = structure->propertyReplacementWatchpointSet(offset);
+ break;
+ case EnsureWatchability:
+ set = structure->ensurePropertyReplacementWatchpointSet(
+ *Heap::heap(structure)->vm(), offset);
+ break;
+ }
+
+ if (!set || !set->isStillValid())
+ return false;
+
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+bool PropertyCondition::isWatchableAssumingImpurePropertyWatchpoint(
+ Structure* structure, JSObject* base, WatchabilityEffort effort) const
+{
+ return isStillValidAssumingImpurePropertyWatchpoint(structure, base)
+ && isWatchableWhenValid(structure, effort);
+}
+
+bool PropertyCondition::isWatchable(
+ Structure* structure, JSObject* base, WatchabilityEffort effort) const
+{
+ return isStillValid(structure, base)
+ && isWatchableWhenValid(structure, effort);
+}
+
+bool PropertyCondition::isStillLive() const
+{
+ if (hasPrototype() && prototype() && !Heap::isMarked(prototype()))
+ return false;
+
+ if (hasRequiredValue()
+ && requiredValue()
+ && requiredValue().isCell()
+ && !Heap::isMarked(requiredValue().asCell()))
+ return false;
+
+ return true;
+}
+
+void PropertyCondition::validateReferences(const TrackedReferences& tracked) const
+{
+ if (hasPrototype())
+ tracked.check(prototype());
+
+ if (hasRequiredValue())
+ tracked.check(requiredValue());
+}
+
+bool PropertyCondition::isValidValueForAttributes(JSValue value, unsigned attributes)
+{
+ bool attributesClaimAccessor = !!(attributes & Accessor);
+ bool valueClaimsAccessor = !!jsDynamicCast<GetterSetter*>(value);
+ return attributesClaimAccessor == valueClaimsAccessor;
+}
+
+bool PropertyCondition::isValidValueForPresence(JSValue value) const
+{
+ return isValidValueForAttributes(value, attributes());
+}
+
+PropertyCondition PropertyCondition::attemptToMakeEquivalenceWithoutBarrier(JSObject* base) const
+{
+ Structure* structure = base->structure();
+ if (!structure->isValidOffset(offset()))
+ return PropertyCondition();
+ JSValue value = base->getDirect(offset());
+ if (!isValidValueForPresence(value))
+ return PropertyCondition();
+ return equivalenceWithoutBarrier(uid(), value);
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::PropertyCondition::Kind condition)
+{
+ switch (condition) {
+ case JSC::PropertyCondition::Presence:
+ out.print("Presence");
+ return;
+ case JSC::PropertyCondition::Absence:
+ out.print("Absence");
+ return;
+ case JSC::PropertyCondition::AbsenceOfSetter:
+        out.print("AbsenceOfSetter");
+ return;
+ case JSC::PropertyCondition::Equivalence:
+ out.print("Equivalence");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/PropertyCondition.h b/Source/JavaScriptCore/bytecode/PropertyCondition.h
new file mode 100644
index 000000000..1d5568f8d
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PropertyCondition.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PropertyCondition_h
+#define PropertyCondition_h
+
+#include "JSObject.h"
+#include <wtf/HashMap.h>
+
+namespace JSC {
+
+class TrackedReferences;
+
+class PropertyCondition {
+public:
+ enum Kind {
+ Presence,
+ Absence,
+ AbsenceOfSetter,
+ Equivalence // An adaptive watchpoint on this will be a pair of watchpoints, and when the structure transitions, we will set the replacement watchpoint on the new structure.
+ };
+
+ PropertyCondition()
+ : m_uid(nullptr)
+ , m_kind(Presence)
+ {
+ memset(&u, 0, sizeof(u));
+ }
+
+ PropertyCondition(WTF::HashTableDeletedValueType)
+ : m_uid(nullptr)
+ , m_kind(Absence)
+ {
+ memset(&u, 0, sizeof(u));
+ }
+
+ static PropertyCondition presenceWithoutBarrier(UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes)
+ {
+ PropertyCondition result;
+ result.m_uid = uid;
+ result.m_kind = Presence;
+ result.u.presence.offset = offset;
+ result.u.presence.attributes = attributes;
+ return result;
+ }
+
+ static PropertyCondition presence(
+ VM&, JSCell*, UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes)
+ {
+ return presenceWithoutBarrier(uid, offset, attributes);
+ }
+
+ // NOTE: The prototype is the storedPrototype not the prototypeForLookup.
+ static PropertyCondition absenceWithoutBarrier(UniquedStringImpl* uid, JSObject* prototype)
+ {
+ PropertyCondition result;
+ result.m_uid = uid;
+ result.m_kind = Absence;
+ result.u.absence.prototype = prototype;
+ return result;
+ }
+
+ static PropertyCondition absence(
+ VM& vm, JSCell* owner, UniquedStringImpl* uid, JSObject* prototype)
+ {
+ if (owner)
+ vm.heap.writeBarrier(owner);
+ return absenceWithoutBarrier(uid, prototype);
+ }
+
+ static PropertyCondition absenceOfSetterWithoutBarrier(
+ UniquedStringImpl* uid, JSObject* prototype)
+ {
+ PropertyCondition result;
+ result.m_uid = uid;
+ result.m_kind = AbsenceOfSetter;
+ result.u.absence.prototype = prototype;
+ return result;
+ }
+
+ static PropertyCondition absenceOfSetter(
+ VM& vm, JSCell* owner, UniquedStringImpl* uid, JSObject* prototype)
+ {
+ if (owner)
+ vm.heap.writeBarrier(owner);
+ return absenceOfSetterWithoutBarrier(uid, prototype);
+ }
+
+ static PropertyCondition equivalenceWithoutBarrier(
+ UniquedStringImpl* uid, JSValue value)
+ {
+ PropertyCondition result;
+ result.m_uid = uid;
+ result.m_kind = Equivalence;
+ result.u.equivalence.value = JSValue::encode(value);
+ return result;
+ }
+
+ static PropertyCondition equivalence(
+ VM& vm, JSCell* owner, UniquedStringImpl* uid, JSValue value)
+ {
+ if (value.isCell() && owner)
+ vm.heap.writeBarrier(owner);
+ return equivalenceWithoutBarrier(uid, value);
+ }
+
+ explicit operator bool() const { return m_uid || m_kind != Presence; }
+
+ Kind kind() const { return m_kind; }
+ UniquedStringImpl* uid() const { return m_uid; }
+
+ bool hasOffset() const { return !!*this && m_kind == Presence; };
+ PropertyOffset offset() const
+ {
+ ASSERT(hasOffset());
+ return u.presence.offset;
+ }
+ bool hasAttributes() const { return !!*this && m_kind == Presence; };
+ unsigned attributes() const
+ {
+ ASSERT(hasAttributes());
+ return u.presence.attributes;
+ }
+
+ bool hasPrototype() const { return !!*this && (m_kind == Absence || m_kind == AbsenceOfSetter); }
+ JSObject* prototype() const
+ {
+ ASSERT(hasPrototype());
+ return u.absence.prototype;
+ }
+
+ bool hasRequiredValue() const { return !!*this && m_kind == Equivalence; }
+ JSValue requiredValue() const
+ {
+ ASSERT(hasRequiredValue());
+ return JSValue::decode(u.equivalence.value);
+ }
+
+ void dumpInContext(PrintStream&, DumpContext*) const;
+ void dump(PrintStream&) const;
+
+ unsigned hash() const
+ {
+ unsigned result = WTF::PtrHash<UniquedStringImpl*>::hash(m_uid) + static_cast<unsigned>(m_kind);
+ switch (m_kind) {
+ case Presence:
+ result ^= u.presence.offset;
+ result ^= u.presence.attributes;
+ break;
+ case Absence:
+ case AbsenceOfSetter:
+ result ^= WTF::PtrHash<JSObject*>::hash(u.absence.prototype);
+ break;
+ case Equivalence:
+ result ^= EncodedJSValueHash::hash(u.equivalence.value);
+ break;
+ }
+ return result;
+ }
+
+ bool operator==(const PropertyCondition& other) const
+ {
+ if (m_uid != other.m_uid)
+ return false;
+ if (m_kind != other.m_kind)
+ return false;
+ switch (m_kind) {
+ case Presence:
+ return u.presence.offset == other.u.presence.offset
+ && u.presence.attributes == other.u.presence.attributes;
+ case Absence:
+ case AbsenceOfSetter:
+ return u.absence.prototype == other.u.absence.prototype;
+ case Equivalence:
+ return u.equivalence.value == other.u.equivalence.value;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+
+ bool isHashTableDeletedValue() const
+ {
+ return !m_uid && m_kind == Absence;
+ }
+
+ // Two conditions are compatible if they are identical or if they speak of different uids. If
+ // false is returned, you have to decide how to resolve the conflict - for example if there is
+ // a Presence and an Equivalence then in some cases you'll want the more general of the two
+ // while in other cases you'll want the more specific of the two. This will also return false
+ // for contradictions, like Presence and Absence on the same uid. By convention, invalid
+ // conditions aren't compatible with anything.
+ bool isCompatibleWith(const PropertyCondition& other) const
+ {
+ if (!*this || !other)
+ return false;
+ return *this == other || uid() != other.uid();
+ }
+
+ // Checks if the object's structure claims that the property won't be intercepted.
+ bool isStillValidAssumingImpurePropertyWatchpoint(Structure*, JSObject* base = nullptr) const;
+
+ // Returns true if we need an impure property watchpoint to ensure validity even if
+ // isStillValidAccordingToStructure() returned true.
+ bool validityRequiresImpurePropertyWatchpoint(Structure*) const;
+
+ // Checks if the condition is still valid right now for the given object and structure.
+ // May conservatively return false, if the object and structure alone don't guarantee the
+ // condition. This happens for an Absence condition on an object that may have impure
+ // properties. If the object is not supplied, then a "true" return indicates that checking if
+ // an object has the given structure guarantees the condition still holds. If an object is
+ // supplied, then you may need to use some other watchpoints on the object to guarantee the
+ // condition in addition to the structure check.
+ bool isStillValid(Structure*, JSObject* base = nullptr) const;
+
+ // In some cases, the condition is not watchable, but could be made watchable by enabling the
+ // appropriate watchpoint. For example, replacement watchpoints are enabled only when some
+ // access is cached on the property in some structure. This is mainly to save space for
+ // dictionary properties or properties that never get very hot. But, it's always safe to
+ // enable watching, provided that this is called from the main thread.
+ enum WatchabilityEffort {
+ // This is the default. It means that we don't change the state of any Structure or
+ // object, and implies that if the property happens not to be watchable then we don't make
+ // it watchable. This is mandatory if calling from a JIT thread. This is also somewhat
+ // preferable when first deciding whether to watch a condition for the first time (i.e.
+ // not from a watchpoint fire that causes us to see if we should adapt), since a
+ // watchpoint not being initialized for watching implies that maybe we don't know enough
+ // yet to make it profitable to watch -- as in, the thing being watched may not have
+ // stabilized yet. We prefer to only assume that a condition will hold if it has been
+ // known to hold for a while already.
+ MakeNoChanges,
+
+ // Do what it takes to ensure that the property can be watched, if doing so has no
+ // user-observable effect. For now this just means that we will ensure that a property
+ // replacement watchpoint is enabled if it hadn't been enabled already. Do not use this
+ // from JIT threads, since the act of enabling watchpoints is not thread-safe.
+ EnsureWatchability
+ };
+
+ // This means that it's still valid and we could enforce validity by setting a transition
+ // watchpoint on the structure and possibly an impure property watchpoint.
+ bool isWatchableAssumingImpurePropertyWatchpoint(
+ Structure*, JSObject* base = nullptr, WatchabilityEffort = MakeNoChanges) const;
+
+ // This means that it's still valid and we could enforce validity by setting a transition
+ // watchpoint on the structure.
+ bool isWatchable(
+ Structure*, JSObject* base = nullptr, WatchabilityEffort = MakeNoChanges) const;
+
+ bool watchingRequiresStructureTransitionWatchpoint() const
+ {
+ // Currently, this is required for all of our conditions.
+ return !!*this;
+ }
+ bool watchingRequiresReplacementWatchpoint() const
+ {
+ return !!*this && m_kind == Equivalence;
+ }
+
+ // This means that the objects involved in this are still live.
+ bool isStillLive() const;
+
+ void validateReferences(const TrackedReferences&) const;
+
+ static bool isValidValueForAttributes(JSValue value, unsigned attributes);
+
+ bool isValidValueForPresence(JSValue) const;
+
+ PropertyCondition attemptToMakeEquivalenceWithoutBarrier(JSObject* base) const;
+
+private:
+ bool isWatchableWhenValid(Structure*, WatchabilityEffort) const;
+
+ UniquedStringImpl* m_uid;
+ Kind m_kind;
+ union {
+ struct {
+ PropertyOffset offset;
+ unsigned attributes;
+ } presence;
+ struct {
+ JSObject* prototype;
+ } absence;
+ struct {
+ EncodedJSValue value;
+ } equivalence;
+ } u;
+};
+
+struct PropertyConditionHash {
+ static unsigned hash(const PropertyCondition& key) { return key.hash(); }
+ static bool equal(
+ const PropertyCondition& a, const PropertyCondition& b)
+ {
+ return a == b;
+ }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::PropertyCondition::Kind);
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::PropertyCondition> {
+ typedef JSC::PropertyConditionHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::PropertyCondition> : SimpleClassHashTraits<JSC::PropertyCondition> { };
+
+} // namespace WTF
+
+#endif // PropertyCondition_h
+
diff --git a/Source/JavaScriptCore/bytecode/PutByIdFlags.cpp b/Source/JavaScriptCore/bytecode/PutByIdFlags.cpp
new file mode 100644
index 000000000..f28090049
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PutByIdFlags.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PutByIdFlags.h"
+
+#include "InferredType.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/PrintStream.h>
+#include <wtf/StringPrintStream.h>
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, PutByIdFlags flags) {
+ CommaPrinter comma("|");
+ if (flags & PutByIdIsDirect)
+ out.print(comma, "IsDirect");
+
+ InferredType::Kind kind = InferredType::kindForFlags(flags);
+ out.print(comma, kind);
+ if (InferredType::hasStructure(kind))
+ out.print(":", bitwise_cast<int32_t>(decodeStructureID(flags)));
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/bytecode/PutByIdFlags.h b/Source/JavaScriptCore/bytecode/PutByIdFlags.h
new file mode 100644
index 000000000..6ad364393
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PutByIdFlags.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PutByIdFlags_h
+#define PutByIdFlags_h
+
+#include "StructureIDTable.h"
+
+namespace JSC {
+
+enum PutByIdFlags : intptr_t {
+ PutByIdNone = 0,
+
+ // This flag indicates that the put_by_id is direct. That means that we store the property without
+ // checking if the prototype chain has a setter.
+ PutByIdIsDirect = 0x1,
+ PutByIdPersistentFlagsMask = 0x1,
+
+ // NOTE: The values below must be in sync with what is in LowLevelInterpreter.asm.
+
+ // Determining the required inferred type involves first checking the primary type mask, and then
+ // using that to figure out the meaning of the secondary mask:
+ // switch (flags & PutByIdPrimaryTypeMask) {
+ // case PutByIdPrimaryTypeSecondary:
+ // switch (flags & PutByIdSecondaryTypeMask) {
+ // ...
+ // }
+ // break;
+ // case PutByIdPrimaryTypeObjectWithStructure:
+ // case PutByIdPrimaryTypeObjectWithStructureOrOther:
+ // StructureID structureID = decodeStructureID(flags);
+ // break;
+ // }
+ PutByIdPrimaryTypeMask = 0x6,
+ PutByIdPrimaryTypeSecondary = 0x0, // Need to check the secondary type mask for the type.
+ PutByIdPrimaryTypeObjectWithStructure = 0x2, // Secondary type has structure ID.
+ PutByIdPrimaryTypeObjectWithStructureOrOther = 0x4, // Secondary type has structure ID.
+
+ PutByIdSecondaryTypeMask = -0x8,
+ PutByIdSecondaryTypeBottom = 0x0,
+ PutByIdSecondaryTypeBoolean = 0x8,
+ PutByIdSecondaryTypeOther = 0x10,
+ PutByIdSecondaryTypeInt32 = 0x18,
+ PutByIdSecondaryTypeNumber = 0x20,
+ PutByIdSecondaryTypeString = 0x28,
+ PutByIdSecondaryTypeSymbol = 0x30,
+ PutByIdSecondaryTypeObject = 0x38,
+ PutByIdSecondaryTypeObjectOrOther = 0x40,
+ PutByIdSecondaryTypeTop = 0x48
+};
+
+inline PutByIdFlags encodeStructureID(StructureID id)
+{
+#if USE(JSVALUE64)
+ return static_cast<PutByIdFlags>(static_cast<PutByIdFlags>(id) << 3);
+#else
+ PutByIdFlags result = bitwise_cast<PutByIdFlags>(id);
+ ASSERT(!(result & ~PutByIdSecondaryTypeMask));
+ return result;
+#endif
+}
+
+inline StructureID decodeStructureID(PutByIdFlags flags)
+{
+#if USE(JSVALUE64)
+ return static_cast<StructureID>(flags >> 3);
+#else
+ return bitwise_cast<StructureID>(flags & PutByIdSecondaryTypeMask);
+#endif
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::PutByIdFlags);
+
+} // namespace WTF
+
+#endif // PutByIdFlags_h
+
diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
index 17cf70897..3d066b9ae 100644
--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,206 +27,391 @@
#include "PutByIdStatus.h"
#include "CodeBlock.h"
+#include "ComplexGetStatus.h"
#include "LLIntData.h"
#include "LowLevelInterpreter.h"
-#include "Operations.h"
+#include "JSCInlines.h"
+#include "PolymorphicAccess.h"
#include "Structure.h"
#include "StructureChain.h"
+#include <wtf/ListDump.h>
namespace JSC {
-PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, StringImpl* uid)
+bool PutByIdStatus::appendVariant(const PutByIdVariant& variant)
+{
+ for (unsigned i = 0; i < m_variants.size(); ++i) {
+ if (m_variants[i].attemptToMerge(variant))
+ return true;
+ }
+ for (unsigned i = 0; i < m_variants.size(); ++i) {
+ if (m_variants[i].oldStructure().overlaps(variant.oldStructure()))
+ return false;
+ }
+ m_variants.append(variant);
+ return true;
+}
+
+#if ENABLE(DFG_JIT)
+bool PutByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+ return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
+ || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
+
+}
+#endif
+
+PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
UNUSED_PARAM(uid);
-#if ENABLE(LLINT)
+
+ VM& vm = *profiledBlock->vm();
+
Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
- Structure* structure = instruction[4].u.structure.get();
- if (!structure)
- return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+ StructureID structureID = instruction[4].u.structureID;
+ if (!structureID)
+ return PutByIdStatus(NoInformation);
- if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id)
- || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_out_of_line)) {
- PropertyOffset offset = structure->getConcurrently(*profiledBlock->vm(), uid);
+ Structure* structure = vm.heap.structureIDTable().get(structureID);
+
+ StructureID newStructureID = instruction[6].u.structureID;
+ if (!newStructureID) {
+ PropertyOffset offset = structure->getConcurrently(uid);
if (!isValidOffset(offset))
- return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+ return PutByIdStatus(NoInformation);
- return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
+ return PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
}
+
+ Structure* newStructure = vm.heap.structureIDTable().get(newStructureID);
ASSERT(structure->transitionWatchpointSetHasBeenInvalidated());
- ASSERT(instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct)
- || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal)
- || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct_out_of_line)
- || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal_out_of_line));
-
- Structure* newStructure = instruction[6].u.structure.get();
- StructureChain* chain = instruction[7].u.structureChain.get();
- ASSERT(newStructure);
- ASSERT(chain);
-
- PropertyOffset offset = newStructure->getConcurrently(*profiledBlock->vm(), uid);
+ PropertyOffset offset = newStructure->getConcurrently(uid);
if (!isValidOffset(offset))
- return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+ return PutByIdStatus(NoInformation);
- return PutByIdStatus(
- SimpleTransition, structure, newStructure,
- chain ? adoptRef(new IntendedStructureChain(profiledBlock, structure, chain)) : 0,
- offset);
-#else
- return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
-#endif
+ ObjectPropertyConditionSet conditionSet;
+ if (!(instruction[8].u.putByIdFlags & PutByIdIsDirect)) {
+ conditionSet =
+ generateConditionsForPropertySetterMissConcurrently(
+ *profiledBlock->vm(), profiledBlock->globalObject(), structure, uid);
+ if (!conditionSet.isValid())
+ return PutByIdStatus(NoInformation);
+ }
+
+ return PutByIdVariant::transition(
+ structure, newStructure, conditionSet, offset, newStructure->inferredTypeDescriptorFor(uid));
}
-PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
ConcurrentJITLocker locker(profiledBlock->m_lock);
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
UNUSED_PARAM(uid);
-#if ENABLE(JIT)
- if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
- return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+#if ENABLE(DFG_JIT)
+ if (hasExitSite(locker, profiledBlock, bytecodeIndex))
+ return PutByIdStatus(TakesSlowPath);
StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
- if (!stubInfo || !stubInfo->seen)
+ PutByIdStatus result = computeForStubInfo(
+ locker, profiledBlock, stubInfo, uid,
+ CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
+ if (!result)
return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
- if (stubInfo->resetByGC)
- return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+ return result;
+#else // ENABLE(JIT)
+ UNUSED_PARAM(map);
+ return PutByIdStatus(NoInformation);
+#endif // ENABLE(JIT)
+}
- switch (stubInfo->accessType) {
- case access_unset:
- // If the JIT saw it but didn't optimize it, then assume that this takes slow path.
- return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+#if ENABLE(JIT)
+PutByIdStatus PutByIdStatus::computeForStubInfo(const ConcurrentJITLocker& locker, CodeBlock* baselineBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+ return computeForStubInfo(
+ locker, baselineBlock, stubInfo, uid,
+ CallLinkStatus::computeExitSiteData(locker, baselineBlock, codeOrigin.bytecodeIndex));
+}
+
+PutByIdStatus PutByIdStatus::computeForStubInfo(
+ const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
+ UniquedStringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData)
+{
+ if (!stubInfo || !stubInfo->everConsidered)
+ return PutByIdStatus();
+
+ if (stubInfo->tookSlowPath)
+ return PutByIdStatus(TakesSlowPath);
+
+ switch (stubInfo->cacheType) {
+ case CacheType::Unset:
+ // This means that we attempted to cache but failed for some reason.
+ return PutByIdStatus(TakesSlowPath);
- case access_put_by_id_replace: {
+ case CacheType::PutByIdReplace: {
PropertyOffset offset =
- stubInfo->u.putByIdReplace.baseObjectStructure->getConcurrently(
- *profiledBlock->vm(), uid);
+ stubInfo->u.byIdSelf.baseObjectStructure->getConcurrently(uid);
if (isValidOffset(offset)) {
- return PutByIdStatus(
- SimpleReplace,
- stubInfo->u.putByIdReplace.baseObjectStructure.get(),
- 0, 0,
- offset);
+ return PutByIdVariant::replace(
+ stubInfo->u.byIdSelf.baseObjectStructure.get(), offset, InferredType::Top);
}
- return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+ return PutByIdStatus(TakesSlowPath);
}
- case access_put_by_id_transition_normal:
- case access_put_by_id_transition_direct: {
- ASSERT(stubInfo->u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated());
- PropertyOffset offset =
- stubInfo->u.putByIdTransition.structure->getConcurrently(
- *profiledBlock->vm(), uid);
- if (isValidOffset(offset)) {
- return PutByIdStatus(
- SimpleTransition,
- stubInfo->u.putByIdTransition.previousStructure.get(),
- stubInfo->u.putByIdTransition.structure.get(),
- stubInfo->u.putByIdTransition.chain ? adoptRef(new IntendedStructureChain(
- profiledBlock, stubInfo->u.putByIdTransition.previousStructure.get(),
- stubInfo->u.putByIdTransition.chain.get())) : 0,
- offset);
+ case CacheType::Stub: {
+ PolymorphicAccess* list = stubInfo->u.stub;
+
+ PutByIdStatus result;
+ result.m_state = Simple;
+
+ State slowPathState = TakesSlowPath;
+ for (unsigned i = 0; i < list->size(); ++i) {
+ const AccessCase& access = list->at(i);
+ if (access.doesCalls())
+ slowPathState = MakesCalls;
}
- return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+
+ for (unsigned i = 0; i < list->size(); ++i) {
+ const AccessCase& access = list->at(i);
+ if (access.viaProxy())
+ return PutByIdStatus(slowPathState);
+
+ PutByIdVariant variant;
+
+ switch (access.type()) {
+ case AccessCase::Replace: {
+ Structure* structure = access.structure();
+ PropertyOffset offset = structure->getConcurrently(uid);
+ if (!isValidOffset(offset))
+ return PutByIdStatus(slowPathState);
+ variant = PutByIdVariant::replace(
+ structure, offset, structure->inferredTypeDescriptorFor(uid));
+ break;
+ }
+
+ case AccessCase::Transition: {
+ PropertyOffset offset =
+ access.newStructure()->getConcurrently(uid);
+ if (!isValidOffset(offset))
+ return PutByIdStatus(slowPathState);
+ ObjectPropertyConditionSet conditionSet = access.conditionSet();
+ if (!conditionSet.structuresEnsureValidity())
+ return PutByIdStatus(slowPathState);
+ variant = PutByIdVariant::transition(
+ access.structure(), access.newStructure(), conditionSet, offset,
+ access.newStructure()->inferredTypeDescriptorFor(uid));
+ break;
+ }
+
+ case AccessCase::Setter: {
+ Structure* structure = access.structure();
+
+ ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
+ structure, access.conditionSet(), uid);
+
+ switch (complexGetStatus.kind()) {
+ case ComplexGetStatus::ShouldSkip:
+ continue;
+
+ case ComplexGetStatus::TakesSlowPath:
+ return PutByIdStatus(slowPathState);
+
+ case ComplexGetStatus::Inlineable: {
+ CallLinkInfo* callLinkInfo = access.callLinkInfo();
+ ASSERT(callLinkInfo);
+ std::unique_ptr<CallLinkStatus> callLinkStatus =
+ std::make_unique<CallLinkStatus>(
+ CallLinkStatus::computeFor(
+ locker, profiledBlock, *callLinkInfo, callExitSiteData));
+
+ variant = PutByIdVariant::setter(
+ structure, complexGetStatus.offset(), complexGetStatus.conditionSet(),
+ WTFMove(callLinkStatus));
+ } }
+ break;
+ }
+
+ case AccessCase::CustomValueSetter:
+ case AccessCase::CustomAccessorSetter:
+ return PutByIdStatus(MakesCalls);
+
+ default:
+ return PutByIdStatus(slowPathState);
+ }
+
+ if (!result.appendVariant(variant))
+ return PutByIdStatus(slowPathState);
+ }
+
+ return result;
}
default:
- // FIXME: We should handle polymorphic PutById. We probably have some interesting things
- // we could do about it.
- return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+ return PutByIdStatus(TakesSlowPath);
}
-#else // ENABLE(JIT)
- UNUSED_PARAM(map);
- return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
-#endif // ENABLE(JIT)
}
+#endif
-PutByIdStatus PutByIdStatus::computeFor(VM& vm, JSGlobalObject* globalObject, Structure* structure, StringImpl* uid, bool isDirect)
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
{
- if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
- return PutByIdStatus(TakesSlowPath);
+#if ENABLE(DFG_JIT)
+ if (dfgBlock) {
+ CallLinkStatus::ExitSiteData exitSiteData;
+ {
+ ConcurrentJITLocker locker(baselineBlock->m_lock);
+ if (hasExitSite(locker, baselineBlock, codeOrigin.bytecodeIndex))
+ return PutByIdStatus(TakesSlowPath);
+ exitSiteData = CallLinkStatus::computeExitSiteData(
+ locker, baselineBlock, codeOrigin.bytecodeIndex);
+ }
+
+ PutByIdStatus result;
+ {
+ ConcurrentJITLocker locker(dfgBlock->m_lock);
+ result = computeForStubInfo(
+ locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+ }
+
+ // We use TakesSlowPath in some cases where the stub was unset. That's weird and
+ // it would be better not to do that. But it means that we have to defend
+ // ourselves here.
+ if (result.isSimple())
+ return result;
+ }
+#else
+ UNUSED_PARAM(dfgBlock);
+ UNUSED_PARAM(dfgMap);
+#endif
+
+ return computeFor(baselineBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
+}
- if (!structure)
+PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, UniquedStringImpl* uid, bool isDirect)
+{
+ if (parseIndex(*uid))
return PutByIdStatus(TakesSlowPath);
+
+ if (set.isEmpty())
+ return PutByIdStatus();
- if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
- return PutByIdStatus(TakesSlowPath);
+ PutByIdStatus result;
+ result.m_state = Simple;
+ for (unsigned i = 0; i < set.size(); ++i) {
+ Structure* structure = set[i];
+
+ if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+ return PutByIdStatus(TakesSlowPath);
- if (!structure->propertyAccessesAreCacheable())
- return PutByIdStatus(TakesSlowPath);
+ if (!structure->propertyAccessesAreCacheable())
+ return PutByIdStatus(TakesSlowPath);
+
+ unsigned attributes;
+ PropertyOffset offset = structure->getConcurrently(uid, attributes);
+ if (isValidOffset(offset)) {
+ if (attributes & CustomAccessor)
+ return PutByIdStatus(MakesCalls);
+
+ if (attributes & (Accessor | ReadOnly))
+ return PutByIdStatus(TakesSlowPath);
+
+ WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset);
+ if (!replaceSet || replaceSet->isStillValid()) {
+ // When this executes, it'll create, and fire, this replacement watchpoint set.
+ // That means that this has probably never executed or that something fishy is
+ // going on. Also, we cannot create or fire the watchpoint set from the concurrent
+ // JIT thread, so even if we wanted to do this, we'd need to have a lazy thingy.
+ // So, better leave this alone and take slow path.
+ return PutByIdStatus(TakesSlowPath);
+ }
+
+ PutByIdVariant variant =
+ PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
+ if (!result.appendVariant(variant))
+ return PutByIdStatus(TakesSlowPath);
+ continue;
+ }
+
+ // Our hypothesis is that we're doing a transition. Before we prove that this is really
+ // true, we want to do some sanity checks.
- unsigned attributes;
- JSCell* specificValue;
- PropertyOffset offset = structure->getConcurrently(vm, uid, attributes, specificValue);
- if (isValidOffset(offset)) {
- if (attributes & (Accessor | ReadOnly))
+ // Don't cache put transitions on dictionaries.
+ if (structure->isDictionary())
return PutByIdStatus(TakesSlowPath);
- if (specificValue) {
- // We need the PutById slow path to verify that we're storing the right value into
- // the specialized slot.
+
+ // If the structure corresponds to something that isn't an object, then give up, since
+ // we don't want to be adding properties to strings.
+ if (!structure->typeInfo().isObject())
return PutByIdStatus(TakesSlowPath);
+
+ ObjectPropertyConditionSet conditionSet;
+ if (!isDirect) {
+ conditionSet = generateConditionsForPropertySetterMissConcurrently(
+ globalObject->vm(), globalObject, structure, uid);
+ if (!conditionSet.isValid())
+ return PutByIdStatus(TakesSlowPath);
}
- return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
- }
- // Our hypothesis is that we're doing a transition. Before we prove that this is really
- // true, we want to do some sanity checks.
+ // We only optimize if there is already a structure that the transition is cached to.
+ Structure* transition =
+ Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset);
+ if (!transition)
+ return PutByIdStatus(TakesSlowPath);
+ ASSERT(isValidOffset(offset));
- // Don't cache put transitions on dictionaries.
- if (structure->isDictionary())
- return PutByIdStatus(TakesSlowPath);
+ bool didAppend = result.appendVariant(
+ PutByIdVariant::transition(
+ structure, transition, conditionSet, offset,
+ transition->inferredTypeDescriptorFor(uid)));
+ if (!didAppend)
+ return PutByIdStatus(TakesSlowPath);
+ }
+
+ return result;
+}
- // If the structure corresponds to something that isn't an object, then give up, since
- // we don't want to be adding properties to strings.
- if (structure->typeInfo().type() == StringType)
- return PutByIdStatus(TakesSlowPath);
+bool PutByIdStatus::makesCalls() const
+{
+ if (m_state == MakesCalls)
+ return true;
+
+ if (m_state != Simple)
+ return false;
+
+ for (unsigned i = m_variants.size(); i--;) {
+ if (m_variants[i].makesCalls())
+ return true;
+ }
- RefPtr<IntendedStructureChain> chain;
- if (!isDirect) {
- chain = adoptRef(new IntendedStructureChain(globalObject, structure));
+ return false;
+}
+
+void PutByIdStatus::dump(PrintStream& out) const
+{
+ switch (m_state) {
+ case NoInformation:
+ out.print("(NoInformation)");
+ return;
- // If the prototype chain has setters or read-only properties, then give up.
- if (chain->mayInterceptStoreTo(vm, uid))
- return PutByIdStatus(TakesSlowPath);
+ case Simple:
+ out.print("(", listDump(m_variants), ")");
+ return;
- // If the prototype chain hasn't been normalized (i.e. there are proxies or dictionaries)
- // then give up. The dictionary case would only happen if this structure has not been
- // used in an optimized put_by_id transition. And really the only reason why we would
- // bail here is that I don't really feel like having the optimizing JIT go and flatten
- // dictionaries if we have evidence to suggest that those objects were never used as
- // prototypes in a cacheable prototype access - i.e. there's a good chance that some of
- // the other checks below will fail.
- if (!chain->isNormalized())
- return PutByIdStatus(TakesSlowPath);
+ case TakesSlowPath:
+ out.print("(TakesSlowPath)");
+ return;
+ case MakesCalls:
+ out.print("(MakesCalls)");
+ return;
}
- // We only optimize if there is already a structure that the transition is cached to.
- // Among other things, this allows us to guard against a transition with a specific
- // value.
- //
- // - If we're storing a value that could be specific: this would only be a problem if
- // the existing transition did have a specific value already, since if it didn't,
- // then we would behave "as if" we were not storing a specific value. If it did
- // have a specific value, then we'll know - the fact that we pass 0 for
- // specificValue will tell us.
- //
- // - If we're not storing a value that could be specific: again, this would only be a
- // problem if the existing transition did have a specific value, which we check for
- // by passing 0 for the specificValue.
- Structure* transition = Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, 0, offset);
- if (!transition)
- return PutByIdStatus(TakesSlowPath); // This occurs in bizarre cases only. See above.
- ASSERT(!transition->transitionDidInvolveSpecificValue());
- ASSERT(isValidOffset(offset));
-
- return PutByIdStatus(SimpleTransition, structure, transition, chain.release(), offset);
+ RELEASE_ASSERT_NOT_REACHED();
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.h b/Source/JavaScriptCore/bytecode/PutByIdStatus.h
index c0a1bc35c..b0473472a 100644
--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.h
+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,8 +26,9 @@
#ifndef PutByIdStatus_h
#define PutByIdStatus_h
-#include "IntendedStructureChain.h"
-#include "PropertyOffset.h"
+#include "CallLinkStatus.h"
+#include "ExitingJITType.h"
+#include "PutByIdVariant.h"
#include "StructureStubInfo.h"
#include <wtf/text/StringImpl.h>
@@ -44,77 +45,70 @@ public:
enum State {
// It's uncached so we have no information.
NoInformation,
- // It's cached as a direct store into an object property for cases where the object
- // already has the property.
- SimpleReplace,
- // It's cached as a transition from one structure that lacks the property to one that
- // includes the property, and a direct store to this new property.
- SimpleTransition,
+ // It's cached as a simple store of some kind.
+ Simple,
// It's known to often take slow path.
- TakesSlowPath
+ TakesSlowPath,
+ // It's known to take paths that make calls.
+ MakesCalls
};
PutByIdStatus()
: m_state(NoInformation)
- , m_oldStructure(0)
- , m_newStructure(0)
- , m_structureChain(0)
- , m_offset(invalidOffset)
{
}
explicit PutByIdStatus(State state)
: m_state(state)
- , m_oldStructure(0)
- , m_newStructure(0)
- , m_structureChain(0)
- , m_offset(invalidOffset)
{
- ASSERT(m_state == NoInformation || m_state == TakesSlowPath);
+ ASSERT(m_state == NoInformation || m_state == TakesSlowPath || m_state == MakesCalls);
}
- PutByIdStatus(
- State state,
- Structure* oldStructure,
- Structure* newStructure,
- PassRefPtr<IntendedStructureChain> structureChain,
- PropertyOffset offset)
- : m_state(state)
- , m_oldStructure(oldStructure)
- , m_newStructure(newStructure)
- , m_structureChain(structureChain)
- , m_offset(offset)
+ PutByIdStatus(const PutByIdVariant& variant)
+ : m_state(Simple)
{
- ASSERT((m_state == NoInformation || m_state == TakesSlowPath) == !m_oldStructure);
- ASSERT((m_state != SimpleTransition) == !m_newStructure);
- ASSERT(!((m_state != SimpleTransition) && m_structureChain));
- ASSERT((m_state == NoInformation || m_state == TakesSlowPath) == (m_offset == invalidOffset));
+ m_variants.append(variant);
}
- static PutByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, StringImpl* uid);
- static PutByIdStatus computeFor(VM&, JSGlobalObject*, Structure*, StringImpl* uid, bool isDirect);
+ static PutByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, UniquedStringImpl* uid);
+ static PutByIdStatus computeFor(JSGlobalObject*, const StructureSet&, UniquedStringImpl* uid, bool isDirect);
+
+ static PutByIdStatus computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin, UniquedStringImpl* uid);
+
+#if ENABLE(JIT)
+ static PutByIdStatus computeForStubInfo(const ConcurrentJITLocker&, CodeBlock* baselineBlock, StructureStubInfo*, CodeOrigin, UniquedStringImpl* uid);
+#endif
State state() const { return m_state; }
bool isSet() const { return m_state != NoInformation; }
bool operator!() const { return m_state == NoInformation; }
- bool isSimpleReplace() const { return m_state == SimpleReplace; }
- bool isSimpleTransition() const { return m_state == SimpleTransition; }
- bool takesSlowPath() const { return m_state == TakesSlowPath; }
+ bool isSimple() const { return m_state == Simple; }
+ bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls; }
+ bool makesCalls() const;
- Structure* oldStructure() const { return m_oldStructure; }
- Structure* newStructure() const { return m_newStructure; }
- IntendedStructureChain* structureChain() const { return m_structureChain.get(); }
- PropertyOffset offset() const { return m_offset; }
+ size_t numVariants() const { return m_variants.size(); }
+ const Vector<PutByIdVariant, 1>& variants() const { return m_variants; }
+ const PutByIdVariant& at(size_t index) const { return m_variants[index]; }
+ const PutByIdVariant& operator[](size_t index) const { return at(index); }
+
+ void dump(PrintStream&) const;
private:
- static PutByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, StringImpl* uid);
+#if ENABLE(DFG_JIT)
+ static bool hasExitSite(const ConcurrentJITLocker&, CodeBlock*, unsigned bytecodeIndex);
+#endif
+#if ENABLE(JIT)
+ static PutByIdStatus computeForStubInfo(
+ const ConcurrentJITLocker&, CodeBlock*, StructureStubInfo*, UniquedStringImpl* uid,
+ CallLinkStatus::ExitSiteData);
+#endif
+ static PutByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid);
+
+ bool appendVariant(const PutByIdVariant&);
State m_state;
- Structure* m_oldStructure;
- Structure* m_newStructure;
- RefPtr<IntendedStructureChain> m_structureChain;
- PropertyOffset m_offset;
+ Vector<PutByIdVariant, 1> m_variants;
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp b/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp
new file mode 100644
index 000000000..9904c625b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PutByIdVariant.h"
+
+#include "CallLinkStatus.h"
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+PutByIdVariant::PutByIdVariant(const PutByIdVariant& other)
+ : PutByIdVariant()
+{
+ *this = other;
+}
+
+PutByIdVariant& PutByIdVariant::operator=(const PutByIdVariant& other)
+{
+ m_kind = other.m_kind;
+ m_oldStructure = other.m_oldStructure;
+ m_newStructure = other.m_newStructure;
+ m_conditionSet = other.m_conditionSet;
+ m_offset = other.m_offset;
+ m_requiredType = other.m_requiredType;
+ if (other.m_callLinkStatus)
+ m_callLinkStatus = std::make_unique<CallLinkStatus>(*other.m_callLinkStatus);
+ else
+ m_callLinkStatus = nullptr;
+ return *this;
+}
+
+PutByIdVariant PutByIdVariant::replace(
+ const StructureSet& structure, PropertyOffset offset, const InferredType::Descriptor& requiredType)
+{
+ PutByIdVariant result;
+ result.m_kind = Replace;
+ result.m_oldStructure = structure;
+ result.m_offset = offset;
+ result.m_requiredType = requiredType;
+ return result;
+}
+
+PutByIdVariant PutByIdVariant::transition(
+ const StructureSet& oldStructure, Structure* newStructure,
+ const ObjectPropertyConditionSet& conditionSet, PropertyOffset offset,
+ const InferredType::Descriptor& requiredType)
+{
+ PutByIdVariant result;
+ result.m_kind = Transition;
+ result.m_oldStructure = oldStructure;
+ result.m_newStructure = newStructure;
+ result.m_conditionSet = conditionSet;
+ result.m_offset = offset;
+ result.m_requiredType = requiredType;
+ return result;
+}
+
+PutByIdVariant PutByIdVariant::setter(
+ const StructureSet& structure, PropertyOffset offset,
+ const ObjectPropertyConditionSet& conditionSet,
+ std::unique_ptr<CallLinkStatus> callLinkStatus)
+{
+ PutByIdVariant result;
+ result.m_kind = Setter;
+ result.m_oldStructure = structure;
+ result.m_conditionSet = conditionSet;
+ result.m_offset = offset;
+ result.m_callLinkStatus = WTFMove(callLinkStatus);
+ result.m_requiredType = InferredType::Top;
+ return result;
+}
+
+Structure* PutByIdVariant::oldStructureForTransition() const
+{
+ ASSERT(kind() == Transition);
+ ASSERT(m_oldStructure.size() <= 2);
+ for (unsigned i = m_oldStructure.size(); i--;) {
+ Structure* structure = m_oldStructure[i];
+ if (structure != m_newStructure)
+ return structure;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+
+ return nullptr;
+}
+
+bool PutByIdVariant::writesStructures() const
+{
+ switch (kind()) {
+ case Transition:
+ case Setter:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool PutByIdVariant::reallocatesStorage() const
+{
+ switch (kind()) {
+ case Transition:
+ return oldStructureForTransition()->outOfLineCapacity() != newStructure()->outOfLineCapacity();
+ case Setter:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool PutByIdVariant::makesCalls() const
+{
+ return kind() == Setter;
+}
+
+bool PutByIdVariant::attemptToMerge(const PutByIdVariant& other)
+{
+ if (m_offset != other.m_offset)
+ return false;
+
+ if (m_requiredType != other.m_requiredType)
+ return false;
+
+ switch (m_kind) {
+ case Replace: {
+ switch (other.m_kind) {
+ case Replace: {
+ ASSERT(m_conditionSet.isEmpty());
+ ASSERT(other.m_conditionSet.isEmpty());
+
+ m_oldStructure.merge(other.m_oldStructure);
+ return true;
+ }
+
+ case Transition: {
+ PutByIdVariant newVariant = other;
+ if (newVariant.attemptToMergeTransitionWithReplace(*this)) {
+ *this = newVariant;
+ return true;
+ }
+ return false;
+ }
+
+ default:
+ return false;
+ }
+ }
+
+ case Transition:
+ switch (other.m_kind) {
+ case Replace:
+ return attemptToMergeTransitionWithReplace(other);
+
+ default:
+ return false;
+ }
+
+ default:
+ return false;
+ }
+}
+
+bool PutByIdVariant::attemptToMergeTransitionWithReplace(const PutByIdVariant& replace)
+{
+ ASSERT(m_kind == Transition);
+ ASSERT(replace.m_kind == Replace);
+ ASSERT(m_offset == replace.m_offset);
+ ASSERT(!replace.writesStructures());
+ ASSERT(!replace.reallocatesStorage());
+ ASSERT(replace.conditionSet().isEmpty());
+
+ // This sort of merging only works when we have one path along which we add a new field which
+ // transitions to structure S while the other path was already on structure S. This doesn't
+ // work if we need to reallocate anything or if the replace path is polymorphic.
+
+ if (reallocatesStorage())
+ return false;
+
+ if (replace.m_oldStructure.onlyStructure() != m_newStructure)
+ return false;
+
+ m_oldStructure.merge(m_newStructure);
+ return true;
+}
+
+void PutByIdVariant::dump(PrintStream& out) const
+{
+ dumpInContext(out, 0);
+}
+
+void PutByIdVariant::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ switch (kind()) {
+ case NotSet:
+ out.print("<empty>");
+ return;
+
+ case Replace:
+ out.print(
+ "<Replace: ", inContext(structure(), context), ", offset = ", offset(), ", ",
+ inContext(requiredType(), context), ">");
+ return;
+
+ case Transition:
+ out.print(
+ "<Transition: ", inContext(oldStructure(), context), " -> ",
+ pointerDumpInContext(newStructure(), context), ", [",
+ inContext(m_conditionSet, context), "], offset = ", offset(), ", ",
+ inContext(requiredType(), context), ">");
+ return;
+
+ case Setter:
+ out.print(
+ "<Setter: ", inContext(structure(), context), ", [",
+ inContext(m_conditionSet, context), "]");
+ out.print(", offset = ", m_offset);
+ out.print(", call = ", *m_callLinkStatus);
+ out.print(">");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/PutByIdVariant.h b/Source/JavaScriptCore/bytecode/PutByIdVariant.h
new file mode 100644
index 000000000..29cd08d03
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PutByIdVariant.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PutByIdVariant_h
+#define PutByIdVariant_h
+
+#include "ObjectPropertyConditionSet.h"
+#include "PropertyOffset.h"
+#include "StructureSet.h"
+
+namespace JSC {
+
+class CallLinkStatus;
+
+class PutByIdVariant {
+public:
+ enum Kind {
+ NotSet,
+ Replace,
+ Transition,
+ Setter
+ };
+
+ PutByIdVariant()
+ : m_kind(NotSet)
+ , m_newStructure(nullptr)
+ , m_offset(invalidOffset)
+ {
+ }
+
+ PutByIdVariant(const PutByIdVariant&);
+ PutByIdVariant& operator=(const PutByIdVariant&);
+
+ static PutByIdVariant replace(const StructureSet&, PropertyOffset, const InferredType::Descriptor&);
+
+ static PutByIdVariant transition(
+ const StructureSet& oldStructure, Structure* newStructure,
+ const ObjectPropertyConditionSet&, PropertyOffset, const InferredType::Descriptor&);
+
+ static PutByIdVariant setter(
+ const StructureSet&, PropertyOffset, const ObjectPropertyConditionSet&,
+ std::unique_ptr<CallLinkStatus>);
+
+ Kind kind() const { return m_kind; }
+
+ bool isSet() const { return kind() != NotSet; }
+ bool operator!() const { return !isSet(); }
+
+ const StructureSet& structure() const
+ {
+ ASSERT(kind() == Replace || kind() == Setter);
+ return m_oldStructure;
+ }
+
+ const StructureSet& structureSet() const
+ {
+ return structure();
+ }
+
+ const StructureSet& oldStructure() const
+ {
+ ASSERT(kind() == Transition || kind() == Replace || kind() == Setter);
+ return m_oldStructure;
+ }
+
+ StructureSet& oldStructure()
+ {
+ ASSERT(kind() == Transition || kind() == Replace || kind() == Setter);
+ return m_oldStructure;
+ }
+
+ Structure* oldStructureForTransition() const;
+
+ Structure* newStructure() const
+ {
+ ASSERT(kind() == Transition);
+ return m_newStructure;
+ }
+
+ InferredType::Descriptor requiredType() const
+ {
+ return m_requiredType;
+ }
+
+ bool writesStructures() const;
+ bool reallocatesStorage() const;
+ bool makesCalls() const;
+
+ const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; }
+
+ // We don't support intrinsics for Setters (it would be sweet if we did) but we need this for templated helpers.
+ Intrinsic intrinsic() const { return NoIntrinsic; }
+
+ PropertyOffset offset() const
+ {
+ ASSERT(isSet());
+ return m_offset;
+ }
+
+ CallLinkStatus* callLinkStatus() const
+ {
+ ASSERT(kind() == Setter);
+ return m_callLinkStatus.get();
+ }
+
+ bool attemptToMerge(const PutByIdVariant& other);
+
+ void dump(PrintStream&) const;
+ void dumpInContext(PrintStream&, DumpContext*) const;
+
+private:
+ bool attemptToMergeTransitionWithReplace(const PutByIdVariant& replace);
+
+ Kind m_kind;
+ StructureSet m_oldStructure;
+ Structure* m_newStructure;
+ ObjectPropertyConditionSet m_conditionSet;
+ PropertyOffset m_offset;
+ InferredType::Descriptor m_requiredType;
+ std::unique_ptr<CallLinkStatus> m_callLinkStatus;
+};
+
+} // namespace JSC
+
+#endif // PutByIdVariant_h
+
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.cpp b/Source/JavaScriptCore/bytecode/SamplingTool.cpp
index d18dbc1ff..f5bf2b72a 100644
--- a/Source/JavaScriptCore/bytecode/SamplingTool.cpp
+++ b/Source/JavaScriptCore/bytecode/SamplingTool.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,6 +32,7 @@
#include "CodeBlock.h"
#include "Interpreter.h"
#include "Opcode.h"
+#include "JSCInlines.h"
#if !OS(WINDOWS)
#include <unistd.h>
@@ -284,7 +285,7 @@ void SamplingTool::doRun()
#if ENABLE(CODEBLOCK_SAMPLING)
if (CodeBlock* codeBlock = sample.codeBlock()) {
- MutexLocker locker(m_scriptSampleMapMutex);
+ LockHolder locker(m_scriptSampleMapMutex);
ScriptSampleRecord* record = m_scopeSampleMap->get(codeBlock->ownerExecutable());
ASSERT(record);
record->sample(codeBlock, sample.vPC());
@@ -300,7 +301,7 @@ void SamplingTool::sample()
void SamplingTool::notifyOfScope(VM& vm, ScriptExecutable* script)
{
#if ENABLE(CODEBLOCK_SAMPLING)
- MutexLocker locker(m_scriptSampleMapMutex);
+ LockHolder locker(m_scriptSampleMapMutex);
m_scopeSampleMap->set(script, adoptPtr(new ScriptSampleRecord(vm, script)));
#else
UNUSED_PARAM(vm);
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.h b/Source/JavaScriptCore/bytecode/SamplingTool.h
index 1dfb8ecca..18e348377 100644
--- a/Source/JavaScriptCore/bytecode/SamplingTool.h
+++ b/Source/JavaScriptCore/bytecode/SamplingTool.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -35,6 +35,7 @@
#include <wtf/Assertions.h>
#include <wtf/Atomics.h>
#include <wtf/HashMap.h>
+#include <wtf/Lock.h>
#include <wtf/MainThread.h>
#include <wtf/Spectrum.h>
#include <wtf/Threading.h>
@@ -227,6 +228,7 @@ namespace JSC {
};
class SamplingTool {
+ WTF_MAKE_FAST_ALLOCATED;
public:
friend struct CallRecord;
@@ -271,7 +273,7 @@ namespace JSC {
, m_sampleCount(0)
, m_opcodeSampleCount(0)
#if ENABLE(CODEBLOCK_SAMPLING)
- , m_scopeSampleMap(adoptPtr(new ScriptSampleRecordMap))
+ , m_scopeSampleMap(std::make_unique<ScriptSampleRecordMap>())
#endif
{
memset(m_opcodeSamples, 0, sizeof(m_opcodeSamples));
@@ -337,8 +339,8 @@ namespace JSC {
unsigned m_opcodeSamplesInCTIFunctions[numOpcodeIDs];
#if ENABLE(CODEBLOCK_SAMPLING)
- Mutex m_scriptSampleMapMutex;
- OwnPtr<ScriptSampleRecordMap> m_scopeSampleMap;
+ Lock m_scriptSampleMapMutex;
+ std::unique_ptr<ScriptSampleRecordMap> m_scopeSampleMap;
#endif
};
diff --git a/Source/JavaScriptCore/bytecode/SpecialPointer.cpp b/Source/JavaScriptCore/bytecode/SpecialPointer.cpp
index 7789653f0..dc5a363b6 100644
--- a/Source/JavaScriptCore/bytecode/SpecialPointer.cpp
+++ b/Source/JavaScriptCore/bytecode/SpecialPointer.cpp
@@ -28,6 +28,7 @@
#include "CodeBlock.h"
#include "JSGlobalObject.h"
+#include "JSCInlines.h"
namespace JSC {
diff --git a/Source/JavaScriptCore/bytecode/SpecialPointer.h b/Source/JavaScriptCore/bytecode/SpecialPointer.h
index c18a6e904..64fb23fcf 100644
--- a/Source/JavaScriptCore/bytecode/SpecialPointer.h
+++ b/Source/JavaScriptCore/bytecode/SpecialPointer.h
@@ -41,6 +41,11 @@ enum Pointer {
};
} // namespace Special
+enum class LinkTimeConstant {
+ DefinePropertyFunction,
+};
+const unsigned LinkTimeConstantCount = 1;
+
inline bool pointerIsFunction(Special::Pointer pointer)
{
ASSERT_UNUSED(pointer, pointer < Special::TableSize);
diff --git a/Source/JavaScriptCore/bytecode/SpeculatedType.cpp b/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
index 3917cca0f..af67f4504 100644
--- a/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
+++ b/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2013, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -29,13 +29,13 @@
#include "config.h"
#include "SpeculatedType.h"
-#include "Arguments.h"
+#include "DirectArguments.h"
#include "JSArray.h"
#include "JSFunction.h"
-#include "Operations.h"
+#include "JSCInlines.h"
+#include "ScopedArguments.h"
#include "StringObject.h"
#include "ValueProfile.h"
-#include <wtf/BoundsCheckedPointer.h>
#include <wtf/StringPrintStream.h>
namespace JSC {
@@ -127,8 +127,13 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value)
else
isTop = false;
- if (value & SpecArguments)
- myOut.print("Arguments");
+ if (value & SpecDirectArguments)
+ myOut.print("Directarguments");
+ else
+ isTop = false;
+
+ if (value & SpecScopedArguments)
+ myOut.print("Scopedarguments");
else
isTop = false;
@@ -136,6 +141,11 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value)
myOut.print("Stringobject");
else
isTop = false;
+
+ if (value & SpecRegExpObject)
+ myOut.print("Regexpobject");
+ else
+ isTop = false;
}
if ((value & SpecString) == SpecString)
@@ -151,18 +161,32 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value)
else
isTop = false;
}
+
+ if (value & SpecSymbol)
+ myOut.print("Symbol");
+ else
+ isTop = false;
}
- if (value & SpecInt32)
+ if (value == SpecInt32)
myOut.print("Int32");
- else
- isTop = false;
+ else {
+ if (value & SpecBoolInt32)
+ myOut.print("Boolint32");
+ else
+ isTop = false;
+
+ if (value & SpecNonBoolInt32)
+ myOut.print("Nonboolint32");
+ else
+ isTop = false;
+ }
if (value & SpecInt52)
myOut.print("Int52");
- if ((value & SpecDouble) == SpecDouble)
- myOut.print("Double");
+ if ((value & SpecBytecodeDouble) == SpecBytecodeDouble)
+ myOut.print("Bytecodedouble");
else {
if (value & SpecInt52AsDouble)
myOut.print("Int52asdouble");
@@ -174,12 +198,15 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value)
else
isTop = false;
- if (value & SpecDoubleNaN)
- myOut.print("Doublenan");
+ if (value & SpecDoublePureNaN)
+ myOut.print("Doublepurenan");
else
isTop = false;
}
+ if (value & SpecDoubleImpureNaN)
+ out.print("Doubleimpurenan");
+
if (value & SpecBoolean)
myOut.print("Bool");
else
@@ -229,16 +256,22 @@ static const char* speculationToAbbreviatedString(SpeculatedType prediction)
return "<Float32array>";
if (isFloat64ArraySpeculation(prediction))
return "<Float64array>";
- if (isArgumentsSpeculation(prediction))
- return "<Arguments>";
+ if (isDirectArgumentsSpeculation(prediction))
+ return "<DirectArguments>";
+ if (isScopedArgumentsSpeculation(prediction))
+ return "<ScopedArguments>";
if (isStringObjectSpeculation(prediction))
return "<StringObject>";
+ if (isRegExpObjectSpeculation(prediction))
+ return "<RegExpObject>";
if (isStringOrStringObjectSpeculation(prediction))
return "<StringOrStringObject>";
if (isObjectSpeculation(prediction))
return "<Object>";
if (isCellSpeculation(prediction))
return "<Cell>";
+ if (isBoolInt32Speculation(prediction))
+ return "<BoolInt32>";
if (isInt32Speculation(prediction))
return "<Int32>";
if (isInt52AsDoubleSpeculation(prediction))
@@ -255,6 +288,8 @@ static const char* speculationToAbbreviatedString(SpeculatedType prediction)
return "<Boolean>";
if (isOtherSpeculation(prediction))
return "<Other>";
+ if (isMiscSpeculation(prediction))
+ return "<Misc>";
return "";
}
@@ -300,11 +335,17 @@ SpeculatedType speculationFromClassInfo(const ClassInfo* classInfo)
if (classInfo == JSArray::info())
return SpecArray;
- if (classInfo == Arguments::info())
- return SpecArguments;
+ if (classInfo == DirectArguments::info())
+ return SpecDirectArguments;
+
+ if (classInfo == ScopedArguments::info())
+ return SpecScopedArguments;
if (classInfo == StringObject::info())
return SpecStringObject;
+
+ if (classInfo == RegExpObject::info())
+ return SpecRegExpObject;
if (classInfo->isSubClassOf(JSFunction::info()))
return SpecFunction;
@@ -322,6 +363,8 @@ SpeculatedType speculationFromStructure(Structure* structure)
{
if (structure->typeInfo().type() == StringType)
return SpecString;
+ if (structure->typeInfo().type() == SymbolType)
+ return SpecSymbol;
return speculationFromClassInfo(structure->classInfo());
}
@@ -329,7 +372,7 @@ SpeculatedType speculationFromCell(JSCell* cell)
{
if (JSString* string = jsDynamicCast<JSString*>(cell)) {
if (const StringImpl* impl = string->tryGetValueImpl()) {
- if (impl->isIdentifier())
+ if (impl->isAtomic())
return SpecStringIdent;
}
return SpecStringVar;
@@ -341,12 +384,15 @@ SpeculatedType speculationFromValue(JSValue value)
{
if (value.isEmpty())
return SpecEmpty;
- if (value.isInt32())
- return SpecInt32;
+ if (value.isInt32()) {
+ if (value.asInt32() & ~1)
+ return SpecNonBoolInt32;
+ return SpecBoolInt32;
+ }
if (value.isDouble()) {
double number = value.asNumber();
if (number != number)
- return SpecDoubleNaN;
+ return SpecDoublePureNaN;
if (value.isMachineInt())
return SpecInt52AsDouble;
return SpecNonIntAsDouble;
@@ -391,5 +437,136 @@ TypedArrayType typedArrayTypeFromSpeculation(SpeculatedType type)
return NotTypedArray;
}
+SpeculatedType leastUpperBoundOfStrictlyEquivalentSpeculations(SpeculatedType type)
+{
+ if (type & SpecInteger)
+ type |= SpecInteger;
+ if (type & SpecString)
+ type |= SpecString;
+ return type;
+}
+
+bool valuesCouldBeEqual(SpeculatedType a, SpeculatedType b)
+{
+ a = leastUpperBoundOfStrictlyEquivalentSpeculations(a);
+ b = leastUpperBoundOfStrictlyEquivalentSpeculations(b);
+
+ // Anything could be equal to a string.
+ if (a & SpecString)
+ return true;
+ if (b & SpecString)
+ return true;
+
+ // If both sides are definitely only objects, then equality is fairly sane.
+ if (isObjectSpeculation(a) && isObjectSpeculation(b))
+ return !!(a & b);
+
+ // If either side could be an object or not, then we could call toString or
+ // valueOf, which could return anything.
+ if (a & SpecObject)
+ return true;
+ if (b & SpecObject)
+ return true;
+
+ // Neither side is an object or string, so the world is relatively sane.
+ return !!(a & b);
+}
+
+SpeculatedType typeOfDoubleSum(SpeculatedType a, SpeculatedType b)
+{
+ SpeculatedType result = a | b;
+ // Impure NaN could become pure NaN during addition because addition may clear bits.
+ if (result & SpecDoubleImpureNaN)
+ result |= SpecDoublePureNaN;
+ // Values could overflow, or fractions could become integers.
+ if (result & SpecDoubleReal)
+ result |= SpecDoubleReal;
+ return result;
+}
+
+SpeculatedType typeOfDoubleDifference(SpeculatedType a, SpeculatedType b)
+{
+ return typeOfDoubleSum(a, b);
+}
+
+SpeculatedType typeOfDoubleProduct(SpeculatedType a, SpeculatedType b)
+{
+ return typeOfDoubleSum(a, b);
+}
+
+static SpeculatedType polluteDouble(SpeculatedType value)
+{
+ // Impure NaN could become pure NaN because the operation could clear some bits.
+ if (value & SpecDoubleImpureNaN)
+ value |= SpecDoubleNaN;
+ // Values could overflow, fractions could become integers, or an error could produce
+ // PureNaN.
+ if (value & SpecDoubleReal)
+ value |= SpecDoubleReal | SpecDoublePureNaN;
+ return value;
+}
+
+SpeculatedType typeOfDoubleQuotient(SpeculatedType a, SpeculatedType b)
+{
+ return polluteDouble(a | b);
+}
+
+SpeculatedType typeOfDoubleMinMax(SpeculatedType a, SpeculatedType b)
+{
+ SpeculatedType result = a | b;
+ // Impure NaN could become pure NaN during addition because addition may clear bits.
+ if (result & SpecDoubleImpureNaN)
+ result |= SpecDoublePureNaN;
+ return result;
+}
+
+SpeculatedType typeOfDoubleNegation(SpeculatedType value)
+{
+ // Impure NaN could become pure NaN because bits might get cleared.
+ if (value & SpecDoubleImpureNaN)
+ value |= SpecDoublePureNaN;
+ // We could get negative zero, which mixes SpecInt52AsDouble and SpecNonIntAsDouble.
+ // We could also overflow a large negative int into something that is no longer
+ // representable as an int.
+ if (value & SpecDoubleReal)
+ value |= SpecDoubleReal;
+ return value;
+}
+
+SpeculatedType typeOfDoubleAbs(SpeculatedType value)
+{
+ return typeOfDoubleNegation(value);
+}
+
+SpeculatedType typeOfDoubleRounding(SpeculatedType value)
+{
+ // We might lose bits, which leads to a NaN being purified.
+ if (value & SpecDoubleImpureNaN)
+ value |= SpecDoublePureNaN;
+ // We might lose bits, which leads to a value becoming integer-representable.
+ if (value & SpecNonIntAsDouble)
+ value |= SpecInt52AsDouble;
+ return value;
+}
+
+SpeculatedType typeOfDoublePow(SpeculatedType xValue, SpeculatedType yValue)
+{
+ // Math.pow() always return NaN if the exponent is NaN, unlike std::pow().
+ // We always set a pure NaN in that case.
+ if (yValue & SpecDoubleNaN)
+ xValue |= SpecDoublePureNaN;
+ return polluteDouble(xValue);
+}
+
+SpeculatedType typeOfDoubleBinaryOp(SpeculatedType a, SpeculatedType b)
+{
+ return polluteDouble(a | b);
+}
+
+SpeculatedType typeOfDoubleUnaryOp(SpeculatedType value)
+{
+ return polluteDouble(value);
+}
+
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/SpeculatedType.h b/Source/JavaScriptCore/bytecode/SpeculatedType.h
index eaf0af37a..1ebbeb1f5 100644
--- a/Source/JavaScriptCore/bytecode/SpeculatedType.h
+++ b/Source/JavaScriptCore/bytecode/SpeculatedType.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -38,48 +38,58 @@ namespace JSC {
class Structure;
typedef uint32_t SpeculatedType;
-static const SpeculatedType SpecNone = 0x00000000; // We don't know anything yet.
-static const SpeculatedType SpecFinalObject = 0x00000001; // It's definitely a JSFinalObject.
-static const SpeculatedType SpecArray = 0x00000002; // It's definitely a JSArray.
-static const SpeculatedType SpecFunction = 0x00000008; // It's definitely a JSFunction or one of its subclasses.
-static const SpeculatedType SpecInt8Array = 0x00000010; // It's definitely an Int8Array or one of its subclasses.
-static const SpeculatedType SpecInt16Array = 0x00000020; // It's definitely an Int16Array or one of its subclasses.
-static const SpeculatedType SpecInt32Array = 0x00000040; // It's definitely an Int32Array or one of its subclasses.
-static const SpeculatedType SpecUint8Array = 0x00000080; // It's definitely an Uint8Array or one of its subclasses.
-static const SpeculatedType SpecUint8ClampedArray = 0x00000100; // It's definitely an Uint8ClampedArray or one of its subclasses.
-static const SpeculatedType SpecUint16Array = 0x00000200; // It's definitely an Uint16Array or one of its subclasses.
-static const SpeculatedType SpecUint32Array = 0x00000400; // It's definitely an Uint32Array or one of its subclasses.
-static const SpeculatedType SpecFloat32Array = 0x00000800; // It's definitely an Uint16Array or one of its subclasses.
-static const SpeculatedType SpecFloat64Array = 0x00001000; // It's definitely an Uint16Array or one of its subclasses.
+static const SpeculatedType SpecNone = 0; // We don't know anything yet.
+static const SpeculatedType SpecFinalObject = 1u << 0; // It's definitely a JSFinalObject.
+static const SpeculatedType SpecArray = 1u << 1; // It's definitely a JSArray.
+static const SpeculatedType SpecFunction = 1u << 2; // It's definitely a JSFunction.
+static const SpeculatedType SpecInt8Array = 1u << 3; // It's definitely an Int8Array or one of its subclasses.
+static const SpeculatedType SpecInt16Array = 1u << 4; // It's definitely an Int16Array or one of its subclasses.
+static const SpeculatedType SpecInt32Array = 1u << 5; // It's definitely an Int32Array or one of its subclasses.
+static const SpeculatedType SpecUint8Array = 1u << 6; // It's definitely an Uint8Array or one of its subclasses.
+static const SpeculatedType SpecUint8ClampedArray = 1u << 7; // It's definitely an Uint8ClampedArray or one of its subclasses.
+static const SpeculatedType SpecUint16Array = 1u << 8; // It's definitely an Uint16Array or one of its subclasses.
+static const SpeculatedType SpecUint32Array = 1u << 9; // It's definitely an Uint32Array or one of its subclasses.
+static const SpeculatedType SpecFloat32Array = 1u << 10; // It's definitely a Float32Array or one of its subclasses.
+static const SpeculatedType SpecFloat64Array = 1u << 11; // It's definitely a Float64Array or one of its subclasses.
static const SpeculatedType SpecTypedArrayView = SpecInt8Array | SpecInt16Array | SpecInt32Array | SpecUint8Array | SpecUint8ClampedArray | SpecUint16Array | SpecUint32Array | SpecFloat32Array | SpecFloat64Array;
-static const SpeculatedType SpecArguments = 0x00002000; // It's definitely an Arguments object.
-static const SpeculatedType SpecStringObject = 0x00004000; // It's definitely a StringObject.
-static const SpeculatedType SpecObjectOther = 0x00008000; // It's definitely an object but not JSFinalObject, JSArray, or JSFunction.
-static const SpeculatedType SpecObject = 0x0000ffff; // Bitmask used for testing for any kind of object prediction.
-static const SpeculatedType SpecStringIdent = 0x00010000; // It's definitely a JSString, and it's an identifier.
-static const SpeculatedType SpecStringVar = 0x00020000; // It's definitely a JSString, and it's not an identifier.
-static const SpeculatedType SpecString = 0x00030000; // It's definitely a JSString.
-static const SpeculatedType SpecCellOther = 0x00040000; // It's definitely a JSCell but not a subclass of JSObject and definitely not a JSString.
-static const SpeculatedType SpecCell = 0x0007ffff; // It's definitely a JSCell.
-static const SpeculatedType SpecInt32 = 0x00800000; // It's definitely an Int32.
-static const SpeculatedType SpecInt52 = 0x01000000; // It's definitely an Int52 and we intend it to unbox it.
-static const SpeculatedType SpecMachineInt = 0x01800000; // It's something that we can do machine int arithmetic on.
-static const SpeculatedType SpecInt52AsDouble = 0x02000000; // It's definitely an Int52 and it's inside a double.
-static const SpeculatedType SpecInteger = 0x03800000; // It's definitely some kind of integer.
-static const SpeculatedType SpecNonIntAsDouble = 0x04000000; // It's definitely not an Int52 but it's a real number and it's a double.
-static const SpeculatedType SpecDoubleReal = 0x06000000; // It's definitely a non-NaN double.
-static const SpeculatedType SpecDoubleNaN = 0x08000000; // It's definitely a NaN.
-static const SpeculatedType SpecDouble = 0x0e000000; // It's either a non-NaN or a NaN double.
-static const SpeculatedType SpecBytecodeRealNumber = 0x06800000; // It's either an Int32 or a DoubleReal.
-static const SpeculatedType SpecFullRealNumber = 0x07800000; // It's either an Int32 or a DoubleReal, or a Int52.
-static const SpeculatedType SpecBytecodeNumber = 0x0e800000; // It's either an Int32 or a Double.
-static const SpeculatedType SpecFullNumber = 0x0f800000; // It's either an Int32, Int52, or a Double.
-static const SpeculatedType SpecBoolean = 0x10000000; // It's definitely a Boolean.
-static const SpeculatedType SpecOther = 0x20000000; // It's definitely none of the above.
-static const SpeculatedType SpecHeapTop = 0x3effffff; // It can be any of the above, except for SpecInt52.
-static const SpeculatedType SpecEmpty = 0x40000000; // It's definitely an empty value marker.
-static const SpeculatedType SpecBytecodeTop = 0x7effffff; // It can be any of the above, except for SpecInt52.
-static const SpeculatedType SpecFullTop = 0x7fffffff; // It can be any of the above plus anything the DFG chooses.
+static const SpeculatedType SpecDirectArguments = 1u << 12; // It's definitely a DirectArguments object.
+static const SpeculatedType SpecScopedArguments = 1u << 13; // It's definitely a ScopedArguments object.
+static const SpeculatedType SpecStringObject = 1u << 14; // It's definitely a StringObject.
+static const SpeculatedType SpecRegExpObject = 1u << 15; // It's definitely a RegExpObject (and not any subclass of RegExpObject).
+static const SpeculatedType SpecObjectOther = 1u << 16; // It's definitely an object but not JSFinalObject, JSArray, or JSFunction.
+static const SpeculatedType SpecObject = SpecFinalObject | SpecArray | SpecFunction | SpecTypedArrayView | SpecDirectArguments | SpecScopedArguments | SpecStringObject | SpecRegExpObject | SpecObjectOther; // Bitmask used for testing for any kind of object prediction.
+static const SpeculatedType SpecStringIdent = 1u << 17; // It's definitely a JSString, and it's an identifier.
+static const SpeculatedType SpecStringVar = 1u << 18; // It's definitely a JSString, and it's not an identifier.
+static const SpeculatedType SpecString = SpecStringIdent | SpecStringVar; // It's definitely a JSString.
+static const SpeculatedType SpecSymbol = 1u << 19; // It's definitely a Symbol.
+static const SpeculatedType SpecCellOther = 1u << 20; // It's definitely a JSCell but not a subclass of JSObject and definitely not a JSString or a Symbol. FIXME: This shouldn't be part of heap-top or bytecode-top. https://bugs.webkit.org/show_bug.cgi?id=133078
+static const SpeculatedType SpecCell = SpecObject | SpecString | SpecSymbol | SpecCellOther; // It's definitely a JSCell.
+static const SpeculatedType SpecBoolInt32 = 1u << 21; // It's definitely an Int32 with value 0 or 1.
+static const SpeculatedType SpecNonBoolInt32 = 1u << 22; // It's definitely an Int32 with value other than 0 or 1.
+static const SpeculatedType SpecInt32 = SpecBoolInt32 | SpecNonBoolInt32; // It's definitely an Int32.
+static const SpeculatedType SpecInt52 = 1u << 23; // It's definitely an Int52 and we intend it to unbox it.
+static const SpeculatedType SpecMachineInt = SpecInt32 | SpecInt52; // It's something that we can do machine int arithmetic on.
+static const SpeculatedType SpecInt52AsDouble = 1u << 24; // It's definitely an Int52 and it's inside a double.
+static const SpeculatedType SpecInteger = SpecMachineInt | SpecInt52AsDouble; // It's definitely some kind of integer.
+static const SpeculatedType SpecNonIntAsDouble = 1u << 25; // It's definitely not an Int52 but it's a real number and it's a double.
+static const SpeculatedType SpecDoubleReal = SpecNonIntAsDouble | SpecInt52AsDouble; // It's definitely a non-NaN double.
+static const SpeculatedType SpecDoublePureNaN = 1u << 26; // It's definitely a NaN that is safe to tag (i.e. pure).
+static const SpeculatedType SpecDoubleImpureNaN = 1u << 27; // It's definitely a NaN that is unsafe to tag (i.e. impure).
+static const SpeculatedType SpecDoubleNaN = SpecDoublePureNaN | SpecDoubleImpureNaN; // It's definitely some kind of NaN.
+static const SpeculatedType SpecBytecodeDouble = SpecDoubleReal | SpecDoublePureNaN; // It's either a non-NaN or a NaN double, but it's definitely not impure NaN.
+static const SpeculatedType SpecFullDouble = SpecDoubleReal | SpecDoubleNaN; // It's either a non-NaN or a NaN double.
+static const SpeculatedType SpecBytecodeRealNumber = SpecInt32 | SpecDoubleReal; // It's either an Int32 or a DoubleReal.
+static const SpeculatedType SpecFullRealNumber = SpecMachineInt | SpecDoubleReal; // It's either an Int32 or a DoubleReal, or a Int52.
+static const SpeculatedType SpecBytecodeNumber = SpecInt32 | SpecBytecodeDouble; // It's either an Int32 or a Double, and the Double cannot be an impure NaN.
+static const SpeculatedType SpecFullNumber = SpecMachineInt | SpecFullDouble; // It's either an Int32, Int52, or a Double, and the Double can be impure NaN.
+static const SpeculatedType SpecBoolean = 1u << 28; // It's definitely a Boolean.
+static const SpeculatedType SpecOther = 1u << 29; // It's definitely either Null or Undefined.
+static const SpeculatedType SpecMisc = SpecBoolean | SpecOther; // It's definitely either a boolean, Null, or Undefined.
+static const SpeculatedType SpecHeapTop = SpecCell | SpecBytecodeNumber | SpecMisc; // It can be any of the above, except for SpecInt52 and SpecDoubleImpureNaN.
+static const SpeculatedType SpecPrimitive = SpecString | SpecSymbol | SpecBytecodeNumber | SpecMisc; // It's any non-Object JSValue.
+static const SpeculatedType SpecEmpty = 1u << 30; // It's definitely an empty value marker.
+static const SpeculatedType SpecBytecodeTop = SpecHeapTop | SpecEmpty; // It can be any of the above, except for SpecInt52 and SpecDoubleImpureNaN. Corresponds to what could be found in a bytecode local.
+static const SpeculatedType SpecFullTop = SpecBytecodeTop | SpecFullNumber; // It can be anything that bytecode could see plus exotic encodings of numbers.
typedef bool (*SpeculatedTypeChecker)(SpeculatedType);
@@ -94,6 +104,16 @@ inline bool isCellSpeculation(SpeculatedType value)
return !!(value & SpecCell) && !(value & ~SpecCell);
}
+inline bool isCellOrOtherSpeculation(SpeculatedType value)
+{
+ return !!value && !(value & ~(SpecCell | SpecOther));
+}
+
+inline bool isNotCellSpeculation(SpeculatedType value)
+{
+ return !(value & SpecCell) && value;
+}
+
inline bool isObjectSpeculation(SpeculatedType value)
{
return !!(value & SpecObject) && !(value & ~SpecObject);
@@ -119,11 +139,26 @@ inline bool isStringIdentSpeculation(SpeculatedType value)
return value == SpecStringIdent;
}
+inline bool isNotStringVarSpeculation(SpeculatedType value)
+{
+ return !(value & SpecStringVar);
+}
+
inline bool isStringSpeculation(SpeculatedType value)
{
return !!value && (value & SpecString) == value;
}
+inline bool isStringOrOtherSpeculation(SpeculatedType value)
+{
+ return !!value && (value & (SpecString | SpecOther)) == value;
+}
+
+inline bool isSymbolSpeculation(SpeculatedType value)
+{
+ return value == SpecSymbol;
+}
+
inline bool isArraySpeculation(SpeculatedType value)
{
return value == SpecArray;
@@ -179,9 +214,14 @@ inline bool isFloat64ArraySpeculation(SpeculatedType value)
return value == SpecFloat64Array;
}
-inline bool isArgumentsSpeculation(SpeculatedType value)
+inline bool isDirectArgumentsSpeculation(SpeculatedType value)
{
- return !!value && (value & SpecArguments) == value;
+ return value == SpecDirectArguments;
+}
+
+inline bool isScopedArgumentsSpeculation(SpeculatedType value)
+{
+ return value == SpecScopedArguments;
}
inline bool isActionableIntMutableArraySpeculation(SpeculatedType value)
@@ -210,13 +250,14 @@ inline bool isActionableTypedMutableArraySpeculation(SpeculatedType value)
inline bool isActionableMutableArraySpeculation(SpeculatedType value)
{
return isArraySpeculation(value)
- || isArgumentsSpeculation(value)
|| isActionableTypedMutableArraySpeculation(value);
}
inline bool isActionableArraySpeculation(SpeculatedType value)
{
return isStringSpeculation(value)
+ || isDirectArgumentsSpeculation(value)
+ || isScopedArgumentsSpeculation(value)
|| isActionableMutableArraySpeculation(value);
}
@@ -235,39 +276,49 @@ inline bool isStringOrStringObjectSpeculation(SpeculatedType value)
return !!value && !(value & ~(SpecString | SpecStringObject));
}
+inline bool isRegExpObjectSpeculation(SpeculatedType value)
+{
+ return value == SpecRegExpObject;
+}
+
+inline bool isBoolInt32Speculation(SpeculatedType value)
+{
+ return value == SpecBoolInt32;
+}
+
inline bool isInt32Speculation(SpeculatedType value)
{
- return value == SpecInt32;
+ return value && !(value & ~SpecInt32);
}
-inline bool isInt32SpeculationForArithmetic(SpeculatedType value)
+inline bool isInt32OrBooleanSpeculation(SpeculatedType value)
{
- return !(value & (SpecDouble | SpecInt52));
+ return value && !(value & ~(SpecBoolean | SpecInt32));
}
-inline bool isInt32SpeculationExpectingDefined(SpeculatedType value)
+inline bool isInt32SpeculationForArithmetic(SpeculatedType value)
{
- return isInt32Speculation(value & ~SpecOther);
+ return !(value & (SpecFullDouble | SpecInt52));
}
-inline bool isInt52Speculation(SpeculatedType value)
+inline bool isInt32OrBooleanSpeculationForArithmetic(SpeculatedType value)
{
- return value == SpecInt52;
+ return !(value & (SpecFullDouble | SpecInt52));
}
-inline bool isMachineIntSpeculation(SpeculatedType value)
+inline bool isInt32OrBooleanSpeculationExpectingDefined(SpeculatedType value)
{
- return !!value && (value & SpecMachineInt) == value;
+ return isInt32OrBooleanSpeculation(value & ~SpecOther);
}
-inline bool isMachineIntSpeculationExpectingDefined(SpeculatedType value)
+inline bool isInt52Speculation(SpeculatedType value)
{
- return isMachineIntSpeculation(value & ~SpecOther);
+ return value == SpecInt52;
}
-inline bool isMachineIntSpeculationForArithmetic(SpeculatedType value)
+inline bool isMachineIntSpeculation(SpeculatedType value)
{
- return !(value & SpecDouble);
+ return !!value && (value & SpecMachineInt) == value;
}
inline bool isInt52AsDoubleSpeculation(SpeculatedType value)
@@ -287,12 +338,12 @@ inline bool isDoubleRealSpeculation(SpeculatedType value)
inline bool isDoubleSpeculation(SpeculatedType value)
{
- return !!value && (value & SpecDouble) == value;
+ return !!value && (value & SpecFullDouble) == value;
}
inline bool isDoubleSpeculationForArithmetic(SpeculatedType value)
{
- return !!(value & SpecDouble);
+ return !!(value & SpecFullDouble);
}
inline bool isBytecodeRealNumberSpeculation(SpeculatedType value)
@@ -315,14 +366,14 @@ inline bool isFullNumberSpeculation(SpeculatedType value)
return !!(value & SpecFullNumber) && !(value & ~SpecFullNumber);
}
-inline bool isBytecodeNumberSpeculationExpectingDefined(SpeculatedType value)
+inline bool isFullNumberOrBooleanSpeculation(SpeculatedType value)
{
- return isBytecodeNumberSpeculation(value & ~SpecOther);
+ return value && !(value & ~(SpecFullNumber | SpecBoolean));
}
-inline bool isFullNumberSpeculationExpectingDefined(SpeculatedType value)
+inline bool isFullNumberOrBooleanSpeculationExpectingDefined(SpeculatedType value)
{
- return isFullNumberSpeculation(value & ~SpecOther);
+ return isFullNumberOrBooleanSpeculation(value & ~SpecOther);
}
inline bool isBooleanSpeculation(SpeculatedType value)
@@ -335,6 +386,11 @@ inline bool isOtherSpeculation(SpeculatedType value)
return value == SpecOther;
}
+inline bool isMiscSpeculation(SpeculatedType value)
+{
+ return !!value && !(value & ~SpecMisc);
+}
+
inline bool isOtherOrEmptySpeculation(SpeculatedType value)
{
return !value || value == SpecOther;
@@ -345,6 +401,16 @@ inline bool isEmptySpeculation(SpeculatedType value)
return value == SpecEmpty;
}
+inline bool isUntypedSpeculationForArithmetic(SpeculatedType value)
+{
+ return !!(value & ~(SpecFullNumber | SpecBoolean));
+}
+
+inline bool isUntypedSpeculationForBitOps(SpeculatedType value)
+{
+ return !!(value & ~(SpecFullNumber | SpecBoolean | SpecOther));
+}
+
void dumpSpeculation(PrintStream&, SpeculatedType);
void dumpSpeculationAbbreviated(PrintStream&, SpeculatedType);
@@ -382,6 +448,27 @@ SpeculatedType speculationFromValue(JSValue);
SpeculatedType speculationFromTypedArrayType(TypedArrayType); // only valid for typed views.
TypedArrayType typedArrayTypeFromSpeculation(SpeculatedType);
+SpeculatedType leastUpperBoundOfStrictlyEquivalentSpeculations(SpeculatedType);
+
+bool valuesCouldBeEqual(SpeculatedType, SpeculatedType);
+
+// Precise computation of the type of the result of a double computation after we
+// already know that the inputs are doubles and that the result must be a double. Use
+// the closest one of these that applies.
+SpeculatedType typeOfDoubleSum(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleDifference(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleProduct(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleQuotient(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleMinMax(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleNegation(SpeculatedType);
+SpeculatedType typeOfDoubleAbs(SpeculatedType);
+SpeculatedType typeOfDoubleRounding(SpeculatedType);
+SpeculatedType typeOfDoublePow(SpeculatedType, SpeculatedType);
+
+// This conservatively models the behavior of arbitrary double operations.
+SpeculatedType typeOfDoubleBinaryOp(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleUnaryOp(SpeculatedType);
+
} // namespace JSC
#endif // SpeculatedType_h
diff --git a/Source/JavaScriptCore/bytecode/StructureSet.cpp b/Source/JavaScriptCore/bytecode/StructureSet.cpp
new file mode 100644
index 000000000..40fea8da3
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/StructureSet.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "StructureSet.h"
+
+#include "DFGAbstractValue.h"
+#include "TrackedReferences.h"
+#include <wtf/CommaPrinter.h>
+
+namespace JSC {
+
+#if ENABLE(DFG_JIT)
+
+void StructureSet::filter(const DFG::StructureAbstractValue& other)
+{
+ genericFilter([&] (Structure* structure) -> bool { return other.contains(structure); });
+}
+
+void StructureSet::filter(SpeculatedType type)
+{
+ genericFilter(
+ [&] (Structure* structure) -> bool {
+ return type & speculationFromStructure(structure);
+ });
+}
+
+void StructureSet::filterArrayModes(ArrayModes arrayModes)
+{
+ genericFilter(
+ [&] (Structure* structure) -> bool {
+ return arrayModes & arrayModeFromStructure(structure);
+ });
+}
+
+void StructureSet::filter(const DFG::AbstractValue& other)
+{
+ filter(other.m_structure);
+ filter(other.m_type);
+ filterArrayModes(other.m_arrayModes);
+}
+
+#endif // ENABLE(DFG_JIT)
+
+SpeculatedType StructureSet::speculationFromStructures() const
+{
+ SpeculatedType result = SpecNone;
+ forEach(
+ [&] (Structure* structure) {
+ mergeSpeculation(result, speculationFromStructure(structure));
+ });
+ return result;
+}
+
+ArrayModes StructureSet::arrayModesFromStructures() const
+{
+ ArrayModes result = 0;
+ forEach(
+ [&] (Structure* structure) {
+ mergeArrayModes(result, asArrayModes(structure->indexingType()));
+ });
+ return result;
+}
+
+void StructureSet::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ CommaPrinter comma;
+ out.print("[");
+ forEach([&] (Structure* structure) { out.print(comma, inContext(*structure, context)); });
+ out.print("]");
+}
+
+void StructureSet::dump(PrintStream& out) const
+{
+ dumpInContext(out, nullptr);
+}
+
+void StructureSet::validateReferences(const TrackedReferences& trackedReferences) const
+{
+ forEach(
+ [&] (Structure* structure) {
+ trackedReferences.check(structure);
+ });
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/StructureSet.h b/Source/JavaScriptCore/bytecode/StructureSet.h
index 4cdcd01cb..df19ec538 100644
--- a/Source/JavaScriptCore/bytecode/StructureSet.h
+++ b/Source/JavaScriptCore/bytecode/StructureSet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,159 +27,60 @@
#define StructureSet_h
#include "ArrayProfile.h"
+#include "DumpContext.h"
#include "SpeculatedType.h"
#include "Structure.h"
-#include "DumpContext.h"
-#include <wtf/CommaPrinter.h>
-#include <wtf/Vector.h>
+#include <wtf/TinyPtrSet.h>
namespace JSC {
+class TrackedReferences;
+
namespace DFG {
class StructureAbstractValue;
+struct AbstractValue;
}
-class StructureSet {
+class StructureSet : public TinyPtrSet<Structure*> {
public:
- StructureSet() { }
-
- StructureSet(Structure* structure)
- {
- m_structures.append(structure);
- }
-
- void clear()
- {
- m_structures.clear();
- }
-
- void add(Structure* structure)
- {
- ASSERT(!contains(structure));
- m_structures.append(structure);
- }
-
- bool addAll(const StructureSet& other)
- {
- bool changed = false;
- for (size_t i = 0; i < other.size(); ++i) {
- if (contains(other[i]))
- continue;
- add(other[i]);
- changed = true;
- }
- return changed;
- }
-
- void remove(Structure* structure)
- {
- for (size_t i = 0; i < m_structures.size(); ++i) {
- if (m_structures[i] != structure)
- continue;
-
- m_structures[i] = m_structures.last();
- m_structures.removeLast();
- return;
- }
- }
-
- bool contains(Structure* structure) const
- {
- for (size_t i = 0; i < m_structures.size(); ++i) {
- if (m_structures[i] == structure)
- return true;
- }
- return false;
- }
-
- bool containsOnly(Structure* structure) const
- {
- if (size() != 1)
- return false;
- return singletonStructure() == structure;
- }
-
- bool isSubsetOf(const StructureSet& other) const
- {
- for (size_t i = 0; i < m_structures.size(); ++i) {
- if (!other.contains(m_structures[i]))
- return false;
- }
- return true;
- }
-
- bool isSupersetOf(const StructureSet& other) const
- {
- return other.isSubsetOf(*this);
- }
-
- size_t size() const { return m_structures.size(); }
+ // I really want to do this:
+ // using TinyPtrSet::TinyPtrSet;
+ //
+ // But I can't because Windows.
- // Call this if you know that the structure set must consist of exactly
- // one structure.
- Structure* singletonStructure() const
+ StructureSet()
{
- ASSERT(m_structures.size() == 1);
- return m_structures[0];
}
- Structure* at(size_t i) const { return m_structures.at(i); }
-
- Structure* operator[](size_t i) const { return at(i); }
-
- Structure* last() const { return m_structures.last(); }
-
- SpeculatedType speculationFromStructures() const
+ StructureSet(Structure* structure)
+ : TinyPtrSet(structure)
{
- SpeculatedType result = SpecNone;
-
- for (size_t i = 0; i < m_structures.size(); ++i)
- mergeSpeculation(result, speculationFromStructure(m_structures[i]));
-
- return result;
}
- ArrayModes arrayModesFromStructures() const
+ ALWAYS_INLINE StructureSet(const StructureSet& other)
+ : TinyPtrSet(other)
{
- ArrayModes result = 0;
-
- for (size_t i = 0; i < m_structures.size(); ++i)
- mergeArrayModes(result, asArrayModes(m_structures[i]->indexingType()));
-
- return result;
}
- bool operator==(const StructureSet& other) const
+ Structure* onlyStructure() const
{
- if (m_structures.size() != other.m_structures.size())
- return false;
-
- for (size_t i = 0; i < m_structures.size(); ++i) {
- if (!other.contains(m_structures[i]))
- return false;
- }
-
- return true;
+ return onlyEntry();
}
- void dumpInContext(PrintStream& out, DumpContext* context) const
- {
- CommaPrinter comma;
- out.print("[");
- for (size_t i = 0; i < m_structures.size(); ++i)
- out.print(comma, inContext(*m_structures[i], context));
- out.print("]");
- }
+#if ENABLE(DFG_JIT)
+ void filter(const DFG::StructureAbstractValue&);
+ void filter(SpeculatedType);
+ void filterArrayModes(ArrayModes);
+ void filter(const DFG::AbstractValue&);
+#endif // ENABLE(DFG_JIT)
- void dump(PrintStream& out) const
- {
- dumpInContext(out, 0);
- }
+ SpeculatedType speculationFromStructures() const;
+ ArrayModes arrayModesFromStructures() const;
-private:
- friend class DFG::StructureAbstractValue;
+ void dumpInContext(PrintStream&, DumpContext*) const;
+ void dump(PrintStream&) const;
- Vector<Structure*, 2> m_structures;
+ void validateReferences(const TrackedReferences&) const;
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp
index 5cfb3d1e8..d2bdd6a5a 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp
+++ b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,6 +29,7 @@
#if ENABLE(JIT)
#include "CodeBlock.h"
+#include "JSCInlines.h"
#include "StructureStubInfo.h"
namespace JSC {
@@ -36,42 +37,56 @@ namespace JSC {
StructureStubClearingWatchpoint::~StructureStubClearingWatchpoint() { }
StructureStubClearingWatchpoint* StructureStubClearingWatchpoint::push(
+ const ObjectPropertyCondition& key,
WatchpointsOnStructureStubInfo& holder,
- OwnPtr<StructureStubClearingWatchpoint>& head)
+ std::unique_ptr<StructureStubClearingWatchpoint>& head)
{
- head = adoptPtr(new StructureStubClearingWatchpoint(holder, head.release()));
+ head = std::make_unique<StructureStubClearingWatchpoint>(key, holder, WTFMove(head));
return head.get();
}
-void StructureStubClearingWatchpoint::fireInternal()
+void StructureStubClearingWatchpoint::fireInternal(const FireDetail&)
{
- // This will implicitly cause my own demise: stub reset removes all watchpoints.
- // That works, because deleting a watchpoint removes it from the set's list, and
- // the set's list traversal for firing is robust against the set changing.
- m_holder.codeBlock()->resetStub(*m_holder.stubInfo());
+ if (!m_key || !m_key.isWatchable(PropertyCondition::EnsureWatchability)) {
+ // This will implicitly cause my own demise: stub reset removes all watchpoints.
+ // That works, because deleting a watchpoint removes it from the set's list, and
+ // the set's list traversal for firing is robust against the set changing.
+ ConcurrentJITLocker locker(m_holder.codeBlock()->m_lock);
+ m_holder.stubInfo()->reset(m_holder.codeBlock());
+ return;
+ }
+
+ if (m_key.kind() == PropertyCondition::Presence) {
+ // If this was a presence condition, let's watch the property for replacements. This is profitable
+ // for the DFG, which will want the replacement set to be valid in order to do constant folding.
+ VM& vm = *Heap::heap(m_key.object())->vm();
+ m_key.object()->structure()->startWatchingPropertyForReplacements(vm, m_key.offset());
+ }
+
+ m_key.object()->structure()->addTransitionWatchpoint(this);
}
WatchpointsOnStructureStubInfo::~WatchpointsOnStructureStubInfo()
{
}
-StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::addWatchpoint()
+StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::addWatchpoint(const ObjectPropertyCondition& key)
{
- return StructureStubClearingWatchpoint::push(*this, m_head);
+ return StructureStubClearingWatchpoint::push(key, *this, m_head);
}
StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
- RefPtr<WatchpointsOnStructureStubInfo>& holderRef, CodeBlock* codeBlock,
- StructureStubInfo* stubInfo)
+ std::unique_ptr<WatchpointsOnStructureStubInfo>& holderRef, CodeBlock* codeBlock,
+ StructureStubInfo* stubInfo, const ObjectPropertyCondition& key)
{
if (!holderRef)
- holderRef = adoptRef(new WatchpointsOnStructureStubInfo(codeBlock, stubInfo));
+ holderRef = std::make_unique<WatchpointsOnStructureStubInfo>(codeBlock, stubInfo);
else {
ASSERT(holderRef->m_codeBlock == codeBlock);
ASSERT(holderRef->m_stubInfo == stubInfo);
}
- return holderRef->addWatchpoint();
+ return holderRef->addWatchpoint(key);
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h
index 4c6bdecf4..37668c3b9 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h
+++ b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,57 +26,53 @@
#ifndef StructureStubClearingWatchpoint_h
#define StructureStubClearingWatchpoint_h
+#include "ObjectPropertyCondition.h"
#include "Watchpoint.h"
-#include <wtf/Platform.h>
#if ENABLE(JIT)
#include <wtf/FastMalloc.h>
#include <wtf/Noncopyable.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
-#include <wtf/RefCounted.h>
-#include <wtf/RefPtr.h>
namespace JSC {
class CodeBlock;
+class StructureStubInfo;
class WatchpointsOnStructureStubInfo;
-struct StructureStubInfo;
class StructureStubClearingWatchpoint : public Watchpoint {
WTF_MAKE_NONCOPYABLE(StructureStubClearingWatchpoint);
WTF_MAKE_FAST_ALLOCATED;
public:
StructureStubClearingWatchpoint(
- WatchpointsOnStructureStubInfo& holder)
- : m_holder(holder)
- {
- }
-
- StructureStubClearingWatchpoint(
+ const ObjectPropertyCondition& key,
WatchpointsOnStructureStubInfo& holder,
- PassOwnPtr<StructureStubClearingWatchpoint> next)
- : m_holder(holder)
- , m_next(next)
+ std::unique_ptr<StructureStubClearingWatchpoint> next)
+ : m_key(key)
+ , m_holder(holder)
+ , m_next(WTFMove(next))
{
}
virtual ~StructureStubClearingWatchpoint();
static StructureStubClearingWatchpoint* push(
+ const ObjectPropertyCondition& key,
WatchpointsOnStructureStubInfo& holder,
- OwnPtr<StructureStubClearingWatchpoint>& head);
+ std::unique_ptr<StructureStubClearingWatchpoint>& head);
protected:
- virtual void fireInternal() override;
+ virtual void fireInternal(const FireDetail&) override;
private:
+ ObjectPropertyCondition m_key;
WatchpointsOnStructureStubInfo& m_holder;
- OwnPtr<StructureStubClearingWatchpoint> m_next;
+ std::unique_ptr<StructureStubClearingWatchpoint> m_next;
};
-class WatchpointsOnStructureStubInfo : public RefCounted<WatchpointsOnStructureStubInfo> {
+class WatchpointsOnStructureStubInfo {
+ WTF_MAKE_NONCOPYABLE(WatchpointsOnStructureStubInfo);
+ WTF_MAKE_FAST_ALLOCATED;
public:
WatchpointsOnStructureStubInfo(CodeBlock* codeBlock, StructureStubInfo* stubInfo)
: m_codeBlock(codeBlock)
@@ -86,11 +82,11 @@ public:
~WatchpointsOnStructureStubInfo();
- StructureStubClearingWatchpoint* addWatchpoint();
+ StructureStubClearingWatchpoint* addWatchpoint(const ObjectPropertyCondition& key);
static StructureStubClearingWatchpoint* ensureReferenceAndAddWatchpoint(
- RefPtr<WatchpointsOnStructureStubInfo>& holderRef,
- CodeBlock*, StructureStubInfo*);
+ std::unique_ptr<WatchpointsOnStructureStubInfo>& holderRef,
+ CodeBlock*, StructureStubInfo*, const ObjectPropertyCondition& key);
CodeBlock* codeBlock() const { return m_codeBlock; }
StructureStubInfo* stubInfo() const { return m_stubInfo; }
@@ -98,7 +94,7 @@ public:
private:
CodeBlock* m_codeBlock;
StructureStubInfo* m_stubInfo;
- OwnPtr<StructureStubClearingWatchpoint> m_head;
+ std::unique_ptr<StructureStubClearingWatchpoint> m_head;
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
index 91413dfbf..ccd42dadc 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
+++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2014, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,107 +27,169 @@
#include "StructureStubInfo.h"
#include "JSObject.h"
-#include "PolymorphicPutByIdList.h"
-
+#include "PolymorphicAccess.h"
+#include "Repatch.h"
namespace JSC {
#if ENABLE(JIT)
+StructureStubInfo::StructureStubInfo(AccessType accessType)
+ : callSiteIndex(UINT_MAX)
+ , accessType(accessType)
+ , cacheType(CacheType::Unset)
+ , countdown(1) // For a totally clear stub, we'll patch it after the first execution.
+ , repatchCount(0)
+ , numberOfCoolDowns(0)
+ , resetByGC(false)
+ , tookSlowPath(false)
+ , everConsidered(false)
+{
+}
+
+StructureStubInfo::~StructureStubInfo()
+{
+}
+
+void StructureStubInfo::initGetByIdSelf(CodeBlock* codeBlock, Structure* baseObjectStructure, PropertyOffset offset)
+{
+ cacheType = CacheType::GetByIdSelf;
+
+ u.byIdSelf.baseObjectStructure.set(
+ *codeBlock->vm(), codeBlock, baseObjectStructure);
+ u.byIdSelf.offset = offset;
+}
+
+void StructureStubInfo::initPutByIdReplace(CodeBlock* codeBlock, Structure* baseObjectStructure, PropertyOffset offset)
+{
+ cacheType = CacheType::PutByIdReplace;
+
+ u.byIdSelf.baseObjectStructure.set(
+ *codeBlock->vm(), codeBlock, baseObjectStructure);
+ u.byIdSelf.offset = offset;
+}
+
+void StructureStubInfo::initStub(CodeBlock*, std::unique_ptr<PolymorphicAccess> stub)
+{
+ cacheType = CacheType::Stub;
+ u.stub = stub.release();
+}
+
void StructureStubInfo::deref()
{
- switch (accessType) {
- case access_get_by_id_self_list: {
- PolymorphicAccessStructureList* polymorphicStructures = u.getByIdSelfList.structureList;
- delete polymorphicStructures;
+ switch (cacheType) {
+ case CacheType::Stub:
+ delete u.stub;
return;
- }
- case access_get_by_id_proto_list: {
- PolymorphicAccessStructureList* polymorphicStructures = u.getByIdProtoList.structureList;
- delete polymorphicStructures;
+ case CacheType::Unset:
+ case CacheType::GetByIdSelf:
+ case CacheType::PutByIdReplace:
return;
}
- case access_put_by_id_list:
- delete u.putByIdList.list;
- return;
- case access_in_list: {
- PolymorphicAccessStructureList* polymorphicStructures = u.inList.structureList;
- delete polymorphicStructures;
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void StructureStubInfo::aboutToDie()
+{
+ switch (cacheType) {
+ case CacheType::Stub:
+ u.stub->aboutToDie();
return;
- }
- case access_get_by_id_self:
- case access_get_by_id_proto:
- case access_get_by_id_chain:
- case access_put_by_id_transition_normal:
- case access_put_by_id_transition_direct:
- case access_put_by_id_replace:
- case access_unset:
- case access_get_by_id_generic:
- case access_put_by_id_generic:
- case access_get_array_length:
- case access_get_string_length:
- // These instructions don't have to release any allocated memory
+ case CacheType::Unset:
+ case CacheType::GetByIdSelf:
+ case CacheType::PutByIdReplace:
return;
- default:
- RELEASE_ASSERT_NOT_REACHED();
}
+
+ RELEASE_ASSERT_NOT_REACHED();
}
-bool StructureStubInfo::visitWeakReferences()
+MacroAssemblerCodePtr StructureStubInfo::addAccessCase(
+ CodeBlock* codeBlock, const Identifier& ident, std::unique_ptr<AccessCase> accessCase)
{
+ VM& vm = *codeBlock->vm();
+
+ if (!accessCase)
+ return MacroAssemblerCodePtr();
+
+ if (cacheType == CacheType::Stub)
+ return u.stub->regenerateWithCase(vm, codeBlock, *this, ident, WTFMove(accessCase));
+
+ std::unique_ptr<PolymorphicAccess> access = std::make_unique<PolymorphicAccess>();
+
+ Vector<std::unique_ptr<AccessCase>> accessCases;
+
+ std::unique_ptr<AccessCase> previousCase =
+ AccessCase::fromStructureStubInfo(vm, codeBlock, *this);
+ if (previousCase)
+ accessCases.append(WTFMove(previousCase));
+
+ accessCases.append(WTFMove(accessCase));
+
+ MacroAssemblerCodePtr result =
+ access->regenerateWithCases(vm, codeBlock, *this, ident, WTFMove(accessCases));
+
+ if (!result)
+ return MacroAssemblerCodePtr();
+
+ initStub(codeBlock, WTFMove(access));
+ return result;
+}
+
+void StructureStubInfo::reset(CodeBlock* codeBlock)
+{
+ if (cacheType == CacheType::Unset)
+ return;
+
+ if (Options::verboseOSR()) {
+ // This can be called from GC destructor calls, so we don't try to do a full dump
+ // of the CodeBlock.
+ dataLog("Clearing structure cache (kind ", static_cast<int>(accessType), ") in ", RawPointer(codeBlock), ".\n");
+ }
+
switch (accessType) {
- case access_get_by_id_self:
- if (!Heap::isMarked(u.getByIdSelf.baseObjectStructure.get()))
- return false;
+ case AccessType::Get:
+ resetGetByID(codeBlock, *this);
break;
- case access_get_by_id_proto:
- if (!Heap::isMarked(u.getByIdProto.baseObjectStructure.get())
- || !Heap::isMarked(u.getByIdProto.prototypeStructure.get()))
- return false;
+ case AccessType::Put:
+ resetPutByID(codeBlock, *this);
break;
- case access_get_by_id_chain:
- if (!Heap::isMarked(u.getByIdChain.baseObjectStructure.get())
- || !Heap::isMarked(u.getByIdChain.chain.get()))
- return false;
- break;
- case access_get_by_id_self_list: {
- PolymorphicAccessStructureList* polymorphicStructures = u.getByIdSelfList.structureList;
- if (!polymorphicStructures->visitWeak(u.getByIdSelfList.listSize))
- return false;
- break;
- }
- case access_get_by_id_proto_list: {
- PolymorphicAccessStructureList* polymorphicStructures = u.getByIdProtoList.structureList;
- if (!polymorphicStructures->visitWeak(u.getByIdProtoList.listSize))
- return false;
+ case AccessType::In:
+ resetIn(codeBlock, *this);
break;
}
- case access_put_by_id_transition_normal:
- case access_put_by_id_transition_direct:
- if (!Heap::isMarked(u.putByIdTransition.previousStructure.get())
- || !Heap::isMarked(u.putByIdTransition.structure.get())
- || !Heap::isMarked(u.putByIdTransition.chain.get()))
- return false;
- break;
- case access_put_by_id_replace:
- if (!Heap::isMarked(u.putByIdReplace.baseObjectStructure.get()))
- return false;
- break;
- case access_put_by_id_list:
- if (!u.putByIdList.list->visitWeak())
- return false;
+
+ deref();
+ cacheType = CacheType::Unset;
+}
+
+void StructureStubInfo::visitWeakReferences(CodeBlock* codeBlock)
+{
+ VM& vm = *codeBlock->vm();
+
+ switch (cacheType) {
+ case CacheType::GetByIdSelf:
+ case CacheType::PutByIdReplace:
+ if (Heap::isMarked(u.byIdSelf.baseObjectStructure.get()))
+ return;
break;
- case access_in_list: {
- PolymorphicAccessStructureList* polymorphicStructures = u.inList.structureList;
- if (!polymorphicStructures->visitWeak(u.inList.listSize))
- return false;
+ case CacheType::Stub:
+ if (u.stub->visitWeak(vm))
+ return;
break;
- }
default:
- // The rest of the instructions don't require references, so there is no need to
- // do anything.
- break;
+ return;
}
- return true;
+
+ reset(codeBlock);
+ resetByGC = true;
+}
+
+bool StructureStubInfo::containsPC(void* pc) const
+{
+ if (cacheType != CacheType::Stub)
+ return false;
+ return u.stub->containsPC(pc);
}
#endif
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.h b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
index 5463f3e95..40d362d44 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubInfo.h
+++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,205 +26,121 @@
#ifndef StructureStubInfo_h
#define StructureStubInfo_h
-#include <wtf/Platform.h>
-
#include "CodeOrigin.h"
#include "Instruction.h"
#include "JITStubRoutine.h"
#include "MacroAssembler.h"
+#include "ObjectPropertyConditionSet.h"
#include "Opcode.h"
-#include "PolymorphicAccessStructureList.h"
+#include "Options.h"
+#include "PolymorphicAccess.h"
#include "RegisterSet.h"
#include "Structure.h"
#include "StructureStubClearingWatchpoint.h"
-#include <wtf/OwnPtr.h>
namespace JSC {
#if ENABLE(JIT)
-class PolymorphicPutByIdList;
+class PolymorphicAccess;
-enum AccessType {
- access_get_by_id_self,
- access_get_by_id_proto,
- access_get_by_id_chain,
- access_get_by_id_self_list,
- access_get_by_id_proto_list,
- access_put_by_id_transition_normal,
- access_put_by_id_transition_direct,
- access_put_by_id_replace,
- access_put_by_id_list,
- access_unset,
- access_get_by_id_generic,
- access_put_by_id_generic,
- access_get_array_length,
- access_get_string_length,
- access_in_list
+enum class AccessType : int8_t {
+ Get,
+ Put,
+ In
};
-inline bool isGetByIdAccess(AccessType accessType)
-{
- switch (accessType) {
- case access_get_by_id_self:
- case access_get_by_id_proto:
- case access_get_by_id_chain:
- case access_get_by_id_self_list:
- case access_get_by_id_proto_list:
- case access_get_by_id_generic:
- case access_get_array_length:
- case access_get_string_length:
- return true;
- default:
- return false;
- }
-}
-
-inline bool isPutByIdAccess(AccessType accessType)
-{
- switch (accessType) {
- case access_put_by_id_transition_normal:
- case access_put_by_id_transition_direct:
- case access_put_by_id_replace:
- case access_put_by_id_list:
- case access_put_by_id_generic:
- return true;
- default:
- return false;
- }
-}
-
-inline bool isInAccess(AccessType accessType)
-{
- switch (accessType) {
- case access_in_list:
- return true;
- default:
- return false;
- }
-}
+enum class CacheType : int8_t {
+ Unset,
+ GetByIdSelf,
+ PutByIdReplace,
+ Stub
+};
-struct StructureStubInfo {
- StructureStubInfo()
- : accessType(access_unset)
- , seen(false)
- , resetByGC(false)
- {
- }
+class StructureStubInfo {
+ WTF_MAKE_NONCOPYABLE(StructureStubInfo);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ StructureStubInfo(AccessType);
+ ~StructureStubInfo();
- void initGetByIdSelf(VM& vm, JSCell* owner, Structure* baseObjectStructure)
- {
- accessType = access_get_by_id_self;
+ void initGetByIdSelf(CodeBlock*, Structure* baseObjectStructure, PropertyOffset);
+ void initPutByIdReplace(CodeBlock*, Structure* baseObjectStructure, PropertyOffset);
+ void initStub(CodeBlock*, std::unique_ptr<PolymorphicAccess>);
- u.getByIdSelf.baseObjectStructure.set(vm, owner, baseObjectStructure);
- }
+ MacroAssemblerCodePtr addAccessCase(
+ CodeBlock*, const Identifier&, std::unique_ptr<AccessCase>);
- void initGetByIdChain(VM& vm, JSCell* owner, Structure* baseObjectStructure, StructureChain* chain, unsigned count, bool isDirect)
- {
- accessType = access_get_by_id_chain;
+ void reset(CodeBlock*);
- u.getByIdChain.baseObjectStructure.set(vm, owner, baseObjectStructure);
- u.getByIdChain.chain.set(vm, owner, chain);
- u.getByIdChain.count = count;
- u.getByIdChain.isDirect = isDirect;
- }
+ void deref();
+ void aboutToDie();
- void initGetByIdSelfList(PolymorphicAccessStructureList* structureList, int listSize, bool didSelfPatching = false)
+ // Check if the stub has weak references that are dead. If it does, then it resets itself,
+ // either entirely or just enough to ensure that those dead pointers don't get used anymore.
+ void visitWeakReferences(CodeBlock*);
+
+ ALWAYS_INLINE bool considerCaching()
{
- accessType = access_get_by_id_self_list;
-
- u.getByIdSelfList.structureList = structureList;
- u.getByIdSelfList.listSize = listSize;
- u.getByIdSelfList.didSelfPatching = didSelfPatching;
+ everConsidered = true;
+ if (!countdown) {
+ // Check if we have been doing repatching too frequently. If so, then we should cool off
+ // for a while.
+ willRepatch();
+ if (repatchCount > Options::repatchCountForCoolDown()) {
+ // We've been repatching too much, so don't do it now.
+ repatchCount = 0;
+ // The amount of time we require for cool-down depends on the number of times we've
+ // had to cool down in the past. The relationship is exponential. The max value we
+ // allow here is 2^256 - 2, since the slow paths may increment the count to indicate
+ // that they'd like to temporarily skip patching just this once.
+ countdown = WTF::leftShiftWithSaturation(
+ static_cast<uint8_t>(Options::initialCoolDownCount()),
+ numberOfCoolDowns,
+ static_cast<uint8_t>(std::numeric_limits<uint8_t>::max() - 1));
+ willCoolDown();
+ return false;
+ }
+ return true;
+ }
+ countdown--;
+ return false;
}
- void initGetByIdProtoList(PolymorphicAccessStructureList* structureList, int listSize)
+ ALWAYS_INLINE void willRepatch()
{
- accessType = access_get_by_id_proto_list;
-
- u.getByIdProtoList.structureList = structureList;
- u.getByIdProtoList.listSize = listSize;
+ WTF::incrementWithSaturation(repatchCount);
}
- // PutById*
-
- void initPutByIdTransition(VM& vm, JSCell* owner, Structure* previousStructure, Structure* structure, StructureChain* chain, bool isDirect)
+ ALWAYS_INLINE void willCoolDown()
{
- if (isDirect)
- accessType = access_put_by_id_transition_direct;
- else
- accessType = access_put_by_id_transition_normal;
-
- u.putByIdTransition.previousStructure.set(vm, owner, previousStructure);
- u.putByIdTransition.structure.set(vm, owner, structure);
- u.putByIdTransition.chain.set(vm, owner, chain);
+ WTF::incrementWithSaturation(numberOfCoolDowns);
}
- void initPutByIdReplace(VM& vm, JSCell* owner, Structure* baseObjectStructure)
- {
- accessType = access_put_by_id_replace;
-
- u.putByIdReplace.baseObjectStructure.set(vm, owner, baseObjectStructure);
- }
-
- void initPutByIdList(PolymorphicPutByIdList* list)
- {
- accessType = access_put_by_id_list;
- u.putByIdList.list = list;
- }
-
- void initInList(PolymorphicAccessStructureList* list, int listSize)
- {
- accessType = access_in_list;
- u.inList.structureList = list;
- u.inList.listSize = listSize;
- }
-
- void reset()
- {
- deref();
- accessType = access_unset;
- stubRoutine.clear();
- watchpoints.clear();
- }
+ CodeLocationCall callReturnLocation;
- void deref();
+ CodeOrigin codeOrigin;
+ CallSiteIndex callSiteIndex;
- bool visitWeakReferences();
-
- bool seenOnce()
- {
- return seen;
- }
+ bool containsPC(void* pc) const;
- void setSeen()
- {
- seen = true;
- }
-
- StructureStubClearingWatchpoint* addWatchpoint(CodeBlock* codeBlock)
- {
- return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
- watchpoints, codeBlock, this);
- }
-
- int8_t accessType;
- bool seen : 1;
- bool resetByGC : 1;
-
- CodeOrigin codeOrigin;
+ union {
+ struct {
+ WriteBarrierBase<Structure> baseObjectStructure;
+ PropertyOffset offset;
+ } byIdSelf;
+ PolymorphicAccess* stub;
+ } u;
struct {
- int8_t registersFlushed;
- int8_t callFrameRegister;
int8_t baseGPR;
#if USE(JSVALUE32_64)
int8_t valueTagGPR;
+ int8_t baseTagGPR;
#endif
int8_t valueGPR;
RegisterSet usedRegisters;
int32_t deltaCallToDone;
- int32_t deltaCallToStorageLoad;
int32_t deltaCallToJump;
int32_t deltaCallToSlowCase;
int32_t deltaCheckImmToCall;
@@ -236,53 +152,14 @@ struct StructureStubInfo {
#endif
} patch;
- union {
- struct {
- // It would be unwise to put anything here, as it will surely be overwritten.
- } unset;
- struct {
- WriteBarrierBase<Structure> baseObjectStructure;
- } getByIdSelf;
- struct {
- WriteBarrierBase<Structure> baseObjectStructure;
- WriteBarrierBase<Structure> prototypeStructure;
- bool isDirect;
- } getByIdProto;
- struct {
- WriteBarrierBase<Structure> baseObjectStructure;
- WriteBarrierBase<StructureChain> chain;
- unsigned count : 31;
- bool isDirect : 1;
- } getByIdChain;
- struct {
- PolymorphicAccessStructureList* structureList;
- int listSize : 31;
- bool didSelfPatching : 1;
- } getByIdSelfList;
- struct {
- PolymorphicAccessStructureList* structureList;
- int listSize;
- } getByIdProtoList;
- struct {
- WriteBarrierBase<Structure> previousStructure;
- WriteBarrierBase<Structure> structure;
- WriteBarrierBase<StructureChain> chain;
- } putByIdTransition;
- struct {
- WriteBarrierBase<Structure> baseObjectStructure;
- } putByIdReplace;
- struct {
- PolymorphicPutByIdList* list;
- } putByIdList;
- struct {
- PolymorphicAccessStructureList* structureList;
- int listSize;
- } inList;
- } u;
-
- RefPtr<JITStubRoutine> stubRoutine;
- CodeLocationCall callReturnLocation;
- RefPtr<WatchpointsOnStructureStubInfo> watchpoints;
+ AccessType accessType;
+ CacheType cacheType;
+ uint8_t countdown; // We repatch only when this is zero. If not zero, we decrement.
+ uint8_t repatchCount;
+ uint8_t numberOfCoolDowns;
+ bool resetByGC : 1;
+ bool tookSlowPath : 1;
+ bool everConsidered : 1;
};
inline CodeOrigin getStructureStubInfoCodeOrigin(StructureStubInfo& structureStubInfo)
@@ -290,7 +167,7 @@ inline CodeOrigin getStructureStubInfoCodeOrigin(StructureStubInfo& structureStu
return structureStubInfo.codeOrigin;
}
-typedef HashMap<CodeOrigin, StructureStubInfo*> StubInfoMap;
+typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
#else
diff --git a/Source/JavaScriptCore/bytecode/ToThisStatus.cpp b/Source/JavaScriptCore/bytecode/ToThisStatus.cpp
new file mode 100644
index 000000000..23d1e0800
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ToThisStatus.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ToThisStatus.h"
+
+namespace JSC {
+
+ToThisStatus merge(ToThisStatus a, ToThisStatus b)
+{
+ switch (a) {
+ case ToThisOK:
+ return b;
+ case ToThisConflicted:
+ return ToThisConflicted;
+ case ToThisClearedByGC:
+ if (b == ToThisConflicted)
+ return ToThisConflicted;
+ return ToThisClearedByGC;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return ToThisConflicted;
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, ToThisStatus status)
+{
+ switch (status) {
+ case ToThisOK:
+ out.print("OK");
+ return;
+ case ToThisConflicted:
+ out.print("Conflicted");
+ return;
+ case ToThisClearedByGC:
+ out.print("ClearedByGC");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/bytecode/ToThisStatus.h b/Source/JavaScriptCore/bytecode/ToThisStatus.h
new file mode 100644
index 000000000..55d707c0f
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ToThisStatus.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ToThisStatus_h
+#define ToThisStatus_h
+
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+enum ToThisStatus {
+ ToThisOK,
+ ToThisConflicted,
+ ToThisClearedByGC
+};
+
+ToThisStatus merge(ToThisStatus, ToThisStatus);
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::ToThisStatus);
+
+} // namespace WTF
+
+#endif // ToThisStatus_h
+
diff --git a/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.cpp b/Source/JavaScriptCore/bytecode/TrackedReferences.cpp
index edf8e228d..d98fa9759 100644
--- a/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.cpp
+++ b/Source/JavaScriptCore/bytecode/TrackedReferences.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,38 +24,57 @@
*/
#include "config.h"
-#include "ProfiledCodeBlockJettisoningWatchpoint.h"
+#include "TrackedReferences.h"
-#include "CodeBlock.h"
-#include "DFGCommon.h"
-#include "DFGExitProfile.h"
+#include "JSCInlines.h"
+#include <wtf/CommaPrinter.h>
namespace JSC {
-void ProfiledCodeBlockJettisoningWatchpoint::fireInternal()
+TrackedReferences::TrackedReferences()
{
- if (DFG::shouldShowDisassembly()) {
- dataLog(
- "Firing profiled watchpoint ", RawPointer(this), " on ", *m_codeBlock, " due to ",
- m_exitKind, " at ", m_codeOrigin, "\n");
- }
-
- // FIXME: Maybe this should call alternative().
- // https://bugs.webkit.org/show_bug.cgi?id=123677
- CodeBlock* machineBaselineCodeBlock = m_codeBlock->baselineAlternative();
- CodeBlock* sourceBaselineCodeBlock =
- baselineCodeBlockForOriginAndBaselineCodeBlock(
- m_codeOrigin, machineBaselineCodeBlock);
-
- if (sourceBaselineCodeBlock) {
- sourceBaselineCodeBlock->addFrequentExitSite(
- DFG::FrequentExitSite(m_codeOrigin.bytecodeIndex, m_exitKind));
- }
+}
+
+TrackedReferences::~TrackedReferences()
+{
+}
+
+void TrackedReferences::add(JSCell* cell)
+{
+ if (cell)
+ m_references.add(cell);
+}
+
+void TrackedReferences::add(JSValue value)
+{
+ if (value.isCell())
+ add(value.asCell());
+}
+
+void TrackedReferences::check(JSCell* cell) const
+{
+ if (!cell)
+ return;
- m_codeBlock->jettison(CountReoptimization);
+ if (m_references.contains(cell))
+ return;
- if (isOnList())
- remove();
+ dataLog("Found untracked reference: ", RawPointer(cell), "\n");
+ dataLog("All tracked references: ", *this, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void TrackedReferences::check(JSValue value) const
+{
+ if (value.isCell())
+ check(value.asCell());
+}
+
+void TrackedReferences::dump(PrintStream& out) const
+{
+ CommaPrinter comma;
+ for (JSCell* cell : m_references)
+ out.print(comma, RawPointer(cell));
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/TrackedReferences.h b/Source/JavaScriptCore/bytecode/TrackedReferences.h
new file mode 100644
index 000000000..cc15e1ee7
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/TrackedReferences.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TrackedReferences_h
+#define TrackedReferences_h
+
+#include "JSCJSValue.h"
+#include "JSCell.h"
+#include <wtf/HashSet.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+class TrackedReferences {
+public:
+ TrackedReferences();
+ ~TrackedReferences();
+
+ void add(JSCell*);
+ void add(JSValue);
+
+ void check(JSCell*) const;
+ void check(JSValue) const;
+
+ void dump(PrintStream&) const;
+
+private:
+ HashSet<JSCell*> m_references;
+};
+
+} // namespace JSC
+
+#endif // TrackedReferences_h
+
diff --git a/Source/JavaScriptCore/bytecode/TypeLocation.h b/Source/JavaScriptCore/bytecode/TypeLocation.h
new file mode 100644
index 000000000..ec07656ee
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/TypeLocation.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TypeLocation_h
+#define TypeLocation_h
+
+#include "TypeSet.h"
+
+namespace JSC {
+
+enum TypeProfilerGlobalIDFlags {
+ TypeProfilerNeedsUniqueIDGeneration = -1,
+ TypeProfilerNoGlobalIDExists = -2,
+ TypeProfilerReturnStatement = -3
+};
+
+typedef intptr_t GlobalVariableID;
+
+class TypeLocation {
+public:
+ TypeLocation()
+ : m_lastSeenType(TypeNothing)
+ , m_divotForFunctionOffsetIfReturnStatement(UINT_MAX)
+ , m_instructionTypeSet(TypeSet::create())
+ , m_globalTypeSet(nullptr)
+ {
+ }
+
+ GlobalVariableID m_globalVariableID;
+ RuntimeType m_lastSeenType;
+ intptr_t m_sourceID;
+ unsigned m_divotStart;
+ unsigned m_divotEnd;
+ unsigned m_divotForFunctionOffsetIfReturnStatement;
+ RefPtr<TypeSet> m_instructionTypeSet;
+ RefPtr<TypeSet> m_globalTypeSet;
+};
+
+} //namespace JSC
+
+#endif //TypeLocation_h
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
index 1dfb5ac6a..83d9054a3 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All Rights Reserved.
+ * Copyright (C) 2012, 2013, 2015 Apple Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,8 +31,10 @@
#include "ClassInfo.h"
#include "CodeCache.h"
#include "Executable.h"
+#include "ExecutableInfo.h"
+#include "FunctionOverrides.h"
#include "JSString.h"
-#include "Operations.h"
+#include "JSCInlines.h"
#include "Parser.h"
#include "SourceProvider.h"
#include "Structure.h"
@@ -42,177 +44,33 @@
namespace JSC {
-const ClassInfo UnlinkedFunctionExecutable::s_info = { "UnlinkedFunctionExecutable", 0, 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionExecutable) };
-const ClassInfo UnlinkedCodeBlock::s_info = { "UnlinkedCodeBlock", 0, 0, 0, CREATE_METHOD_TABLE(UnlinkedCodeBlock) };
-const ClassInfo UnlinkedGlobalCodeBlock::s_info = { "UnlinkedGlobalCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedGlobalCodeBlock) };
-const ClassInfo UnlinkedProgramCodeBlock::s_info = { "UnlinkedProgramCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedProgramCodeBlock) };
-const ClassInfo UnlinkedEvalCodeBlock::s_info = { "UnlinkedEvalCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedEvalCodeBlock) };
-const ClassInfo UnlinkedFunctionCodeBlock::s_info = { "UnlinkedFunctionCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionCodeBlock) };
-
-static UnlinkedFunctionCodeBlock* generateFunctionCodeBlock(VM& vm, UnlinkedFunctionExecutable* executable, const SourceCode& source, CodeSpecializationKind kind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error)
-{
- RefPtr<FunctionBodyNode> body = parse<FunctionBodyNode>(&vm, source, executable->parameters(), executable->name(), executable->isInStrictContext() ? JSParseStrict : JSParseNormal, JSParseFunctionCode, error);
-
- if (!body) {
- ASSERT(error.m_type != ParserError::ErrorNone);
- return 0;
- }
-
- if (executable->forceUsesArguments())
- body->setUsesArguments();
- body->finishParsing(executable->parameters(), executable->name(), executable->functionNameIsInScopeToggle());
- executable->recordParse(body->features(), body->hasCapturedVariables());
-
- UnlinkedFunctionCodeBlock* result = UnlinkedFunctionCodeBlock::create(&vm, FunctionCode, ExecutableInfo(body->needsActivation(), body->usesEval(), body->isStrictMode(), kind == CodeForConstruct));
- OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(vm, body.get(), result, debuggerMode, profilerMode)));
- error = generator->generate();
- body->destroyData();
- if (error.m_type != ParserError::ErrorNone)
- return 0;
- return result;
-}
-
-unsigned UnlinkedCodeBlock::addOrFindConstant(JSValue v)
-{
- unsigned numberOfConstants = numberOfConstantRegisters();
- for (unsigned i = 0; i < numberOfConstants; ++i) {
- if (getConstant(FirstConstantRegisterIndex + i) == v)
- return i;
- }
- return addConstant(v);
-}
-
-UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM* vm, Structure* structure, const SourceCode& source, FunctionBodyNode* node, bool isFromGlobalCode)
- : Base(*vm, structure)
- , m_numCapturedVariables(node->capturedVariableCount())
- , m_forceUsesArguments(node->usesArguments())
- , m_isInStrictContext(node->isStrictMode())
- , m_hasCapturedVariables(node->hasCapturedVariables())
- , m_isFromGlobalCode(isFromGlobalCode)
- , m_name(node->ident())
- , m_inferredName(node->inferredName())
- , m_parameters(node->parameters())
- , m_firstLineOffset(node->firstLine() - source.firstLine())
- , m_lineCount(node->lastLine() - node->firstLine())
- , m_unlinkedFunctionNameStart(node->functionNameStart() - source.startOffset())
- , m_unlinkedBodyStartColumn(node->startColumn())
- , m_unlinkedBodyEndColumn(m_lineCount ? node->endColumn() : node->endColumn() - node->startColumn())
- , m_startOffset(node->source().startOffset() - source.startOffset())
- , m_sourceLength(node->source().length())
- , m_features(node->features())
- , m_functionNameIsInScopeToggle(node->functionNameIsInScopeToggle())
-{
-}
-
-size_t UnlinkedFunctionExecutable::parameterCount() const
-{
- return m_parameters->size();
-}
-
-void UnlinkedFunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor)
-{
- UnlinkedFunctionExecutable* thisObject = jsCast<UnlinkedFunctionExecutable*>(cell);
- ASSERT_GC_OBJECT_INHERITS(thisObject, info());
- COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag);
- ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren());
- Base::visitChildren(thisObject, visitor);
- visitor.append(&thisObject->m_codeBlockForCall);
- visitor.append(&thisObject->m_codeBlockForConstruct);
- visitor.append(&thisObject->m_nameValue);
- visitor.append(&thisObject->m_symbolTableForCall);
- visitor.append(&thisObject->m_symbolTableForConstruct);
-}
-
-FunctionExecutable* UnlinkedFunctionExecutable::link(VM& vm, const SourceCode& source, size_t lineOffset, size_t sourceOffset)
-{
- unsigned firstLine = lineOffset + m_firstLineOffset;
- unsigned startOffset = sourceOffset + m_startOffset;
- bool startColumnIsOnFirstSourceLine = !m_firstLineOffset;
- unsigned startColumn = m_unlinkedBodyStartColumn + (startColumnIsOnFirstSourceLine ? source.startColumn() : 1);
- bool endColumnIsOnStartLine = !m_lineCount;
- unsigned endColumn = m_unlinkedBodyEndColumn + (endColumnIsOnStartLine ? startColumn : 1);
- SourceCode code(source.provider(), startOffset, startOffset + m_sourceLength, firstLine, startColumn);
- return FunctionExecutable::create(vm, code, this, firstLine, firstLine + m_lineCount, startColumn, endColumn);
-}
-
-UnlinkedFunctionExecutable* UnlinkedFunctionExecutable::fromGlobalCode(const Identifier& name, ExecState* exec, Debugger*, const SourceCode& source, JSObject** exception)
-{
- ParserError error;
- VM& vm = exec->vm();
- CodeCache* codeCache = vm.codeCache();
- UnlinkedFunctionExecutable* executable = codeCache->getFunctionExecutableFromGlobalCode(vm, name, source, error);
-
- if (exec->lexicalGlobalObject()->hasDebugger())
- exec->lexicalGlobalObject()->debugger()->sourceParsed(exec, source.provider(), error.m_line, error.m_message);
-
- if (error.m_type != ParserError::ErrorNone) {
- *exception = error.toErrorObject(exec->lexicalGlobalObject(), source);
- return 0;
- }
-
- return executable;
-}
-
-UnlinkedFunctionCodeBlock* UnlinkedFunctionExecutable::codeBlockFor(VM& vm, const SourceCode& source, CodeSpecializationKind specializationKind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error)
-{
- switch (specializationKind) {
- case CodeForCall:
- if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForCall.get())
- return codeBlock;
- break;
- case CodeForConstruct:
- if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForConstruct.get())
- return codeBlock;
- break;
- }
-
- UnlinkedFunctionCodeBlock* result = generateFunctionCodeBlock(vm, this, source, specializationKind, debuggerMode, profilerMode, error);
-
- if (error.m_type != ParserError::ErrorNone)
- return 0;
-
- switch (specializationKind) {
- case CodeForCall:
- m_codeBlockForCall.set(vm, this, result);
- m_symbolTableForCall.set(vm, this, result->symbolTable());
- break;
- case CodeForConstruct:
- m_codeBlockForConstruct.set(vm, this, result);
- m_symbolTableForConstruct.set(vm, this, result->symbolTable());
- break;
- }
- return result;
-}
-
-String UnlinkedFunctionExecutable::paramString() const
-{
- FunctionParameters& parameters = *m_parameters;
- StringBuilder builder;
- for (size_t pos = 0; pos < parameters.size(); ++pos) {
- if (!builder.isEmpty())
- builder.appendLiteral(", ");
- parameters.at(pos)->toString(builder);
- }
- return builder.toString();
-}
+const ClassInfo UnlinkedCodeBlock::s_info = { "UnlinkedCodeBlock", 0, 0, CREATE_METHOD_TABLE(UnlinkedCodeBlock) };
+const ClassInfo UnlinkedGlobalCodeBlock::s_info = { "UnlinkedGlobalCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedGlobalCodeBlock) };
+const ClassInfo UnlinkedProgramCodeBlock::s_info = { "UnlinkedProgramCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedProgramCodeBlock) };
+const ClassInfo UnlinkedModuleProgramCodeBlock::s_info = { "UnlinkedModuleProgramCodeBlock", &Base::s_info, nullptr, CREATE_METHOD_TABLE(UnlinkedModuleProgramCodeBlock) };
+const ClassInfo UnlinkedEvalCodeBlock::s_info = { "UnlinkedEvalCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedEvalCodeBlock) };
+const ClassInfo UnlinkedFunctionCodeBlock::s_info = { "UnlinkedFunctionCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedFunctionCodeBlock) };
UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info)
: Base(*vm, structure)
, m_numVars(0)
- , m_numCalleeRegisters(0)
+ , m_numCalleeLocals(0)
, m_numParameters(0)
- , m_vm(vm)
- , m_argumentsRegister(VirtualRegister())
, m_globalObjectRegister(VirtualRegister())
- , m_needsFullScopeChain(info.m_needsActivation)
- , m_usesEval(info.m_usesEval)
- , m_isNumericCompareFunction(false)
- , m_isStrictMode(info.m_isStrictMode)
- , m_isConstructor(info.m_isConstructor)
+ , m_usesEval(info.usesEval())
+ , m_isStrictMode(info.isStrictMode())
+ , m_isConstructor(info.isConstructor())
, m_hasCapturedVariables(false)
+ , m_isBuiltinFunction(info.isBuiltinFunction())
+ , m_constructorKind(static_cast<unsigned>(info.constructorKind()))
+ , m_superBinding(static_cast<unsigned>(info.superBinding()))
+ , m_derivedContextType(static_cast<unsigned>(info.derivedContextType()))
+ , m_isArrowFunctionContext(info.isArrowFunctionContext())
+ , m_isClassContext(info.isClassContext())
, m_firstLine(0)
, m_lineCount(0)
, m_endColumn(UINT_MAX)
+ , m_parseMode(info.parseMode())
, m_features(0)
, m_codeType(codeType)
, m_arrayProfileCount(0)
@@ -220,32 +78,42 @@ UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType code
, m_objectAllocationProfileCount(0)
, m_valueProfileCount(0)
, m_llintCallLinkInfoCount(0)
-#if ENABLE(BYTECODE_COMMENTS)
- , m_bytecodeCommentIterator(0)
-#endif
{
+ for (auto& constantRegisterIndex : m_linkTimeConstants)
+ constantRegisterIndex = 0;
+ ASSERT(m_constructorKind == static_cast<unsigned>(info.constructorKind()));
+}
+VM* UnlinkedCodeBlock::vm() const
+{
+ return MarkedBlock::blockFor(this)->vm();
}
void UnlinkedCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell);
ASSERT_GC_OBJECT_INHERITS(thisObject, info());
- COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag);
- ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren());
Base::visitChildren(thisObject, visitor);
- visitor.append(&thisObject->m_symbolTable);
for (FunctionExpressionVector::iterator ptr = thisObject->m_functionDecls.begin(), end = thisObject->m_functionDecls.end(); ptr != end; ++ptr)
visitor.append(ptr);
for (FunctionExpressionVector::iterator ptr = thisObject->m_functionExprs.begin(), end = thisObject->m_functionExprs.end(); ptr != end; ++ptr)
visitor.append(ptr);
visitor.appendValues(thisObject->m_constantRegisters.data(), thisObject->m_constantRegisters.size());
+ if (thisObject->m_unlinkedInstructions)
+ visitor.reportExtraMemoryVisited(thisObject->m_unlinkedInstructions->sizeInBytes());
if (thisObject->m_rareData) {
for (size_t i = 0, end = thisObject->m_rareData->m_regexps.size(); i != end; i++)
visitor.append(&thisObject->m_rareData->m_regexps[i]);
}
}
+size_t UnlinkedCodeBlock::estimatedSize(JSCell* cell)
+{
+ UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell);
+ size_t extraSize = thisObject->m_unlinkedInstructions ? thisObject->m_unlinkedInstructions->sizeInBytes() : 0;
+ return Base::estimatedSize(cell) + extraSize;
+}
+
int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
{
ASSERT(bytecodeOffset < instructions().count());
@@ -404,15 +272,53 @@ void UnlinkedCodeBlock::addExpressionInfo(unsigned instructionOffset,
m_expressionInfo.append(info);
}
+bool UnlinkedCodeBlock::typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot)
+{
+ static const bool verbose = false;
+ if (!m_rareData) {
+ if (verbose)
+ dataLogF("Don't have assignment info for offset:%u\n", bytecodeOffset);
+ startDivot = UINT_MAX;
+ endDivot = UINT_MAX;
+ return false;
+ }
+
+ auto iter = m_rareData->m_typeProfilerInfoMap.find(bytecodeOffset);
+ if (iter == m_rareData->m_typeProfilerInfoMap.end()) {
+ if (verbose)
+ dataLogF("Don't have assignment info for offset:%u\n", bytecodeOffset);
+ startDivot = UINT_MAX;
+ endDivot = UINT_MAX;
+ return false;
+ }
+
+ RareData::TypeProfilerExpressionRange& range = iter->value;
+ startDivot = range.m_startDivot;
+ endDivot = range.m_endDivot;
+ return true;
+}
+
+void UnlinkedCodeBlock::addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot)
+{
+ createRareDataIfNecessary();
+ RareData::TypeProfilerExpressionRange range;
+ range.m_startDivot = startDivot;
+ range.m_endDivot = endDivot;
+ m_rareData->m_typeProfilerInfoMap.set(instructionOffset, range);
+}
+
void UnlinkedProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
UnlinkedProgramCodeBlock* thisObject = jsCast<UnlinkedProgramCodeBlock*>(cell);
ASSERT_GC_OBJECT_INHERITS(thisObject, info());
- COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag);
- ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren());
Base::visitChildren(thisObject, visitor);
- for (size_t i = 0, end = thisObject->m_functionDeclarations.size(); i != end; i++)
- visitor.append(&thisObject->m_functionDeclarations[i].second);
+}
+
+void UnlinkedModuleProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+ UnlinkedModuleProgramCodeBlock* thisObject = jsCast<UnlinkedModuleProgramCodeBlock*>(cell);
+ ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+ Base::visitChildren(thisObject, visitor);
}
UnlinkedCodeBlock::~UnlinkedCodeBlock()
@@ -424,6 +330,11 @@ void UnlinkedProgramCodeBlock::destroy(JSCell* cell)
jsCast<UnlinkedProgramCodeBlock*>(cell)->~UnlinkedProgramCodeBlock();
}
+void UnlinkedModuleProgramCodeBlock::destroy(JSCell* cell)
+{
+ jsCast<UnlinkedModuleProgramCodeBlock*>(cell)->~UnlinkedModuleProgramCodeBlock();
+}
+
void UnlinkedEvalCodeBlock::destroy(JSCell* cell)
{
jsCast<UnlinkedEvalCodeBlock*>(cell)->~UnlinkedEvalCodeBlock();
@@ -441,7 +352,9 @@ void UnlinkedFunctionExecutable::destroy(JSCell* cell)
void UnlinkedCodeBlock::setInstructions(std::unique_ptr<UnlinkedInstructionStream> instructions)
{
- m_unlinkedInstructions = std::move(instructions);
+ ASSERT(instructions);
+ m_unlinkedInstructions = WTFMove(instructions);
+ Heap::heap(this)->reportExtraMemoryAllocated(m_unlinkedInstructions->sizeInBytes());
}
const UnlinkedInstructionStream& UnlinkedCodeBlock::instructions() const
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
index b9dae2d5c..f5b2b44a9 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All Rights Reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,35 +29,37 @@
#include "BytecodeConventions.h"
#include "CodeSpecializationKind.h"
#include "CodeType.h"
+#include "ConstructAbility.h"
#include "ExpressionRangeInfo.h"
+#include "HandlerInfo.h"
#include "Identifier.h"
#include "JSCell.h"
#include "JSString.h"
#include "ParserModes.h"
#include "RegExp.h"
#include "SpecialPointer.h"
-#include "SymbolTable.h"
+#include "UnlinkedFunctionExecutable.h"
+#include "VariableEnvironment.h"
#include "VirtualRegister.h"
-
-#include <wtf/Compression.h>
+#include <wtf/FastBitVector.h>
#include <wtf/RefCountedArray.h>
#include <wtf/Vector.h>
namespace JSC {
class Debugger;
-class FunctionBodyNode;
+class FunctionMetadataNode;
class FunctionExecutable;
-class FunctionParameters;
class JSScope;
-struct ParserError;
+class ParserError;
class ScriptExecutable;
class SourceCode;
class SourceProvider;
-class SymbolTable;
class UnlinkedCodeBlock;
class UnlinkedFunctionCodeBlock;
+class UnlinkedFunctionExecutable;
class UnlinkedInstructionStream;
+struct ExecutableInfo;
typedef unsigned UnlinkedValueProfile;
typedef unsigned UnlinkedArrayProfile;
@@ -65,132 +67,6 @@ typedef unsigned UnlinkedArrayAllocationProfile;
typedef unsigned UnlinkedObjectAllocationProfile;
typedef unsigned UnlinkedLLIntCallLinkInfo;
-struct ExecutableInfo {
- ExecutableInfo(bool needsActivation, bool usesEval, bool isStrictMode, bool isConstructor)
- : m_needsActivation(needsActivation)
- , m_usesEval(usesEval)
- , m_isStrictMode(isStrictMode)
- , m_isConstructor(isConstructor)
- {
- }
- bool m_needsActivation;
- bool m_usesEval;
- bool m_isStrictMode;
- bool m_isConstructor;
-};
-
-class UnlinkedFunctionExecutable : public JSCell {
-public:
- friend class CodeCache;
- typedef JSCell Base;
- static UnlinkedFunctionExecutable* create(VM* vm, const SourceCode& source, FunctionBodyNode* node, bool isFromGlobalCode = false)
- {
- UnlinkedFunctionExecutable* instance = new (NotNull, allocateCell<UnlinkedFunctionExecutable>(vm->heap)) UnlinkedFunctionExecutable(vm, vm->unlinkedFunctionExecutableStructure.get(), source, node, isFromGlobalCode);
- instance->finishCreation(*vm);
- return instance;
- }
-
- const Identifier& name() const { return m_name; }
- const Identifier& inferredName() const { return m_inferredName; }
- JSString* nameValue() const { return m_nameValue.get(); }
- SymbolTable* symbolTable(CodeSpecializationKind kind)
- {
- return (kind == CodeForCall) ? m_symbolTableForCall.get() : m_symbolTableForConstruct.get();
- }
- size_t parameterCount() const;
- bool isInStrictContext() const { return m_isInStrictContext; }
- FunctionNameIsInScopeToggle functionNameIsInScopeToggle() const { return m_functionNameIsInScopeToggle; }
-
- unsigned firstLineOffset() const { return m_firstLineOffset; }
- unsigned lineCount() const { return m_lineCount; }
- unsigned unlinkedFunctionNameStart() const { return m_unlinkedFunctionNameStart; }
- unsigned unlinkedBodyStartColumn() const { return m_unlinkedBodyStartColumn; }
- unsigned unlinkedBodyEndColumn() const { return m_unlinkedBodyEndColumn; }
- unsigned startOffset() const { return m_startOffset; }
- unsigned sourceLength() { return m_sourceLength; }
-
- String paramString() const;
-
- UnlinkedFunctionCodeBlock* codeBlockFor(VM&, const SourceCode&, CodeSpecializationKind, DebuggerMode, ProfilerMode, ParserError&);
-
- static UnlinkedFunctionExecutable* fromGlobalCode(const Identifier&, ExecState*, Debugger*, const SourceCode&, JSObject** exception);
-
- FunctionExecutable* link(VM&, const SourceCode&, size_t lineOffset, size_t sourceOffset);
-
- void clearCodeForRecompilation()
- {
- m_symbolTableForCall.clear();
- m_symbolTableForConstruct.clear();
- m_codeBlockForCall.clear();
- m_codeBlockForConstruct.clear();
- }
-
- FunctionParameters* parameters() { return m_parameters.get(); }
-
- void recordParse(CodeFeatures features, bool hasCapturedVariables)
- {
- m_features = features;
- m_hasCapturedVariables = hasCapturedVariables;
- }
-
- bool forceUsesArguments() const { return m_forceUsesArguments; }
-
- CodeFeatures features() const { return m_features; }
- bool hasCapturedVariables() const { return m_hasCapturedVariables; }
-
- static const bool needsDestruction = true;
- static const bool hasImmortalStructure = true;
- static void destroy(JSCell*);
-
-private:
- UnlinkedFunctionExecutable(VM*, Structure*, const SourceCode&, FunctionBodyNode*, bool isFromGlobalCode);
- WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForCall;
- WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForConstruct;
-
- unsigned m_numCapturedVariables : 29;
- bool m_forceUsesArguments : 1;
- bool m_isInStrictContext : 1;
- bool m_hasCapturedVariables : 1;
- bool m_isFromGlobalCode : 1;
-
- Identifier m_name;
- Identifier m_inferredName;
- WriteBarrier<JSString> m_nameValue;
- WriteBarrier<SymbolTable> m_symbolTableForCall;
- WriteBarrier<SymbolTable> m_symbolTableForConstruct;
- RefPtr<FunctionParameters> m_parameters;
- unsigned m_firstLineOffset;
- unsigned m_lineCount;
- unsigned m_unlinkedFunctionNameStart;
- unsigned m_unlinkedBodyStartColumn;
- unsigned m_unlinkedBodyEndColumn;
- unsigned m_startOffset;
- unsigned m_sourceLength;
-
- CodeFeatures m_features;
-
- FunctionNameIsInScopeToggle m_functionNameIsInScopeToggle;
-
-protected:
- void finishCreation(VM& vm)
- {
- Base::finishCreation(vm);
- m_nameValue.set(vm, this, jsString(&vm, name().string()));
- }
-
- static void visitChildren(JSCell*, SlotVisitor&);
-
-public:
- static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
- {
- return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionExecutableType, StructureFlags), info());
- }
-
- static const unsigned StructureFlags = OverridesVisitChildren | JSCell::StructureFlags;
-
- DECLARE_EXPORT_INFO;
-};
-
struct UnlinkedStringJumpTable {
typedef HashMap<RefPtr<StringImpl>, int32_t> StringOffsetTable;
StringOffsetTable offsetTable;
@@ -218,13 +94,6 @@ struct UnlinkedSimpleJumpTable {
}
};
-struct UnlinkedHandlerInfo {
- uint32_t start;
- uint32_t end;
- uint32_t target;
- uint32_t scopeDepth;
-};
-
struct UnlinkedInstruction {
UnlinkedInstruction() { u.operand = 0; }
UnlinkedInstruction(OpcodeID opcode) { u.opcode = opcode; }
@@ -239,31 +108,32 @@ struct UnlinkedInstruction {
class UnlinkedCodeBlock : public JSCell {
public:
typedef JSCell Base;
+ static const unsigned StructureFlags = Base::StructureFlags;
+
static const bool needsDestruction = true;
- static const bool hasImmortalStructure = true;
enum { CallFunction, ApplyFunction };
bool isConstructor() const { return m_isConstructor; }
bool isStrictMode() const { return m_isStrictMode; }
bool usesEval() const { return m_usesEval; }
-
- bool needsFullScopeChain() const { return m_needsFullScopeChain; }
- void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; }
+ SourceParseMode parseMode() const { return m_parseMode; }
+ bool isArrowFunction() const { return m_parseMode == SourceParseMode::ArrowFunctionMode; }
+ DerivedContextType derivedContextType() const { return static_cast<DerivedContextType>(m_derivedContextType); }
+ bool isArrowFunctionContext() const { return m_isArrowFunctionContext; }
+ bool isClassContext() const { return m_isClassContext; }
void addExpressionInfo(unsigned instructionOffset, int divot,
int startOffset, int endOffset, unsigned line, unsigned column);
+ void addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot);
+
bool hasExpressionInfo() { return m_expressionInfo.size(); }
+ const Vector<ExpressionRangeInfo>& expressionInfo() { return m_expressionInfo; }
// Special registers
void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
- void setActivationRegister(VirtualRegister activationRegister) { m_activationRegister = activationRegister; }
-
- void setArgumentsRegister(VirtualRegister argumentsRegister) { m_argumentsRegister = argumentsRegister; }
- bool usesArguments() const { return m_argumentsRegister.isValid(); }
- VirtualRegister argumentsRegister() const { return m_argumentsRegister; }
-
+ void setScopeRegister(VirtualRegister scopeRegister) { m_scopeRegister = scopeRegister; }
bool usesGlobalObject() const { return m_globalObjectRegister.isValid(); }
void setGlobalObjectRegister(VirtualRegister globalObjectRegister) { m_globalObjectRegister = globalObjectRegister; }
@@ -278,7 +148,7 @@ public:
{
createRareDataIfNecessary();
unsigned size = m_rareData->m_regexps.size();
- m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_vm, this, r));
+ m_rareData->m_regexps.append(WriteBarrier<RegExp>(*vm(), this, r));
return size;
}
unsigned numberOfRegExps() const
@@ -296,19 +166,35 @@ public:
const Identifier& identifier(int index) const { return m_identifiers[index]; }
const Vector<Identifier>& identifiers() const { return m_identifiers; }
- size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
- unsigned addConstant(JSValue v)
+ unsigned addConstant(JSValue v, SourceCodeRepresentation sourceCodeRepresentation = SourceCodeRepresentation::Other)
+ {
+ unsigned result = m_constantRegisters.size();
+ m_constantRegisters.append(WriteBarrier<Unknown>());
+ m_constantRegisters.last().set(*vm(), this, v);
+ m_constantsSourceCodeRepresentation.append(sourceCodeRepresentation);
+ return result;
+ }
+ unsigned addConstant(LinkTimeConstant type)
{
unsigned result = m_constantRegisters.size();
+ ASSERT(result);
+ unsigned index = static_cast<unsigned>(type);
+ ASSERT(index < LinkTimeConstantCount);
+ m_linkTimeConstants[index] = result;
m_constantRegisters.append(WriteBarrier<Unknown>());
- m_constantRegisters.last().set(*m_vm, this, v);
+ m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
return result;
}
- unsigned addOrFindConstant(JSValue);
+ unsigned registerIndexForLinkTimeConstant(LinkTimeConstant type)
+ {
+ unsigned index = static_cast<unsigned>(type);
+ ASSERT(index < LinkTimeConstantCount);
+ return m_linkTimeConstants[index];
+ }
const Vector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; }
const WriteBarrier<Unknown>& constantRegister(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
- ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
+ const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
// Jumps
size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
@@ -316,22 +202,22 @@ public:
unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
unsigned lastJumpTarget() const { return m_jumpTargets.last(); }
- void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
- bool isNumericCompareFunction() const { return m_isNumericCompareFunction; }
+ bool isBuiltinFunction() const { return m_isBuiltinFunction; }
+
+ ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }
+ SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); }
void shrinkToFit()
{
m_jumpTargets.shrinkToFit();
m_identifiers.shrinkToFit();
m_constantRegisters.shrinkToFit();
+ m_constantsSourceCodeRepresentation.shrinkToFit();
m_functionDecls.shrinkToFit();
m_functionExprs.shrinkToFit();
m_propertyAccessInstructions.shrinkToFit();
m_expressionInfo.shrinkToFit();
-#if ENABLE(BYTECODE_COMMENTS)
- m_bytecodeComments.shrinkToFit();
-#endif
if (m_rareData) {
m_rareData->m_exceptionHandlers.shrinkToFit();
m_rareData->m_regexps.shrinkToFit();
@@ -347,7 +233,7 @@ public:
int m_numVars;
int m_numCapturedVars;
- int m_numCalleeRegisters;
+ int m_numCalleeLocals;
// Jump Tables
@@ -363,7 +249,7 @@ public:
{
unsigned size = m_functionDecls.size();
m_functionDecls.append(WriteBarrier<UnlinkedFunctionExecutable>());
- m_functionDecls.last().set(*m_vm, this, n);
+ m_functionDecls.last().set(*vm(), this, n);
return size;
}
UnlinkedFunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
@@ -372,7 +258,7 @@ public:
{
unsigned size = m_functionExprs.size();
m_functionExprs.append(WriteBarrier<UnlinkedFunctionExecutable>());
- m_functionExprs.last().set(*m_vm, this, n);
+ m_functionExprs.last().set(*vm(), this, n);
return size;
}
UnlinkedFunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
@@ -380,12 +266,10 @@ public:
// Exception handling support
size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
- void addExceptionHandler(const UnlinkedHandlerInfo& hanler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(hanler); }
+ void addExceptionHandler(const UnlinkedHandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); }
UnlinkedHandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
- SymbolTable* symbolTable() const { return m_symbolTable.get(); }
-
- VM* vm() const { return m_vm; }
+ VM* vm() const;
UnlinkedArrayProfile addArrayProfile() { return m_arrayProfileCount++; }
unsigned numberOfArrayProfiles() { return m_arrayProfileCount; }
@@ -402,8 +286,7 @@ public:
CodeType codeType() const { return m_codeType; }
VirtualRegister thisRegister() const { return m_thisRegister; }
- VirtualRegister activationRegister() const { return m_activationRegister; }
-
+ VirtualRegister scopeRegister() const { return m_scopeRegister; }
void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
{
@@ -436,13 +319,15 @@ public:
return m_rareData->m_constantBuffers[index];
}
- bool hasRareData() const { return m_rareData; }
+ bool hasRareData() const { return m_rareData.get(); }
int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
int& startOffset, int& endOffset, unsigned& line, unsigned& column);
+ bool typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot);
+
void recordParse(CodeFeatures features, bool hasCapturedVariables, unsigned firstLine, unsigned lineCount, unsigned endColumn)
{
m_features = features;
@@ -460,6 +345,21 @@ public:
ALWAYS_INLINE unsigned startColumn() const { return 0; }
unsigned endColumn() const { return m_endColumn; }
+ void addOpProfileControlFlowBytecodeOffset(size_t offset)
+ {
+ createRareDataIfNecessary();
+ m_rareData->m_opProfileControlFlowBytecodeOffsets.append(offset);
+ }
+ const Vector<size_t>& opProfileControlFlowBytecodeOffsets() const
+ {
+ ASSERT(m_rareData);
+ return m_rareData->m_opProfileControlFlowBytecodeOffsets;
+ }
+ bool hasOpProfileControlFlowBytecodeOffsets() const
+ {
+ return m_rareData && !m_rareData->m_opProfileControlFlowBytecodeOffsets.isEmpty();
+ }
+
void dumpExpressionRangeInfo(); // For debugging purpose only.
protected:
@@ -469,9 +369,6 @@ protected:
void finishCreation(VM& vm)
{
Base::finishCreation(vm);
- if (codeType() == GlobalCode)
- return;
- m_symbolTable.set(vm, this, SymbolTable::create(vm));
}
private:
@@ -479,51 +376,49 @@ private:
void createRareDataIfNecessary()
{
if (!m_rareData)
- m_rareData = adoptPtr(new RareData);
+ m_rareData = std::make_unique<RareData>();
}
void getLineAndColumn(ExpressionRangeInfo&, unsigned& line, unsigned& column);
- std::unique_ptr<UnlinkedInstructionStream> m_unlinkedInstructions;
-
int m_numParameters;
- VM* m_vm;
+
+ std::unique_ptr<UnlinkedInstructionStream> m_unlinkedInstructions;
VirtualRegister m_thisRegister;
- VirtualRegister m_argumentsRegister;
- VirtualRegister m_activationRegister;
+ VirtualRegister m_scopeRegister;
VirtualRegister m_globalObjectRegister;
- bool m_needsFullScopeChain : 1;
- bool m_usesEval : 1;
- bool m_isNumericCompareFunction : 1;
- bool m_isStrictMode : 1;
- bool m_isConstructor : 1;
- bool m_hasCapturedVariables : 1;
+ unsigned m_usesEval : 1;
+ unsigned m_isStrictMode : 1;
+ unsigned m_isConstructor : 1;
+ unsigned m_hasCapturedVariables : 1;
+ unsigned m_isBuiltinFunction : 1;
+ unsigned m_constructorKind : 2;
+ unsigned m_superBinding : 1;
+ unsigned m_derivedContextType : 2;
+ unsigned m_isArrowFunctionContext : 1;
+ unsigned m_isClassContext : 1;
unsigned m_firstLine;
unsigned m_lineCount;
unsigned m_endColumn;
+ SourceParseMode m_parseMode;
CodeFeatures m_features;
CodeType m_codeType;
Vector<unsigned> m_jumpTargets;
+ Vector<unsigned> m_propertyAccessInstructions;
+
// Constant Pools
Vector<Identifier> m_identifiers;
Vector<WriteBarrier<Unknown>> m_constantRegisters;
+ Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
typedef Vector<WriteBarrier<UnlinkedFunctionExecutable>> FunctionExpressionVector;
FunctionExpressionVector m_functionDecls;
FunctionExpressionVector m_functionExprs;
-
- WriteBarrier<SymbolTable> m_symbolTable;
-
- Vector<unsigned> m_propertyAccessInstructions;
-
-#if ENABLE(BYTECODE_COMMENTS)
- Vector<Comment> m_bytecodeComments;
- size_t m_bytecodeCommentIterator;
-#endif
+ std::array<unsigned, LinkTimeConstantCount> m_linkTimeConstants;
unsigned m_arrayProfileCount;
unsigned m_arrayAllocationProfileCount;
@@ -548,16 +443,22 @@ public:
Vector<UnlinkedStringJumpTable> m_stringSwitchJumpTables;
Vector<ExpressionRangeInfo::FatPosition> m_expressionInfoFatPositions;
+
+ struct TypeProfilerExpressionRange {
+ unsigned m_startDivot;
+ unsigned m_endDivot;
+ };
+ HashMap<unsigned, TypeProfilerExpressionRange> m_typeProfilerInfoMap;
+ Vector<size_t> m_opProfileControlFlowBytecodeOffsets;
};
private:
- OwnPtr<RareData> m_rareData;
+ std::unique_ptr<RareData> m_rareData;
Vector<ExpressionRangeInfo> m_expressionInfo;
protected:
-
- static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
static void visitChildren(JSCell*, SlotVisitor&);
+ static size_t estimatedSize(JSCell*);
public:
DECLARE_INFO;
@@ -573,12 +474,10 @@ protected:
{
}
- static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
-
DECLARE_INFO;
};
-class UnlinkedProgramCodeBlock : public UnlinkedGlobalCodeBlock {
+class UnlinkedProgramCodeBlock final : public UnlinkedGlobalCodeBlock {
private:
friend class CodeCache;
static UnlinkedProgramCodeBlock* create(VM* vm, const ExecutableInfo& info)
@@ -590,47 +489,102 @@ private:
public:
typedef UnlinkedGlobalCodeBlock Base;
+ static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
static void destroy(JSCell*);
- void addFunctionDeclaration(VM& vm, const Identifier& name, UnlinkedFunctionExecutable* functionExecutable)
+ void setVariableDeclarations(const VariableEnvironment& environment) { m_varDeclarations = environment; }
+ const VariableEnvironment& variableDeclarations() const { return m_varDeclarations; }
+
+ void setLexicalDeclarations(const VariableEnvironment& environment) { m_lexicalDeclarations = environment; }
+ const VariableEnvironment& lexicalDeclarations() const { return m_lexicalDeclarations; }
+
+ static void visitChildren(JSCell*, SlotVisitor&);
+
+private:
+ UnlinkedProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info)
+ : Base(vm, structure, GlobalCode, info)
{
- m_functionDeclarations.append(std::make_pair(name, WriteBarrier<UnlinkedFunctionExecutable>(vm, this, functionExecutable)));
}
- void addVariableDeclaration(const Identifier& name, bool isConstant)
+ VariableEnvironment m_varDeclarations;
+ VariableEnvironment m_lexicalDeclarations;
+
+public:
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+ {
+ return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedProgramCodeBlockType, StructureFlags), info());
+ }
+
+ DECLARE_INFO;
+};
+
+class UnlinkedModuleProgramCodeBlock final : public UnlinkedGlobalCodeBlock {
+private:
+ friend class CodeCache;
+ static UnlinkedModuleProgramCodeBlock* create(VM* vm, const ExecutableInfo& info)
{
- m_varDeclarations.append(std::make_pair(name, isConstant));
+ UnlinkedModuleProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedModuleProgramCodeBlock>(vm->heap)) UnlinkedModuleProgramCodeBlock(vm, vm->unlinkedModuleProgramCodeBlockStructure.get(), info);
+ instance->finishCreation(*vm);
+ return instance;
}
- typedef Vector<std::pair<Identifier, bool>> VariableDeclations;
- typedef Vector<std::pair<Identifier, WriteBarrier<UnlinkedFunctionExecutable>> > FunctionDeclations;
+public:
+ typedef UnlinkedGlobalCodeBlock Base;
+ static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
- const VariableDeclations& variableDeclarations() const { return m_varDeclarations; }
- const FunctionDeclations& functionDeclarations() const { return m_functionDeclarations; }
+ static void destroy(JSCell*);
static void visitChildren(JSCell*, SlotVisitor&);
+ // This offset represents the constant register offset to the stored symbol table that represents the layout of the
+ // module environment. This symbol table is created by the byte code generator since the module environment includes
+ // the top-most lexical captured variables inside the module code. This means that, once the module environment is
+ // allocated and instantiated from this symbol table, it is tightly coupled with the specific unlinked module
+ // code block and the stored symbol table. So before executing the module code, we should not clear the unlinked module
+ // program code block in the module executable. This requirement is met because the garbage collector only clears
+ // unlinked code in (1) unmarked executables and (2) function executables.
+ //
+ // Since the function code may be executed repeatedly and the environment of each function execution is different,
+ // the function code needs to allocate and instantiate the environment in the prologue of the function code. On the
+ // other hand, the module code is executed only once. So we can instantiate the module environment outside the module
+ // code. At that time, we construct the module environment by using the symbol table that is held by the module executable.
+ // The symbol table held by the executable is the cloned one from one in the unlinked code block. Instantiating the module
+ // environment before executing and linking the module code is required to link the imported bindings between the modules.
+ //
+ // The unlinked module program code block only holds the pre-cloned symbol table in its constant register pool. It does
+ // not hold the instantiated module environment. So while the module environment requires the specific unlinked module
+ // program code block, the unlinked module code block can be used for the module environment instantiated from this
+ // unlinked code block. There is 1:N relation between the unlinked module code block and the module environments. So the
+ // unlinked module program code block can be cached.
+ //
+ // On the other hand, the linked code block for the module environment includes the resolved references to the imported
+ // bindings. The imported binding references the other module environment, so the linked code block is tightly coupled
+ // with the specific set of the module environments. Thus, the linked code block should not be cached.
+ int moduleEnvironmentSymbolTableConstantRegisterOffset() { return m_moduleEnvironmentSymbolTableConstantRegisterOffset; }
+ void setModuleEnvironmentSymbolTableConstantRegisterOffset(int offset)
+ {
+ m_moduleEnvironmentSymbolTableConstantRegisterOffset = offset;
+ }
+
private:
- UnlinkedProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info)
- : Base(vm, structure, GlobalCode, info)
+ UnlinkedModuleProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info)
+ : Base(vm, structure, ModuleCode, info)
{
}
- VariableDeclations m_varDeclarations;
- FunctionDeclations m_functionDeclarations;
+ int m_moduleEnvironmentSymbolTableConstantRegisterOffset { 0 };
public:
static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
{
- return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedProgramCodeBlockType, StructureFlags), info());
+ return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedModuleProgramCodeBlockType, StructureFlags), info());
}
- static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
-
DECLARE_INFO;
};
-class UnlinkedEvalCodeBlock : public UnlinkedGlobalCodeBlock {
+class UnlinkedEvalCodeBlock final : public UnlinkedGlobalCodeBlock {
private:
friend class CodeCache;
@@ -643,6 +597,8 @@ private:
public:
typedef UnlinkedGlobalCodeBlock Base;
+ static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
static void destroy(JSCell*);
const Identifier& variable(unsigned index) { return m_variables[index]; }
@@ -667,13 +623,14 @@ public:
return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedEvalCodeBlockType, StructureFlags), info());
}
- static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
-
DECLARE_INFO;
};
-class UnlinkedFunctionCodeBlock : public UnlinkedCodeBlock {
+class UnlinkedFunctionCodeBlock final : public UnlinkedCodeBlock {
public:
+ typedef UnlinkedCodeBlock Base;
+ static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
static UnlinkedFunctionCodeBlock* create(VM* vm, CodeType codeType, const ExecutableInfo& info)
{
UnlinkedFunctionCodeBlock* instance = new (NotNull, allocateCell<UnlinkedFunctionCodeBlock>(vm->heap)) UnlinkedFunctionCodeBlock(vm, vm->unlinkedFunctionCodeBlockStructure.get(), codeType, info);
@@ -681,7 +638,6 @@ public:
return instance;
}
- typedef UnlinkedCodeBlock Base;
static void destroy(JSCell*);
private:
@@ -696,8 +652,6 @@ public:
return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionCodeBlockType, StructureFlags), info());
}
- static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
-
DECLARE_INFO;
};
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp
new file mode 100644
index 000000000..7ad9d1042
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2012, 2013, 2015 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedFunctionExecutable.h"
+
+#include "BytecodeGenerator.h"
+#include "ClassInfo.h"
+#include "CodeCache.h"
+#include "Executable.h"
+#include "ExecutableInfo.h"
+#include "FunctionOverrides.h"
+#include "JSCInlines.h"
+#include "JSString.h"
+#include "Parser.h"
+#include "SourceProvider.h"
+#include "Structure.h"
+#include "SymbolTable.h"
+#include "UnlinkedInstructionStream.h"
+#include <wtf/DataLog.h>
+
+namespace JSC {
+
+static_assert(sizeof(UnlinkedFunctionExecutable) <= 256, "UnlinkedFunctionExecutable should fit in a 256-byte cell.");
+
+const ClassInfo UnlinkedFunctionExecutable::s_info = { "UnlinkedFunctionExecutable", 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionExecutable) };
+
+static UnlinkedFunctionCodeBlock* generateUnlinkedFunctionCodeBlock(
+ VM& vm, UnlinkedFunctionExecutable* executable, const SourceCode& source,
+ CodeSpecializationKind kind, DebuggerMode debuggerMode, ProfilerMode profilerMode,
+ UnlinkedFunctionKind functionKind, ParserError& error, SourceParseMode parseMode)
+{
+ JSParserBuiltinMode builtinMode = executable->isBuiltinFunction() ? JSParserBuiltinMode::Builtin : JSParserBuiltinMode::NotBuiltin;
+ JSParserStrictMode strictMode = executable->isInStrictContext() ? JSParserStrictMode::Strict : JSParserStrictMode::NotStrict;
+ ASSERT(isFunctionParseMode(executable->parseMode()));
+ std::unique_ptr<FunctionNode> function = parse<FunctionNode>(
+ &vm, source, executable->name(), builtinMode, strictMode, executable->parseMode(), executable->superBinding(), error, nullptr);
+
+ if (!function) {
+ ASSERT(error.isValid());
+ return nullptr;
+ }
+
+ function->finishParsing(executable->name(), executable->functionMode());
+ executable->recordParse(function->features(), function->hasCapturedVariables());
+
+ bool isClassContext = executable->superBinding() == SuperBinding::Needed;
+
+ UnlinkedFunctionCodeBlock* result = UnlinkedFunctionCodeBlock::create(&vm, FunctionCode,
+ ExecutableInfo(function->usesEval(), function->isStrictMode(), kind == CodeForConstruct, functionKind == UnlinkedBuiltinFunction, executable->constructorKind(), executable->superBinding(), parseMode, executable->derivedContextType(), false, isClassContext));
+
+ auto generator(std::make_unique<BytecodeGenerator>(vm, function.get(), result, debuggerMode, profilerMode, executable->parentScopeTDZVariables()));
+ error = generator->generate();
+ if (error.isValid())
+ return nullptr;
+ return result;
+}
+
+UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM* vm, Structure* structure, const SourceCode& source, RefPtr<SourceProvider>&& sourceOverride, FunctionMetadataNode* node, UnlinkedFunctionKind kind, ConstructAbility constructAbility, VariableEnvironment& parentScopeTDZVariables, DerivedContextType derivedContextType)
+ : Base(*vm, structure)
+ , m_firstLineOffset(node->firstLine() - source.firstLine())
+ , m_lineCount(node->lastLine() - node->firstLine())
+ , m_unlinkedFunctionNameStart(node->functionNameStart() - source.startOffset())
+ , m_unlinkedBodyStartColumn(node->startColumn())
+ , m_unlinkedBodyEndColumn(m_lineCount ? node->endColumn() : node->endColumn() - node->startColumn())
+ , m_startOffset(node->source().startOffset() - source.startOffset())
+ , m_sourceLength(node->source().length())
+ , m_parametersStartOffset(node->parametersStart())
+ , m_typeProfilingStartOffset(node->functionKeywordStart())
+ , m_typeProfilingEndOffset(node->startStartOffset() + node->source().length() - 1)
+ , m_parameterCount(node->parameterCount())
+ , m_features(0)
+ , m_isInStrictContext(node->isInStrictContext())
+ , m_hasCapturedVariables(false)
+ , m_isBuiltinFunction(kind == UnlinkedBuiltinFunction)
+ , m_constructAbility(static_cast<unsigned>(constructAbility))
+ , m_constructorKind(static_cast<unsigned>(node->constructorKind()))
+ , m_functionMode(node->functionMode())
+ , m_superBinding(static_cast<unsigned>(node->superBinding()))
+ , m_derivedContextType(static_cast<unsigned>(derivedContextType))
+ , m_sourceParseMode(static_cast<unsigned>(node->parseMode()))
+ , m_name(node->ident())
+ , m_inferredName(node->inferredName())
+ , m_sourceOverride(WTFMove(sourceOverride))
+{
+ ASSERT(m_constructorKind == static_cast<unsigned>(node->constructorKind()));
+ m_parentScopeTDZVariables.swap(parentScopeTDZVariables);
+}
+
+void UnlinkedFunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+ UnlinkedFunctionExecutable* thisObject = jsCast<UnlinkedFunctionExecutable*>(cell);
+ ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+ Base::visitChildren(thisObject, visitor);
+ visitor.append(&thisObject->m_unlinkedCodeBlockForCall);
+ visitor.append(&thisObject->m_unlinkedCodeBlockForConstruct);
+ visitor.append(&thisObject->m_nameValue);
+}
+
+FunctionExecutable* UnlinkedFunctionExecutable::link(VM& vm, const SourceCode& ownerSource, int overrideLineNumber)
+{
+ SourceCode source = m_sourceOverride ? SourceCode(m_sourceOverride) : ownerSource;
+ unsigned firstLine = source.firstLine() + m_firstLineOffset;
+ unsigned startOffset = source.startOffset() + m_startOffset;
+ unsigned lineCount = m_lineCount;
+
+ // Adjust to one-based indexing.
+ bool startColumnIsOnFirstSourceLine = !m_firstLineOffset;
+ unsigned startColumn = m_unlinkedBodyStartColumn + (startColumnIsOnFirstSourceLine ? source.startColumn() : 1);
+ bool endColumnIsOnStartLine = !lineCount;
+ unsigned endColumn = m_unlinkedBodyEndColumn + (endColumnIsOnStartLine ? startColumn : 1);
+
+ SourceCode code(source.provider(), startOffset, startOffset + m_sourceLength, firstLine, startColumn);
+ FunctionOverrides::OverrideInfo overrideInfo;
+ bool hasFunctionOverride = false;
+
+ if (UNLIKELY(Options::functionOverrides())) {
+ hasFunctionOverride = FunctionOverrides::initializeOverrideFor(code, overrideInfo);
+ if (hasFunctionOverride) {
+ firstLine = overrideInfo.firstLine;
+ lineCount = overrideInfo.lineCount;
+ startColumn = overrideInfo.startColumn;
+ endColumn = overrideInfo.endColumn;
+ code = overrideInfo.sourceCode;
+ }
+ }
+
+ FunctionExecutable* result = FunctionExecutable::create(vm, code, this, firstLine, firstLine + lineCount, startColumn, endColumn);
+ if (overrideLineNumber != -1)
+ result->setOverrideLineNumber(overrideLineNumber);
+
+ if (UNLIKELY(hasFunctionOverride)) {
+ result->overrideParameterAndTypeProfilingStartEndOffsets(
+ overrideInfo.parametersStartOffset,
+ overrideInfo.typeProfilingStartOffset,
+ overrideInfo.typeProfilingEndOffset);
+ }
+
+ return result;
+}
+
+UnlinkedFunctionExecutable* UnlinkedFunctionExecutable::fromGlobalCode(
+ const Identifier& name, ExecState& exec, const SourceCode& source,
+ JSObject*& exception, int overrideLineNumber)
+{
+ ParserError error;
+ VM& vm = exec.vm();
+ CodeCache* codeCache = vm.codeCache();
+ UnlinkedFunctionExecutable* executable = codeCache->getFunctionExecutableFromGlobalCode(vm, name, source, error);
+
+ auto& globalObject = *exec.lexicalGlobalObject();
+ if (globalObject.hasDebugger())
+ globalObject.debugger()->sourceParsed(&exec, source.provider(), error.line(), error.message());
+
+ if (error.isValid()) {
+ exception = error.toErrorObject(&globalObject, source, overrideLineNumber);
+ return nullptr;
+ }
+
+ return executable;
+}
+
+UnlinkedFunctionCodeBlock* UnlinkedFunctionExecutable::unlinkedCodeBlockFor(
+ VM& vm, const SourceCode& source, CodeSpecializationKind specializationKind,
+ DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error, SourceParseMode parseMode)
+{
+ switch (specializationKind) {
+ case CodeForCall:
+ if (UnlinkedFunctionCodeBlock* codeBlock = m_unlinkedCodeBlockForCall.get())
+ return codeBlock;
+ break;
+ case CodeForConstruct:
+ if (UnlinkedFunctionCodeBlock* codeBlock = m_unlinkedCodeBlockForConstruct.get())
+ return codeBlock;
+ break;
+ }
+
+ UnlinkedFunctionCodeBlock* result = generateUnlinkedFunctionCodeBlock(
+ vm, this, source, specializationKind, debuggerMode, profilerMode,
+ isBuiltinFunction() ? UnlinkedBuiltinFunction : UnlinkedNormalFunction,
+ error, parseMode);
+
+ if (error.isValid())
+ return nullptr;
+
+ switch (specializationKind) {
+ case CodeForCall:
+ m_unlinkedCodeBlockForCall.set(vm, this, result);
+ break;
+ case CodeForConstruct:
+ m_unlinkedCodeBlockForConstruct.set(vm, this, result);
+ break;
+ }
+ return result;
+}
+
+void UnlinkedFunctionExecutable::setInvalidTypeProfilingOffsets()
+{
+ m_typeProfilingStartOffset = std::numeric_limits<unsigned>::max();
+ m_typeProfilingEndOffset = std::numeric_limits<unsigned>::max();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h
new file mode 100644
index 000000000..8a614db21
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UnlinkedFunctionExecutable_h
+#define UnlinkedFunctionExecutable_h
+
+#include "BytecodeConventions.h"
+#include "CodeSpecializationKind.h"
+#include "CodeType.h"
+#include "ConstructAbility.h"
+#include "ExecutableInfo.h"
+#include "ExpressionRangeInfo.h"
+#include "HandlerInfo.h"
+#include "Identifier.h"
+#include "JSCell.h"
+#include "JSString.h"
+#include "ParserModes.h"
+#include "RegExp.h"
+#include "SpecialPointer.h"
+#include "VariableEnvironment.h"
+#include "VirtualRegister.h"
+#include <wtf/RefCountedArray.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class FunctionMetadataNode;
+class FunctionExecutable;
+class ParserError;
+class SourceCode;
+class SourceProvider;
+class UnlinkedFunctionCodeBlock;
+
+enum UnlinkedFunctionKind {
+ UnlinkedNormalFunction,
+ UnlinkedBuiltinFunction,
+};
+
+class UnlinkedFunctionExecutable final : public JSCell {
+public:
+ friend class CodeCache;
+ friend class VM;
+
+ typedef JSCell Base;
+ static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+ static UnlinkedFunctionExecutable* create(VM* vm, const SourceCode& source, FunctionMetadataNode* node, UnlinkedFunctionKind unlinkedFunctionKind, ConstructAbility constructAbility, VariableEnvironment& parentScopeTDZVariables, DerivedContextType derivedContextType, RefPtr<SourceProvider>&& sourceOverride = nullptr)
+ {
+ UnlinkedFunctionExecutable* instance = new (NotNull, allocateCell<UnlinkedFunctionExecutable>(vm->heap))
+ UnlinkedFunctionExecutable(vm, vm->unlinkedFunctionExecutableStructure.get(), source, WTFMove(sourceOverride), node, unlinkedFunctionKind, constructAbility, parentScopeTDZVariables, derivedContextType);
+ instance->finishCreation(*vm);
+ return instance;
+ }
+
+ const Identifier& name() const { return m_name; }
+ const Identifier& inferredName() const { return m_inferredName; }
+ JSString* nameValue() const { return m_nameValue.get(); }
+ void setNameValue(VM& vm, JSString* nameValue) { m_nameValue.set(vm, this, nameValue); }
+ unsigned parameterCount() const { return m_parameterCount; };
+ SourceParseMode parseMode() const { return static_cast<SourceParseMode>(m_sourceParseMode); };
+ bool isInStrictContext() const { return m_isInStrictContext; }
+ FunctionMode functionMode() const { return static_cast<FunctionMode>(m_functionMode); }
+ ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }
+ SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); }
+
+ unsigned unlinkedFunctionNameStart() const { return m_unlinkedFunctionNameStart; }
+ unsigned unlinkedBodyStartColumn() const { return m_unlinkedBodyStartColumn; }
+ unsigned unlinkedBodyEndColumn() const { return m_unlinkedBodyEndColumn; }
+ unsigned startOffset() const { return m_startOffset; }
+ unsigned sourceLength() { return m_sourceLength; }
+ unsigned parametersStartOffset() const { return m_parametersStartOffset; }
+ unsigned typeProfilingStartOffset() const { return m_typeProfilingStartOffset; }
+ unsigned typeProfilingEndOffset() const { return m_typeProfilingEndOffset; }
+ void setInvalidTypeProfilingOffsets();
+
+ UnlinkedFunctionCodeBlock* unlinkedCodeBlockFor(
+ VM&, const SourceCode&, CodeSpecializationKind, DebuggerMode, ProfilerMode,
+ ParserError&, SourceParseMode);
+
+ static UnlinkedFunctionExecutable* fromGlobalCode(
+ const Identifier&, ExecState&, const SourceCode&, JSObject*& exception,
+ int overrideLineNumber);
+
+ JS_EXPORT_PRIVATE FunctionExecutable* link(VM&, const SourceCode&, int overrideLineNumber = -1);
+
+ void clearCode()
+ {
+ m_unlinkedCodeBlockForCall.clear();
+ m_unlinkedCodeBlockForConstruct.clear();
+ }
+
+ void recordParse(CodeFeatures features, bool hasCapturedVariables)
+ {
+ m_features = features;
+ m_hasCapturedVariables = hasCapturedVariables;
+ }
+
+ CodeFeatures features() const { return m_features; }
+ bool hasCapturedVariables() const { return m_hasCapturedVariables; }
+
+ static const bool needsDestruction = true;
+ static void destroy(JSCell*);
+
+ bool isBuiltinFunction() const { return m_isBuiltinFunction; }
+ ConstructAbility constructAbility() const { return static_cast<ConstructAbility>(m_constructAbility); }
+ bool isClassConstructorFunction() const { return constructorKind() != ConstructorKind::None; }
+ const VariableEnvironment* parentScopeTDZVariables() const { return &m_parentScopeTDZVariables; }
+
+ bool isArrowFunction() const { return parseMode() == SourceParseMode::ArrowFunctionMode; }
+
+ JSC::DerivedContextType derivedContextType() const {return static_cast<JSC::DerivedContextType>(m_derivedContextType); }
+
+private:
+ UnlinkedFunctionExecutable(VM*, Structure*, const SourceCode&, RefPtr<SourceProvider>&& sourceOverride, FunctionMetadataNode*, UnlinkedFunctionKind, ConstructAbility, VariableEnvironment&, JSC::DerivedContextType);
+
+ unsigned m_firstLineOffset;
+ unsigned m_lineCount;
+ unsigned m_unlinkedFunctionNameStart;
+ unsigned m_unlinkedBodyStartColumn;
+ unsigned m_unlinkedBodyEndColumn;
+ unsigned m_startOffset;
+ unsigned m_sourceLength;
+ unsigned m_parametersStartOffset;
+ unsigned m_typeProfilingStartOffset;
+ unsigned m_typeProfilingEndOffset;
+ unsigned m_parameterCount;
+ CodeFeatures m_features;
+ unsigned m_isInStrictContext : 1;
+ unsigned m_hasCapturedVariables : 1;
+ unsigned m_isBuiltinFunction : 1;
+ unsigned m_constructAbility: 1;
+ unsigned m_constructorKind : 2;
+ unsigned m_functionMode : 1; // FunctionMode
+ unsigned m_superBinding : 1;
+ unsigned m_derivedContextType: 2;
+ unsigned m_sourceParseMode : 4; // SourceParseMode
+
+ WriteBarrier<UnlinkedFunctionCodeBlock> m_unlinkedCodeBlockForCall;
+ WriteBarrier<UnlinkedFunctionCodeBlock> m_unlinkedCodeBlockForConstruct;
+
+ Identifier m_name;
+ Identifier m_inferredName;
+ WriteBarrier<JSString> m_nameValue;
+ RefPtr<SourceProvider> m_sourceOverride;
+
+ VariableEnvironment m_parentScopeTDZVariables;
+
+protected:
+ void finishCreation(VM& vm)
+ {
+ Base::finishCreation(vm);
+ m_nameValue.set(vm, this, jsString(&vm, name().string()));
+ }
+
+ static void visitChildren(JSCell*, SlotVisitor&);
+
+public:
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+ {
+ return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionExecutableType, StructureFlags), info());
+ }
+
+ DECLARE_EXPORT_INFO;
+};
+
+} // namespace JSC
+
+#endif // UnlinkedFunctionExecutable_h
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp
index 2e07f4f47..6a300d3b4 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp
+++ b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp
@@ -28,81 +28,6 @@
namespace JSC {
-// Unlinked instructions are packed in a simple stream format.
-//
-// The first byte is always the opcode.
-// It's followed by an opcode-dependent number of argument values.
-// The first 3 bits of each value determines the format:
-//
-// 5-bit positive integer (1 byte total)
-// 5-bit negative integer (1 byte total)
-// 13-bit positive integer (2 bytes total)
-// 13-bit negative integer (2 bytes total)
-// 5-bit constant register index, based at 0x40000000 (1 byte total)
-// 13-bit constant register index, based at 0x40000000 (2 bytes total)
-// 32-bit raw value (5 bytes total)
-
-enum PackedValueType {
- Positive5Bit = 0,
- Negative5Bit,
- Positive13Bit,
- Negative13Bit,
- ConstantRegister5Bit,
- ConstantRegister13Bit,
- Full32Bit
-};
-
-UnlinkedInstructionStream::Reader::Reader(const UnlinkedInstructionStream& stream)
- : m_stream(stream)
- , m_index(0)
-{
-}
-
-inline unsigned char UnlinkedInstructionStream::Reader::read8()
-{
- return m_stream.m_data.data()[m_index++];
-}
-
-inline unsigned UnlinkedInstructionStream::Reader::read32()
-{
- const unsigned char* data = &m_stream.m_data.data()[m_index];
- unsigned char type = data[0] >> 5;
-
- switch (type) {
- case Positive5Bit:
- m_index++;
- return data[0];
- case Negative5Bit:
- m_index++;
- return 0xffffffe0 | data[0];
- case Positive13Bit:
- m_index += 2;
- return ((data[0] & 0x1F) << 8) | data[1];
- case Negative13Bit:
- m_index += 2;
- return 0xffffe000 | ((data[0] & 0x1F) << 8) | data[1];
- case ConstantRegister5Bit:
- m_index++;
- return 0x40000000 | (data[0] & 0x1F);
- case ConstantRegister13Bit:
- m_index += 2;
- return 0x40000000 | ((data[0] & 0x1F) << 8) | data[1];
- default:
- ASSERT(type == Full32Bit);
- m_index += 5;
- return data[1] | data[2] << 8 | data[3] << 16 | data[4] << 24;
- }
-}
-
-const UnlinkedInstruction* UnlinkedInstructionStream::Reader::next()
-{
- m_unpackedBuffer[0].u.opcode = static_cast<OpcodeID>(read8());
- unsigned opLength = opcodeLength(m_unpackedBuffer[0].u.opcode);
- for (unsigned i = 1; i < opLength; ++i)
- m_unpackedBuffer[i].u.index = read32();
- return m_unpackedBuffer;
-}
-
static void append8(unsigned char*& ptr, unsigned char value)
{
*(ptr++) = value;
@@ -150,7 +75,7 @@ static void append32(unsigned char*& ptr, unsigned value)
*(ptr++) = (value >> 24) & 0xff;
}
-UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstruction>& instructions)
+UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>& instructions)
: m_instructionCount(instructions.size())
{
Vector<unsigned char> buffer;
@@ -177,6 +102,11 @@ UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstru
m_data = RefCountedArray<unsigned char>(buffer);
}
+size_t UnlinkedInstructionStream::sizeInBytes() const
+{
+ return m_data.size() * sizeof(unsigned char);
+}
+
#ifndef NDEBUG
const RefCountedArray<UnlinkedInstruction>& UnlinkedInstructionStream::unpackForDebugging() const
{
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h
index 5a919a29e..a875e4906 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h
+++ b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h
@@ -33,10 +33,12 @@
namespace JSC {
class UnlinkedInstructionStream {
+ WTF_MAKE_FAST_ALLOCATED;
public:
- explicit UnlinkedInstructionStream(const Vector<UnlinkedInstruction>&);
+ explicit UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>&);
unsigned count() const { return m_instructionCount; }
+ size_t sizeInBytes() const;
class Reader {
public:
@@ -69,6 +71,81 @@ private:
unsigned m_instructionCount;
};
+// Unlinked instructions are packed in a simple stream format.
+//
+// The first byte is always the opcode.
+// It's followed by an opcode-dependent number of argument values.
+// The first 3 bits of each value determines the format:
+//
+// 5-bit positive integer (1 byte total)
+// 5-bit negative integer (1 byte total)
+// 13-bit positive integer (2 bytes total)
+// 13-bit negative integer (2 bytes total)
+// 5-bit constant register index, based at 0x40000000 (1 byte total)
+// 13-bit constant register index, based at 0x40000000 (2 bytes total)
+// 32-bit raw value (5 bytes total)
+
+enum PackedValueType {
+ Positive5Bit = 0,
+ Negative5Bit,
+ Positive13Bit,
+ Negative13Bit,
+ ConstantRegister5Bit,
+ ConstantRegister13Bit,
+ Full32Bit
+};
+
+ALWAYS_INLINE UnlinkedInstructionStream::Reader::Reader(const UnlinkedInstructionStream& stream)
+ : m_stream(stream)
+ , m_index(0)
+{
+}
+
+ALWAYS_INLINE unsigned char UnlinkedInstructionStream::Reader::read8()
+{
+ return m_stream.m_data.data()[m_index++];
+}
+
+ALWAYS_INLINE unsigned UnlinkedInstructionStream::Reader::read32()
+{
+ const unsigned char* data = &m_stream.m_data.data()[m_index];
+ unsigned char type = data[0] >> 5;
+
+ switch (type) {
+ case Positive5Bit:
+ m_index++;
+ return data[0];
+ case Negative5Bit:
+ m_index++;
+ return 0xffffffe0 | data[0];
+ case Positive13Bit:
+ m_index += 2;
+ return ((data[0] & 0x1F) << 8) | data[1];
+ case Negative13Bit:
+ m_index += 2;
+ return 0xffffe000 | ((data[0] & 0x1F) << 8) | data[1];
+ case ConstantRegister5Bit:
+ m_index++;
+ return 0x40000000 | (data[0] & 0x1F);
+ case ConstantRegister13Bit:
+ m_index += 2;
+ return 0x40000000 | ((data[0] & 0x1F) << 8) | data[1];
+ default:
+ ASSERT(type == Full32Bit);
+ m_index += 5;
+ return data[1] | data[2] << 8 | data[3] << 16 | data[4] << 24;
+ }
+}
+
+ALWAYS_INLINE const UnlinkedInstruction* UnlinkedInstructionStream::Reader::next()
+{
+ m_unpackedBuffer[0].u.opcode = static_cast<OpcodeID>(read8());
+ unsigned opLength = opcodeLength(m_unpackedBuffer[0].u.opcode);
+ for (unsigned i = 1; i < opLength; ++i)
+ m_unpackedBuffer[i].u.index = read32();
+ return m_unpackedBuffer;
+}
+
} // namespace JSC
#endif // UnlinkedInstructionStream_h
diff --git a/Source/JavaScriptCore/bytecode/ValueProfile.cpp b/Source/JavaScriptCore/bytecode/ValueProfile.cpp
new file mode 100644
index 000000000..876ce30e8
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ValueProfile.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ValueProfile.h"
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, const ResultProfile& profile)
+{
+ const char* separator = "";
+
+ if (!profile.didObserveNonInt32()) {
+ out.print("Int32");
+ separator = "|";
+ } else {
+ if (profile.didObserveNegZeroDouble()) {
+ out.print(separator, "NegZeroDouble");
+ separator = "|";
+ }
+ if (profile.didObserveNonNegZeroDouble()) {
+ out.print("NonNegZeroDouble");
+ separator = "|";
+ }
+ if (profile.didObserveNonNumber()) {
+ out.print("NonNumber");
+ separator = "|";
+ }
+ if (profile.didObserveInt32Overflow()) {
+ out.print("Int32Overflow");
+ separator = "|";
+ }
+ if (profile.didObserveInt52Overflow()) {
+ out.print("Int52Overflow");
+ separator = "|";
+ }
+ }
+ if (profile.specialFastPathCount()) {
+ out.print(" special fast path: ");
+ out.print(profile.specialFastPathCount());
+ }
+}
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/ValueProfile.h b/Source/JavaScriptCore/bytecode/ValueProfile.h
index 0790f79da..48b47da7c 100644
--- a/Source/JavaScriptCore/bytecode/ValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/ValueProfile.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -206,7 +206,65 @@ inline int getRareCaseProfileBytecodeOffset(RareCaseProfile* rareCaseProfile)
return rareCaseProfile->m_bytecodeOffset;
}
+struct ResultProfile {
+private:
+ static const int numberOfFlagBits = 5;
+
+public:
+ ResultProfile(int bytecodeOffset)
+ : m_bytecodeOffsetAndFlags(bytecodeOffset << numberOfFlagBits)
+ {
+ ASSERT(((bytecodeOffset << numberOfFlagBits) >> numberOfFlagBits) == bytecodeOffset);
+ }
+
+ enum ObservedResults {
+ NonNegZeroDouble = 1 << 0,
+ NegZeroDouble = 1 << 1,
+ NonNumber = 1 << 2,
+ Int32Overflow = 1 << 3,
+ Int52Overflow = 1 << 4,
+ };
+
+ int bytecodeOffset() const { return m_bytecodeOffsetAndFlags >> numberOfFlagBits; }
+ unsigned specialFastPathCount() const { return m_specialFastPathCount; }
+
+ bool didObserveNonInt32() const { return hasBits(NonNegZeroDouble | NegZeroDouble | NonNumber); }
+ bool didObserveDouble() const { return hasBits(NonNegZeroDouble | NegZeroDouble); }
+ bool didObserveNonNegZeroDouble() const { return hasBits(NonNegZeroDouble); }
+ bool didObserveNegZeroDouble() const { return hasBits(NegZeroDouble); }
+ bool didObserveNonNumber() const { return hasBits(NonNumber); }
+ bool didObserveInt32Overflow() const { return hasBits(Int32Overflow); }
+ bool didObserveInt52Overflow() const { return hasBits(Int52Overflow); }
+
+ void setObservedNonNegZeroDouble() { setBit(NonNegZeroDouble); }
+ void setObservedNegZeroDouble() { setBit(NegZeroDouble); }
+ void setObservedNonNumber() { setBit(NonNumber); }
+ void setObservedInt32Overflow() { setBit(Int32Overflow); }
+ void setObservedInt52Overflow() { setBit(Int52Overflow); }
+
+ void* addressOfFlags() { return &m_bytecodeOffsetAndFlags; }
+ void* addressOfSpecialFastPathCount() { return &m_specialFastPathCount; }
+
+private:
+ bool hasBits(int mask) const { return m_bytecodeOffsetAndFlags & mask; }
+ void setBit(int mask) { m_bytecodeOffsetAndFlags |= mask; }
+
+ int m_bytecodeOffsetAndFlags;
+ unsigned m_specialFastPathCount { 0 };
+};
+
+inline int getResultProfileBytecodeOffset(ResultProfile* profile)
+{
+ return profile->bytecodeOffset();
+}
+
} // namespace JSC
+namespace WTF {
+
+void printInternal(PrintStream&, const JSC::ResultProfile&);
+
+} // namespace WTF
+
#endif // ValueProfile_h
diff --git a/Source/JavaScriptCore/bytecode/ValueRecovery.cpp b/Source/JavaScriptCore/bytecode/ValueRecovery.cpp
index 5032684dd..9c083b04a 100644
--- a/Source/JavaScriptCore/bytecode/ValueRecovery.cpp
+++ b/Source/JavaScriptCore/bytecode/ValueRecovery.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,7 +27,7 @@
#include "ValueRecovery.h"
#include "CodeBlock.h"
-#include "Operations.h"
+#include "JSCInlines.h"
namespace JSC {
@@ -86,34 +86,40 @@ void ValueRecovery::dumpInContext(PrintStream& out, DumpContext* context) const
case InFPR:
out.print(fpr());
return;
+ case UnboxedDoubleInFPR:
+ out.print("double(", fpr(), ")");
+ return;
#if USE(JSVALUE32_64)
case InPair:
out.print("pair(", tagGPR(), ", ", payloadGPR(), ")");
return;
#endif
case DisplacedInJSStack:
- out.printf("*%d", virtualRegister().offset());
+ out.print("*", virtualRegister());
return;
case Int32DisplacedInJSStack:
- out.printf("*int32(%d)", virtualRegister().offset());
+ out.print("*int32(", virtualRegister(), ")");
return;
case Int52DisplacedInJSStack:
- out.printf("*int52(%d)", virtualRegister().offset());
+ out.print("*int52(", virtualRegister(), ")");
return;
case StrictInt52DisplacedInJSStack:
- out.printf("*strictInt52(%d)", virtualRegister().offset());
+ out.print("*strictInt52(", virtualRegister(), ")");
return;
case DoubleDisplacedInJSStack:
- out.printf("*double(%d)", virtualRegister().offset());
+ out.print("*double(", virtualRegister(), ")");
return;
case CellDisplacedInJSStack:
- out.printf("*cell(%d)", virtualRegister().offset());
+ out.print("*cell(", virtualRegister(), ")");
return;
case BooleanDisplacedInJSStack:
- out.printf("*bool(%d)", virtualRegister().offset());
+ out.print("*bool(", virtualRegister(), ")");
+ return;
+ case DirectArgumentsThatWereNotCreated:
+ out.print("DirectArguments(", nodeID(), ")");
return;
- case ArgumentsThatWereNotCreated:
- out.printf("arguments");
+ case ClonedArgumentsThatWereNotCreated:
+ out.print("ClonedArguments(", nodeID(), ")");
return;
case Constant:
out.print("[", inContext(constant(), context), "]");
diff --git a/Source/JavaScriptCore/bytecode/ValueRecovery.h b/Source/JavaScriptCore/bytecode/ValueRecovery.h
index 3af2c3409..5f6ee9c72 100644
--- a/Source/JavaScriptCore/bytecode/ValueRecovery.h
+++ b/Source/JavaScriptCore/bytecode/ValueRecovery.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,19 +26,21 @@
#ifndef ValueRecovery_h
#define ValueRecovery_h
+#include "DFGMinifiedID.h"
#include "DataFormat.h"
#if ENABLE(JIT)
#include "GPRInfo.h"
#include "FPRInfo.h"
+#include "Reg.h"
#endif
#include "JSCJSValue.h"
#include "MacroAssembler.h"
#include "VirtualRegister.h"
-#include <wtf/Platform.h>
namespace JSC {
struct DumpContext;
+struct InlineCallFrame;
// Describes how to recover a given bytecode virtual register at a given
// code point.
@@ -54,6 +56,7 @@ enum ValueRecoveryTechnique {
InPair,
#endif
InFPR,
+ UnboxedDoubleInFPR,
// It's in the stack, but at a different location.
DisplacedInJSStack,
// It's in the stack, at a different location, and it's unboxed.
@@ -63,8 +66,9 @@ enum ValueRecoveryTechnique {
DoubleDisplacedInJSStack,
CellDisplacedInJSStack,
BooleanDisplacedInJSStack,
- // It's an Arguments object.
- ArgumentsThatWereNotCreated,
+ // It's an Arguments object. This arises because of the simplified arguments simplification done by the DFG.
+ DirectArgumentsThatWereNotCreated,
+ ClonedArgumentsThatWereNotCreated,
// It's a constant.
Constant,
// Don't know how to recover it.
@@ -80,6 +84,19 @@ public:
bool isSet() const { return m_technique != DontKnow; }
bool operator!() const { return !isSet(); }
+
+#if ENABLE(JIT)
+ static ValueRecovery inRegister(Reg reg, DataFormat dataFormat)
+ {
+ if (reg.isGPR())
+ return inGPR(reg.gpr(), dataFormat);
+
+ ASSERT(reg.isFPR());
+ return inFPR(reg.fpr(), dataFormat);
+ }
+#endif
+
+ explicit operator bool() const { return isSet(); }
static ValueRecovery inGPR(MacroAssembler::RegisterID gpr, DataFormat dataFormat)
{
@@ -115,10 +132,14 @@ public:
}
#endif
- static ValueRecovery inFPR(MacroAssembler::FPRegisterID fpr)
+ static ValueRecovery inFPR(MacroAssembler::FPRegisterID fpr, DataFormat dataFormat)
{
+ ASSERT(dataFormat == DataFormatDouble || dataFormat & DataFormatJS);
ValueRecovery result;
- result.m_technique = InFPR;
+ if (dataFormat == DataFormatDouble)
+ result.m_technique = UnboxedDoubleInFPR;
+ else
+ result.m_technique = InFPR;
result.m_source.fpr = fpr;
return result;
}
@@ -168,18 +189,27 @@ public:
return result;
}
- static ValueRecovery argumentsThatWereNotCreated()
+ static ValueRecovery directArgumentsThatWereNotCreated(DFG::MinifiedID id)
{
ValueRecovery result;
- result.m_technique = ArgumentsThatWereNotCreated;
+ result.m_technique = DirectArgumentsThatWereNotCreated;
+ result.m_source.nodeID = id.bits();
return result;
}
+ static ValueRecovery outOfBandArgumentsThatWereNotCreated(DFG::MinifiedID id)
+ {
+ ValueRecovery result;
+ result.m_technique = ClonedArgumentsThatWereNotCreated;
+ result.m_source.nodeID = id.bits();
+ return result;
+ }
+
ValueRecoveryTechnique technique() const { return m_technique; }
bool isConstant() const { return m_technique == Constant; }
-
- bool isInRegisters() const
+
+ bool isInGPR() const
{
switch (m_technique) {
case InGPR:
@@ -188,19 +218,81 @@ public:
case UnboxedCellInGPR:
case UnboxedInt52InGPR:
case UnboxedStrictInt52InGPR:
-#if USE(JSVALUE32_64)
- case InPair:
-#endif
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isInFPR() const
+ {
+ switch (m_technique) {
case InFPR:
+ case UnboxedDoubleInFPR:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isInRegisters() const
+ {
+ return isInJSValueRegs() || isInGPR() || isInFPR();
+ }
+
+ bool isInJSStack() const
+ {
+ switch (m_technique) {
+ case DisplacedInJSStack:
+ case Int32DisplacedInJSStack:
+ case Int52DisplacedInJSStack:
+ case StrictInt52DisplacedInJSStack:
+ case DoubleDisplacedInJSStack:
+ case CellDisplacedInJSStack:
+ case BooleanDisplacedInJSStack:
return true;
default:
return false;
}
}
+
+ DataFormat dataFormat() const
+ {
+ switch (m_technique) {
+ case InGPR:
+ case InFPR:
+ case DisplacedInJSStack:
+ case Constant:
+#if USE(JSVALUE32_64)
+ case InPair:
+#endif
+ return DataFormatJS;
+ case UnboxedInt32InGPR:
+ case Int32DisplacedInJSStack:
+ return DataFormatInt32;
+ case UnboxedInt52InGPR:
+ case Int52DisplacedInJSStack:
+ return DataFormatInt52;
+ case UnboxedStrictInt52InGPR:
+ case StrictInt52DisplacedInJSStack:
+ return DataFormatStrictInt52;
+ case UnboxedBooleanInGPR:
+ case BooleanDisplacedInJSStack:
+ return DataFormatBoolean;
+ case UnboxedCellInGPR:
+ case CellDisplacedInJSStack:
+ return DataFormatCell;
+ case UnboxedDoubleInFPR:
+ case DoubleDisplacedInJSStack:
+ return DataFormatDouble;
+ default:
+ return DataFormatNone;
+ }
+ }
MacroAssembler::RegisterID gpr() const
{
- ASSERT(m_technique == InGPR || m_technique == UnboxedInt32InGPR || m_technique == UnboxedBooleanInGPR || m_technique == UnboxedInt52InGPR || m_technique == UnboxedStrictInt52InGPR || m_technique == UnboxedCellInGPR);
+ ASSERT(isInGPR());
return m_source.gpr;
}
@@ -216,26 +308,71 @@ public:
ASSERT(m_technique == InPair);
return m_source.pair.payloadGPR;
}
-#endif
+
+ bool isInJSValueRegs() const
+ {
+ return m_technique == InPair;
+ }
+
+#if ENABLE(JIT)
+ JSValueRegs jsValueRegs() const
+ {
+ ASSERT(isInJSValueRegs());
+ return JSValueRegs(tagGPR(), payloadGPR());
+ }
+#endif // ENABLE(JIT)
+#else
+ bool isInJSValueRegs() const
+ {
+ return isInGPR();
+ }
+#endif // USE(JSVALUE32_64)
MacroAssembler::FPRegisterID fpr() const
{
- ASSERT(m_technique == InFPR);
+ ASSERT(isInFPR());
return m_source.fpr;
}
VirtualRegister virtualRegister() const
{
- ASSERT(m_technique == DisplacedInJSStack || m_technique == Int32DisplacedInJSStack || m_technique == DoubleDisplacedInJSStack || m_technique == CellDisplacedInJSStack || m_technique == BooleanDisplacedInJSStack || m_technique == Int52DisplacedInJSStack || m_technique == StrictInt52DisplacedInJSStack);
+ ASSERT(isInJSStack());
return VirtualRegister(m_source.virtualReg);
}
+ ValueRecovery withLocalsOffset(int offset) const
+ {
+ switch (m_technique) {
+ case DisplacedInJSStack:
+ case Int32DisplacedInJSStack:
+ case DoubleDisplacedInJSStack:
+ case CellDisplacedInJSStack:
+ case BooleanDisplacedInJSStack:
+ case Int52DisplacedInJSStack:
+ case StrictInt52DisplacedInJSStack: {
+ ValueRecovery result;
+ result.m_technique = m_technique;
+ result.m_source.virtualReg = m_source.virtualReg + offset;
+ return result;
+ }
+
+ default:
+ return *this;
+ }
+ }
+
JSValue constant() const
{
- ASSERT(m_technique == Constant);
+ ASSERT(isConstant());
return JSValue::decode(m_source.constant);
}
+ DFG::MinifiedID nodeID() const
+ {
+ ASSERT(m_technique == DirectArgumentsThatWereNotCreated || m_technique == ClonedArgumentsThatWereNotCreated);
+ return DFG::MinifiedID::fromBits(m_source.nodeID);
+ }
+
JSValue recover(ExecState*) const;
#if ENABLE(JIT)
@@ -256,6 +393,7 @@ private:
#endif
int virtualReg;
EncodedJSValue constant;
+ uintptr_t nodeID;
} m_source;
};
diff --git a/Source/JavaScriptCore/bytecode/VariableWatchpointSet.h b/Source/JavaScriptCore/bytecode/VariableWatchpointSet.h
deleted file mode 100644
index 4dec40495..000000000
--- a/Source/JavaScriptCore/bytecode/VariableWatchpointSet.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef VariableWatchpointSet_h
-#define VariableWatchpointSet_h
-
-#include "Watchpoint.h"
-#include "WriteBarrier.h"
-
-namespace JSC {
-
-class VariableWatchpointSet : public WatchpointSet {
- friend class LLIntOffsetsExtractor;
-public:
- VariableWatchpointSet()
- : WatchpointSet(ClearWatchpoint)
- {
- }
-
- ~VariableWatchpointSet() { }
-
- // For the purpose of deciding whether or not to watch this variable, you only need
- // to inspect inferredValue(). If this returns something other than the empty
- // value, then it means that at all future safepoints, this watchpoint set will be
- // in one of these states:
- //
- // IsWatched: in this case, the variable's value must still be the
- // inferredValue.
- //
- // IsInvalidated: in this case the variable's value may be anything but you'll
- // either notice that it's invalidated and not install the watchpoint, or
- // you will have been notified that the watchpoint was fired.
- JSValue inferredValue() const { return m_inferredValue; }
-
- void notifyWrite(JSValue value)
- {
- ASSERT(!!value);
- switch (state()) {
- case ClearWatchpoint:
- m_inferredValue = value;
- startWatching();
- return;
-
- case IsWatched:
- ASSERT(!!m_inferredValue);
- if (value == m_inferredValue)
- return;
- invalidate();
- return;
-
- case IsInvalidated:
- ASSERT(!m_inferredValue);
- return;
- }
-
- ASSERT_NOT_REACHED();
- }
-
- void invalidate()
- {
- m_inferredValue = JSValue();
- WatchpointSet::invalidate();
- }
-
- void finalizeUnconditionally()
- {
- ASSERT(!!m_inferredValue == (state() == IsWatched));
- if (!m_inferredValue)
- return;
- if (!m_inferredValue.isCell())
- return;
- JSCell* cell = m_inferredValue.asCell();
- if (Heap::isMarked(cell))
- return;
- invalidate();
- }
-
- JSValue* addressOfInferredValue() { return &m_inferredValue; }
-
-private:
- JSValue m_inferredValue;
-};
-
-} // namespace JSC
-
-#endif // VariableWatchpointSet_h
-
diff --git a/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp
new file mode 100644
index 000000000..b483ab21c
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "VariableWriteFireDetail.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+void VariableWriteFireDetail::dump(PrintStream& out) const
+{
+ out.print("Write to ", m_name, " in ", JSValue(m_object));
+}
+
+void VariableWriteFireDetail::touch(WatchpointSet* set, JSObject* object, const PropertyName& name)
+{
+ set->touch(VariableWriteFireDetail(object, name));
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h
new file mode 100644
index 000000000..664f69cbb
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VariableWriteFireDetail_h
+#define VariableWriteFireDetail_h
+
+#include "Watchpoint.h"
+
+namespace JSC {
+
+class JSObject;
+class PropertyName;
+
+class VariableWriteFireDetail : public FireDetail {
+public:
+ VariableWriteFireDetail(JSObject* object, const PropertyName& name)
+ : m_object(object)
+ , m_name(name)
+ {
+ }
+
+ virtual void dump(PrintStream&) const override;
+
+ JS_EXPORT_PRIVATE static void touch(WatchpointSet*, JSObject*, const PropertyName&);
+
+private:
+ JSObject* m_object;
+ const PropertyName& m_name;
+};
+
+} // namespace JSC
+
+#endif // VariableWriteFireDetail_h
diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.cpp b/Source/JavaScriptCore/bytecode/VirtualRegister.cpp
new file mode 100644
index 000000000..57cdb62c9
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/VirtualRegister.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "VirtualRegister.h"
+
+namespace JSC {
+
+void VirtualRegister::dump(PrintStream& out) const
+{
+ if (!isValid()) {
+ out.print("<invalid>");
+ return;
+ }
+
+ if (isHeader()) {
+ out.print("head", m_virtualRegister);
+ return;
+ }
+
+ if (isConstant()) {
+ out.print("const", toConstantIndex());
+ return;
+ }
+
+ if (isArgument()) {
+ if (!toArgument())
+ out.print("this");
+ else
+ out.print("arg", toArgument());
+ return;
+ }
+
+ if (isLocal()) {
+ out.print("loc", toLocal());
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.h b/Source/JavaScriptCore/bytecode/VirtualRegister.h
index c63aee85f..613088ef6 100644
--- a/Source/JavaScriptCore/bytecode/VirtualRegister.h
+++ b/Source/JavaScriptCore/bytecode/VirtualRegister.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,7 +28,6 @@
#include "CallFrame.h"
-#include <wtf/Platform.h>
#include <wtf/PrintStream.h>
namespace JSC {
@@ -60,14 +59,47 @@ public:
bool isValid() const { return (m_virtualRegister != s_invalidVirtualRegister); }
bool isLocal() const { return operandIsLocal(m_virtualRegister); }
bool isArgument() const { return operandIsArgument(m_virtualRegister); }
+ bool isHeader() const { return m_virtualRegister >= 0 && m_virtualRegister < JSStack::ThisArgument; }
bool isConstant() const { return m_virtualRegister >= s_firstConstantRegisterIndex; }
int toLocal() const { ASSERT(isLocal()); return operandToLocal(m_virtualRegister); }
int toArgument() const { ASSERT(isArgument()); return operandToArgument(m_virtualRegister); }
int toConstantIndex() const { ASSERT(isConstant()); return m_virtualRegister - s_firstConstantRegisterIndex; }
int offset() const { return m_virtualRegister; }
-
- bool operator==(const VirtualRegister other) const { return m_virtualRegister == other.m_virtualRegister; }
- bool operator!=(const VirtualRegister other) const { return m_virtualRegister != other.m_virtualRegister; }
+ int offsetInBytes() const { return m_virtualRegister * sizeof(Register); }
+
+ bool operator==(VirtualRegister other) const { return m_virtualRegister == other.m_virtualRegister; }
+ bool operator!=(VirtualRegister other) const { return m_virtualRegister != other.m_virtualRegister; }
+ bool operator<(VirtualRegister other) const { return m_virtualRegister < other.m_virtualRegister; }
+ bool operator>(VirtualRegister other) const { return m_virtualRegister > other.m_virtualRegister; }
+ bool operator<=(VirtualRegister other) const { return m_virtualRegister <= other.m_virtualRegister; }
+ bool operator>=(VirtualRegister other) const { return m_virtualRegister >= other.m_virtualRegister; }
+
+ VirtualRegister operator+(int value) const
+ {
+ return VirtualRegister(offset() + value);
+ }
+ VirtualRegister operator-(int value) const
+ {
+ return VirtualRegister(offset() - value);
+ }
+ VirtualRegister operator+(VirtualRegister value) const
+ {
+ return VirtualRegister(offset() + value.offset());
+ }
+ VirtualRegister operator-(VirtualRegister value) const
+ {
+ return VirtualRegister(offset() - value.offset());
+ }
+ VirtualRegister& operator+=(int value)
+ {
+ return *this = *this + value;
+ }
+ VirtualRegister& operator-=(int value)
+ {
+ return *this = *this - value;
+ }
+
+ void dump(PrintStream& out) const;
private:
static const int s_invalidVirtualRegister = 0x3fffffff;
@@ -95,13 +127,4 @@ inline VirtualRegister virtualRegisterForArgument(int argument, int offset = 0)
} // namespace JSC
-namespace WTF {
-
-inline void printInternal(PrintStream& out, JSC::VirtualRegister value)
-{
- out.print(value.offset());
-}
-
-} // namespace WTF
-
#endif // VirtualRegister_h
diff --git a/Source/JavaScriptCore/bytecode/Watchpoint.cpp b/Source/JavaScriptCore/bytecode/Watchpoint.cpp
index f29c2141c..761c06744 100644
--- a/Source/JavaScriptCore/bytecode/Watchpoint.cpp
+++ b/Source/JavaScriptCore/bytecode/Watchpoint.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,16 +26,32 @@
#include "config.h"
#include "Watchpoint.h"
-#include "LinkBuffer.h"
#include <wtf/CompilationThread.h>
#include <wtf/PassRefPtr.h>
namespace JSC {
+void StringFireDetail::dump(PrintStream& out) const
+{
+ out.print(m_string);
+}
+
Watchpoint::~Watchpoint()
{
- if (isOnList())
+ if (isOnList()) {
+ // This will happen if we get destroyed before the set fires. That's totally a valid
+ // possibility. For example:
+ //
+ // CodeBlock has a Watchpoint on transition from structure S1. The transition never
+ // happens, but the CodeBlock gets destroyed because of GC.
remove();
+ }
+}
+
+void Watchpoint::fire(const FireDetail& detail)
+{
+ RELEASE_ASSERT(!isOnList());
+ fireInternal(detail);
}
WatchpointSet::WatchpointSet(WatchpointState state)
@@ -65,20 +81,48 @@ void WatchpointSet::add(Watchpoint* watchpoint)
m_state = IsWatched;
}
-void WatchpointSet::fireAllSlow()
+void WatchpointSet::fireAllSlow(const FireDetail& detail)
{
ASSERT(state() == IsWatched);
WTF::storeStoreFence();
- fireAllWatchpoints();
- m_state = IsInvalidated;
+ m_state = IsInvalidated; // Do this first. Needed for adaptive watchpoints.
+ fireAllWatchpoints(detail);
WTF::storeStoreFence();
}
-void WatchpointSet::fireAllWatchpoints()
+void WatchpointSet::fireAllSlow(const char* reason)
{
- while (!m_set.isEmpty())
- m_set.begin()->fire();
+ fireAllSlow(StringFireDetail(reason));
+}
+
+void WatchpointSet::fireAllWatchpoints(const FireDetail& detail)
+{
+ // In case there are any adaptive watchpoints, we need to make sure that they see that this
+ // watchpoint has been already invalidated.
+ RELEASE_ASSERT(hasBeenInvalidated());
+
+ while (!m_set.isEmpty()) {
+ Watchpoint* watchpoint = m_set.begin();
+ ASSERT(watchpoint->isOnList());
+
+ // Removing the Watchpoint before firing it makes it possible to implement watchpoints
+ // that add themselves to a different set when they fire. This kind of "adaptive"
+ // watchpoint can be used to track some semantic property that is more fine-grained than
+ // what the set can convey. For example, we might care if a singleton object ever has a
+ // property called "foo". We can watch for this by checking if its Structure has "foo" and
+ // then watching its transitions. But then the watchpoint fires if any property is added.
+ // So, before the watchpoint decides to invalidate any code, it can check if it is
+ // possible to add itself to the transition watchpoint set of the singleton object's new
+ // Structure.
+ watchpoint->remove();
+ ASSERT(m_set.begin() != watchpoint);
+ ASSERT(!watchpoint->isOnList());
+
+ watchpoint->fire(detail);
+ // After we fire the watchpoint, the watchpoint pointer may be a dangling pointer. That's
+ // fine, because we have no use for the pointer anymore.
+ }
}
void InlineWatchpointSet::add(Watchpoint* watchpoint)
@@ -86,6 +130,11 @@ void InlineWatchpointSet::add(Watchpoint* watchpoint)
inflate()->add(watchpoint);
}
+void InlineWatchpointSet::fireAll(const char* reason)
+{
+ fireAll(StringFireDetail(reason));
+}
+
WatchpointSet* InlineWatchpointSet::inflateSlow()
{
ASSERT(isThin());
diff --git a/Source/JavaScriptCore/bytecode/Watchpoint.h b/Source/JavaScriptCore/bytecode/Watchpoint.h
index 8790f4e62..869e908c8 100644
--- a/Source/JavaScriptCore/bytecode/Watchpoint.h
+++ b/Source/JavaScriptCore/bytecode/Watchpoint.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,12 +27,47 @@
#define Watchpoint_h
#include <wtf/Atomics.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
#include <wtf/SentinelLinkedList.h>
#include <wtf/ThreadSafeRefCounted.h>
namespace JSC {
+class FireDetail {
+ void* operator new(size_t) = delete;
+
+public:
+ FireDetail()
+ {
+ }
+
+ virtual ~FireDetail()
+ {
+ }
+
+ virtual void dump(PrintStream&) const = 0;
+};
+
+class StringFireDetail : public FireDetail {
+public:
+ StringFireDetail(const char* string)
+ : m_string(string)
+ {
+ }
+
+ virtual void dump(PrintStream& out) const override;
+
+private:
+ const char* m_string;
+};
+
+class WatchpointSet;
+
class Watchpoint : public BasicRawSentinelNode<Watchpoint> {
+ WTF_MAKE_NONCOPYABLE(Watchpoint);
+ WTF_MAKE_FAST_ALLOCATED;
public:
Watchpoint()
{
@@ -40,10 +75,12 @@ public:
virtual ~Watchpoint();
- void fire() { fireInternal(); }
-
protected:
- virtual void fireInternal() = 0;
+ virtual void fireInternal(const FireDetail&) = 0;
+
+private:
+ friend class WatchpointSet;
+ void fire(const FireDetail&);
};
enum WatchpointState {
@@ -57,8 +94,14 @@ class InlineWatchpointSet;
class WatchpointSet : public ThreadSafeRefCounted<WatchpointSet> {
friend class LLIntOffsetsExtractor;
public:
- WatchpointSet(WatchpointState);
- ~WatchpointSet(); // Note that this will not fire any of the watchpoints; if you need to know when a WatchpointSet dies then you need a separate mechanism for this.
+ JS_EXPORT_PRIVATE WatchpointSet(WatchpointState);
+ JS_EXPORT_PRIVATE ~WatchpointSet(); // Note that this will not fire any of the watchpoints; if you need to know when a WatchpointSet dies then you need a separate mechanism for this.
+
+ // Fast way of getting the state, which only works from the main thread.
+ WatchpointState stateOnJSThread() const
+ {
+ return static_cast<WatchpointState>(m_state);
+ }
// It is safe to call this from another thread. It may return an old
// state. Guarantees that if *first* read the state() of the thing being
@@ -98,39 +141,67 @@ public:
// set watchpoints that we believe will actually be fired.
void startWatching()
{
- ASSERT(state() != IsInvalidated);
+ ASSERT(m_state != IsInvalidated);
+ if (m_state == IsWatched)
+ return;
+ WTF::storeStoreFence();
m_state = IsWatched;
+ WTF::storeStoreFence();
}
- void fireAll()
+ void fireAll(const FireDetail& detail)
{
- if (state() != IsWatched)
+ if (LIKELY(m_state != IsWatched))
return;
- fireAllSlow();
+ fireAllSlow(detail);
}
- void touch()
+ void fireAll(const char* reason)
+ {
+ if (LIKELY(m_state != IsWatched))
+ return;
+ fireAllSlow(reason);
+ }
+
+ void touch(const FireDetail& detail)
{
if (state() == ClearWatchpoint)
startWatching();
else
- fireAll();
+ fireAll(detail);
}
- void invalidate()
+ void touch(const char* reason)
+ {
+ touch(StringFireDetail(reason));
+ }
+
+ void invalidate(const FireDetail& detail)
{
if (state() == IsWatched)
- fireAll();
+ fireAll(detail);
m_state = IsInvalidated;
}
-
+
+ void invalidate(const char* reason)
+ {
+ invalidate(StringFireDetail(reason));
+ }
+
+ bool isBeingWatched() const
+ {
+ return m_setIsNotEmpty;
+ }
+
int8_t* addressOfState() { return &m_state; }
+ static ptrdiff_t offsetOfState() { return OBJECT_OFFSETOF(WatchpointSet, m_state); }
int8_t* addressOfSetIsNotEmpty() { return &m_setIsNotEmpty; }
- JS_EXPORT_PRIVATE void fireAllSlow(); // Call only if you've checked isWatched.
+ JS_EXPORT_PRIVATE void fireAllSlow(const FireDetail&); // Call only if you've checked isWatched.
+ JS_EXPORT_PRIVATE void fireAllSlow(const char* reason); // Ditto.
private:
- void fireAllWatchpoints();
+ void fireAllWatchpoints(const FireDetail&);
friend class InlineWatchpointSet;
@@ -174,18 +245,34 @@ public:
freeFat();
}
+ // Fast way of getting the state, which only works from the main thread.
+ WatchpointState stateOnJSThread() const
+ {
+ uintptr_t data = m_data;
+ if (isFat(data))
+ return fat(data)->stateOnJSThread();
+ return decodeState(data);
+ }
+
+ // It is safe to call this from another thread. It may return a prior state,
+ // but that should be fine since you should only perform actions based on the
+ // state if you also add a watchpoint.
+ WatchpointState state() const
+ {
+ WTF::loadLoadFence();
+ uintptr_t data = m_data;
+ WTF::loadLoadFence();
+ if (isFat(data))
+ return fat(data)->state();
+ return decodeState(data);
+ }
+
// It is safe to call this from another thread. It may return false
// even if the set actually had been invalidated, but that ought to happen
// only in the case of races, and should be rare.
bool hasBeenInvalidated() const
{
- WTF::loadLoadFence();
- uintptr_t data = m_data;
- if (isFat(data)) {
- WTF::loadLoadFence();
- return fat(data)->hasBeenInvalidated();
- }
- return decodeState(data) == IsInvalidated;
+ return state() == IsInvalidated;
}
// Like hasBeenInvalidated(), may be called from another thread.
@@ -206,10 +293,10 @@ public:
m_data = encodeState(IsWatched);
}
- void fireAll()
+ void fireAll(const FireDetail& detail)
{
if (isFat()) {
- fat()->fireAll();
+ fat()->fireAll(detail);
return;
}
if (decodeState(m_data) == ClearWatchpoint)
@@ -218,19 +305,77 @@ public:
WTF::storeStoreFence();
}
- void touch()
+ void invalidate(const FireDetail& detail)
+ {
+ if (isFat())
+ fat()->invalidate(detail);
+ else
+ m_data = encodeState(IsInvalidated);
+ }
+
+ JS_EXPORT_PRIVATE void fireAll(const char* reason);
+
+ void touch(const FireDetail& detail)
{
if (isFat()) {
- fat()->touch();
+ fat()->touch(detail);
return;
}
- if (decodeState(m_data) == ClearWatchpoint)
+ uintptr_t data = m_data;
+ if (decodeState(data) == IsInvalidated)
+ return;
+ WTF::storeStoreFence();
+ if (decodeState(data) == ClearWatchpoint)
m_data = encodeState(IsWatched);
else
m_data = encodeState(IsInvalidated);
WTF::storeStoreFence();
}
+ void touch(const char* reason)
+ {
+ touch(StringFireDetail(reason));
+ }
+
+ // Note that for any watchpoint that is visible from the DFG, it would be incorrect to write code like:
+ //
+ // if (w.isBeingWatched())
+ // w.fireAll()
+ //
+ // Concurrently to this, the DFG could do:
+ //
+ // if (w.isStillValid())
+ // perform optimizations;
+ // if (!w.isStillValid())
+ // retry compilation;
+ //
+ // Note that the DFG algorithm is widespread, and sound, because fireAll() and invalidate() will leave
+ // the watchpoint in a !isStillValid() state. Hence, if fireAll() or invalidate() interleaved between
+ // the first isStillValid() check and the second one, then it would simply cause the DFG to retry
+ // compilation later.
+ //
+ // But, if you change some piece of state that the DFG might optimize for, but invalidate the
+ // watchpoint by doing:
+ //
+ // if (w.isBeingWatched())
+ // w.fireAll()
+ //
+ // then the DFG would never know that you invalidated state between the two checks.
+ //
+ // There are two ways to work around this:
+ //
+ // - Call fireAll() without a isBeingWatched() check. Then, the DFG will know that the watchpoint has
+ // been invalidated when it does its second check.
+ //
+ // - Do not expose the watchpoint set to the DFG directly, and have your own way of validating whether
+ // the assumptions that the DFG thread used are still valid when the DFG code is installed.
+ bool isBeingWatched() const
+ {
+ if (isFat())
+ return fat()->isBeingWatched();
+ return false;
+ }
+
private:
static const uintptr_t IsThinFlag = 1;
static const uintptr_t StateMask = 6;
@@ -247,7 +392,7 @@ private:
static uintptr_t encodeState(WatchpointState state)
{
- return (state << StateShift) | IsThinFlag;
+ return (static_cast<uintptr_t>(state) << StateShift) | IsThinFlag;
}
bool isThin() const { return isThin(m_data); }