author    | Konstantin Tokarev <annulen@yandex.ru> | 2016-08-25 19:20:41 +0300
committer | Konstantin Tokarev <annulen@yandex.ru> | 2017-02-02 12:30:55 +0000
commit    | 6882a04fb36642862b11efe514251d32070c3d65 (patch)
tree      | b7959826000b061fd5ccc7512035c7478742f7b0 /Source/JavaScriptCore/bytecode
parent    | ab6df191029eeeb0b0f16f127d553265659f739e (diff)
download  | qtwebkit-6882a04fb36642862b11efe514251d32070c3d65.tar.gz
Imported QtWebKit TP3 (git b57bc6801f1876c3220d5a4bfea33d620d477443)
Change-Id: I3b1d8a2808782c9f34d50240000e20cb38d3680f
Reviewed-by: Konstantin Tokarev <annulen@yandex.ru>
Diffstat (limited to 'Source/JavaScriptCore/bytecode')
136 files changed, 17648 insertions, 6187 deletions
diff --git a/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp new file mode 100644 index 000000000..9f02e8cb6 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "AdaptiveInferredPropertyValueWatchpointBase.h" + +#include "JSCellInlines.h" +#include "StructureInlines.h" + +namespace JSC { + +AdaptiveInferredPropertyValueWatchpointBase::AdaptiveInferredPropertyValueWatchpointBase(const ObjectPropertyCondition& key) + : m_key(key) +{ + RELEASE_ASSERT(key.kind() == PropertyCondition::Equivalence); +} + +void AdaptiveInferredPropertyValueWatchpointBase::install() +{ + RELEASE_ASSERT(m_key.isWatchable()); + + m_key.object()->structure()->addTransitionWatchpoint(&m_structureWatchpoint); + + PropertyOffset offset = m_key.object()->structure()->getConcurrently(m_key.uid()); + WatchpointSet* set = m_key.object()->structure()->propertyReplacementWatchpointSet(offset); + set->add(&m_propertyWatchpoint); +} + +void AdaptiveInferredPropertyValueWatchpointBase::fire(const FireDetail& detail) +{ + // We need to defer GC here otherwise we might trigger a GC that could destroy the owner + // CodeBlock. In particular, this can happen when we add rare data to a structure when + // we EnsureWatchability. + DeferGCForAWhile defer(*Heap::heap(m_key.object())); + // One of the watchpoints fired, but the other one didn't. Make sure that neither of them are + // in any set anymore. This simplifies things by allowing us to reinstall the watchpoints + // wherever from scratch. 
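(Aside: the two fireInternal overrides just below recover the owning AdaptiveInferredPropertyValueWatchpointBase from a pointer to one of its embedded watchpoint members: the classic container_of idiom, which JSC spells with OBJECT_OFFSETOF and bitwise_cast. A minimal standalone sketch of the idiom, with illustrative Owner/Member names rather than JSC types:)

    // Sketch of the container_of idiom used by the fireInternal overrides;
    // Owner and Member are illustrative stand-ins, not JSC types.
    #include <cstddef>

    struct Member {};

    struct Owner {
        int payload;
        Member m_member;
    };

    Owner* ownerOf(Member* member)
    {
        // offsetof gives the byte distance of m_member inside Owner;
        // stepping back by that many bytes recovers the enclosing object.
        char* raw = reinterpret_cast<char*>(member) - offsetof(Owner, m_member);
        return reinterpret_cast<Owner*>(raw);
    }

    // Usage: for any Owner o, ownerOf(&o.m_member) == &o.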
+ if (m_structureWatchpoint.isOnList()) + m_structureWatchpoint.remove(); + if (m_propertyWatchpoint.isOnList()) + m_propertyWatchpoint.remove(); + + if (m_key.isWatchable(PropertyCondition::EnsureWatchability)) { + install(); + return; + } + + handleFire(detail); +} + +void AdaptiveInferredPropertyValueWatchpointBase::StructureWatchpoint::fireInternal(const FireDetail& detail) +{ + ptrdiff_t myOffset = OBJECT_OFFSETOF(AdaptiveInferredPropertyValueWatchpointBase, m_structureWatchpoint); + + AdaptiveInferredPropertyValueWatchpointBase* parent = bitwise_cast<AdaptiveInferredPropertyValueWatchpointBase*>(bitwise_cast<char*>(this) - myOffset); + + parent->fire(detail); +} + +void AdaptiveInferredPropertyValueWatchpointBase::PropertyWatchpoint::fireInternal(const FireDetail& detail) +{ + ptrdiff_t myOffset = OBJECT_OFFSETOF(AdaptiveInferredPropertyValueWatchpointBase, m_propertyWatchpoint); + + AdaptiveInferredPropertyValueWatchpointBase* parent = bitwise_cast<AdaptiveInferredPropertyValueWatchpointBase*>(bitwise_cast<char*>(this) - myOffset); + + parent->fire(detail); +} + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h new file mode 100644 index 000000000..3fd022303 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef AdaptiveInferredPropertyValueWatchpointBase_h +#define AdaptiveInferredPropertyValueWatchpointBase_h + +#include "ObjectPropertyCondition.h" +#include "Watchpoint.h" +#include <wtf/FastMalloc.h> +#include <wtf/Noncopyable.h> + +namespace JSC { + +class AdaptiveInferredPropertyValueWatchpointBase { + WTF_MAKE_NONCOPYABLE(AdaptiveInferredPropertyValueWatchpointBase); + WTF_MAKE_FAST_ALLOCATED; + +public: + AdaptiveInferredPropertyValueWatchpointBase(const ObjectPropertyCondition&); + + const ObjectPropertyCondition& key() const { return m_key; } + + void install(); + + virtual ~AdaptiveInferredPropertyValueWatchpointBase() = default; + +protected: + virtual void handleFire(const FireDetail&) = 0; + +private: + class StructureWatchpoint : public Watchpoint { + public: + StructureWatchpoint() { } + protected: + virtual void fireInternal(const FireDetail&) override; + }; + class PropertyWatchpoint : public Watchpoint { + public: + PropertyWatchpoint() { } + protected: + virtual void fireInternal(const FireDetail&) override; + }; + + void fire(const FireDetail&); + + ObjectPropertyCondition m_key; + StructureWatchpoint m_structureWatchpoint; + PropertyWatchpoint m_propertyWatchpoint; +}; + +} // namespace JSC + +#endif /* AdaptiveInferredPropertyValueWatchpointBase_h */ diff --git a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp index 6d9afda28..905b5bd3c 100644 --- a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp +++ b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,15 +26,30 @@ #include "config.h" #include "ArrayAllocationProfile.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { void ArrayAllocationProfile::updateIndexingType() { - if (!m_lastArray) + // This is awkwardly racy but totally sound even when executed concurrently. The + // worst cases go something like this: + // + // - Two threads race to execute this code; one of them succeeds in updating the + // m_currentIndexingType and the other either updates it again, or sees a null + // m_lastArray; if it updates it again then at worst it will cause the profile + // to "forget" some array. That's still sound, since we don't promise that + // this profile is a reflection of any kind of truth. + // + // - A concurrent thread reads m_lastArray, but that array is now dead. While + // it's possible for that array to no longer be reachable, it cannot actually + // be freed, since we require the GC to wait until all concurrent JITing + // finishes. + + JSArray* lastArray = m_lastArray; + if (!lastArray) return; - m_currentIndexingType = leastUpperBoundOfIndexingTypes(m_currentIndexingType, m_lastArray->structure()->indexingType()); + m_currentIndexingType = leastUpperBoundOfIndexingTypes(m_currentIndexingType, lastArray->indexingType()); m_lastArray = 0; } diff --git a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h index a1647fad4..f03763f70 100644 --- a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h +++ b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. 
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -41,7 +41,8 @@ public: IndexingType selectIndexingType() { - if (m_lastArray && UNLIKELY(m_lastArray->structure()->indexingType() != m_currentIndexingType)) + JSArray* lastArray = m_lastArray; + if (lastArray && UNLIKELY(lastArray->indexingType() != m_currentIndexingType)) updateIndexingType(); return m_currentIndexingType; } diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp index 7ea31da10..b8ade2223 100644 --- a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp +++ b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -24,10 +24,11 @@ */ #include "config.h" -#include "JSCellInlines.h" #include "ArrayProfile.h" #include "CodeBlock.h" +#include "JSCInlines.h" +#include <wtf/CommaPrinter.h> #include <wtf/StringExtras.h> #include <wtf/StringPrintStream.h> @@ -36,7 +37,7 @@ namespace JSC { void dumpArrayModes(PrintStream& out, ArrayModes arrayModes) { if (!arrayModes) { - out.print("0:<empty>"); + out.print("<empty>"); return; } @@ -45,84 +46,90 @@ void dumpArrayModes(PrintStream& out, ArrayModes arrayModes) return; } - out.print(arrayModes, ":"); - + CommaPrinter comma("|"); if (arrayModes & asArrayModes(NonArray)) - out.print("NonArray"); + out.print(comma, "NonArray"); if (arrayModes & asArrayModes(NonArrayWithInt32)) - out.print("NonArrayWithInt32"); + out.print(comma, "NonArrayWithInt32"); if (arrayModes & asArrayModes(NonArrayWithDouble)) - out.print("NonArrayWithDouble"); + out.print(comma, "NonArrayWithDouble"); if (arrayModes & asArrayModes(NonArrayWithContiguous)) - out.print("NonArrayWithContiguous"); + out.print(comma, "NonArrayWithContiguous"); if (arrayModes & asArrayModes(NonArrayWithArrayStorage)) - out.print("NonArrayWithArrayStorage"); + out.print(comma, "NonArrayWithArrayStorage"); if (arrayModes & asArrayModes(NonArrayWithSlowPutArrayStorage)) - out.print("NonArrayWithSlowPutArrayStorage"); + out.print(comma, "NonArrayWithSlowPutArrayStorage"); if (arrayModes & asArrayModes(ArrayClass)) - out.print("ArrayClass"); + out.print(comma, "ArrayClass"); if (arrayModes & asArrayModes(ArrayWithUndecided)) - out.print("ArrayWithUndecided"); + out.print(comma, "ArrayWithUndecided"); if (arrayModes & asArrayModes(ArrayWithInt32)) - out.print("ArrayWithInt32"); + out.print(comma, "ArrayWithInt32"); if (arrayModes & asArrayModes(ArrayWithDouble)) - out.print("ArrayWithDouble"); + out.print(comma, "ArrayWithDouble"); if (arrayModes & asArrayModes(ArrayWithContiguous)) - out.print("ArrayWithContiguous"); + out.print(comma, "ArrayWithContiguous"); if (arrayModes & asArrayModes(ArrayWithArrayStorage)) - out.print("ArrayWithArrayStorage"); + out.print(comma, "ArrayWithArrayStorage"); if (arrayModes & asArrayModes(ArrayWithSlowPutArrayStorage)) - out.print("ArrayWithSlowPutArrayStorage"); + out.print(comma, "ArrayWithSlowPutArrayStorage"); + + if (arrayModes & Int8ArrayMode) + out.print(comma, "Int8ArrayMode"); + if (arrayModes & Int16ArrayMode) + out.print(comma, "Int16ArrayMode"); + if (arrayModes & Int32ArrayMode) + out.print(comma, "Int32ArrayMode"); + if 
(arrayModes & Uint8ArrayMode) + out.print(comma, "Uint8ArrayMode"); + if (arrayModes & Uint8ClampedArrayMode) + out.print(comma, "Uint8ClampedArrayMode"); + if (arrayModes & Uint16ArrayMode) + out.print(comma, "Uint16ArrayMode"); + if (arrayModes & Uint32ArrayMode) + out.print(comma, "Uint32ArrayMode"); + if (arrayModes & Float32ArrayMode) + out.print(comma, "Float32ArrayMode"); + if (arrayModes & Float64ArrayMode) + out.print(comma, "Float64ArrayMode"); } -ArrayModes ArrayProfile::updatedObservedArrayModes() const +void ArrayProfile::computeUpdatedPrediction(const ConcurrentJITLocker& locker, CodeBlock* codeBlock) { - if (m_lastSeenStructure) - return m_observedArrayModes | arrayModeFromStructure(m_lastSeenStructure); - return m_observedArrayModes; + if (!m_lastSeenStructureID) + return; + + Structure* lastSeenStructure = codeBlock->heap()->structureIDTable().get(m_lastSeenStructureID); + computeUpdatedPrediction(locker, codeBlock, lastSeenStructure); + m_lastSeenStructureID = 0; } -void ArrayProfile::computeUpdatedPrediction(CodeBlock* codeBlock, OperationInProgress operation) +void ArrayProfile::computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock* codeBlock, Structure* lastSeenStructure) { - const bool verbose = false; - - if (m_lastSeenStructure) { - m_observedArrayModes |= arrayModeFromStructure(m_lastSeenStructure); - m_mayInterceptIndexedAccesses |= - m_lastSeenStructure->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero(); - if (!codeBlock->globalObject()->isOriginalArrayStructure(m_lastSeenStructure)) - m_usesOriginalArrayStructures = false; - if (!structureIsPolymorphic()) { - if (!m_expectedStructure) - m_expectedStructure = m_lastSeenStructure; - else if (m_expectedStructure != m_lastSeenStructure) { - if (verbose) - dataLog(*codeBlock, " bc#", m_bytecodeOffset, ": making structure polymorphic because ", RawPointer(m_expectedStructure), " (", m_expectedStructure->classInfo()->className, ") != ", RawPointer(m_lastSeenStructure), " (", m_lastSeenStructure->classInfo()->className, ")\n"); - m_expectedStructure = polymorphicStructure(); - } - } - m_lastSeenStructure = 0; - } + m_observedArrayModes |= arrayModeFromStructure(lastSeenStructure); - if (hasTwoOrMoreBitsSet(m_observedArrayModes)) { - if (verbose) - dataLog(*codeBlock, " bc#", m_bytecodeOffset, ": making structure polymorphic because two or more bits are set in m_observedArrayModes\n"); - m_expectedStructure = polymorphicStructure(); + if (!m_didPerformFirstRunPruning + && hasTwoOrMoreBitsSet(m_observedArrayModes)) { + m_observedArrayModes = arrayModeFromStructure(lastSeenStructure); + m_didPerformFirstRunPruning = true; } - if (operation == Collection - && expectedStructure() - && !Heap::isMarked(m_expectedStructure)) { - if (verbose) - dataLog(*codeBlock, " bc#", m_bytecodeOffset, ": making structure during GC\n"); - m_expectedStructure = polymorphicStructure(); - } + m_mayInterceptIndexedAccesses |= + lastSeenStructure->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero(); + JSGlobalObject* globalObject = codeBlock->globalObject(); + if (!globalObject->isOriginalArrayStructure(lastSeenStructure) + && !globalObject->isOriginalTypedArrayStructure(lastSeenStructure)) + m_usesOriginalArrayStructures = false; } -CString ArrayProfile::briefDescription(CodeBlock* codeBlock) +CString ArrayProfile::briefDescription(const ConcurrentJITLocker& locker, CodeBlock* codeBlock) +{ + computeUpdatedPrediction(locker, codeBlock); + return briefDescriptionWithoutUpdating(locker); +} + 
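(Aside: computeUpdatedPrediction above applies a one-shot pruning heuristic: the first time the observed mode set looks polymorphic, i.e. has two or more bits set, it is reset to just the last-seen mode, on the theory that first-run observations are often warm-up noise. A hedged sketch of that heuristic over a plain bitfield; the helper names are illustrative:)

    // Illustrative sketch of one-shot pruning over a bitfield profile.
    using Modes = unsigned;

    bool hasTwoOrMoreBitsSet(Modes modes)
    {
        // Clearing the lowest set bit leaves a nonzero value
        // only when at least two bits were set.
        return modes & (modes - 1);
    }

    void updatePrediction(Modes& observed, Modes lastSeen, bool& didPrune)
    {
        observed |= lastSeen;
        if (!didPrune && hasTwoOrMoreBitsSet(observed)) {
            observed = lastSeen; // forget early noise once, keep the latest mode
            didPrune = true;
        }
    }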
+CString ArrayProfile::briefDescriptionWithoutUpdating(const ConcurrentJITLocker&) { - computeUpdatedPrediction(codeBlock); - StringPrintStream out; bool hasPrinted = false; @@ -134,18 +141,6 @@ CString ArrayProfile::briefDescription(CodeBlock* codeBlock) hasPrinted = true; } - if (structureIsPolymorphic()) { - if (hasPrinted) - out.print(", "); - out.print("struct = TOP"); - hasPrinted = true; - } else if (m_expectedStructure) { - if (hasPrinted) - out.print(", "); - out.print("struct = ", RawPointer(m_expectedStructure)); - hasPrinted = true; - } - if (m_mayStoreToHole) { if (hasPrinted) out.print(", "); diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.h b/Source/JavaScriptCore/bytecode/ArrayProfile.h index 384275689..66b295da7 100644 --- a/Source/JavaScriptCore/bytecode/ArrayProfile.h +++ b/Source/JavaScriptCore/bytecode/ArrayProfile.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,6 +26,7 @@ #ifndef ArrayProfile_h #define ArrayProfile_h +#include "ConcurrentJITLock.h" #include "JSArray.h" #include "Structure.h" #include <wtf/HashMap.h> @@ -36,20 +37,44 @@ namespace JSC { class CodeBlock; class LLIntOffsetsExtractor; -// This is a bitfield where each bit represents an IndexingType that we have seen. -// There are 32 indexing types, so an unsigned is enough. +// This is a bitfield where each bit represents an type of array access that we have seen. +// There are 16 indexing types that use the lower bits. +// There are 9 typed array types taking the bits 16 to 25. typedef unsigned ArrayModes; +const ArrayModes Int8ArrayMode = 1 << 16; +const ArrayModes Int16ArrayMode = 1 << 17; +const ArrayModes Int32ArrayMode = 1 << 18; +const ArrayModes Uint8ArrayMode = 1 << 19; +const ArrayModes Uint8ClampedArrayMode = 1 << 20; +const ArrayModes Uint16ArrayMode = 1 << 21; +const ArrayModes Uint32ArrayMode = 1 << 22; +const ArrayModes Float32ArrayMode = 1 << 23; +const ArrayModes Float64ArrayMode = 1 << 24; + #define asArrayModes(type) \ (static_cast<unsigned>(1) << static_cast<unsigned>(type)) +#define ALL_TYPED_ARRAY_MODES \ + (Int8ArrayMode \ + | Int16ArrayMode \ + | Int32ArrayMode \ + | Uint8ArrayMode \ + | Uint8ClampedArrayMode \ + | Uint16ArrayMode \ + | Uint32ArrayMode \ + | Float32ArrayMode \ + | Float64ArrayMode \ + ) + #define ALL_NON_ARRAY_ARRAY_MODES \ (asArrayModes(NonArray) \ | asArrayModes(NonArrayWithInt32) \ | asArrayModes(NonArrayWithDouble) \ | asArrayModes(NonArrayWithContiguous) \ | asArrayModes(NonArrayWithArrayStorage) \ - | asArrayModes(NonArrayWithSlowPutArrayStorage)) + | asArrayModes(NonArrayWithSlowPutArrayStorage) \ + | ALL_TYPED_ARRAY_MODES) #define ALL_ARRAY_ARRAY_MODES \ (asArrayModes(ArrayClass) \ @@ -64,6 +89,29 @@ typedef unsigned ArrayModes; inline ArrayModes arrayModeFromStructure(Structure* structure) { + switch (structure->classInfo()->typedArrayStorageType) { + case TypeInt8: + return Int8ArrayMode; + case TypeUint8: + return Uint8ArrayMode; + case TypeUint8Clamped: + return Uint8ClampedArrayMode; + case TypeInt16: + return Int16ArrayMode; + case TypeUint16: + return Uint16ArrayMode; + case TypeInt32: + return Int32ArrayMode; + case TypeUint32: + return Uint32ArrayMode; + case TypeFloat32: + return Float32ArrayMode; + case TypeFloat64: + return Float64ArrayMode; + case TypeDataView: + case NotTypedArray: + break; + } return 
asArrayModes(structure->indexingType()); } @@ -79,6 +127,11 @@ inline bool mergeArrayModes(ArrayModes& left, ArrayModes right) return true; } +inline bool arrayModesAreClearOrTop(ArrayModes modes) +{ + return !modes || modes == ALL_ARRAY_MODES; +} + // Checks if proven is a subset of expected. inline bool arrayModesAlreadyChecked(ArrayModes proven, ArrayModes expected) { @@ -129,66 +182,55 @@ class ArrayProfile { public: ArrayProfile() : m_bytecodeOffset(std::numeric_limits<unsigned>::max()) - , m_lastSeenStructure(0) - , m_expectedStructure(0) + , m_lastSeenStructureID(0) , m_mayStoreToHole(false) , m_outOfBounds(false) , m_mayInterceptIndexedAccesses(false) , m_usesOriginalArrayStructures(true) + , m_didPerformFirstRunPruning(false) , m_observedArrayModes(0) { } ArrayProfile(unsigned bytecodeOffset) : m_bytecodeOffset(bytecodeOffset) - , m_lastSeenStructure(0) - , m_expectedStructure(0) + , m_lastSeenStructureID(0) , m_mayStoreToHole(false) , m_outOfBounds(false) , m_mayInterceptIndexedAccesses(false) , m_usesOriginalArrayStructures(true) + , m_didPerformFirstRunPruning(false) , m_observedArrayModes(0) { } unsigned bytecodeOffset() const { return m_bytecodeOffset; } - Structure** addressOfLastSeenStructure() { return &m_lastSeenStructure; } + StructureID* addressOfLastSeenStructureID() { return &m_lastSeenStructureID; } ArrayModes* addressOfArrayModes() { return &m_observedArrayModes; } bool* addressOfMayStoreToHole() { return &m_mayStoreToHole; } + + void setOutOfBounds() { m_outOfBounds = true; } bool* addressOfOutOfBounds() { return &m_outOfBounds; } void observeStructure(Structure* structure) { - m_lastSeenStructure = structure; + m_lastSeenStructureID = structure->id(); } - void computeUpdatedPrediction(CodeBlock*, OperationInProgress = NoOperation); + void computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock*); + void computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock*, Structure* lastSeenStructure); - Structure* expectedStructure() const - { - if (structureIsPolymorphic()) - return 0; - return m_expectedStructure; - } - bool structureIsPolymorphic() const - { - return m_expectedStructure == polymorphicStructure(); - } - bool hasDefiniteStructure() const - { - return !structureIsPolymorphic() && m_expectedStructure; - } - ArrayModes observedArrayModes() const { return m_observedArrayModes; } - ArrayModes updatedObservedArrayModes() const; // Computes the observed array modes without updating the profile. 
- bool mayInterceptIndexedAccesses() const { return m_mayInterceptIndexedAccesses; } + ArrayModes observedArrayModes(const ConcurrentJITLocker&) const { return m_observedArrayModes; } + bool mayInterceptIndexedAccesses(const ConcurrentJITLocker&) const { return m_mayInterceptIndexedAccesses; } - bool mayStoreToHole() const { return m_mayStoreToHole; } - bool outOfBounds() const { return m_outOfBounds; } + bool mayStoreToHole(const ConcurrentJITLocker&) const { return m_mayStoreToHole; } + bool outOfBounds(const ConcurrentJITLocker&) const { return m_outOfBounds; } - bool usesOriginalArrayStructures() const { return m_usesOriginalArrayStructures; } + bool usesOriginalArrayStructures(const ConcurrentJITLocker&) const { return m_usesOriginalArrayStructures; } - CString briefDescription(CodeBlock*); + CString briefDescription(const ConcurrentJITLocker&, CodeBlock*); + CString briefDescriptionWithoutUpdating(const ConcurrentJITLocker&); private: friend class LLIntOffsetsExtractor; @@ -196,16 +238,16 @@ private: static Structure* polymorphicStructure() { return static_cast<Structure*>(reinterpret_cast<void*>(1)); } unsigned m_bytecodeOffset; - Structure* m_lastSeenStructure; - Structure* m_expectedStructure; + StructureID m_lastSeenStructureID; bool m_mayStoreToHole; // This flag may become overloaded to indicate other special cases that were encountered during array access, as it depends on indexing type. Since we currently have basically just one indexing type (two variants of ArrayStorage), this flag for now just means exactly what its name implies. bool m_outOfBounds; - bool m_mayInterceptIndexedAccesses; - bool m_usesOriginalArrayStructures; + bool m_mayInterceptIndexedAccesses : 1; + bool m_usesOriginalArrayStructures : 1; + bool m_didPerformFirstRunPruning : 1; ArrayModes m_observedArrayModes; }; -typedef SegmentedVector<ArrayProfile, 4, 0> ArrayProfileVector; +typedef SegmentedVector<ArrayProfile, 4> ArrayProfileVector; } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/ByValInfo.h b/Source/JavaScriptCore/bytecode/ByValInfo.h index 3f79967df..20518300c 100644 --- a/Source/JavaScriptCore/bytecode/ByValInfo.h +++ b/Source/JavaScriptCore/bytecode/ByValInfo.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,23 +26,25 @@ #ifndef ByValInfo_h #define ByValInfo_h -#include <wtf/Platform.h> - -#if ENABLE(JIT) - #include "ClassInfo.h" #include "CodeLocation.h" +#include "CodeOrigin.h" #include "IndexingType.h" #include "JITStubRoutine.h" #include "Structure.h" +#include "StructureStubInfo.h" namespace JSC { +#if ENABLE(JIT) + enum JITArrayMode { JITInt32, JITDouble, JITContiguous, JITArrayStorage, + JITDirectArguments, + JITScopedArguments, JITInt8Array, JITInt16Array, JITInt32Array, @@ -67,14 +69,26 @@ inline bool isOptimizableIndexingType(IndexingType indexingType) } } +inline bool hasOptimizableIndexingForJSType(JSType type) +{ + switch (type) { + case DirectArgumentsType: + case ScopedArgumentsType: + return true; + default: + return false; + } +} + inline bool hasOptimizableIndexingForClassInfo(const ClassInfo* classInfo) { - return classInfo->typedArrayStorageType != TypedArrayNone; + return isTypedView(classInfo->typedArrayStorageType); } inline bool hasOptimizableIndexing(Structure* structure) { return isOptimizableIndexingType(structure->indexingType()) + || hasOptimizableIndexingForJSType(structure->typeInfo().type()) || hasOptimizableIndexingForClassInfo(structure->classInfo()); } @@ -95,26 +109,39 @@ inline JITArrayMode jitArrayModeForIndexingType(IndexingType indexingType) } } +inline JITArrayMode jitArrayModeForJSType(JSType type) +{ + switch (type) { + case DirectArgumentsType: + return JITDirectArguments; + case ScopedArgumentsType: + return JITScopedArguments; + default: + RELEASE_ASSERT_NOT_REACHED(); + return JITContiguous; + } +} + inline JITArrayMode jitArrayModeForClassInfo(const ClassInfo* classInfo) { switch (classInfo->typedArrayStorageType) { - case TypedArrayInt8: + case TypeInt8: return JITInt8Array; - case TypedArrayInt16: + case TypeInt16: return JITInt16Array; - case TypedArrayInt32: + case TypeInt32: return JITInt32Array; - case TypedArrayUint8: + case TypeUint8: return JITUint8Array; - case TypedArrayUint8Clamped: + case TypeUint8Clamped: return JITUint8ClampedArray; - case TypedArrayUint16: + case TypeUint16: return JITUint16Array; - case TypedArrayUint32: + case TypeUint32: return JITUint32Array; - case TypedArrayFloat32: + case TypeFloat32: return JITFloat32Array; - case TypedArrayFloat64: + case TypeFloat64: return JITFloat64Array; default: CRASH(); @@ -122,35 +149,91 @@ inline JITArrayMode jitArrayModeForClassInfo(const ClassInfo* classInfo) } } +inline bool jitArrayModePermitsPut(JITArrayMode mode) +{ + switch (mode) { + case JITDirectArguments: + case JITScopedArguments: + // We could support put_by_val on these at some point, but it's just not that profitable + // at the moment. 
+ return false; + default: + return true; + } +} + +inline TypedArrayType typedArrayTypeForJITArrayMode(JITArrayMode mode) +{ + switch (mode) { + case JITInt8Array: + return TypeInt8; + case JITInt16Array: + return TypeInt16; + case JITInt32Array: + return TypeInt32; + case JITUint8Array: + return TypeUint8; + case JITUint8ClampedArray: + return TypeUint8Clamped; + case JITUint16Array: + return TypeUint16; + case JITUint32Array: + return TypeUint32; + case JITFloat32Array: + return TypeFloat32; + case JITFloat64Array: + return TypeFloat64; + default: + CRASH(); + return NotTypedArray; + } +} + inline JITArrayMode jitArrayModeForStructure(Structure* structure) { if (isOptimizableIndexingType(structure->indexingType())) return jitArrayModeForIndexingType(structure->indexingType()); + if (hasOptimizableIndexingForJSType(structure->typeInfo().type())) + return jitArrayModeForJSType(structure->typeInfo().type()); + ASSERT(hasOptimizableIndexingForClassInfo(structure->classInfo())); return jitArrayModeForClassInfo(structure->classInfo()); } struct ByValInfo { ByValInfo() { } - - ByValInfo(unsigned bytecodeIndex, CodeLocationJump badTypeJump, JITArrayMode arrayMode, int16_t badTypeJumpToDone, int16_t returnAddressToSlowPath) + + ByValInfo(unsigned bytecodeIndex, CodeLocationJump notIndexJump, CodeLocationJump badTypeJump, JITArrayMode arrayMode, ArrayProfile* arrayProfile, int16_t badTypeJumpToDone, int16_t badTypeJumpToNextHotPath, int16_t returnAddressToSlowPath) : bytecodeIndex(bytecodeIndex) + , notIndexJump(notIndexJump) , badTypeJump(badTypeJump) , arrayMode(arrayMode) + , arrayProfile(arrayProfile) , badTypeJumpToDone(badTypeJumpToDone) + , badTypeJumpToNextHotPath(badTypeJumpToNextHotPath) , returnAddressToSlowPath(returnAddressToSlowPath) , slowPathCount(0) + , stubInfo(nullptr) + , tookSlowPath(false) + , seen(false) { } - + unsigned bytecodeIndex; + CodeLocationJump notIndexJump; CodeLocationJump badTypeJump; JITArrayMode arrayMode; // The array mode that was baked into the inline JIT code. + ArrayProfile* arrayProfile; int16_t badTypeJumpToDone; + int16_t badTypeJumpToNextHotPath; int16_t returnAddressToSlowPath; unsigned slowPathCount; RefPtr<JITStubRoutine> stubRoutine; + Identifier cachedId; + StructureStubInfo* stubInfo; + bool tookSlowPath : 1; + bool seen : 1; }; inline unsigned getByValInfoBytecodeIndex(ByValInfo* info) @@ -158,9 +241,15 @@ inline unsigned getByValInfoBytecodeIndex(ByValInfo* info) return info->bytecodeIndex; } -} // namespace JSC +typedef HashMap<CodeOrigin, ByValInfo*, CodeOriginApproximateHash> ByValInfoMap; + +#else // ENABLE(JIT) + +typedef HashMap<int, void*> ByValInfoMap; #endif // ENABLE(JIT) +} // namespace JSC + #endif // ByValInfo_h diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp new file mode 100644 index 000000000..7f17c0ef1 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp @@ -0,0 +1,238 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "BytecodeBasicBlock.h" + +#include "CodeBlock.h" +#include "JSCInlines.h" +#include "PreciseJumpTargets.h" + +namespace JSC { + +void BytecodeBasicBlock::shrinkToFit() +{ + m_bytecodeOffsets.shrinkToFit(); + m_successors.shrinkToFit(); +} + +static bool isBranch(OpcodeID opcodeID) +{ + switch (opcodeID) { + case op_jmp: + case op_jtrue: + case op_jfalse: + case op_jeq_null: + case op_jneq_null: + case op_jneq_ptr: + case op_jless: + case op_jlesseq: + case op_jgreater: + case op_jgreatereq: + case op_jnless: + case op_jnlesseq: + case op_jngreater: + case op_jngreatereq: + case op_switch_imm: + case op_switch_char: + case op_switch_string: + case op_save: + return true; + default: + return false; + } +} + +static bool isUnconditionalBranch(OpcodeID opcodeID) +{ + switch (opcodeID) { + case op_jmp: + return true; + default: + return false; + } +} + +static bool isTerminal(OpcodeID opcodeID) +{ + switch (opcodeID) { + case op_ret: + case op_end: + return true; + default: + return false; + } +} + +static bool isThrow(OpcodeID opcodeID) +{ + switch (opcodeID) { + case op_throw: + case op_throw_static_error: + return true; + default: + return false; + } +} + +static bool isJumpTarget(OpcodeID opcodeID, const Vector<unsigned, 32>& jumpTargets, unsigned bytecodeOffset) +{ + if (opcodeID == op_catch) + return true; + + return std::binary_search(jumpTargets.begin(), jumpTargets.end(), bytecodeOffset); +} + +static void linkBlocks(BytecodeBasicBlock* predecessor, BytecodeBasicBlock* successor) +{ + predecessor->addSuccessor(successor); +} + +void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) +{ + Vector<unsigned, 32> jumpTargets; + computePreciseJumpTargets(codeBlock, jumpTargets); + + // Create the entry and exit basic blocks. 
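(Aside: the loop that follows walks the flat instruction stream and starts a new basic block at every leader: any jump target, including op_catch, and any instruction that follows a branch, terminal, or throw. Reduced to its essentials, the leader-marking phase looks roughly like this, with Op as a simplified stand-in for the real instruction stream:)

    #include <vector>

    struct Op {
        bool isJumpTarget;  // some other instruction can branch here
        bool endsBlock;     // branch, return/end, or throw
        unsigned length;    // size of this instruction in the stream
    };

    // Mark block leaders: jump targets, and successors of block-ending ops.
    std::vector<unsigned> findLeaders(const std::vector<Op>& ops)
    {
        std::vector<unsigned> leaders;
        bool nextIsLeader = true; // the first instruction always leads a block
        unsigned offset = 0;
        for (const Op& op : ops) {
            if (op.isJumpTarget || nextIsLeader)
                leaders.push_back(offset);
            nextIsLeader = op.endsBlock;
            offset += op.length;
        }
        return leaders;
    }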
+ basicBlocks.reserveCapacity(jumpTargets.size() + 2); + + auto entry = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock); + auto firstBlock = std::make_unique<BytecodeBasicBlock>(0, 0); + linkBlocks(entry.get(), firstBlock.get()); + + basicBlocks.append(WTFMove(entry)); + BytecodeBasicBlock* current = firstBlock.get(); + basicBlocks.append(WTFMove(firstBlock)); + + auto exit = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::ExitBlock); + + bool nextInstructionIsLeader = false; + + Interpreter* interpreter = codeBlock->vm()->interpreter; + Instruction* instructionsBegin = codeBlock->instructions().begin(); + unsigned instructionCount = codeBlock->instructions().size(); + for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) { + OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode); + unsigned opcodeLength = opcodeLengths[opcodeID]; + + bool createdBlock = false; + // If the current bytecode is a jump target, then it's the leader of its own basic block. + if (isJumpTarget(opcodeID, jumpTargets, bytecodeOffset) || nextInstructionIsLeader) { + auto newBlock = std::make_unique<BytecodeBasicBlock>(bytecodeOffset, opcodeLength); + current = newBlock.get(); + basicBlocks.append(WTFMove(newBlock)); + createdBlock = true; + nextInstructionIsLeader = false; + bytecodeOffset += opcodeLength; + } + + // If the current bytecode is a branch or a return, then the next instruction is the leader of its own basic block. + if (isBranch(opcodeID) || isTerminal(opcodeID) || isThrow(opcodeID)) + nextInstructionIsLeader = true; + + if (createdBlock) + continue; + + // Otherwise, just add to the length of the current block. + current->addBytecodeLength(opcodeLength); + bytecodeOffset += opcodeLength; + } + + // Link basic blocks together. + for (unsigned i = 0; i < basicBlocks.size(); i++) { + BytecodeBasicBlock* block = basicBlocks[i].get(); + + if (block->isEntryBlock() || block->isExitBlock()) + continue; + + bool fallsThrough = true; + for (unsigned bytecodeOffset = block->leaderBytecodeOffset(); bytecodeOffset < block->leaderBytecodeOffset() + block->totalBytecodeLength();) { + const Instruction& currentInstruction = instructionsBegin[bytecodeOffset]; + OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction.u.opcode); + unsigned opcodeLength = opcodeLengths[opcodeID]; + // If we found a terminal bytecode, link to the exit block. + if (isTerminal(opcodeID)) { + ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength()); + linkBlocks(block, exit.get()); + fallsThrough = false; + break; + } + + // If we found a throw, get the HandlerInfo for this instruction to see where we will jump. + // If there isn't one, treat this throw as a terminal. This is true even if we have a finally + // block because the finally block will create its own catch, which will generate a HandlerInfo. + if (isThrow(opcodeID)) { + ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength()); + HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset); + fallsThrough = false; + if (!handler) { + linkBlocks(block, exit.get()); + break; + } + for (unsigned i = 0; i < basicBlocks.size(); i++) { + BytecodeBasicBlock* otherBlock = basicBlocks[i].get(); + if (handler->target == otherBlock->leaderBytecodeOffset()) { + linkBlocks(block, otherBlock); + break; + } + } + break; + } + + // If we found a branch, link to the block(s) that we jump to. 
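(Aside: successor linking, below, matches each jump target offset against every block's leader offset, a simple O(blocks x targets) scan that is cheap at these sizes; throws link to their handler block or to exit, and blocks that fall off their end link to the next block in program order. A reduced sketch of the branch-target resolution step, with Block as a simplified stand-in:)

    #include <vector>

    struct Block {
        unsigned leader = 0;            // bytecode offset of the first instruction
        std::vector<Block*> successors;
    };

    // Link 'from' to every block whose leader matches one of the jump targets.
    void linkToTargets(Block& from, std::vector<Block>& blocks,
                       const std::vector<unsigned>& targets)
    {
        for (Block& candidate : blocks) {
            for (unsigned target : targets) {
                if (candidate.leader == target) {
                    from.successors.push_back(&candidate);
                    break;
                }
            }
        }
    }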
+ if (isBranch(opcodeID)) { + ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength()); + Vector<unsigned, 1> bytecodeOffsetsJumpedTo; + findJumpTargetsForBytecodeOffset(codeBlock, bytecodeOffset, bytecodeOffsetsJumpedTo); + + for (unsigned i = 0; i < basicBlocks.size(); i++) { + BytecodeBasicBlock* otherBlock = basicBlocks[i].get(); + if (bytecodeOffsetsJumpedTo.contains(otherBlock->leaderBytecodeOffset())) + linkBlocks(block, otherBlock); + } + + if (isUnconditionalBranch(opcodeID)) + fallsThrough = false; + + break; + } + bytecodeOffset += opcodeLength; + } + + // If we fall through then link to the next block in program order. + if (fallsThrough) { + ASSERT(i + 1 < basicBlocks.size()); + BytecodeBasicBlock* nextBlock = basicBlocks[i + 1].get(); + linkBlocks(block, nextBlock); + } + } + + basicBlocks.append(WTFMove(exit)); + + for (auto& basicBlock : basicBlocks) + basicBlock->shrinkToFit(); +} + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h new file mode 100644 index 000000000..bd7d3ae9b --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef BytecodeBasicBlock_h +#define BytecodeBasicBlock_h + +#include <limits.h> +#include <wtf/FastBitVector.h> +#include <wtf/HashMap.h> +#include <wtf/RefCounted.h> +#include <wtf/Vector.h> + +namespace JSC { + +class CodeBlock; + +class BytecodeBasicBlock { + WTF_MAKE_FAST_ALLOCATED; +public: + enum SpecialBlockType { EntryBlock, ExitBlock }; + BytecodeBasicBlock(unsigned start, unsigned length); + BytecodeBasicBlock(SpecialBlockType); + void shrinkToFit(); + + bool isEntryBlock() { return !m_leaderBytecodeOffset && !m_totalBytecodeLength; } + bool isExitBlock() { return m_leaderBytecodeOffset == UINT_MAX && m_totalBytecodeLength == UINT_MAX; } + + unsigned leaderBytecodeOffset() { return m_leaderBytecodeOffset; } + unsigned totalBytecodeLength() { return m_totalBytecodeLength; } + + Vector<unsigned>& bytecodeOffsets() { return m_bytecodeOffsets; } + void addBytecodeLength(unsigned); + + Vector<BytecodeBasicBlock*>& successors() { return m_successors; } + void addSuccessor(BytecodeBasicBlock* block) { m_successors.append(block); } + + FastBitVector& in() { return m_in; } + FastBitVector& out() { return m_out; } + +private: + unsigned m_leaderBytecodeOffset; + unsigned m_totalBytecodeLength; + + Vector<unsigned> m_bytecodeOffsets; + Vector<BytecodeBasicBlock*> m_successors; + + FastBitVector m_in; + FastBitVector m_out; +}; + +void computeBytecodeBasicBlocks(CodeBlock*, Vector<std::unique_ptr<BytecodeBasicBlock>>&); + +inline BytecodeBasicBlock::BytecodeBasicBlock(unsigned start, unsigned length) + : m_leaderBytecodeOffset(start) + , m_totalBytecodeLength(length) +{ + m_bytecodeOffsets.append(m_leaderBytecodeOffset); +} + +inline BytecodeBasicBlock::BytecodeBasicBlock(BytecodeBasicBlock::SpecialBlockType blockType) + : m_leaderBytecodeOffset(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX) + , m_totalBytecodeLength(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX) +{ +} + +inline void BytecodeBasicBlock::addBytecodeLength(unsigned bytecodeLength) +{ + m_bytecodeOffsets.append(m_leaderBytecodeOffset + m_totalBytecodeLength); + m_totalBytecodeLength += bytecodeLength; +} + +} // namespace JSC + +#endif // BytecodeBasicBlock_h diff --git a/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp new file mode 100644 index 000000000..57740ac3e --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "BytecodeIntrinsicRegistry.h" +#include "BytecodeGenerator.h" +#include "JSArrayIterator.h" +#include "JSCJSValueInlines.h" +#include "JSPromise.h" +#include "Nodes.h" +#include "StrongInlines.h" + +namespace JSC { + +#define INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET(name) m_bytecodeIntrinsicMap.add(vm.propertyNames->name##PrivateName.impl(), &BytecodeIntrinsicNode::emit_intrinsic_##name); + +BytecodeIntrinsicRegistry::BytecodeIntrinsicRegistry(VM& vm) + : m_vm(vm) + , m_bytecodeIntrinsicMap() +{ + JSC_COMMON_BYTECODE_INTRINSIC_FUNCTIONS_EACH_NAME(INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET) + JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET) + + m_undefined.set(m_vm, jsUndefined()); + m_arrayIterationKindKey.set(m_vm, jsNumber(ArrayIterateKey)); + m_arrayIterationKindValue.set(m_vm, jsNumber(ArrayIterateValue)); + m_arrayIterationKindKeyValue.set(m_vm, jsNumber(ArrayIterateKeyValue)); + m_promiseStatePending.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Pending))); + m_promiseStateFulfilled.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Fulfilled))); + m_promiseStateRejected.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Rejected))); + m_symbolIterator.set(m_vm, Symbol::create(m_vm, static_cast<SymbolImpl&>(*m_vm.propertyNames->iteratorSymbol.impl()))); + m_symbolSearch.set(m_vm, Symbol::create(m_vm, static_cast<SymbolImpl&>(*m_vm.propertyNames->searchSymbol.impl()))); +} + +BytecodeIntrinsicNode::EmitterType BytecodeIntrinsicRegistry::lookup(const Identifier& ident) const +{ + if (!m_vm.propertyNames->isPrivateName(ident)) + return nullptr; + auto iterator = m_bytecodeIntrinsicMap.find(ident.impl()); + if (iterator == m_bytecodeIntrinsicMap.end()) + return nullptr; + return iterator->value; +} + +#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) \ + JSValue BytecodeIntrinsicRegistry::name##Value(BytecodeGenerator&) \ + { \ + return m_##name.get(); \ + } + JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS) +#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h new file mode 100644 index 000000000..bd44dd39e --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef BytecodeIntrinsicRegistry_h +#define BytecodeIntrinsicRegistry_h + +#include "Identifier.h" +#include <wtf/HashTable.h> +#include <wtf/Noncopyable.h> + +namespace JSC { + +class CommonIdentifiers; +class BytecodeGenerator; +class BytecodeIntrinsicNode; +class RegisterID; +class Identifier; + +#define JSC_COMMON_BYTECODE_INTRINSIC_FUNCTIONS_EACH_NAME(macro) \ + macro(assert) \ + macro(isObject) \ + macro(putByValDirect) \ + macro(toString) + +#define JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(macro) \ + macro(undefined) \ + macro(arrayIterationKindKey) \ + macro(arrayIterationKindValue) \ + macro(arrayIterationKindKeyValue) \ + macro(promiseStatePending) \ + macro(promiseStateFulfilled) \ + macro(promiseStateRejected) \ + macro(symbolIterator) \ + macro(symbolSearch) + +class BytecodeIntrinsicRegistry { + WTF_MAKE_NONCOPYABLE(BytecodeIntrinsicRegistry); +public: + explicit BytecodeIntrinsicRegistry(VM&); + + typedef RegisterID* (BytecodeIntrinsicNode::* EmitterType)(BytecodeGenerator&, RegisterID*); + + EmitterType lookup(const Identifier&) const; + +#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) JSValue name##Value(BytecodeGenerator&); + JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS) +#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS + +private: + VM& m_vm; + HashMap<RefPtr<UniquedStringImpl>, EmitterType, IdentifierRepHash> m_bytecodeIntrinsicMap; + +#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) Strong<Unknown> m_##name; + JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS) +#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS +}; + +} // namespace JSC + +#endif // BytecodeIntrinsicRegistry_h diff --git a/Source/JavaScriptCore/bytecode/BytecodeKills.h b/Source/JavaScriptCore/bytecode/BytecodeKills.h new file mode 100644 index 000000000..d073ded25 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeKills.h @@ -0,0 +1,181 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef BytecodeKills_h +#define BytecodeKills_h + +#include "CodeBlock.h" +#include <wtf/FastBitVector.h> + +namespace JSC { + +class BytecodeLivenessAnalysis; + +class BytecodeKills { + WTF_MAKE_FAST_ALLOCATED; +public: + BytecodeKills() + : m_codeBlock(nullptr) + { + } + + // By convention, we say that non-local operands are never killed. + bool operandIsKilled(unsigned bytecodeIndex, int operand) const + { + ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); + VirtualRegister reg(operand); + if (reg.isLocal()) + return m_killSets[bytecodeIndex].contains(operand); + return false; + } + + bool operandIsKilled(Instruction* instruction, int operand) const + { + return operandIsKilled(instruction - m_codeBlock->instructions().begin(), operand); + } + + template<typename Functor> + void forEachOperandKilledAt(unsigned bytecodeIndex, const Functor& functor) const + { + ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); + m_killSets[bytecodeIndex].forEachLocal( + [&] (unsigned local) { + functor(virtualRegisterForLocal(local)); + }); + } + + template<typename Functor> + void forEachOperandKilledAt(Instruction* pc, const Functor& functor) const + { + forEachOperandKilledAt(pc - m_codeBlock->instructions().begin(), functor); + } + +private: + friend class BytecodeLivenessAnalysis; + + class KillSet { + public: + KillSet() + : m_word(0) + { + } + + ~KillSet() + { + if (hasVector()) + delete vector(); + } + + void add(unsigned local) + { + if (isEmpty()) { + setOneItem(local); + return; + } + if (hasOneItem()) { + ASSERT(oneItem() != local); + Vector<unsigned>* vector = new Vector<unsigned>(); + vector->append(oneItem()); + vector->append(local); + setVector(vector); + return; + } + ASSERT(!vector()->contains(local)); + vector()->append(local); + } + + template<typename Functor> + void forEachLocal(const Functor& functor) + { + if (isEmpty()) + return; + if (hasOneItem()) { + functor(oneItem()); + return; + } + for (unsigned local : *vector()) + functor(local); + } + + bool contains(unsigned expectedLocal) + { + if (isEmpty()) + return false; + if (hasOneItem()) + return oneItem() == expectedLocal; + for (unsigned local : *vector()) { + if (local == expectedLocal) + return true; + } + return false; + } + + private: + bool isEmpty() const + { + return !m_word; + } + + bool hasOneItem() const + { + return m_word & 1; + } + + unsigned oneItem() const + { + return m_word >> 1; + } + + void setOneItem(unsigned value) + { + m_word = (value << 
1) | 1; + } + + bool hasVector() const + { + return !isEmpty() && !hasOneItem(); + } + + Vector<unsigned>* vector() + { + return bitwise_cast<Vector<unsigned>*>(m_word); + } + + void setVector(Vector<unsigned>* value) + { + m_word = bitwise_cast<uintptr_t>(value); + } + + uintptr_t m_word; + }; + + CodeBlock* m_codeBlock; + std::unique_ptr<KillSet[]> m_killSets; +}; + +} // namespace JSC + +#endif // BytecodeKills_h + diff --git a/Source/JavaScriptCore/bytecode/BytecodeList.json b/Source/JavaScriptCore/bytecode/BytecodeList.json new file mode 100644 index 000000000..053b8dc9b --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeList.json @@ -0,0 +1,182 @@ +[ + { + "section" : "Bytecodes", "emitInHFile" : true, "emitInASMFile" : true, + "macroNameComponent" : "BYTECODE", "asmPrefix" : "llint_", + "bytecodes" : [ + { "name" : "op_enter", "length" : 1 }, + { "name" : "op_get_scope", "length" : 2 }, + { "name" : "op_create_direct_arguments", "length" : 2 }, + { "name" : "op_create_scoped_arguments", "length" : 3 }, + { "name" : "op_create_out_of_band_arguments", "length" : 2 }, + { "name" : "op_create_this", "length" : 5 }, + { "name" : "op_to_this", "length" : 4 }, + { "name" : "op_check_tdz", "length" : 2 }, + { "name" : "op_new_object", "length" : 4 }, + { "name" : "op_new_array", "length" : 5 }, + { "name" : "op_new_array_with_size", "length" : 4 }, + { "name" : "op_new_array_buffer", "length" : 5 }, + { "name" : "op_new_regexp", "length" : 3 }, + { "name" : "op_mov", "length" : 3 }, + { "name" : "op_not", "length" : 3 }, + { "name" : "op_eq", "length" : 4 }, + { "name" : "op_eq_null", "length" : 3 }, + { "name" : "op_neq", "length" : 4 }, + { "name" : "op_neq_null", "length" : 3 }, + { "name" : "op_stricteq", "length" : 4 }, + { "name" : "op_nstricteq", "length" : 4 }, + { "name" : "op_less", "length" : 4 }, + { "name" : "op_lesseq", "length" : 4 }, + { "name" : "op_greater", "length" : 4 }, + { "name" : "op_greatereq", "length" : 4 }, + { "name" : "op_inc", "length" : 2 }, + { "name" : "op_dec", "length" : 2 }, + { "name" : "op_to_number", "length" : 3 }, + { "name" : "op_to_string", "length" : 3 }, + { "name" : "op_negate", "length" : 3 }, + { "name" : "op_add", "length" : 5 }, + { "name" : "op_mul", "length" : 5 }, + { "name" : "op_div", "length" : 5 }, + { "name" : "op_mod", "length" : 4 }, + { "name" : "op_sub", "length" : 5 }, + { "name" : "op_lshift", "length" : 4 }, + { "name" : "op_rshift", "length" : 4 }, + { "name" : "op_urshift", "length" : 4 }, + { "name" : "op_unsigned", "length" : 3 }, + { "name" : "op_bitand", "length" : 5 }, + { "name" : "op_bitxor", "length" : 5 }, + { "name" : "op_bitor", "length" : 5 }, + { "name" : "op_overrides_has_instance", "length" : 4 }, + { "name" : "op_instanceof", "length" : 4 }, + { "name" : "op_instanceof_custom", "length" : 5 }, + { "name" : "op_typeof", "length" : 3 }, + { "name" : "op_is_undefined", "length" : 3 }, + { "name" : "op_is_boolean", "length" : 3 }, + { "name" : "op_is_number", "length" : 3 }, + { "name" : "op_is_string", "length" : 3 }, + { "name" : "op_is_object", "length" : 3 }, + { "name" : "op_is_object_or_null", "length" : 3 }, + { "name" : "op_is_function", "length" : 3 }, + { "name" : "op_in", "length" : 4 }, + { "name" : "op_get_by_id", "length" : 9 }, + { "name" : "op_get_array_length", "length" : 9 }, + { "name" : "op_put_by_id", "length" : 9 }, + { "name" : "op_del_by_id", "length" : 4 }, + { "name" : "op_get_by_val", "length" : 6 }, + { "name" : "op_put_by_val", "length" : 5 }, + { "name" : 
"op_put_by_val_direct", "length" : 5 }, + { "name" : "op_del_by_val", "length" : 4 }, + { "name" : "op_put_by_index", "length" : 4 }, + { "name" : "op_put_getter_by_id", "length" : 5 }, + { "name" : "op_put_setter_by_id", "length" : 5 }, + { "name" : "op_put_getter_setter_by_id", "length" : 6 }, + { "name" : "op_put_getter_by_val", "length" : 5 }, + { "name" : "op_put_setter_by_val", "length" : 5 }, + { "name" : "op_jmp", "length" : 2 }, + { "name" : "op_jtrue", "length" : 3 }, + { "name" : "op_jfalse", "length" : 3 }, + { "name" : "op_jeq_null", "length" : 3 }, + { "name" : "op_jneq_null", "length" : 3 }, + { "name" : "op_jneq_ptr", "length" : 4 }, + { "name" : "op_jless", "length" : 4 }, + { "name" : "op_jlesseq", "length" : 4 }, + { "name" : "op_jgreater", "length" : 4 }, + { "name" : "op_jgreatereq", "length" : 4 }, + { "name" : "op_jnless", "length" : 4 }, + { "name" : "op_jnlesseq", "length" : 4 }, + { "name" : "op_jngreater", "length" : 4 }, + { "name" : "op_jngreatereq", "length" : 4 }, + { "name" : "op_loop_hint", "length" : 1 }, + { "name" : "op_switch_imm", "length" : 4 }, + { "name" : "op_switch_char", "length" : 4 }, + { "name" : "op_switch_string", "length" : 4 }, + { "name" : "op_new_func", "length" : 4 }, + { "name" : "op_new_func_exp", "length" : 4 }, + { "name" : "op_new_generator_func", "length" : 4 }, + { "name" : "op_new_generator_func_exp", "length" : 4 }, + { "name" : "op_new_arrow_func_exp", "length" : 4 }, + { "name" : "op_call", "length" : 9 }, + { "name" : "op_tail_call", "length" : 9 }, + { "name" : "op_call_eval", "length" : 9 }, + { "name" : "op_call_varargs", "length" : 9 }, + { "name" : "op_tail_call_varargs", "length" : 9 }, + { "name" : "op_ret", "length" : 2 }, + { "name" : "op_construct", "length" : 9 }, + { "name" : "op_construct_varargs", "length" : 9 }, + { "name" : "op_strcat", "length" : 4 }, + { "name" : "op_to_primitive", "length" : 3 }, + { "name" : "op_resolve_scope", "length" : 7 }, + { "name" : "op_get_from_scope", "length" : 8 }, + { "name" : "op_put_to_scope", "length" : 7 }, + { "name" : "op_get_from_arguments", "length" : 5 }, + { "name" : "op_put_to_arguments", "length" : 4 }, + { "name" : "op_push_with_scope", "length" : 4 }, + { "name" : "op_create_lexical_environment", "length" : 5 }, + { "name" : "op_get_parent_scope", "length" : 3 }, + { "name" : "op_catch", "length" : 3 }, + { "name" : "op_throw", "length" : 2 }, + { "name" : "op_throw_static_error", "length" : 3 }, + { "name" : "op_debug", "length" : 3 }, + { "name" : "op_profile_will_call", "length" : 2 }, + { "name" : "op_profile_did_call", "length" : 2 }, + { "name" : "op_end", "length" : 2 }, + { "name" : "op_profile_type", "length" : 6 }, + { "name" : "op_profile_control_flow", "length" : 2 }, + { "name" : "op_get_enumerable_length", "length" : 3 }, + { "name" : "op_has_indexed_property", "length" : 5 }, + { "name" : "op_has_structure_property", "length" : 5 }, + { "name" : "op_has_generic_property", "length" : 4 }, + { "name" : "op_get_direct_pname", "length" : 7 }, + { "name" : "op_get_property_enumerator", "length" : 3 }, + { "name" : "op_enumerator_structure_pname", "length" : 4 }, + { "name" : "op_enumerator_generic_pname", "length" : 4 }, + { "name" : "op_to_index_string", "length" : 3 }, + { "name" : "op_assert", "length" : 3 }, + { "name" : "op_copy_rest", "length": 4 }, + { "name" : "op_get_rest_length", "length": 3 }, + { "name" : "op_save", "length" : 4 }, + { "name" : "op_resume", "length" : 3 }, + { "name" : "op_watchdog", "length" : 1 } + ] + }, + { + "section" : 
"CLoopHelpers", "emitInHFile" : true, "emitInASMFile" : false, "defaultLength" : 1, + "macroNameComponent" : "CLOOP_BYTECODE_HELPER", + "bytecodes" : [ + { "name" : "llint_entry" }, + { "name" : "getHostCallReturnValue" }, + { "name" : "llint_return_to_host" }, + { "name" : "llint_vm_entry_to_javascript" }, + { "name" : "llint_vm_entry_to_native" }, + { "name" : "llint_cloop_did_return_from_js_1" }, + { "name" : "llint_cloop_did_return_from_js_2" }, + { "name" : "llint_cloop_did_return_from_js_3" }, + { "name" : "llint_cloop_did_return_from_js_4" }, + { "name" : "llint_cloop_did_return_from_js_5" }, + { "name" : "llint_cloop_did_return_from_js_6" }, + { "name" : "llint_cloop_did_return_from_js_7" }, + { "name" : "llint_cloop_did_return_from_js_8" }, + { "name" : "llint_cloop_did_return_from_js_9" }, + { "name" : "llint_cloop_did_return_from_js_10" }, + { "name" : "llint_cloop_did_return_from_js_11" } + ] + }, + { + "section" : "NativeHelpers", "emitInHFile" : true, "emitInASMFile" : true, "defaultLength" : 1, + "macroNameComponent" : "BYTECODE_HELPER", + "bytecodes" : [ + { "name" : "llint_program_prologue" }, + { "name" : "llint_eval_prologue" }, + { "name" : "llint_module_program_prologue" }, + { "name" : "llint_function_for_call_prologue" }, + { "name" : "llint_function_for_construct_prologue" }, + { "name" : "llint_function_for_call_arity_check" }, + { "name" : "llint_function_for_construct_arity_check" }, + { "name" : "llint_generic_return_point" }, + { "name" : "llint_throw_from_slow_path_trampoline" }, + { "name" : "llint_throw_during_call_trampoline" }, + { "name" : "llint_native_call_trampoline" }, + { "name" : "llint_native_construct_trampoline" }, + { "name" : "handleUncaughtException" } + ] + } +] diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp new file mode 100644 index 000000000..7228b0333 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp @@ -0,0 +1,348 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "BytecodeLivenessAnalysis.h" + +#include "BytecodeKills.h" +#include "BytecodeLivenessAnalysisInlines.h" +#include "BytecodeUseDef.h" +#include "CodeBlock.h" +#include "FullBytecodeLiveness.h" +#include "PreciseJumpTargets.h" + +namespace JSC { + +BytecodeLivenessAnalysis::BytecodeLivenessAnalysis(CodeBlock* codeBlock) + : m_codeBlock(codeBlock) +{ + ASSERT(m_codeBlock); + compute(); +} + +static bool isValidRegisterForLiveness(CodeBlock* codeBlock, int operand) +{ + if (codeBlock->isConstantRegisterIndex(operand)) + return false; + + VirtualRegister virtualReg(operand); + return virtualReg.isLocal(); +} + +static unsigned getLeaderOffsetForBasicBlock(std::unique_ptr<BytecodeBasicBlock>* basicBlock) +{ + return (*basicBlock)->leaderBytecodeOffset(); +} + +static BytecodeBasicBlock* findBasicBlockWithLeaderOffset(Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned leaderOffset) +{ + return (*tryBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>(basicBlocks, basicBlocks.size(), leaderOffset, getLeaderOffsetForBasicBlock)).get(); +} + +static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, unsigned bytecodeOffset) +{ + unsigned leaderOffset = block->leaderBytecodeOffset(); + return bytecodeOffset >= leaderOffset && bytecodeOffset < leaderOffset + block->totalBytecodeLength(); +} + +static BytecodeBasicBlock* findBasicBlockForBytecodeOffset(Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset) +{ +/* + for (unsigned i = 0; i < basicBlocks.size(); i++) { + if (blockContainsBytecodeOffset(basicBlocks[i].get(), bytecodeOffset)) + return basicBlocks[i].get(); + } + return 0; +*/ + std::unique_ptr<BytecodeBasicBlock>* basicBlock = approximateBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>( + basicBlocks, basicBlocks.size(), bytecodeOffset, getLeaderOffsetForBasicBlock); + // We found the block we were looking for. + if (blockContainsBytecodeOffset((*basicBlock).get(), bytecodeOffset)) + return (*basicBlock).get(); + + // Basic block is to the left of the returned block. + if (bytecodeOffset < (*basicBlock)->leaderBytecodeOffset()) { + ASSERT(basicBlock - 1 >= basicBlocks.data()); + ASSERT(blockContainsBytecodeOffset(basicBlock[-1].get(), bytecodeOffset)); + return basicBlock[-1].get(); + } + + // Basic block is to the right of the returned block. + ASSERT(&basicBlock[1] <= &basicBlocks.last()); + ASSERT(blockContainsBytecodeOffset(basicBlock[1].get(), bytecodeOffset)); + return basicBlock[1].get(); +} + +// Simplified interface to bytecode use/def, which determines defs first and then uses, and includes +// exception handlers in the uses. +template<typename UseFunctor, typename DefFunctor> +static void stepOverInstruction(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset, const UseFunctor& use, const DefFunctor& def) +{ + // This abstractly executes the instruction in reverse. Instructions logically first use operands and + // then define operands. This logical ordering is necessary for operations that use and def the same + // operand, like: + // + // op_add loc1, loc1, loc2 + // + // The use of loc1 happens before the def of loc1. That's a semantic requirement since the add + // operation cannot travel forward in time to read the value that it will produce after reading that + // value. Since we are executing in reverse, this means that we must do defs before uses (reverse of + // uses before defs). 
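+ //
+ // As a hypothetical illustration of that rule (not from the original comment):
+ // stepping backwards over "op_add loc1, loc1, loc2" with only loc1 live after
+ // the instruction proceeds:
+ //
+ //     live-out = { loc1 }
+ //     def(loc1)  =>  live = { }             // the add overwrites loc1...
+ //     use(loc1)  =>  live = { loc1 }        // ...but reads the old loc1 first
+ //     use(loc2)  =>  live = { loc1, loc2 }  // live-in: both inputs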
+ // + // Since this is a liveness analysis, this ordering ends up being particularly important: if we did + // uses before defs, then the add operation above would appear to not have loc1 live, since we'd + // first add it to the out set (the use), and then we'd remove it (the def). + + computeDefsForBytecodeOffset( + codeBlock, block, bytecodeOffset, + [&] (CodeBlock* codeBlock, Instruction*, OpcodeID, int operand) { + if (isValidRegisterForLiveness(codeBlock, operand)) + def(VirtualRegister(operand).toLocal()); + }); + + computeUsesForBytecodeOffset( + codeBlock, block, bytecodeOffset, + [&] (CodeBlock* codeBlock, Instruction*, OpcodeID, int operand) { + if (isValidRegisterForLiveness(codeBlock, operand)) + use(VirtualRegister(operand).toLocal()); + }); + + // If we have an exception handler, we want the live-in variables of the + // exception handler block to be included in the live-in of this particular bytecode. + if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset)) { + BytecodeBasicBlock* handlerBlock = findBasicBlockWithLeaderOffset(basicBlocks, handler->target); + ASSERT(handlerBlock); + handlerBlock->in().forEachSetBit(use); + } +} + +static void stepOverInstruction(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset, FastBitVector& out) +{ + stepOverInstruction( + codeBlock, block, basicBlocks, bytecodeOffset, + [&] (unsigned bitIndex) { + // This is the use functor, so we set the bit. + out.set(bitIndex); + }, + [&] (unsigned bitIndex) { + // This is the def functor, so we clear the bit. + out.clear(bitIndex); + }); +} + +static void computeLocalLivenessForBytecodeOffset(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned targetOffset, FastBitVector& result) +{ + ASSERT(!block->isExitBlock()); + ASSERT(!block->isEntryBlock()); + + FastBitVector out = block->out(); + + for (int i = block->bytecodeOffsets().size() - 1; i >= 0; i--) { + unsigned bytecodeOffset = block->bytecodeOffsets()[i]; + if (targetOffset > bytecodeOffset) + break; + + stepOverInstruction(codeBlock, block, basicBlocks, bytecodeOffset, out); + } + + result.set(out); +} + +static void computeLocalLivenessForBlock(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) +{ + if (block->isExitBlock() || block->isEntryBlock()) + return; + computeLocalLivenessForBytecodeOffset(codeBlock, block, basicBlocks, block->leaderBytecodeOffset(), block->in()); +} + +void BytecodeLivenessAnalysis::runLivenessFixpoint() +{ + UnlinkedCodeBlock* unlinkedCodeBlock = m_codeBlock->unlinkedCodeBlock(); + unsigned numberOfVariables = unlinkedCodeBlock->m_numCalleeLocals; + + for (unsigned i = 0; i < m_basicBlocks.size(); i++) { + BytecodeBasicBlock* block = m_basicBlocks[i].get(); + block->in().resize(numberOfVariables); + block->out().resize(numberOfVariables); + } + + bool changed; + m_basicBlocks.last()->in().clearAll(); + m_basicBlocks.last()->out().clearAll(); + FastBitVector newOut; + newOut.resize(m_basicBlocks.last()->out().numBits()); + do { + changed = false; + for (unsigned i = m_basicBlocks.size() - 1; i--;) { + BytecodeBasicBlock* block = m_basicBlocks[i].get(); + newOut.clearAll(); + for (unsigned j = 0; j < block->successors().size(); j++) + newOut.merge(block->successors()[j]->in()); + bool outDidChange = block->out().setAndCheck(newOut); + computeLocalLivenessForBlock(m_codeBlock, block, 
m_basicBlocks); + changed |= outDidChange; + } + } while (changed); +} + +void BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result) +{ + BytecodeBasicBlock* block = findBasicBlockForBytecodeOffset(m_basicBlocks, bytecodeOffset); + ASSERT(block); + ASSERT(!block->isEntryBlock()); + ASSERT(!block->isExitBlock()); + result.resize(block->out().numBits()); + computeLocalLivenessForBytecodeOffset(m_codeBlock, block, m_basicBlocks, bytecodeOffset, result); +} + +bool BytecodeLivenessAnalysis::operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset) +{ + if (operandIsAlwaysLive(operand)) + return true; + FastBitVector result; + getLivenessInfoAtBytecodeOffset(bytecodeOffset, result); + return operandThatIsNotAlwaysLiveIsLive(result, operand); +} + +FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset) +{ + FastBitVector out; + getLivenessInfoAtBytecodeOffset(bytecodeOffset, out); + return out; +} + +void BytecodeLivenessAnalysis::computeFullLiveness(FullBytecodeLiveness& result) +{ + FastBitVector out; + + result.m_map.resize(m_codeBlock->instructions().size()); + + for (unsigned i = m_basicBlocks.size(); i--;) { + BytecodeBasicBlock* block = m_basicBlocks[i].get(); + if (block->isEntryBlock() || block->isExitBlock()) + continue; + + out = block->out(); + + for (unsigned i = block->bytecodeOffsets().size(); i--;) { + unsigned bytecodeOffset = block->bytecodeOffsets()[i]; + stepOverInstruction(m_codeBlock, block, m_basicBlocks, bytecodeOffset, out); + result.m_map[bytecodeOffset] = out; + } + } +} + +void BytecodeLivenessAnalysis::computeKills(BytecodeKills& result) +{ + FastBitVector out; + + result.m_codeBlock = m_codeBlock; + result.m_killSets = std::make_unique<BytecodeKills::KillSet[]>(m_codeBlock->instructions().size()); + + for (unsigned i = m_basicBlocks.size(); i--;) { + BytecodeBasicBlock* block = m_basicBlocks[i].get(); + if (block->isEntryBlock() || block->isExitBlock()) + continue; + + out = block->out(); + + for (unsigned i = block->bytecodeOffsets().size(); i--;) { + unsigned bytecodeOffset = block->bytecodeOffsets()[i]; + stepOverInstruction( + m_codeBlock, block, m_basicBlocks, bytecodeOffset, + [&] (unsigned index) { + // This is for uses. + if (out.get(index)) + return; + result.m_killSets[bytecodeOffset].add(index); + out.set(index); + }, + [&] (unsigned index) { + // This is for defs. 
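+ // A def ends a local's live range on this backwards walk; combined with the
+ // use functor above, a kill is therefore recorded exactly at a local's last
+ // use. As a hypothetical illustration: in "op_add loc3, loc1, loc2", if loc1
+ // is dead after the add, the use of loc1 there becomes loc1's kill point.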
+ out.clear(index); + }); + } + } +} + +void BytecodeLivenessAnalysis::dumpResults() +{ + Interpreter* interpreter = m_codeBlock->vm()->interpreter; + Instruction* instructionsBegin = m_codeBlock->instructions().begin(); + for (unsigned i = 0; i < m_basicBlocks.size(); i++) { + BytecodeBasicBlock* block = m_basicBlocks[i].get(); + dataLogF("\nBytecode basic block %u: %p (offset: %u, length: %u)\n", i, block, block->leaderBytecodeOffset(), block->totalBytecodeLength()); + dataLogF("Successors: "); + for (unsigned j = 0; j < block->successors().size(); j++) { + BytecodeBasicBlock* successor = block->successors()[j]; + dataLogF("%p ", successor); + } + dataLogF("\n"); + if (block->isEntryBlock()) { + dataLogF("Entry block %p\n", block); + continue; + } + if (block->isExitBlock()) { + dataLogF("Exit block: %p\n", block); + continue; + } + for (unsigned bytecodeOffset = block->leaderBytecodeOffset(); bytecodeOffset < block->leaderBytecodeOffset() + block->totalBytecodeLength();) { + const Instruction* currentInstruction = &instructionsBegin[bytecodeOffset]; + + dataLogF("Live variables: "); + FastBitVector liveBefore = getLivenessInfoAtBytecodeOffset(bytecodeOffset); + for (unsigned j = 0; j < liveBefore.numBits(); j++) { + if (liveBefore.get(j)) + dataLogF("%u ", j); + } + dataLogF("\n"); + m_codeBlock->dumpBytecode(WTF::dataFile(), m_codeBlock->globalObject()->globalExec(), instructionsBegin, currentInstruction); + + OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode); + unsigned opcodeLength = opcodeLengths[opcodeID]; + bytecodeOffset += opcodeLength; + } + + dataLogF("Live variables: "); + FastBitVector liveAfter = block->out(); + for (unsigned j = 0; j < liveAfter.numBits(); j++) { + if (liveAfter.get(j)) + dataLogF("%u ", j); + } + dataLogF("\n"); + } +} + +void BytecodeLivenessAnalysis::compute() +{ + computeBytecodeBasicBlocks(m_codeBlock, m_basicBlocks); + ASSERT(m_basicBlocks.size()); + runLivenessFixpoint(); + + if (Options::dumpBytecodeLivenessResults()) + dumpResults(); +} + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h new file mode 100644 index 000000000..ece16f21f --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef BytecodeLivenessAnalysis_h +#define BytecodeLivenessAnalysis_h + +#include "BytecodeBasicBlock.h" +#include <wtf/FastBitVector.h> +#include <wtf/HashMap.h> +#include <wtf/Vector.h> + +namespace JSC { + +class BytecodeKills; +class CodeBlock; +class FullBytecodeLiveness; + +class BytecodeLivenessAnalysis { + WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_NONCOPYABLE(BytecodeLivenessAnalysis); +public: + BytecodeLivenessAnalysis(CodeBlock*); + + bool operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset); + FastBitVector getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset); + + void computeFullLiveness(FullBytecodeLiveness& result); + void computeKills(BytecodeKills& result); + +private: + void compute(); + void runLivenessFixpoint(); + void dumpResults(); + + void getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector&); + + CodeBlock* m_codeBlock; + Vector<std::unique_ptr<BytecodeBasicBlock>> m_basicBlocks; +}; + +inline bool operandIsAlwaysLive(int operand); +inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand); +inline bool operandIsLive(const FastBitVector& out, int operand); + +} // namespace JSC + +#endif // BytecodeLivenessAnalysis_h diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h new file mode 100644 index 000000000..9b5c755fc --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef BytecodeLivenessAnalysisInlines_h +#define BytecodeLivenessAnalysisInlines_h + +#include "BytecodeLivenessAnalysis.h" +#include "CodeBlock.h" +#include "Operations.h" + +namespace JSC { + +inline bool operandIsAlwaysLive(int operand) +{ + return !VirtualRegister(operand).isLocal(); +} + +inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand) +{ + unsigned local = VirtualRegister(operand).toLocal(); + if (local >= out.numBits()) + return false; + return out.get(local); +} + +inline bool operandIsLive(const FastBitVector& out, int operand) +{ + return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(out, operand); +} + +} // namespace JSC + +#endif // BytecodeLivenessAnalysisInlines_h + diff --git a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h new file mode 100644 index 000000000..14a69f68a --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h @@ -0,0 +1,447 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef BytecodeUseDef_h +#define BytecodeUseDef_h + +#include "CodeBlock.h" + +namespace JSC { + +template<typename Functor> +void computeUsesForBytecodeOffset( + CodeBlock* codeBlock, BytecodeBasicBlock* block, unsigned bytecodeOffset, const Functor& functor) +{ + Interpreter* interpreter = codeBlock->vm()->interpreter; + Instruction* instructionsBegin = codeBlock->instructions().begin(); + Instruction* instruction = &instructionsBegin[bytecodeOffset]; + OpcodeID opcodeID = interpreter->getOpcodeID(instruction->u.opcode); + switch (opcodeID) { + // No uses. 
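+ // (The opcodes below are treated as reading no virtual registers; whatever
+ // operands they carry are not modeled as register uses by this analysis, so
+ // the functor is never invoked for them.)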
+ case op_new_regexp: + case op_new_array_buffer: + case op_throw_static_error: + case op_debug: + case op_jneq_ptr: + case op_loop_hint: + case op_jmp: + case op_new_object: + case op_enter: + case op_catch: + case op_profile_control_flow: + case op_create_direct_arguments: + case op_create_out_of_band_arguments: + case op_get_rest_length: + case op_watchdog: + return; + case op_assert: + case op_get_scope: + case op_to_this: + case op_check_tdz: + case op_profile_will_call: + case op_profile_did_call: + case op_profile_type: + case op_throw: + case op_end: + case op_ret: + case op_jtrue: + case op_jfalse: + case op_jeq_null: + case op_jneq_null: + case op_dec: + case op_inc: + case op_resume: { + ASSERT(opcodeLengths[opcodeID] > 1); + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + return; + } + case op_jlesseq: + case op_jgreater: + case op_jgreatereq: + case op_jnless: + case op_jnlesseq: + case op_jngreater: + case op_jngreatereq: + case op_jless: + case op_copy_rest: { + ASSERT(opcodeLengths[opcodeID] > 2); + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + return; + } + case op_put_by_val_direct: + case op_put_by_val: { + ASSERT(opcodeLengths[opcodeID] > 3); + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); + return; + } + case op_put_by_index: + case op_put_by_id: + case op_put_to_scope: + case op_put_to_arguments: { + ASSERT(opcodeLengths[opcodeID] > 3); + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); + return; + } + case op_put_getter_by_id: + case op_put_setter_by_id: { + ASSERT(opcodeLengths[opcodeID] > 4); + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); + return; + } + case op_put_getter_setter_by_id: { + ASSERT(opcodeLengths[opcodeID] > 5); + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[5].u.operand); + return; + } + case op_put_getter_by_val: + case op_put_setter_by_val: { + ASSERT(opcodeLengths[opcodeID] > 4); + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); + return; + } + case op_get_property_enumerator: + case op_get_enumerable_length: + case op_new_func_exp: + case op_new_generator_func_exp: + case op_new_arrow_func_exp: + case op_to_index_string: + case op_create_lexical_environment: + case op_resolve_scope: + case op_get_from_scope: + case op_to_primitive: + case op_get_by_id: + case op_get_array_length: + case op_typeof: + case op_is_undefined: + case op_is_boolean: + case op_is_number: + case op_is_string: + case op_is_object: + case op_is_object_or_null: + case op_is_function: + case op_to_number: + case op_to_string: + case op_negate: + case op_neq_null: + case op_eq_null: + case op_not: + case op_mov: + case op_new_array_with_size: + case op_create_this: + case op_del_by_id: + case op_unsigned: + case op_new_func: + case op_new_generator_func: + case op_get_parent_scope: + case 
op_create_scoped_arguments: + case op_get_from_arguments: { + ASSERT(opcodeLengths[opcodeID] > 2); + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + return; + } + case op_has_generic_property: + case op_has_indexed_property: + case op_enumerator_structure_pname: + case op_enumerator_generic_pname: + case op_get_by_val: + case op_in: + case op_overrides_has_instance: + case op_instanceof: + case op_add: + case op_mul: + case op_div: + case op_mod: + case op_sub: + case op_lshift: + case op_rshift: + case op_urshift: + case op_bitand: + case op_bitxor: + case op_bitor: + case op_less: + case op_lesseq: + case op_greater: + case op_greatereq: + case op_nstricteq: + case op_stricteq: + case op_neq: + case op_eq: + case op_push_with_scope: + case op_del_by_val: { + ASSERT(opcodeLengths[opcodeID] > 3); + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); + return; + } + case op_instanceof_custom: + case op_has_structure_property: + case op_construct_varargs: + case op_call_varargs: + case op_tail_call_varargs: { + ASSERT(opcodeLengths[opcodeID] > 4); + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); + return; + } + case op_get_direct_pname: { + ASSERT(opcodeLengths[opcodeID] > 5); + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[5].u.operand); + return; + } + case op_switch_string: + case op_switch_char: + case op_switch_imm: { + ASSERT(opcodeLengths[opcodeID] > 3); + functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); + return; + } + case op_new_array: + case op_strcat: { + int base = instruction[2].u.operand; + int count = instruction[3].u.operand; + for (int i = 0; i < count; i++) + functor(codeBlock, instruction, opcodeID, base - i); + return; + } + case op_construct: + case op_call_eval: + case op_call: + case op_tail_call: { + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + int argCount = instruction[3].u.operand; + int registerOffset = -instruction[4].u.operand; + int lastArg = registerOffset + CallFrame::thisArgumentOffset(); + for (int i = 0; i < argCount; i++) + functor(codeBlock, instruction, opcodeID, lastArg + i); + return; + } + case op_save: { + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + unsigned mergePointBytecodeOffset = bytecodeOffset + instruction[3].u.operand; + BytecodeBasicBlock* mergePointBlock = nullptr; + for (BytecodeBasicBlock* successor : block->successors()) { + if (successor->leaderBytecodeOffset() == mergePointBytecodeOffset) { + mergePointBlock = successor; + break; + } + } + ASSERT(mergePointBlock); + mergePointBlock->in().forEachSetBit([&](unsigned local) { + functor(codeBlock, instruction, opcodeID, virtualRegisterForLocal(local).offset()); + }); + return; + } + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } +} + +template<typename Functor> +void computeDefsForBytecodeOffset(CodeBlock* codeBlock, BytecodeBasicBlock* block, unsigned bytecodeOffset, const Functor& functor) +{ + Interpreter* interpreter = codeBlock->vm()->interpreter; + Instruction* instructionsBegin = codeBlock->instructions().begin(); + 
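+ // The functor is invoked as functor(codeBlock, instruction, opcodeID, operand)
+ // once per register that the instruction writes. A minimal caller, sketched
+ // after the pattern used by stepOverInstruction() in
+ // BytecodeLivenessAnalysis.cpp, might clear liveness bits like this:
+ //
+ //     computeDefsForBytecodeOffset(codeBlock, block, bytecodeOffset,
+ //         [&] (CodeBlock*, Instruction*, OpcodeID, int operand) {
+ //             VirtualRegister reg(operand);
+ //             if (reg.isLocal())
+ //                 out.clear(reg.toLocal()); // a def makes the local dead above this point
+ //         });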
Instruction* instruction = &instructionsBegin[bytecodeOffset]; + OpcodeID opcodeID = interpreter->getOpcodeID(instruction->u.opcode); + switch (opcodeID) { + // These don't define anything. + case op_copy_rest: + case op_put_to_scope: + case op_end: + case op_profile_will_call: + case op_profile_did_call: + case op_throw: + case op_throw_static_error: + case op_save: + case op_assert: + case op_debug: + case op_ret: + case op_jmp: + case op_jtrue: + case op_jfalse: + case op_jeq_null: + case op_jneq_null: + case op_jneq_ptr: + case op_jless: + case op_jlesseq: + case op_jgreater: + case op_jgreatereq: + case op_jnless: + case op_jnlesseq: + case op_jngreater: + case op_jngreatereq: + case op_loop_hint: + case op_switch_imm: + case op_switch_char: + case op_switch_string: + case op_put_by_id: + case op_put_getter_by_id: + case op_put_setter_by_id: + case op_put_getter_setter_by_id: + case op_put_getter_by_val: + case op_put_setter_by_val: + case op_put_by_val: + case op_put_by_val_direct: + case op_put_by_index: + case op_profile_type: + case op_profile_control_flow: + case op_put_to_arguments: + case op_watchdog: +#define LLINT_HELPER_OPCODES(opcode, length) case opcode: + FOR_EACH_LLINT_OPCODE_EXTENSION(LLINT_HELPER_OPCODES); +#undef LLINT_HELPER_OPCODES + return; + // These all have a single destination for the first argument. + case op_to_index_string: + case op_get_enumerable_length: + case op_has_indexed_property: + case op_has_structure_property: + case op_has_generic_property: + case op_get_direct_pname: + case op_get_property_enumerator: + case op_enumerator_structure_pname: + case op_enumerator_generic_pname: + case op_get_parent_scope: + case op_push_with_scope: + case op_create_lexical_environment: + case op_resolve_scope: + case op_strcat: + case op_to_primitive: + case op_create_this: + case op_new_array: + case op_new_array_buffer: + case op_new_array_with_size: + case op_new_regexp: + case op_new_func: + case op_new_func_exp: + case op_new_generator_func: + case op_new_generator_func_exp: + case op_new_arrow_func_exp: + case op_call_varargs: + case op_tail_call_varargs: + case op_construct_varargs: + case op_get_from_scope: + case op_call: + case op_tail_call: + case op_call_eval: + case op_construct: + case op_get_by_id: + case op_get_array_length: + case op_overrides_has_instance: + case op_instanceof: + case op_instanceof_custom: + case op_get_by_val: + case op_typeof: + case op_is_undefined: + case op_is_boolean: + case op_is_number: + case op_is_string: + case op_is_object: + case op_is_object_or_null: + case op_is_function: + case op_in: + case op_to_number: + case op_to_string: + case op_negate: + case op_add: + case op_mul: + case op_div: + case op_mod: + case op_sub: + case op_lshift: + case op_rshift: + case op_urshift: + case op_bitand: + case op_bitxor: + case op_bitor: + case op_inc: + case op_dec: + case op_eq: + case op_neq: + case op_stricteq: + case op_nstricteq: + case op_less: + case op_lesseq: + case op_greater: + case op_greatereq: + case op_neq_null: + case op_eq_null: + case op_not: + case op_mov: + case op_new_object: + case op_to_this: + case op_check_tdz: + case op_get_scope: + case op_create_direct_arguments: + case op_create_scoped_arguments: + case op_create_out_of_band_arguments: + case op_del_by_id: + case op_del_by_val: + case op_unsigned: + case op_get_from_arguments: + case op_get_rest_length: { + ASSERT(opcodeLengths[opcodeID] > 1); + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + return; + } + case op_catch: { + 
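+ // op_catch is the one case in this switch with two explicit destinations: per
+ // the BytecodeList.json entry above it has length 3, i.e. two register
+ // operands, and both registers are written when the handler is entered.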
ASSERT(opcodeLengths[opcodeID] > 2); + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + return; + } + case op_enter: { + for (unsigned i = codeBlock->m_numVars; i--;) + functor(codeBlock, instruction, opcodeID, virtualRegisterForLocal(i).offset()); + return; + } + case op_resume: { + RELEASE_ASSERT(block->successors().size() == 1); + block->successors()[0]->in().forEachSetBit([&](unsigned local) { + functor(codeBlock, instruction, opcodeID, virtualRegisterForLocal(local).offset()); + }); + return; + } + } +} + +} // namespace JSC + +#endif // BytecodeUseDef_h + diff --git a/Source/JavaScriptCore/bytecode/LineInfo.h b/Source/JavaScriptCore/bytecode/CallEdge.cpp index e9e70138a..dffff6dfd 100644 --- a/Source/JavaScriptCore/bytecode/LineInfo.h +++ b/Source/JavaScriptCore/bytecode/CallEdge.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,19 +23,15 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef LineInfo_h -#define LineInfo_h - -#include <wtf/StdLibExtras.h> +#include "config.h" +#include "CallEdge.h" namespace JSC { -struct LineInfo { - uint32_t instructionOffset; - int32_t lineNumber; -}; +void CallEdge::dump(PrintStream& out) const +{ + out.print("<", m_callee, ", count: ", m_count, ">"); +} } // namespace JSC -#endif // LineInfo_h - diff --git a/Source/JavaScriptCore/bytecode/CallEdge.h b/Source/JavaScriptCore/bytecode/CallEdge.h new file mode 100644 index 000000000..304520951 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallEdge.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef CallEdge_h +#define CallEdge_h + +#include "CallVariant.h" + +namespace JSC { + +class CallEdge { +public: + CallEdge(); + CallEdge(CallVariant, uint32_t); + + bool operator!() const { return !m_callee; } + + CallVariant callee() const { return m_callee; } + uint32_t count() const { return m_count; } + + CallEdge despecifiedClosure() const + { + return CallEdge(m_callee.despecifiedClosure(), m_count); + } + + void dump(PrintStream&) const; + +private: + CallVariant m_callee; + uint32_t m_count; +}; + +inline CallEdge::CallEdge(CallVariant callee, uint32_t count) + : m_callee(callee) + , m_count(count) +{ +} + +inline CallEdge::CallEdge() + : CallEdge(CallVariant(), 0) +{ +} + +typedef Vector<CallEdge, 1> CallEdgeList; + +} // namespace JSC + +#endif // CallEdge_h + diff --git a/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp index 5b2661f06..0579d4250 100644 --- a/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp +++ b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,35 +26,105 @@ #include "config.h" #include "CallLinkInfo.h" +#include "CallFrameShuffleData.h" #include "DFGOperations.h" #include "DFGThunks.h" -#include "RepatchBuffer.h" +#include "JSCInlines.h" +#include "Repatch.h" +#include <wtf/ListDump.h> +#include <wtf/NeverDestroyed.h> #if ENABLE(JIT) namespace JSC { -void CallLinkInfo::unlink(VM& vm, RepatchBuffer& repatchBuffer) +CallLinkInfo::CallLinkInfo() + : m_hasSeenShouldRepatch(false) + , m_hasSeenClosure(false) + , m_clearedByGC(false) + , m_allowStubs(true) + , m_callType(None) + , m_maxNumArguments(0) + , m_slowPathCount(0) { - ASSERT(isLinked()); +} + +CallLinkInfo::~CallLinkInfo() +{ + clearStub(); + + if (isOnList()) + remove(); +} + +void CallLinkInfo::clearStub() +{ + if (!stub()) + return; + + m_stub->clearCallNodesFor(this); + m_stub = nullptr; +} + +void CallLinkInfo::unlink(VM& vm) +{ + if (!isLinked()) { + // We could be called even if we're not linked anymore because of how polymorphic calls + // work. Each callsite within the polymorphic call stub may separately ask us to unlink(). + RELEASE_ASSERT(!isOnList()); + return; + } - repatchBuffer.revertJumpReplacementToBranchPtrWithPatch(RepatchBuffer::startOfBranchPtrWithPatchOnRegister(hotPathBegin), static_cast<MacroAssembler::RegisterID>(calleeGPR), 0); - if (isDFG) { -#if ENABLE(DFG_JIT) - repatchBuffer.relink(callReturnLocation, (callType == Construct ? vm.getCTIStub(DFG::linkConstructThunkGenerator) : vm.getCTIStub(DFG::linkCallThunkGenerator)).code()); -#else - RELEASE_ASSERT_NOT_REACHED(); -#endif - } else - repatchBuffer.relink(callReturnLocation, callType == Construct ? vm.getCTIStub(linkConstructGenerator).code() : vm.getCTIStub(linkCallGenerator).code()); - hasSeenShouldRepatch = false; - callee.clear(); - stub.clear(); + unlinkFor(vm, *this); // It will be on a list if the callee has a code block. 
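+ // Removing this node from the callee's incoming-call list below keeps that
+ // list consistent with the now-unlinked call site.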
if (isOnList()) remove(); } +void CallLinkInfo::visitWeak(VM& vm) +{ + auto handleSpecificCallee = [&] (JSFunction* callee) { + if (Heap::isMarked(callee->executable())) + m_hasSeenClosure = true; + else + m_clearedByGC = true; + }; + + if (isLinked()) { + if (stub()) { + if (!stub()->visitWeak(vm)) { + if (Options::verboseOSR()) { + dataLog( + "Clearing closure call to ", + listDump(stub()->variants()), ", stub routine ", RawPointer(stub()), + ".\n"); + } + unlink(vm); + m_clearedByGC = true; + } + } else if (!Heap::isMarked(m_callee.get())) { + if (Options::verboseOSR()) { + dataLog( + "Clearing call to ", + RawPointer(m_callee.get()), " (", + m_callee.get()->executable()->hashFor(specializationKind()), + ").\n"); + } + handleSpecificCallee(m_callee.get()); + unlink(vm); + } + } + if (haveLastSeenCallee() && !Heap::isMarked(lastSeenCallee())) { + handleSpecificCallee(lastSeenCallee()); + clearLastSeenCallee(); + } +} + +void CallLinkInfo::setFrameShuffleData(const CallFrameShuffleData& shuffleData) +{ + m_frameShuffleData = std::make_unique<CallFrameShuffleData>(shuffleData); +} + } // namespace JSC #endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/bytecode/CallLinkInfo.h b/Source/JavaScriptCore/bytecode/CallLinkInfo.h index 36eb84bee..beeeaa12c 100644 --- a/Source/JavaScriptCore/bytecode/CallLinkInfo.h +++ b/Source/JavaScriptCore/bytecode/CallLinkInfo.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,89 +26,336 @@ #ifndef CallLinkInfo_h #define CallLinkInfo_h -#include "ClosureCallStubRoutine.h" +#include "CallMode.h" #include "CodeLocation.h" #include "CodeSpecializationKind.h" #include "JITWriteBarrier.h" #include "JSFunction.h" #include "Opcode.h" +#include "PolymorphicCallStubRoutine.h" #include "WriteBarrier.h" -#include <wtf/Platform.h> #include <wtf/SentinelLinkedList.h> namespace JSC { #if ENABLE(JIT) -class RepatchBuffer; +struct CallFrameShuffleData; -struct CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> { - enum CallType { None, Call, CallVarargs, Construct }; +class CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> { +public: + enum CallType { None, Call, CallVarargs, Construct, ConstructVarargs, TailCall, TailCallVarargs }; static CallType callTypeFor(OpcodeID opcodeID) { if (opcodeID == op_call || opcodeID == op_call_eval) return Call; + if (opcodeID == op_call_varargs) + return CallVarargs; if (opcodeID == op_construct) return Construct; - ASSERT(opcodeID == op_call_varargs); - return CallVarargs; + if (opcodeID == op_construct_varargs) + return ConstructVarargs; + if (opcodeID == op_tail_call) + return TailCall; + ASSERT(opcodeID == op_tail_call_varargs); + return TailCallVarargs; } - - CallLinkInfo() - : hasSeenShouldRepatch(false) - , isDFG(false) - , hasSeenClosure(false) - , callType(None) + + static bool isVarargsCallType(CallType callType) { + switch (callType) { + case CallVarargs: + case ConstructVarargs: + case TailCallVarargs: + return true; + + default: + return false; + } } + + CallLinkInfo(); - ~CallLinkInfo() + ~CallLinkInfo(); + + static CodeSpecializationKind specializationKindFor(CallType callType) { - if (isOnList()) - remove(); + return specializationFromIsConstruct(callType == Construct || callType == ConstructVarargs); } - CodeSpecializationKind specializationKind() const { - return 
specializationFromIsConstruct(callType == Construct); + return specializationKindFor(static_cast<CallType>(m_callType)); + } + + static CallMode callModeFor(CallType callType) + { + switch (callType) { + case Call: + case CallVarargs: + return CallMode::Regular; + case TailCall: + case TailCallVarargs: + return CallMode::Tail; + case Construct: + case ConstructVarargs: + return CallMode::Construct; + case None: + RELEASE_ASSERT_NOT_REACHED(); + } + + RELEASE_ASSERT_NOT_REACHED(); + } + + CallMode callMode() const + { + return callModeFor(static_cast<CallType>(m_callType)); + } + + bool isTailCall() const + { + return callMode() == CallMode::Tail; + } + + bool isVarargs() const + { + return isVarargsCallType(static_cast<CallType>(m_callType)); + } + + bool isLinked() { return m_stub || m_callee; } + void unlink(VM&); + + void setUpCall(CallType callType, CodeOrigin codeOrigin, unsigned calleeGPR) + { + m_callType = callType; + m_codeOrigin = codeOrigin; + m_calleeGPR = calleeGPR; + } + + void setCallLocations(CodeLocationNearCall callReturnLocation, CodeLocationDataLabelPtr hotPathBegin, + CodeLocationNearCall hotPathOther) + { + m_callReturnLocation = callReturnLocation; + m_hotPathBegin = hotPathBegin; + m_hotPathOther = hotPathOther; + } + + bool allowStubs() const { return m_allowStubs; } + + void disallowStubs() + { + m_allowStubs = false; + } + + void setUpCallFromFTL(CallType callType, CodeOrigin codeOrigin, + CodeLocationNearCall callReturnLocation, CodeLocationDataLabelPtr hotPathBegin, + CodeLocationNearCall hotPathOther, unsigned calleeGPR) + { + m_callType = callType; + m_codeOrigin = codeOrigin; + m_callReturnLocation = callReturnLocation; + m_hotPathBegin = hotPathBegin; + m_hotPathOther = hotPathOther; + m_calleeGPR = calleeGPR; + } + + CodeLocationNearCall callReturnLocation() + { + return m_callReturnLocation; + } + + CodeLocationDataLabelPtr hotPathBegin() + { + return m_hotPathBegin; + } + + CodeLocationNearCall hotPathOther() + { + return m_hotPathOther; + } + + void setCallee(VM& vm, CodeLocationDataLabelPtr location, JSCell* owner, JSFunction* callee) + { + m_callee.set(vm, location, owner, callee); + } + + void clearCallee() + { + m_callee.clear(); + } + + JSFunction* callee() + { + return m_callee.get(); + } + + void setLastSeenCallee(VM& vm, const JSCell* owner, JSFunction* callee) + { + m_lastSeenCallee.set(vm, owner, callee); + } + + void clearLastSeenCallee() + { + m_lastSeenCallee.clear(); + } + + JSFunction* lastSeenCallee() + { + return m_lastSeenCallee.get(); + } + + bool haveLastSeenCallee() + { + return !!m_lastSeenCallee; + } + + void setStub(PassRefPtr<PolymorphicCallStubRoutine> newStub) + { + clearStub(); + m_stub = newStub; } - CodeLocationNearCall callReturnLocation; - CodeLocationDataLabelPtr hotPathBegin; - CodeLocationNearCall hotPathOther; - JITWriteBarrier<JSFunction> callee; - WriteBarrier<JSFunction> lastSeenCallee; - RefPtr<ClosureCallStubRoutine> stub; - bool hasSeenShouldRepatch : 1; - bool isDFG : 1; - bool hasSeenClosure : 1; - CallType callType : 5; - unsigned calleeGPR : 8; - CodeOrigin codeOrigin; + void clearStub(); - bool isLinked() { return stub || callee; } - void unlink(VM&, RepatchBuffer&); + PolymorphicCallStubRoutine* stub() + { + return m_stub.get(); + } + + void setSlowStub(PassRefPtr<JITStubRoutine> newSlowStub) + { + m_slowStub = newSlowStub; + } + + void clearSlowStub() + { + m_slowStub = nullptr; + } + + JITStubRoutine* slowStub() + { + return m_slowStub.get(); + } bool seenOnce() { - return hasSeenShouldRepatch; + return 
m_hasSeenShouldRepatch; + } + + void clearSeen() + { + m_hasSeenShouldRepatch = false; } void setSeen() { - hasSeenShouldRepatch = true; + m_hasSeenShouldRepatch = true; + } + + bool hasSeenClosure() + { + return m_hasSeenClosure; + } + + void setHasSeenClosure() + { + m_hasSeenClosure = true; + } + + bool clearedByGC() + { + return m_clearedByGC; + } + + void setCallType(CallType callType) + { + m_callType = callType; + } + + CallType callType() + { + return static_cast<CallType>(m_callType); + } + + uint8_t* addressOfMaxNumArguments() + { + return &m_maxNumArguments; } + + uint8_t maxNumArguments() + { + return m_maxNumArguments; + } + + static ptrdiff_t offsetOfSlowPathCount() + { + return OBJECT_OFFSETOF(CallLinkInfo, m_slowPathCount); + } + + void setCalleeGPR(unsigned calleeGPR) + { + m_calleeGPR = calleeGPR; + } + + unsigned calleeGPR() + { + return m_calleeGPR; + } + + uint32_t slowPathCount() + { + return m_slowPathCount; + } + + void setCodeOrigin(CodeOrigin codeOrigin) + { + m_codeOrigin = codeOrigin; + } + + CodeOrigin codeOrigin() + { + return m_codeOrigin; + } + + void visitWeak(VM&); + + void setFrameShuffleData(const CallFrameShuffleData&); + + const CallFrameShuffleData* frameShuffleData() + { + return m_frameShuffleData.get(); + } + +private: + CodeLocationNearCall m_callReturnLocation; + CodeLocationDataLabelPtr m_hotPathBegin; + CodeLocationNearCall m_hotPathOther; + JITWriteBarrier<JSFunction> m_callee; + WriteBarrier<JSFunction> m_lastSeenCallee; + RefPtr<PolymorphicCallStubRoutine> m_stub; + RefPtr<JITStubRoutine> m_slowStub; + std::unique_ptr<CallFrameShuffleData> m_frameShuffleData; + bool m_hasSeenShouldRepatch : 1; + bool m_hasSeenClosure : 1; + bool m_clearedByGC : 1; + bool m_allowStubs : 1; + unsigned m_callType : 4; // CallType + unsigned m_calleeGPR : 8; + uint8_t m_maxNumArguments; // Only used for varargs calls. + uint32_t m_slowPathCount; + CodeOrigin m_codeOrigin; }; -inline void* getCallLinkInfoReturnLocation(CallLinkInfo* callLinkInfo) +inline CodeOrigin getCallLinkInfoCodeOrigin(CallLinkInfo& callLinkInfo) { - return callLinkInfo->callReturnLocation.executableAddress(); + return callLinkInfo.codeOrigin(); } -inline unsigned getCallLinkInfoBytecodeIndex(CallLinkInfo* callLinkInfo) -{ - return callLinkInfo->codeOrigin.bytecodeIndex; -} +typedef HashMap<CodeOrigin, CallLinkInfo*, CodeOriginApproximateHash> CallLinkInfoMap; + +#else // ENABLE(JIT) + +typedef HashMap<int, void*> CallLinkInfoMap; + #endif // ENABLE(JIT) } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp index 509b15aaf..8ffc23d13 100644 --- a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp +++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2016 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,101 +26,305 @@ #include "config.h" #include "CallLinkStatus.h" +#include "CallLinkInfo.h" #include "CodeBlock.h" +#include "DFGJITCode.h" +#include "InlineCallFrame.h" #include "LLIntCallLinkInfo.h" -#include "Operations.h" +#include "JSCInlines.h" #include <wtf/CommaPrinter.h> +#include <wtf/ListDump.h> namespace JSC { +static const bool verbose = false; + CallLinkStatus::CallLinkStatus(JSValue value) - : m_callTarget(value) - , m_executable(0) - , m_structure(0) - , m_couldTakeSlowPath(false) + : m_couldTakeSlowPath(false) , m_isProved(false) { - if (!value || !value.isCell()) - return; - - m_structure = value.asCell()->structure(); - - if (!value.asCell()->inherits(&JSFunction::s_info)) + if (!value || !value.isCell()) { + m_couldTakeSlowPath = true; return; + } - m_executable = jsCast<JSFunction*>(value.asCell())->executable(); + m_variants.append(CallVariant(value.asCell())); } -JSFunction* CallLinkStatus::function() const +CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex) { - if (!m_callTarget || !m_callTarget.isCell()) - return 0; + UNUSED_PARAM(profiledBlock); + UNUSED_PARAM(bytecodeIndex); +#if ENABLE(DFG_JIT) + if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell))) { + // We could force this to be a closure call, but instead we'll just assume that it + // takes slow path. + return takesSlowPath(); + } +#else + UNUSED_PARAM(locker); +#endif + + VM& vm = *profiledBlock->vm(); + + Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex; + OpcodeID op = vm.interpreter->getOpcodeID(instruction[0].u.opcode); + if (op != op_call && op != op_construct && op != op_tail_call) + return CallLinkStatus(); - if (!m_callTarget.asCell()->inherits(&JSFunction::s_info)) - return 0; + LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo; - return jsCast<JSFunction*>(m_callTarget.asCell()); + return CallLinkStatus(callLinkInfo->lastSeenCallee.get()); } -InternalFunction* CallLinkStatus::internalFunction() const +CallLinkStatus CallLinkStatus::computeFor( + CodeBlock* profiledBlock, unsigned bytecodeIndex, const CallLinkInfoMap& map) { - if (!m_callTarget || !m_callTarget.isCell()) - return 0; + ConcurrentJITLocker locker(profiledBlock->m_lock); - if (!m_callTarget.asCell()->inherits(&InternalFunction::s_info)) - return 0; + UNUSED_PARAM(profiledBlock); + UNUSED_PARAM(bytecodeIndex); + UNUSED_PARAM(map); +#if ENABLE(DFG_JIT) + ExitSiteData exitSiteData = computeExitSiteData(locker, profiledBlock, bytecodeIndex); - return jsCast<InternalFunction*>(m_callTarget.asCell()); + CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex)); + if (!callLinkInfo) { + if (exitSiteData.takesSlowPath) + return takesSlowPath(); + return computeFromLLInt(locker, profiledBlock, bytecodeIndex); + } + + return computeFor(locker, profiledBlock, *callLinkInfo, exitSiteData); +#else + return CallLinkStatus(); +#endif } -Intrinsic CallLinkStatus::intrinsicFor(CodeSpecializationKind kind) const +CallLinkStatus::ExitSiteData CallLinkStatus::computeExitSiteData( + const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex) { - if (!m_executable) - return NoIntrinsic; + ExitSiteData exitSiteData; + +#if ENABLE(DFG_JIT) + exitSiteData.takesSlowPath = + profiledBlock->hasExitSite(locker, 
DFG::FrequentExitSite(bytecodeIndex, BadType)) + || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable)); + exitSiteData.badFunction = + profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell)); +#else + UNUSED_PARAM(locker); + UNUSED_PARAM(profiledBlock); + UNUSED_PARAM(bytecodeIndex); +#endif - return m_executable->intrinsicFor(kind); + return exitSiteData; } -CallLinkStatus CallLinkStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex) +#if ENABLE(JIT) +CallLinkStatus CallLinkStatus::computeFor( + const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo) { + // We don't really need this, but anytime we have to debug this code, it becomes indispensable. UNUSED_PARAM(profiledBlock); - UNUSED_PARAM(bytecodeIndex); -#if ENABLE(LLINT) - Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex; - LLIntCallLinkInfo* callLinkInfo = instruction[4].u.callLinkInfo; - return CallLinkStatus(callLinkInfo->lastSeenCallee.get()); -#else - return CallLinkStatus(); -#endif + CallLinkStatus result = computeFromCallLinkInfo(locker, callLinkInfo); + result.m_maxNumArguments = callLinkInfo.maxNumArguments(); + return result; } -CallLinkStatus CallLinkStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex) +CallLinkStatus CallLinkStatus::computeFromCallLinkInfo( + const ConcurrentJITLocker&, CallLinkInfo& callLinkInfo) { - UNUSED_PARAM(profiledBlock); - UNUSED_PARAM(bytecodeIndex); -#if ENABLE(JIT) && ENABLE(VALUE_PROFILER) - if (!profiledBlock->numberOfCallLinkInfos()) - return computeFromLLInt(profiledBlock, bytecodeIndex); + if (callLinkInfo.clearedByGC()) + return takesSlowPath(); - if (profiledBlock->couldTakeSlowCase(bytecodeIndex)) - return CallLinkStatus::takesSlowPath(); + // Note that despite requiring that the locker is held, this code is racy with respect + // to the CallLinkInfo: it may get cleared while this code runs! This is because + // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns + // the CallLinkInfo and currently we save space by not having CallLinkInfos know who owns + // them. So, there is no way for either the caller of CallLinkInfo::unlink() or unlink() + // itself to figure out which lock to lock. + // + // Fortunately, that doesn't matter. The only things we ask of CallLinkInfo - the slow + // path count, the stub, and the target - can all be asked racily. Stubs and targets can + // only be deleted at next GC, so if we load a non-null one, then it must contain data + // that is still marginally valid (i.e. the pointers ain't stale). This kind of raciness + // is probably OK for now. - CallLinkInfo& callLinkInfo = profiledBlock->getCallLinkInfo(bytecodeIndex); - if (callLinkInfo.stub) - return CallLinkStatus(callLinkInfo.stub->executable(), callLinkInfo.stub->structure()); + // PolymorphicCallStubRoutine is a GCAwareJITStubRoutine, so if non-null, it will stay alive + // until next GC even if the CallLinkInfo is concurrently cleared. Also, the variants list is + // never mutated after the PolymorphicCallStubRoutine is instantiated. We have some conservative + // fencing in place to make sure that we see the variants list after construction. + if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub()) { + WTF::loadLoadFence(); + + CallEdgeList edges = stub->edges(); + + // Now that we've loaded the edges list, there are no further concurrency concerns. 
We will + // just manipulate and prune this list to our liking - mostly removing entries that are too + // infrequent and ensuring that it's sorted in descending order of frequency. + + RELEASE_ASSERT(edges.size()); + + std::sort( + edges.begin(), edges.end(), + [] (CallEdge a, CallEdge b) { + return a.count() > b.count(); + }); + RELEASE_ASSERT(edges.first().count() >= edges.last().count()); + + double totalCallsToKnown = 0; + double totalCallsToUnknown = callLinkInfo.slowPathCount(); + CallVariantList variants; + for (size_t i = 0; i < edges.size(); ++i) { + CallEdge edge = edges[i]; + // If the call is at the tail of the distribution, then we don't optimize it and we + // treat it as if it was a call to something unknown. We define the tail as being either + // a call that doesn't belong to the N most frequent callees (N = + // maxPolymorphicCallVariantsForInlining) or that has a total call count that is too + // small. + if (i >= Options::maxPolymorphicCallVariantsForInlining() + || edge.count() < Options::frequentCallThreshold()) + totalCallsToUnknown += edge.count(); + else { + totalCallsToKnown += edge.count(); + variants.append(edge.callee()); + } + } + + // Bail if we didn't find any calls that qualified. + RELEASE_ASSERT(!!totalCallsToKnown == !!variants.size()); + if (variants.isEmpty()) + return takesSlowPath(); + + // We require that the distribution of callees is skewed towards a handful of common ones. + if (totalCallsToKnown / totalCallsToUnknown < Options::minimumCallToKnownRate()) + return takesSlowPath(); + + RELEASE_ASSERT(totalCallsToKnown); + RELEASE_ASSERT(variants.size()); + + CallLinkStatus result; + result.m_variants = variants; + result.m_couldTakeSlowPath = !!totalCallsToUnknown; + result.m_isBasedOnStub = true; + return result; + } - JSFunction* target = callLinkInfo.lastSeenCallee.get(); - if (!target) - return computeFromLLInt(profiledBlock, bytecodeIndex); + CallLinkStatus result; - if (callLinkInfo.hasSeenClosure) - return CallLinkStatus(target->executable(), target->structure()); + if (JSFunction* target = callLinkInfo.lastSeenCallee()) { + CallVariant variant(target); + if (callLinkInfo.hasSeenClosure()) + variant = variant.despecifiedClosure(); + result.m_variants.append(variant); + } + + result.m_couldTakeSlowPath = !!callLinkInfo.slowPathCount(); - return CallLinkStatus(target); -#else - return CallLinkStatus(); + return result; +} + +CallLinkStatus CallLinkStatus::computeFor( + const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo, + ExitSiteData exitSiteData) +{ + CallLinkStatus result = computeFor(locker, profiledBlock, callLinkInfo); + if (exitSiteData.badFunction) { + if (result.isBasedOnStub()) { + // If we have a polymorphic stub, then having an exit site is not quite so useful. In + // most cases, the information in the stub has higher fidelity. + result.makeClosureCall(); + } else { + // We might not have a polymorphic stub for any number of reasons. When this happens, we + // are in less certain territory, so exit sites mean a lot. 
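
The pruning pass above boils down to a small heuristic: sort the profiled call edges by frequency, keep at most a handful of sufficiently hot callees, and fold everything else into an "unknown" bucket that must stay dominated by the known callees. A minimal standalone sketch of that decision, with made-up constants standing in for Options::maxPolymorphicCallVariantsForInlining(), Options::frequentCallThreshold() and Options::minimumCallToKnownRate():

#include <algorithm>
#include <vector>

struct CallEdgeSketch {
    int calleeId; // stand-in for a CallVariant
    double count; // profiled call count for this callee
};

struct PrunedStatus {
    std::vector<int> variants;      // callees worth specializing on
    bool couldTakeSlowPath = false; // some calls remain unexplained
};

PrunedStatus pruneEdges(std::vector<CallEdgeSketch> edges, double slowPathCount)
{
    const size_t kMaxVariants = 5;        // assumed cap on inlineable variants
    const double kFrequentThreshold = 10; // assumed per-callee count floor
    const double kMinKnownRate = 0.51;    // assumed known/unknown ratio floor

    // Sort in descending order of observed frequency.
    std::sort(edges.begin(), edges.end(),
        [](const CallEdgeSketch& a, const CallEdgeSketch& b) { return a.count > b.count; });

    double toKnown = 0;
    double toUnknown = slowPathCount;
    PrunedStatus result;
    for (size_t i = 0; i < edges.size(); ++i) {
        // The tail of the distribution is folded into "unknown": beyond the
        // variant cap, or individually too infrequent to speculate on.
        if (i >= kMaxVariants || edges[i].count < kFrequentThreshold)
            toUnknown += edges[i].count;
        else {
            toKnown += edges[i].count;
            result.variants.push_back(edges[i].calleeId);
        }
    }

    // Give up unless the distribution is dominated by the known callees.
    if (result.variants.empty() || toKnown / toUnknown < kMinKnownRate) {
        result.variants.clear();
        result.couldTakeSlowPath = true;
        return result;
    }

    result.couldTakeSlowPath = toUnknown > 0;
    return result;
}

The same design choice shows up in the real code: the status keeps couldTakeSlowPath set whenever residual unknown calls exist, so the compiler can emit a generic fallback path alongside the specialized ones.
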
+ result.m_couldTakeSlowPath = true; + } + } + if (exitSiteData.takesSlowPath) + result.m_couldTakeSlowPath = true; + + return result; +} #endif + +void CallLinkStatus::computeDFGStatuses( + CodeBlock* dfgCodeBlock, CallLinkStatus::ContextMap& map) +{ +#if ENABLE(DFG_JIT) + RELEASE_ASSERT(dfgCodeBlock->jitType() == JITCode::DFGJIT); + CodeBlock* baselineCodeBlock = dfgCodeBlock->alternative(); + for (auto iter = dfgCodeBlock->callLinkInfosBegin(); !!iter; ++iter) { + CallLinkInfo& info = **iter; + CodeOrigin codeOrigin = info.codeOrigin(); + + // Check if we had already previously made a terrible mistake in the FTL for this + // code origin. Note that this is approximate because we could have a monovariant + // inline in the FTL that ended up failing. We should fix that at some point by + // having data structures to track the context of frequent exits. This is currently + // challenging because it would require creating a CodeOrigin-based database in + // baseline CodeBlocks, but those CodeBlocks don't really have a place to put the + // InlineCallFrames. + CodeBlock* currentBaseline = + baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock); + ExitSiteData exitSiteData; + { + ConcurrentJITLocker locker(currentBaseline->m_lock); + exitSiteData = computeExitSiteData( + locker, currentBaseline, codeOrigin.bytecodeIndex); + } + + { + ConcurrentJITLocker locker(dfgCodeBlock->m_lock); + map.add(info.codeOrigin(), computeFor(locker, dfgCodeBlock, info, exitSiteData)); + } + } +#else + UNUSED_PARAM(dfgCodeBlock); +#endif // ENABLE(DFG_JIT) + + if (verbose) { + dataLog("Context map:\n"); + ContextMap::iterator iter = map.begin(); + ContextMap::iterator end = map.end(); + for (; iter != end; ++iter) { + dataLog(" ", iter->key, ":\n"); + dataLog(" ", iter->value, "\n"); + } + } +} + +CallLinkStatus CallLinkStatus::computeFor( + CodeBlock* profiledBlock, CodeOrigin codeOrigin, + const CallLinkInfoMap& baselineMap, const CallLinkStatus::ContextMap& dfgMap) +{ + auto iter = dfgMap.find(codeOrigin); + if (iter != dfgMap.end()) + return iter->value; + + return computeFor(profiledBlock, codeOrigin.bytecodeIndex, baselineMap); +} + +void CallLinkStatus::setProvenConstantCallee(CallVariant variant) +{ + m_variants = CallVariantList{ variant }; + m_couldTakeSlowPath = false; + m_isProved = true; +} + +bool CallLinkStatus::isClosureCall() const +{ + for (unsigned i = m_variants.size(); i--;) { + if (m_variants[i].isClosureCall()) + return true; + } + return false; +} + +void CallLinkStatus::makeClosureCall() +{ + m_variants = despecifiedVariantList(m_variants); } void CallLinkStatus::dump(PrintStream& out) const @@ -138,14 +342,11 @@ void CallLinkStatus::dump(PrintStream& out) const if (m_couldTakeSlowPath) out.print(comma, "Could Take Slow Path"); - if (m_callTarget) - out.print(comma, "Known target: ", m_callTarget); - - if (m_executable) - out.print(comma, "Executable/CallHash: ", RawPointer(m_executable), "/", m_executable->hashFor(CodeForCall)); + if (!m_variants.isEmpty()) + out.print(comma, listDump(m_variants)); - if (m_structure) - out.print(comma, "Structure: ", RawPointer(m_structure)); + if (m_maxNumArguments) + out.print(comma, "maxNumArguments = ", m_maxNumArguments); } } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.h b/Source/JavaScriptCore/bytecode/CallLinkStatus.h index 51965fe4a..d3c1eee0c 100644 --- a/Source/JavaScriptCore/bytecode/CallLinkStatus.h +++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.h @@ -1,5 +1,5 @@ /* - * Copyright 
(C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,7 +26,12 @@ #ifndef CallLinkStatus_h #define CallLinkStatus_h +#include "CallLinkInfo.h" +#include "CallVariant.h" +#include "CodeOrigin.h" #include "CodeSpecializationKind.h" +#include "ConcurrentJITLock.h" +#include "ExitingJITType.h" #include "Intrinsic.h" #include "JSCJSValue.h" @@ -37,14 +42,12 @@ class ExecutableBase; class InternalFunction; class JSFunction; class Structure; +class CallLinkInfo; class CallLinkStatus { + WTF_MAKE_FAST_ALLOCATED; public: CallLinkStatus() - : m_executable(0) - , m_structure(0) - , m_couldTakeSlowPath(false) - , m_isProved(false) { } @@ -57,75 +60,75 @@ public: explicit CallLinkStatus(JSValue); - CallLinkStatus(ExecutableBase* executable, Structure* structure) - : m_executable(executable) - , m_structure(structure) - , m_couldTakeSlowPath(false) - , m_isProved(false) + CallLinkStatus(CallVariant variant) + : m_variants(1, variant) { - ASSERT(!!executable == !!structure); } - CallLinkStatus& setIsProved(bool isProved) - { - m_isProved = isProved; - return *this; - } + static CallLinkStatus computeFor( + CodeBlock*, unsigned bytecodeIndex, const CallLinkInfoMap&); + + struct ExitSiteData { + bool takesSlowPath { false }; + bool badFunction { false }; + }; + static ExitSiteData computeExitSiteData(const ConcurrentJITLocker&, CodeBlock*, unsigned bytecodeIndex); - static CallLinkStatus computeFor(CodeBlock*, unsigned bytecodeIndex); +#if ENABLE(JIT) + // Computes the status assuming that we never took slow path and never previously + // exited. + static CallLinkStatus computeFor(const ConcurrentJITLocker&, CodeBlock*, CallLinkInfo&); + static CallLinkStatus computeFor( + const ConcurrentJITLocker&, CodeBlock*, CallLinkInfo&, ExitSiteData); +#endif - CallLinkStatus& setHasBadFunctionExitSite(bool didHaveExitSite) - { - ASSERT(!m_isProved); - if (didHaveExitSite) { - // Turn this into a closure call. - m_callTarget = JSValue(); - } - return *this; - } + typedef HashMap<CodeOrigin, CallLinkStatus, CodeOriginApproximateHash> ContextMap; - CallLinkStatus& setHasBadCacheExitSite(bool didHaveExitSite) - { - ASSERT(!m_isProved); - if (didHaveExitSite) - *this = takesSlowPath(); - return *this; - } + // Computes all of the statuses of the DFG code block. Doesn't include statuses that had + // no information. Currently we use this when compiling FTL code, to enable polyvariant + // inlining. + static void computeDFGStatuses(CodeBlock* dfgCodeBlock, ContextMap&); - CallLinkStatus& setHasBadExecutableExitSite(bool didHaveExitSite) - { - ASSERT(!m_isProved); - if (didHaveExitSite) - *this = takesSlowPath(); - return *this; - } + // Helper that first consults the ContextMap and then does computeFor(). 
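
That helper's shape - consult the per-context map populated from the optimized block, and only recompute from baseline profiling on a miss - in a self-contained sketch (OriginSketch, StatusSketch and std::unordered_map are stand-ins for CodeOrigin, CallLinkStatus and the WTF::HashMap-based ContextMap; the fallback is stubbed out):

#include <unordered_map>

struct OriginSketch {
    unsigned bytecodeIndex;
    bool operator==(const OriginSketch& other) const { return bytecodeIndex == other.bytecodeIndex; }
};

struct OriginSketchHash {
    size_t operator()(const OriginSketch& origin) const { return origin.bytecodeIndex; }
};

struct StatusSketch {
    bool couldTakeSlowPath = false;
};

using ContextMapSketch = std::unordered_map<OriginSketch, StatusSketch, OriginSketchHash>;

// Prefer the statuses gathered from the optimized (DFG) block; fall back to
// recomputing from baseline profiling only when the map has nothing for us.
StatusSketch computeForOrigin(const ContextMapSketch& dfgMap, OriginSketch origin)
{
    auto iter = dfgMap.find(origin);
    if (iter != dfgMap.end())
        return iter->second;
    return StatusSketch(); // stand-in for computeFor(profiledBlock, bytecodeIndex, baselineMap)
}
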
+ static CallLinkStatus computeFor( + CodeBlock*, CodeOrigin, const CallLinkInfoMap&, const ContextMap&); - bool isSet() const { return m_callTarget || m_executable || m_couldTakeSlowPath; } + void setProvenConstantCallee(CallVariant); + + bool isSet() const { return !m_variants.isEmpty() || m_couldTakeSlowPath; } bool operator!() const { return !isSet(); } bool couldTakeSlowPath() const { return m_couldTakeSlowPath; } - bool isClosureCall() const { return m_executable && !m_callTarget; } - - JSValue callTarget() const { return m_callTarget; } - JSFunction* function() const; - InternalFunction* internalFunction() const; - Intrinsic intrinsicFor(CodeSpecializationKind) const; - ExecutableBase* executable() const { return m_executable; } - Structure* structure() const { return m_structure; } + + CallVariantList variants() const { return m_variants; } + unsigned size() const { return m_variants.size(); } + CallVariant at(unsigned i) const { return m_variants[i]; } + CallVariant operator[](unsigned i) const { return at(i); } bool isProved() const { return m_isProved; } - bool canOptimize() const { return (m_callTarget || m_executable) && !m_couldTakeSlowPath; } + bool isBasedOnStub() const { return m_isBasedOnStub; } + bool canOptimize() const { return !m_variants.isEmpty(); } + + bool isClosureCall() const; // Returns true if any callee is a closure call. + + unsigned maxNumArguments() const { return m_maxNumArguments; } void dump(PrintStream&) const; private: - static CallLinkStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex); + void makeClosureCall(); + + static CallLinkStatus computeFromLLInt(const ConcurrentJITLocker&, CodeBlock*, unsigned bytecodeIndex); +#if ENABLE(JIT) + static CallLinkStatus computeFromCallLinkInfo( + const ConcurrentJITLocker&, CallLinkInfo&); +#endif - JSValue m_callTarget; - ExecutableBase* m_executable; - Structure* m_structure; - bool m_couldTakeSlowPath; - bool m_isProved; + CallVariantList m_variants; + bool m_couldTakeSlowPath { false }; + bool m_isProved { false }; + bool m_isBasedOnStub { false }; + unsigned m_maxNumArguments { 0 }; }; } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/CallMode.cpp b/Source/JavaScriptCore/bytecode/CallMode.cpp new file mode 100644 index 000000000..5757b1850 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallMode.cpp @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CallMode.h" + +#include <wtf/PrintStream.h> + +namespace WTF { + +void printInternal(PrintStream& out, JSC::CallMode callMode) +{ + switch (callMode) { + case JSC::CallMode::Tail: + out.print("TailCall"); + return; + case JSC::CallMode::Regular: + out.print("Call"); + return; + case JSC::CallMode::Construct: + out.print("Construct"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF diff --git a/Source/JavaScriptCore/bytecode/CallMode.h b/Source/JavaScriptCore/bytecode/CallMode.h new file mode 100644 index 000000000..bf21d8634 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallMode.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef CallMode_h +#define CallMode_h + +#include "CodeSpecializationKind.h" + +namespace JSC { + +enum class CallMode { Regular, Tail, Construct }; + +enum FrameAction { KeepTheFrame = 0, ReuseTheFrame }; + +inline CodeSpecializationKind specializationKindFor(CallMode callMode) +{ + if (callMode == CallMode::Construct) + return CodeForConstruct; + + return CodeForCall; +} + +} // namespace JSC + +namespace WTF { + +class PrintStream; +void printInternal(PrintStream&, JSC::CallMode); + +} // namespace WTF + +#endif // CallMode_h + diff --git a/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h b/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h index 3a7448efd..496738f09 100644 --- a/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h +++ b/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h @@ -26,8 +26,6 @@ #ifndef CallReturnOffsetToBytecodeOffset_h #define CallReturnOffsetToBytecodeOffset_h -#include <wtf/Platform.h> - namespace JSC { #if ENABLE(JIT) diff --git a/Source/JavaScriptCore/bytecode/CallVariant.cpp b/Source/JavaScriptCore/bytecode/CallVariant.cpp new file mode 100644 index 000000000..9745dde2b --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallVariant.cpp @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "CallVariant.h" + +#include "JSCInlines.h" +#include <wtf/ListDump.h> + +namespace JSC { + +void CallVariant::dump(PrintStream& out) const +{ + if (!*this) { + out.print("null"); + return; + } + + if (InternalFunction* internalFunction = this->internalFunction()) { + out.print("InternalFunction: ", JSValue(internalFunction)); + return; + } + + if (JSFunction* function = this->function()) { + out.print("(Function: ", JSValue(function), "; Executable: ", *executable(), ")"); + return; + } + + out.print("Executable: ", *executable()); +} + +CallVariantList variantListWithVariant(const CallVariantList& list, CallVariant variantToAdd) +{ + ASSERT(variantToAdd); + CallVariantList result; + for (CallVariant variant : list) { + ASSERT(variant); + if (!!variantToAdd) { + if (variant == variantToAdd) + variantToAdd = CallVariant(); + else if (variant.despecifiedClosure() == variantToAdd.despecifiedClosure()) { + variant = variant.despecifiedClosure(); + variantToAdd = CallVariant(); + } + } + result.append(variant); + } + if (!!variantToAdd) + result.append(variantToAdd); + + if (!ASSERT_DISABLED) { + for (unsigned i = 0; i < result.size(); ++i) { + for (unsigned j = i + 1; j < result.size(); ++j) { + if (result[i] != result[j]) + continue; + + dataLog("variantListWithVariant(", listDump(list), ", ", variantToAdd, ") failed: got duplicates in result: ", listDump(result), "\n"); + RELEASE_ASSERT_NOT_REACHED(); + } + } + } + + return result; +} + +CallVariantList despecifiedVariantList(const CallVariantList& list) +{ + CallVariantList result; + for (CallVariant variant : list) + result = variantListWithVariant(result, variant.despecifiedClosure()); + return result; +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/CallVariant.h b/Source/JavaScriptCore/bytecode/CallVariant.h new file mode 100644 index 000000000..2514f72b8 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallVariant.h @@ -0,0 +1,203 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef CallVariant_h +#define CallVariant_h + +#include "Executable.h" +#include "JSCell.h" +#include "JSFunction.h" + +namespace JSC { + +// The CallVariant class is meant to encapsulate a callee in a way that is useful for call linking +// and inlining. Because JavaScript has closures, and because JSC implements the notion of internal +// non-function objects that nevertheless provide call traps, the call machinery wants to see a +// callee in one of the following four forms: +// +// JSFunction callee: This means that we expect the callsite to always call a particular function +// instance, that is associated with a particular lexical environment. This pinpoints not +// just the code that will be called (i.e. the executable) but also the scope within which +// the code runs. +// +// Executable callee: This corresponds to a call to a closure. In this case, we know that the +// callsite will call a JSFunction, but we do not know which particular JSFunction. We do know +// what code will be called - i.e. we know the executable. +// +// InternalFunction callee: JSC supports a special kind of native functions that support bizarre +// semantics. These are always singletons. If we know that the callee is an InternalFunction +// then we know both the code that will be called and the scope; in fact the "scope" is really +// just the InternalFunction itself. +// +// Something else: It's possible to call all manner of rubbish in JavaScript. This implicitly supports +// bizarre object callees, but it can't really tell you anything interesting about them other +// than the fact that they don't fall into any of the above categories. +// +// This class serves as a kind of union over these four things. It does so by just holding a +// JSCell*. We determine which of the modes it's in by doing type checks on the cell. Note that we +// cannot use WriteBarrier<> here because this gets used inside the compiler. + +class CallVariant { +public: + explicit CallVariant(JSCell* callee = nullptr) + : m_callee(callee) + { + } + + CallVariant(WTF::HashTableDeletedValueType) + : m_callee(deletedToken()) + { + } + + bool operator!() const { return !m_callee; } + + // If this variant refers to a function, change it to refer to its executable.
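
A toy model of this union-over-callee-kinds idea, using invented types and dynamic_cast in place of JSC's cell type checks (the real despecifiedClosure() follows just below):

struct CellSketch { virtual ~CellSketch() = default; };
struct ExecutableSketch : CellSketch { };                       // code, but no particular scope
struct FunctionSketch : CellSketch { ExecutableSketch* exec; }; // code plus a lexical environment
struct InternalFunctionSketch : CellSketch { };                 // singleton native callee

class VariantSketch {
public:
    explicit VariantSketch(CellSketch* callee = nullptr) : m_callee(callee) { }

    // Mirror of despecifiedClosure(): a concrete function collapses to its shared
    // executable; anything else is already as general as it gets.
    VariantSketch despecified() const
    {
        if (auto* function = dynamic_cast<FunctionSketch*>(m_callee))
            return VariantSketch(function->exec);
        return *this;
    }

    // Mirror of isClosureCall(): true once only the executable is known.
    bool isClosureCall() const { return dynamic_cast<ExecutableSketch*>(m_callee) != nullptr; }

private:
    CellSketch* m_callee;
};
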
+ ALWAYS_INLINE CallVariant despecifiedClosure() const + { + if (m_callee->type() == JSFunctionType) + return CallVariant(jsCast<JSFunction*>(m_callee)->executable()); + return *this; + } + + JSCell* rawCalleeCell() const { return m_callee; } + + InternalFunction* internalFunction() const + { + return jsDynamicCast<InternalFunction*>(m_callee); + } + + JSFunction* function() const + { + return jsDynamicCast<JSFunction*>(m_callee); + } + + bool isClosureCall() const { return !!jsDynamicCast<ExecutableBase*>(m_callee); } + + ExecutableBase* executable() const + { + if (JSFunction* function = this->function()) + return function->executable(); + return jsDynamicCast<ExecutableBase*>(m_callee); + } + + JSCell* nonExecutableCallee() const + { + RELEASE_ASSERT(!isClosureCall()); + return m_callee; + } + + Intrinsic intrinsicFor(CodeSpecializationKind kind) const + { + if (ExecutableBase* executable = this->executable()) + return executable->intrinsicFor(kind); + return NoIntrinsic; + } + + FunctionExecutable* functionExecutable() const + { + if (ExecutableBase* executable = this->executable()) + return jsDynamicCast<FunctionExecutable*>(executable); + return nullptr; + } + + void dump(PrintStream& out) const; + + bool isHashTableDeletedValue() const + { + return m_callee == deletedToken(); + } + + bool operator==(const CallVariant& other) const + { + return m_callee == other.m_callee; + } + + bool operator!=(const CallVariant& other) const + { + return !(*this == other); + } + + bool operator<(const CallVariant& other) const + { + return m_callee < other.m_callee; + } + + bool operator>(const CallVariant& other) const + { + return other < *this; + } + + bool operator<=(const CallVariant& other) const + { + return !(*this > other); + } + + bool operator>=(const CallVariant& other) const + { + return other <= *this; + } + + unsigned hash() const + { + return WTF::PtrHash<JSCell*>::hash(m_callee); + } + +private: + static JSCell* deletedToken() { return bitwise_cast<JSCell*>(static_cast<uintptr_t>(1)); } + + JSCell* m_callee; +}; + +struct CallVariantHash { + static unsigned hash(const CallVariant& key) { return key.hash(); } + static bool equal(const CallVariant& a, const CallVariant& b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +typedef Vector<CallVariant, 1> CallVariantList; + +// Returns a new variant list by attempting to either append the given variant or merge it with one +// of the variants we already have by despecifying closures. +CallVariantList variantListWithVariant(const CallVariantList&, CallVariant); + +// Returns a new list where every element is despecified, and the list is deduplicated. +CallVariantList despecifiedVariantList(const CallVariantList&); + +} // namespace JSC + +namespace WTF { + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::CallVariant> { + typedef JSC::CallVariantHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::CallVariant> : SimpleClassHashTraits<JSC::CallVariant> { }; + +} // namespace WTF + +#endif // CallVariant_h + diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp index efe4424c2..ba6a4bdf9 100644 --- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp +++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009, 2010, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2008-2010, 2012-2015 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> * * Redistribution and use in source and binary forms, with or without @@ -11,7 +11,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -30,43 +30,123 @@ #include "config.h" #include "CodeBlock.h" +#include "BasicBlockLocation.h" #include "BytecodeGenerator.h" +#include "BytecodeUseDef.h" #include "CallLinkStatus.h" #include "DFGCapabilities.h" #include "DFGCommon.h" -#include "DFGNode.h" -#include "DFGRepatch.h" +#include "DFGDriver.h" +#include "DFGJITCode.h" +#include "DFGWorklist.h" #include "Debugger.h" +#include "FunctionExecutableDump.h" +#include "GetPutInfo.h" +#include "InlineCallFrame.h" #include "Interpreter.h" #include "JIT.h" -#include "JITStubs.h" -#include "JSActivation.h" #include "JSCJSValue.h" #include "JSFunction.h" -#include "JSNameScope.h" +#include "JSLexicalEnvironment.h" +#include "JSModuleEnvironment.h" +#include "LLIntEntrypoint.h" #include "LowLevelInterpreter.h" -#include "Operations.h" +#include "JSCInlines.h" +#include "PCToCodeOriginMap.h" +#include "PolymorphicAccess.h" +#include "ProfilerDatabase.h" #include "ReduceWhitespace.h" -#include "RepatchBuffer.h" +#include "Repatch.h" #include "SlotVisitorInlines.h" -#include <stdio.h> +#include "StackVisitor.h" +#include "TypeLocationCache.h" +#include "TypeProfiler.h" +#include "UnlinkedInstructionStream.h" +#include <wtf/BagToHashMap.h> #include <wtf/CommaPrinter.h> #include <wtf/StringExtras.h> #include <wtf/StringPrintStream.h> +#include <wtf/text/UniquedStringImpl.h> + +#if ENABLE(JIT) +#include "RegisterAtOffsetList.h" +#endif #if ENABLE(DFG_JIT) #include "DFGOperations.h" #endif -#define DUMP_CODE_BLOCK_STATISTICS 0 +#if ENABLE(FTL_JIT) +#include "FTLJITCode.h" +#endif namespace JSC { -#if ENABLE(DFG_JIT) -using namespace DFG; +const ClassInfo CodeBlock::s_info = { + "CodeBlock", 0, 0, + CREATE_METHOD_TABLE(CodeBlock) +}; + +const ClassInfo FunctionCodeBlock::s_info = { + "FunctionCodeBlock", &Base::s_info, 0, + CREATE_METHOD_TABLE(FunctionCodeBlock) +}; + +#if ENABLE(WEBASSEMBLY) +const ClassInfo WebAssemblyCodeBlock::s_info = { + "WebAssemblyCodeBlock", &Base::s_info, 0, + CREATE_METHOD_TABLE(WebAssemblyCodeBlock) +}; #endif -String CodeBlock::inferredName() const +const ClassInfo GlobalCodeBlock::s_info = { + "GlobalCodeBlock", &Base::s_info, 0, + CREATE_METHOD_TABLE(GlobalCodeBlock) +}; + +const ClassInfo ProgramCodeBlock::s_info = { + "ProgramCodeBlock", &Base::s_info, 0, + CREATE_METHOD_TABLE(ProgramCodeBlock) +}; + +const ClassInfo ModuleProgramCodeBlock::s_info = { + "ModuleProgramCodeBlock", &Base::s_info, 0, + CREATE_METHOD_TABLE(ModuleProgramCodeBlock) +}; + +const ClassInfo EvalCodeBlock::s_info = { + "EvalCodeBlock", &Base::s_info, 0, + CREATE_METHOD_TABLE(EvalCodeBlock) +}; + +void FunctionCodeBlock::destroy(JSCell* cell) +{ + jsCast<FunctionCodeBlock*>(cell)->~FunctionCodeBlock(); +} + +#if ENABLE(WEBASSEMBLY) +void WebAssemblyCodeBlock::destroy(JSCell* cell) +{ + jsCast<WebAssemblyCodeBlock*>(cell)->~WebAssemblyCodeBlock(); +} +#endif + +void ProgramCodeBlock::destroy(JSCell* cell) +{ + 
jsCast<ProgramCodeBlock*>(cell)->~ProgramCodeBlock(); +} + +void ModuleProgramCodeBlock::destroy(JSCell* cell) +{ + jsCast<ModuleProgramCodeBlock*>(cell)->~ModuleProgramCodeBlock(); +} + +void EvalCodeBlock::destroy(JSCell* cell) +{ + jsCast<EvalCodeBlock*>(cell)->~EvalCodeBlock(); +} + +CString CodeBlock::inferredName() const { switch (codeType()) { case GlobalCode: @@ -74,22 +154,38 @@ String CodeBlock::inferredName() const case EvalCode: return "<eval>"; case FunctionCode: - return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().string(); + return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8(); + case ModuleCode: + return "<module>"; default: CRASH(); - return String(); + return CString("", 0); } } +bool CodeBlock::hasHash() const +{ + return !!m_hash; +} + +bool CodeBlock::isSafeToComputeHash() const +{ + return !isCompilationThread(); +} + CodeBlockHash CodeBlock::hash() const { - return CodeBlockHash(ownerExecutable()->source(), specializationKind()); + if (!m_hash) { + RELEASE_ASSERT(isSafeToComputeHash()); + m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind()); + } + return m_hash; } -String CodeBlock::sourceCodeForTools() const +CString CodeBlock::sourceCodeForTools() const { if (codeType() != FunctionCode) - return ownerExecutable()->source().toString(); + return ownerScriptExecutable()->source().toUTF8(); SourceProvider* provider = source(); FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable()); @@ -97,78 +193,78 @@ String CodeBlock::sourceCodeForTools() const unsigned unlinkedStartOffset = unlinked->startOffset(); unsigned linkedStartOffset = executable->source().startOffset(); int delta = linkedStartOffset - unlinkedStartOffset; - StringBuilder builder; - builder.append("function "); - builder.append(provider->getRange( - delta + unlinked->functionStartOffset(), - delta + unlinked->startOffset() + unlinked->sourceLength())); - return builder.toString(); + unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart(); + unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength(); + return toCString( + "function ", + provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8()); } -String CodeBlock::sourceCodeOnOneLine() const +CString CodeBlock::sourceCodeOnOneLine() const { return reduceWhitespace(sourceCodeForTools()); } +CString CodeBlock::hashAsStringIfPossible() const +{ + if (hasHash() || isSafeToComputeHash()) + return toCString(hash()); + return "<no-hash>"; +} + void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const { - out.print(inferredName(), "#", hash(), ":[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType()); + out.print(inferredName(), "#", hashAsStringIfPossible()); + out.print(":[", RawPointer(this), "->"); + if (!!m_alternative) + out.print(RawPointer(alternative()), "->"); + out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType()); + if (codeType() == FunctionCode) out.print(specializationKind()); - if (ownerExecutable()->neverInline()) + out.print(", ", instructionCount()); + if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined) + out.print(" (ShouldAlwaysBeInlined)"); + if (ownerScriptExecutable()->neverInline()) out.print(" (NeverInline)"); + if (ownerScriptExecutable()->neverOptimize()) + out.print(" (NeverOptimize)"); + if (ownerScriptExecutable()->didTryToEnterInLoop()) + out.print(" (DidTryToEnterInLoop)"); + if 
(ownerScriptExecutable()->isStrictMode()) + out.print(" (StrictMode)"); + if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation) + out.print(" (FTLFail)"); + if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL) + out.print(" (HadFTLReplacement)"); out.print("]"); } void CodeBlock::dump(PrintStream& out) const { - dumpAssumingJITType(out, getJITType()); -} - -static String escapeQuotes(const String& str) -{ - String result = str; - size_t pos = 0; - while ((pos = result.find('\"', pos)) != notFound) { - result = makeString(result.substringSharingImpl(0, pos), "\"\\\"\"", result.substringSharingImpl(pos + 1)); - pos += 4; - } - return result; + dumpAssumingJITType(out, jitType()); } -static String valueToSourceString(ExecState* exec, JSValue val) +static CString idName(int id0, const Identifier& ident) { - if (!val) - return ASCIILiteral("0"); - - if (val.isString()) - return makeString("\"", escapeQuotes(val.toString(exec)->value(exec)), "\""); - - return toString(val); + return toCString(ident.impl(), "(@id", id0, ")"); } -static CString constantName(ExecState* exec, int k, JSValue value) +CString CodeBlock::registerName(int r) const { - return makeString(valueToSourceString(exec, value), "(@k", String::number(k - FirstConstantRegisterIndex), ")").utf8(); -} + if (isConstantRegisterIndex(r)) + return constantName(r); -static CString idName(int id0, const Identifier& ident) -{ - return makeString(ident.string(), "(@id", String::number(id0), ")").utf8(); + return toCString(VirtualRegister(r)); } -CString CodeBlock::registerName(ExecState* exec, int r) const +CString CodeBlock::constantName(int index) const { - if (r == missingThisObjectMarker()) - return "<null>"; - - if (isConstantRegisterIndex(r)) - return constantName(exec, r, getConstant(r)); - - return makeString("r", String::number(r)).utf8(); + JSValue value = getConstant(index); + return toCString(value, "(", VirtualRegister(index), ")"); } -static String regexpToSourceString(RegExp* regExp) +static CString regexpToSourceString(RegExp* regExp) { char postfix[5] = { '/', 0, 0, 0, 0 }; int index = 1; @@ -179,19 +275,12 @@ static String regexpToSourceString(RegExp* regExp) if (regExp->multiline()) postfix[index] = 'm'; - return makeString("/", regExp->pattern(), postfix); + return toCString("/", regExp->pattern().impl(), postfix); } static CString regexpName(int re, RegExp* regexp) { - return makeString(regexpToSourceString(regexp), "(@re", String::number(re), ")").utf8(); -} - -static String pointerToSourceString(void* p) -{ - char buffer[2 + 2 * sizeof(void*) + 1]; // 0x [two characters per byte] \0 - snprintf(buffer, sizeof(buffer), "%p", p); - return buffer; + return toCString(regexpToSourceString(regexp), "(@re", re, ")"); } NEVER_INLINE static const char* debugHookName(int debugHookID) @@ -220,7 +309,8 @@ void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, co int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; - out.printf("[%4d] %s\t\t %s, %s", location, op, registerName(exec, r0).data(), registerName(exec, r1).data()); + printLocationAndOp(out, exec, location, it, op); + out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); } void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op) @@ -228,14 +318,16 @@ void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, c int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; - out.printf("[%4d] 
%s\t\t %s, %s, %s", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + printLocationAndOp(out, exec, location, it, op); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); } void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op) { int r0 = (++it)->u.operand; int offset = (++it)->u.operand; - out.printf("[%4d] %s\t\t %s, %d(->%d)", location, op, registerName(exec, r0).data(), offset, location + offset); + printLocationAndOp(out, exec, location, it, op); + out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset); } void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it) @@ -245,72 +337,36 @@ void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, case op_get_by_id: op = "get_by_id"; break; - case op_get_by_id_out_of_line: - op = "get_by_id_out_of_line"; - break; - case op_get_by_id_self: - op = "get_by_id_self"; - break; - case op_get_by_id_proto: - op = "get_by_id_proto"; - break; - case op_get_by_id_chain: - op = "get_by_id_chain"; - break; - case op_get_by_id_getter_self: - op = "get_by_id_getter_self"; - break; - case op_get_by_id_getter_proto: - op = "get_by_id_getter_proto"; - break; - case op_get_by_id_getter_chain: - op = "get_by_id_getter_chain"; - break; - case op_get_by_id_custom_self: - op = "get_by_id_custom_self"; - break; - case op_get_by_id_custom_proto: - op = "get_by_id_custom_proto"; - break; - case op_get_by_id_custom_chain: - op = "get_by_id_custom_chain"; - break; - case op_get_by_id_generic: - op = "get_by_id_generic"; - break; case op_get_array_length: op = "array_length"; break; - case op_get_string_length: - op = "string_length"; - break; default: RELEASE_ASSERT_NOT_REACHED(); +#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) op = 0; +#endif } int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int id0 = (++it)->u.operand; - out.printf("[%4d] %s\t %s, %s, %s", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data()); + printLocationAndOp(out, exec, location, it, op); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); it += 4; // Increment up to the value profiler. 
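
This cursor discipline - each opcode word is followed by a fixed number of operand words, and every print helper must advance `it` past exactly what it consumed (hence skips like the `it += 4` above) - can be sketched in isolation. The opcode set and operand counts here are invented for the example:

#include <cstdio>
#include <vector>

enum OpSketch : int { op_enter_sketch, op_mov_sketch, op_add_sketch };

// How many operand words follow each opcode word (invented counts).
static int operandCount(OpSketch op)
{
    switch (op) {
    case op_enter_sketch: return 0;
    case op_mov_sketch: return 2;
    case op_add_sketch: return 3;
    }
    return 0;
}

static void dumpStream(const std::vector<int>& stream)
{
    for (size_t pc = 0; pc < stream.size();) {
        OpSketch op = static_cast<OpSketch>(stream[pc]);
        std::printf("[%4zu] op%d", pc, static_cast<int>(op));
        for (int i = 1; i <= operandCount(op); ++i)
            std::printf(" r%d", stream[pc + i]);
        std::printf("\n");
        pc += 1 + operandCount(op); // skip the opcode and exactly its operands
    }
}
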
} -#if ENABLE(JIT) || ENABLE(LLINT) // unused in some configurations -static void dumpStructure(PrintStream& out, const char* name, ExecState* exec, Structure* structure, Identifier& ident) +static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident) { if (!structure) return; out.printf("%s = %p", name, structure); - PropertyOffset offset = structure->get(exec->vm(), ident); + PropertyOffset offset = structure->getConcurrently(ident.impl()); if (offset != invalidOffset) out.printf(" (offset = %d)", offset); } -#endif -#if ENABLE(JIT) // unused when not ENABLE(JIT), leading to silly warnings -static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain, Identifier& ident) +static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident) { out.printf("chain = %p: [", chain); bool first = true; @@ -321,135 +377,138 @@ static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain, first = false; else out.printf(", "); - dumpStructure(out, "struct", exec, currentStructure->get(), ident); + dumpStructure(out, "struct", currentStructure->get(), ident); } out.printf("]"); } -#endif -void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location) +void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map) { Instruction* instruction = instructions().begin() + location; - Identifier& ident = identifier(instruction[3].u.operand); + const Identifier& ident = identifier(instruction[3].u.operand); UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. -#if ENABLE(LLINT) if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length) out.printf(" llint(array_length)"); - else if (Structure* structure = instruction[4].u.structure.get()) { + else if (StructureID structureID = instruction[4].u.structureID) { + Structure* structure = m_vm->heap.structureIDTable().get(structureID); out.printf(" llint("); - dumpStructure(out, "struct", exec, structure, ident); + dumpStructure(out, "struct", structure, ident); out.printf(")"); } -#endif #if ENABLE(JIT) - if (numberOfStructureStubInfos()) { - StructureStubInfo& stubInfo = getStubInfo(location); - if (stubInfo.seen) { - out.printf(" jit("); - - Structure* baseStructure = 0; - Structure* prototypeStructure = 0; - StructureChain* chain = 0; - PolymorphicAccessStructureList* structureList = 0; - int listSize = 0; - - switch (stubInfo.accessType) { - case access_get_by_id_self: - out.printf("self"); - baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get(); - break; - case access_get_by_id_proto: - out.printf("proto"); - baseStructure = stubInfo.u.getByIdProto.baseObjectStructure.get(); - prototypeStructure = stubInfo.u.getByIdProto.prototypeStructure.get(); - break; - case access_get_by_id_chain: - out.printf("chain"); - baseStructure = stubInfo.u.getByIdChain.baseObjectStructure.get(); - chain = stubInfo.u.getByIdChain.chain.get(); - break; - case access_get_by_id_self_list: - out.printf("self_list"); - structureList = stubInfo.u.getByIdSelfList.structureList; - listSize = stubInfo.u.getByIdSelfList.listSize; - break; - case access_get_by_id_proto_list: - out.printf("proto_list"); - structureList = stubInfo.u.getByIdProtoList.structureList; - listSize = stubInfo.u.getByIdProtoList.listSize; - break; - case access_unset: - out.printf("unset"); - break; - case access_get_by_id_generic: - out.printf("generic"); - break; - 
case access_get_array_length: - out.printf("array_length"); - break; - case access_get_string_length: - out.printf("string_length"); - break; - default: - RELEASE_ASSERT_NOT_REACHED(); - break; - } - - if (baseStructure) { - out.printf(", "); - dumpStructure(out, "struct", exec, baseStructure, ident); - } + if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) { + StructureStubInfo& stubInfo = *stubPtr; + if (stubInfo.resetByGC) + out.print(" (Reset By GC)"); + + out.printf(" jit("); - if (prototypeStructure) { - out.printf(", "); - dumpStructure(out, "prototypeStruct", exec, baseStructure, ident); - } + Structure* baseStructure = nullptr; + PolymorphicAccess* stub = nullptr; - if (chain) { - out.printf(", "); - dumpChain(out, exec, chain, ident); - } + switch (stubInfo.cacheType) { + case CacheType::GetByIdSelf: + out.printf("self"); + baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get(); + break; + case CacheType::Stub: + out.printf("stub"); + stub = stubInfo.u.stub; + break; + case CacheType::Unset: + out.printf("unset"); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } - if (structureList) { - out.printf(", list = %p: [", structureList); - for (int i = 0; i < listSize; ++i) { - if (i) - out.printf(", "); - out.printf("("); - dumpStructure(out, "base", exec, structureList->list[i].base.get(), ident); - if (structureList->list[i].isChain) { - if (structureList->list[i].u.chain.get()) { - out.printf(", "); - dumpChain(out, exec, structureList->list[i].u.chain.get(), ident); - } - } else { - if (structureList->list[i].u.proto.get()) { - out.printf(", "); - dumpStructure(out, "proto", exec, structureList->list[i].u.proto.get(), ident); - } - } - out.printf(")"); - } - out.printf("]"); + if (baseStructure) { + out.printf(", "); + dumpStructure(out, "struct", baseStructure, ident); + } + + if (stub) + out.print(", ", *stub); + + out.printf(")"); + } +#else + UNUSED_PARAM(map); +#endif +} + +void CodeBlock::printPutByIdCacheStatus(PrintStream& out, int location, const StubInfoMap& map) +{ + Instruction* instruction = instructions().begin() + location; + + const Identifier& ident = identifier(instruction[2].u.operand); + + UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. 
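
The llint metadata dumped below distinguishes a plain replace cache (one structure, existing property) from a structure-transition cache (a prev/next structure pair plus a prototype chain to validate). A hedged sketch of the fast-path check such a cache enables, with invented types; a real engine would also validate the cached prototype chain when filling the cache:

struct StructureSketch; // opaque stand-in for a JSC Structure

struct PutByIdCacheSketch {
    StructureSketch* oldStructure = nullptr; // receiver must currently have this shape
    StructureSketch* newStructure = nullptr; // non-null means a property-adding transition
    unsigned offset = 0;                     // storage slot to write the value into
};

// Returns true when the cached fast path applies. For a transition cache the
// receiver's structure pointer is swung to the new shape.
inline bool tryCachedPut(StructureSketch*& receiverStructure, const PutByIdCacheSketch& cache)
{
    if (!cache.oldStructure || receiverStructure != cache.oldStructure)
        return false; // cache miss: take the generic slow path
    if (cache.newStructure)
        receiverStructure = cache.newStructure; // adopt the post-put shape
    return true;
}
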
+ + out.print(", ", instruction[8].u.putByIdFlags); + + if (StructureID structureID = instruction[4].u.structureID) { + Structure* structure = m_vm->heap.structureIDTable().get(structureID); + out.print(" llint("); + if (StructureID newStructureID = instruction[6].u.structureID) { + Structure* newStructure = m_vm->heap.structureIDTable().get(newStructureID); + dumpStructure(out, "prev", structure, ident); + out.print(", "); + dumpStructure(out, "next", newStructure, ident); + if (StructureChain* chain = instruction[7].u.structureChain.get()) { + out.print(", "); + dumpChain(out, chain, ident); } - out.printf(")"); + } else + dumpStructure(out, "struct", structure, ident); + out.print(")"); + } + +#if ENABLE(JIT) + if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) { + StructureStubInfo& stubInfo = *stubPtr; + if (stubInfo.resetByGC) + out.print(" (Reset By GC)"); + + out.printf(" jit("); + + switch (stubInfo.cacheType) { + case CacheType::PutByIdReplace: + out.print("replace, "); + dumpStructure(out, "struct", stubInfo.u.byIdSelf.baseObjectStructure.get(), ident); + break; + case CacheType::Stub: { + out.print("stub, ", *stubInfo.u.stub); + break; + } + case CacheType::Unset: + out.printf("unset"); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + break; } + out.printf(")"); } +#else + UNUSED_PARAM(map); #endif } -void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode) +void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map) { + int dst = (++it)->u.operand; int func = (++it)->u.operand; int argCount = (++it)->u.operand; int registerOffset = (++it)->u.operand; - out.printf("[%4d] %s\t %s, %d, %d", location, op, registerName(exec, func).data(), argCount, registerOffset); + printLocationAndOp(out, exec, location, it, op); + out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset); if (cacheDumpMode == DumpCaches) { -#if ENABLE(LLINT) LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo; if (callLinkInfo->lastSeenCallee) { out.printf( @@ -457,17 +516,23 @@ void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, con callLinkInfo->lastSeenCallee.get(), callLinkInfo->lastSeenCallee->executable()); } -#endif #if ENABLE(JIT) - if (numberOfCallLinkInfos()) { - JSFunction* target = getCallLinkInfo(location).lastSeenCallee.get(); + if (CallLinkInfo* info = map.get(CodeOrigin(location))) { + JSFunction* target = info->lastSeenCallee(); if (target) out.printf(" jit(%p, exec %p)", target, target->executable()); } + + if (jitType() != JITCode::FTLJIT) + out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")"); +#else + UNUSED_PARAM(map); #endif - out.print(" status(", CallLinkStatus::computeFor(this, location), ")"); } - it += 2; + ++it; + ++it; + dumpArrayProfiling(out, it, hasPrintedProfiling); + dumpValueProfiling(out, it, hasPrintedProfiling); } void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op) @@ -475,52 +540,34 @@ void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, int r0 = (++it)->u.operand; int id0 = (++it)->u.operand; int r1 = (++it)->u.operand; - out.printf("[%4d] %s\t %s, %s, %s", location, op, registerName(exec, r0).data(), idName(id0, 
m_identifiers[id0]).data(), registerName(exec, r1).data()); + printLocationAndOp(out, exec, location, it, op); + out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data()); it += 5; } -void CodeBlock::printStructure(PrintStream& out, const char* name, const Instruction* vPC, int operand) +void CodeBlock::dumpSource() { - unsigned instructionOffset = vPC - instructions().begin(); - out.printf(" [%4d] %s: %s\n", instructionOffset, name, pointerToSourceString(vPC[operand].u.structure).utf8().data()); + dumpSource(WTF::dataFile()); } -void CodeBlock::printStructures(PrintStream& out, const Instruction* vPC) +void CodeBlock::dumpSource(PrintStream& out) { - Interpreter* interpreter = m_vm->interpreter; - unsigned instructionOffset = vPC - instructions().begin(); - - if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id)) { - printStructure(out, "get_by_id", vPC, 4); - return; - } - if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_self)) { - printStructure(out, "get_by_id_self", vPC, 4); - return; - } - if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto)) { - out.printf(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_proto", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data()); - return; - } - if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_transition)) { - out.printf(" [%4d] %s: %s, %s, %s\n", instructionOffset, "put_by_id_transition", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data(), pointerToSourceString(vPC[6].u.structureChain).utf8().data()); - return; - } - if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_chain)) { - out.printf(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_chain", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structureChain).utf8().data()); - return; - } - if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id)) { - printStructure(out, "put_by_id", vPC, 4); - return; - } - if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_replace)) { - printStructure(out, "put_by_id_replace", vPC, 4); + ScriptExecutable* executable = ownerScriptExecutable(); + if (executable->isFunctionExecutable()) { + FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable); + StringView source = functionExecutable->source().provider()->getRange( + functionExecutable->parametersStartOffset(), + functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'. + + out.print("function ", inferredName(), source); return; } + out.print(executable->source().view()); +} - // These m_instructions doesn't ref Structures. 
- ASSERT(vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_call) || vPC[0].u.opcode == interpreter->getOpcode(op_call_eval) || vPC[0].u.opcode == interpreter->getOpcode(op_construct)); +void CodeBlock::dumpBytecode() +{ + dumpBytecode(WTF::dataFile()); } void CodeBlock::dumpBytecode(PrintStream& out) @@ -539,41 +586,45 @@ void CodeBlock::dumpBytecode(PrintStream& out) ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)", static_cast<unsigned long>(instructions().size()), static_cast<unsigned long>(instructions().size() * sizeof(Instruction)), - m_numParameters, m_numCalleeRegisters, m_numVars); - if (symbolTable() && symbolTable()->captureCount()) { - out.printf( - "; %d captured var(s) (from r%d to r%d, inclusive)", - symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() - 1); - } - if (usesArguments()) { - out.printf( - "; uses arguments, in r%d, r%d", - argumentsRegister(), - unmodifiedArgumentsRegister(argumentsRegister())); - } - if (needsFullScopeChain() && codeType() == FunctionCode) - out.printf("; activation in r%d", activationRegister()); - out.print("\n\nSource: ", sourceCodeOnOneLine(), "\n\n"); - + m_numParameters, m_numCalleeLocals, m_numVars); + out.printf("\n"); + + StubInfoMap stubInfos; + CallLinkInfoMap callLinkInfos; + getStubInfoMap(stubInfos); + getCallLinkInfoMap(callLinkInfos); + const Instruction* begin = instructions().begin(); const Instruction* end = instructions().end(); for (const Instruction* it = begin; it != end; ++it) - dumpBytecode(out, exec, begin, it); - - if (!m_identifiers.isEmpty()) { + dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos); + + if (numberOfIdentifiers()) { out.printf("\nIdentifiers:\n"); size_t i = 0; do { - out.printf(" id%u = %s\n", static_cast<unsigned>(i), m_identifiers[i].string().utf8().data()); + out.printf(" id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data()); ++i; - } while (i != m_identifiers.size()); + } while (i != numberOfIdentifiers()); } if (!m_constantRegisters.isEmpty()) { out.printf("\nConstants:\n"); size_t i = 0; do { - out.printf(" k%u = %s\n", static_cast<unsigned>(i), valueToSourceString(exec, m_constantRegisters[i].get()).utf8().data()); + const char* sourceCodeRepresentationDescription = nullptr; + switch (m_constantsSourceCodeRepresentation[i]) { + case SourceCodeRepresentation::Double: + sourceCodeRepresentationDescription = ": in source as double"; + break; + case SourceCodeRepresentation::Integer: + sourceCodeRepresentationDescription = ": in source as integer"; + break; + case SourceCodeRepresentation::Other: + sourceCodeRepresentationDescription = ""; + break; + } + out.printf(" k%u = %s%s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription); ++i; } while (i < m_constantRegisters.size()); } @@ -582,59 +633,28 @@ void CodeBlock::dumpBytecode(PrintStream& out) out.printf("\nm_regexps:\n"); size_t i = 0; do { - out.printf(" re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).utf8().data()); + out.printf(" re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data()); ++i; } while (i < count); } -#if ENABLE(JIT) - if (!m_structureStubInfos.isEmpty()) - out.printf("\nStructures:\n"); -#endif - - if (m_rareData && 
!m_rareData->m_exceptionHandlers.isEmpty()) { - out.printf("\nException Handlers:\n"); - unsigned i = 0; - do { - out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target, m_rareData->m_exceptionHandlers[i].scopeDepth); - ++i; - } while (i < m_rareData->m_exceptionHandlers.size()); - } + dumpExceptionHandlers(out); - if (m_rareData && !m_rareData->m_immediateSwitchJumpTables.isEmpty()) { - out.printf("Immediate Switch Jump Tables:\n"); + if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) { + out.printf("Switch Jump Tables:\n"); unsigned i = 0; do { out.printf(" %1d = {\n", i); int entry = 0; - Vector<int32_t>::const_iterator end = m_rareData->m_immediateSwitchJumpTables[i].branchOffsets.end(); - for (Vector<int32_t>::const_iterator iter = m_rareData->m_immediateSwitchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) { + Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end(); + for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) { if (!*iter) continue; - out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_immediateSwitchJumpTables[i].min, *iter); + out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter); } out.printf(" }\n"); ++i; - } while (i < m_rareData->m_immediateSwitchJumpTables.size()); - } - - if (m_rareData && !m_rareData->m_characterSwitchJumpTables.isEmpty()) { - out.printf("\nCharacter Switch Jump Tables:\n"); - unsigned i = 0; - do { - out.printf(" %1d = {\n", i); - int entry = 0; - Vector<int32_t>::const_iterator end = m_rareData->m_characterSwitchJumpTables[i].branchOffsets.end(); - for (Vector<int32_t>::const_iterator iter = m_rareData->m_characterSwitchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) { - if (!*iter) - continue; - ASSERT(!((i + m_rareData->m_characterSwitchJumpTables[i].min) & ~0xFFFF)); - UChar ch = static_cast<UChar>(entry + m_rareData->m_characterSwitchJumpTables[i].min); - out.printf("\t\t\"%s\" => %04d\n", String(&ch, 1).utf8().data(), *iter); - } - out.printf(" }\n"); - ++i; - } while (i < m_rareData->m_characterSwitchJumpTables.size()); + } while (i < m_rareData->m_switchJumpTables.size()); } if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) { @@ -644,15 +664,41 @@ void CodeBlock::dumpBytecode(PrintStream& out) out.printf(" %1d = {\n", i); StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end(); for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter) - out.printf("\t\t\"%s\" => %04d\n", String(iter->key).utf8().data(), iter->value.branchOffset); + out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset); out.printf(" }\n"); ++i; } while (i < m_rareData->m_stringSwitchJumpTables.size()); } + if (m_rareData && !m_rareData->m_liveCalleeLocalsAtYield.isEmpty()) { + out.printf("\nLive Callee Locals:\n"); + unsigned i = 0; + do { + const FastBitVector& liveness = m_rareData->m_liveCalleeLocalsAtYield[i]; + out.printf(" live%1u = ", i); + liveness.dump(out); + out.printf("\n"); + ++i; + } while (i < m_rareData->m_liveCalleeLocalsAtYield.size()); + } + out.printf("\n"); } +void CodeBlock::dumpExceptionHandlers(PrintStream& out) 
+{ + if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) { + out.printf("\nException Handlers:\n"); + unsigned i = 0; + do { + HandlerInfo& handler = m_rareData->m_exceptionHandlers[i]; + out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n", + i + 1, handler.start, handler.end, handler.target, handler.typeName()); + ++i; + } while (i < m_rareData->m_exceptionHandlers.size()); + } +} + void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling) { if (hasPrintedProfiling) { @@ -666,35 +712,30 @@ void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling) void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling) { + ConcurrentJITLocker locker(m_lock); + ++it; -#if ENABLE(VALUE_PROFILER) - CString description = it->u.profile->briefDescription(); + CString description = it->u.profile->briefDescription(locker); if (!description.length()) return; beginDumpProfiling(out, hasPrintedProfiling); out.print(description); -#else - UNUSED_PARAM(out); - UNUSED_PARAM(hasPrintedProfiling); -#endif } void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling) { + ConcurrentJITLocker locker(m_lock); + ++it; -#if ENABLE(VALUE_PROFILER) - CString description = it->u.arrayProfile->briefDescription(this); + if (!it->u.arrayProfile) + return; + CString description = it->u.arrayProfile->briefDescription(locker, this); if (!description.length()) return; beginDumpProfiling(out, hasPrintedProfiling); out.print(description); -#else - UNUSED_PARAM(out); - UNUSED_PARAM(hasPrintedProfiling); -#endif } -#if ENABLE(VALUE_PROFILER) void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling) { if (!profile || !profile->m_counter) @@ -703,55 +744,108 @@ void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCase beginDumpProfiling(out, hasPrintedProfiling); out.print(name, profile->m_counter); } -#endif -void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it) +void CodeBlock::dumpResultProfile(PrintStream& out, ResultProfile* profile, bool& hasPrintedProfiling) +{ + if (!profile) + return; + + beginDumpProfiling(out, hasPrintedProfiling); + out.print("results: ", *profile); +} + +void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op) +{ + out.printf("[%4d] %-17s ", location, op); +} + +void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand) +{ + printLocationAndOp(out, exec, location, it, op); + out.printf("%s", registerName(operand).data()); +} + +void CodeBlock::dumpBytecode( + PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it, + const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos) { int location = it - begin; bool hasPrintedProfiling = false; - switch (exec->interpreter()->getOpcodeID(it->u.opcode)) { + OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode); + switch (opcode) { case op_enter: { - out.printf("[%4d] enter", location); + printLocationAndOp(out, exec, location, it, "enter"); break; } - case op_create_activation: { + case op_get_scope: { int r0 = (++it)->u.operand; - out.printf("[%4d] create_activation %s", location, registerName(exec, r0).data()); + printLocationOpAndRegisterOperand(out, exec, location, it, 
"get_scope", r0); break; } - case op_create_arguments: { + case op_create_direct_arguments: { int r0 = (++it)->u.operand; - out.printf("[%4d] create_arguments\t %s", location, registerName(exec, r0).data()); + printLocationAndOp(out, exec, location, it, "create_direct_arguments"); + out.printf("%s", registerName(r0).data()); break; } - case op_init_lazy_reg: { + case op_create_scoped_arguments: { int r0 = (++it)->u.operand; - out.printf("[%4d] init_lazy_reg\t %s", location, registerName(exec, r0).data()); + int r1 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "create_scoped_arguments"); + out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); break; } - case op_get_callee: { + case op_create_out_of_band_arguments: { int r0 = (++it)->u.operand; - out.printf("[%4d] op_get_callee %s\n", location, registerName(exec, r0).data()); - ++it; + printLocationAndOp(out, exec, location, it, "create_out_of_band_arguments"); + out.printf("%s", registerName(r0).data()); + break; + } + case op_copy_rest: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + unsigned argumentOffset = (++it)->u.unsignedValue; + printLocationAndOp(out, exec, location, it, "copy_rest"); + out.printf("%s, %s, ", registerName(r0).data(), registerName(r1).data()); + out.printf("ArgumentsOffset: %u", argumentOffset); + break; + } + case op_get_rest_length: { + int r0 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "get_rest_length"); + out.printf("%s, ", registerName(r0).data()); + unsigned argumentOffset = (++it)->u.unsignedValue; + out.printf("ArgumentsOffset: %u", argumentOffset); break; } case op_create_this: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; unsigned inferredInlineCapacity = (++it)->u.operand; - out.printf("[%4d] create_this %s, %s, %u", location, registerName(exec, r0).data(), registerName(exec, r1).data(), inferredInlineCapacity); + unsigned cachedFunction = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "create_this"); + out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction); + break; + } + case op_to_this: { + int r0 = (++it)->u.operand; + printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0); + Structure* structure = (++it)->u.structure.get(); + if (structure) + out.print(", cache(struct = ", RawPointer(structure), ")"); + out.print(", ", (++it)->u.toThisStatus); break; } - case op_convert_this: { + case op_check_tdz: { int r0 = (++it)->u.operand; - out.printf("[%4d] convert_this\t %s", location, registerName(exec, r0).data()); - ++it; // Skip value profile. + printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0); break; } case op_new_object: { int r0 = (++it)->u.operand; unsigned inferredInlineCapacity = (++it)->u.operand; - out.printf("[%4d] new_object\t %s, %u", location, registerName(exec, r0).data(), inferredInlineCapacity); + printLocationAndOp(out, exec, location, it, "new_object"); + out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity); ++it; // Skip object allocation profile. 
break; } @@ -759,14 +853,16 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio int dst = (++it)->u.operand; int argv = (++it)->u.operand; int argc = (++it)->u.operand; - out.printf("[%4d] new_array\t %s, %s, %d", location, registerName(exec, dst).data(), registerName(exec, argv).data(), argc); + printLocationAndOp(out, exec, location, it, "new_array"); + out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc); ++it; // Skip array allocation profile. break; } case op_new_array_with_size: { int dst = (++it)->u.operand; int length = (++it)->u.operand; - out.printf("[%4d] new_array_with_size\t %s, %s", location, registerName(exec, dst).data(), registerName(exec, length).data()); + printLocationAndOp(out, exec, location, it, "new_array_with_size"); + out.printf("%s, %s", registerName(dst).data(), registerName(length).data()); ++it; // Skip array allocation profile. break; } @@ -774,14 +870,16 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio int dst = (++it)->u.operand; int argv = (++it)->u.operand; int argc = (++it)->u.operand; - out.printf("[%4d] new_array_buffer\t %s, %d, %d", location, registerName(exec, dst).data(), argv, argc); + printLocationAndOp(out, exec, location, it, "new_array_buffer"); + out.printf("%s, %d, %d", registerName(dst).data(), argv, argc); ++it; // Skip array allocation profile. break; } case op_new_regexp: { int r0 = (++it)->u.operand; int re0 = (++it)->u.operand; - out.printf("[%4d] new_regexp\t %s, ", location, registerName(exec, r0).data()); + printLocationAndOp(out, exec, location, it, "new_regexp"); + out.printf("%s, ", registerName(r0).data()); if (r0 >=0 && r0 < (int)m_unlinkedCode->numberOfRegExps()) out.printf("%s", regexpName(re0, regexp(re0)).data()); else @@ -791,7 +889,24 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio case op_mov: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; - out.printf("[%4d] mov\t\t %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data()); + printLocationAndOp(out, exec, location, it, "mov"); + out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); + break; + } + case op_profile_type: { + int r0 = (++it)->u.operand; + ++it; + ++it; + ++it; + ++it; + printLocationAndOp(out, exec, location, it, "op_profile_type"); + out.printf("%s", registerName(r0).data()); + break; + } + case op_profile_control_flow: { + BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation; + printLocationAndOp(out, exec, location, it, "profile_control_flow"); + out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset()); break; } case op_not: { @@ -840,18 +955,22 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio } case op_inc: { int r0 = (++it)->u.operand; - out.printf("[%4d] pre_inc\t\t %s", location, registerName(exec, r0).data()); + printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0); break; } case op_dec: { int r0 = (++it)->u.operand; - out.printf("[%4d] pre_dec\t\t %s", location, registerName(exec, r0).data()); + printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0); break; } case op_to_number: { printUnaryOp(out, exec, location, it, "to_number"); break; } + case op_to_string: { + printUnaryOp(out, exec, location, it, "to_string"); + break; + } case op_negate: { printUnaryOp(out, exec, location, it, "negate"); break; @@ -907,19 +1026,33 @@ void 
CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio ++it; break; } - case op_check_has_instance: { + case op_overrides_has_instance: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; - int offset = (++it)->u.operand; - out.printf("[%4d] check_has_instance\t\t %s, %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), offset, location + offset); + printLocationAndOp(out, exec, location, it, "overrides_has_instance"); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); break; } case op_instanceof: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; - out.printf("[%4d] instanceof\t\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + printLocationAndOp(out, exec, location, it, "instanceof"); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); + break; + } + case op_instanceof_custom: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int r2 = (++it)->u.operand; + int r3 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "instanceof_custom"); + out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data()); + break; + } + case op_unsigned: { + printUnaryOp(out, exec, location, it, "unsigned"); break; } case op_typeof: { @@ -946,6 +1079,10 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio printUnaryOp(out, exec, location, it, "is_object"); break; } + case op_is_object_or_null: { + printUnaryOp(out, exec, location, it, "is_object_or_null"); + break; + } case op_is_function: { printUnaryOp(out, exec, location, it, "is_function"); break; @@ -954,208 +1091,97 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio printBinaryOp(out, exec, location, it, "in"); break; } - case op_put_to_base_variable: - case op_put_to_base: { - int base = (++it)->u.operand; - int id0 = (++it)->u.operand; - int value = (++it)->u.operand; - int resolveInfo = (++it)->u.operand; - out.printf("[%4d] put_to_base\t %s, %s, %s, %d", location, registerName(exec, base).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, value).data(), resolveInfo); - break; - } - case op_resolve: - case op_resolve_global_property: - case op_resolve_global_var: - case op_resolve_scoped_var: - case op_resolve_scoped_var_on_top_scope: - case op_resolve_scoped_var_with_top_scope_check: { - int r0 = (++it)->u.operand; - int id0 = (++it)->u.operand; - int resolveInfo = (++it)->u.operand; - out.printf("[%4d] resolve\t\t %s, %s, %d", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo); - dumpValueProfiling(out, it, hasPrintedProfiling); - break; - } - case op_get_scoped_var: { - int r0 = (++it)->u.operand; - int index = (++it)->u.operand; - int skipLevels = (++it)->u.operand; - out.printf("[%4d] get_scoped_var\t %s, %d, %d", location, registerName(exec, r0).data(), index, skipLevels); + case op_get_by_id: + case op_get_array_length: { + printGetByIdOp(out, exec, location, it); + printGetByIdCacheStatus(out, exec, location, stubInfos); dumpValueProfiling(out, it, hasPrintedProfiling); break; } - case op_put_scoped_var: { - int index = (++it)->u.operand; - int skipLevels = (++it)->u.operand; - int r0 = 
(++it)->u.operand; - out.printf("[%4d] put_scoped_var\t %d, %d, %s", location, index, skipLevels, registerName(exec, r0).data()); - break; - } - case op_init_global_const_nop: { - out.printf("[%4d] init_global_const_nop\t", location); - it++; - it++; - it++; - it++; - break; - } - case op_init_global_const: { - WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer; - int r0 = (++it)->u.operand; - out.printf("[%4d] init_global_const\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data()); - it++; - it++; + case op_put_by_id: { + printPutByIdOp(out, exec, location, it, "put_by_id"); + printPutByIdCacheStatus(out, location, stubInfos); break; } - case op_init_global_const_check: { - WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer; + case op_put_getter_by_id: { int r0 = (++it)->u.operand; - out.printf("[%4d] init_global_const_check\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data()); - it++; - it++; + int id0 = (++it)->u.operand; + int n0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "put_getter_by_id"); + out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data()); break; } - case op_resolve_base_to_global: - case op_resolve_base_to_global_dynamic: - case op_resolve_base_to_scope: - case op_resolve_base_to_scope_with_top_scope_check: - case op_resolve_base: { + case op_put_setter_by_id: { int r0 = (++it)->u.operand; int id0 = (++it)->u.operand; - int isStrict = (++it)->u.operand; - int resolveInfo = (++it)->u.operand; - int putToBaseInfo = (++it)->u.operand; - out.printf("[%4d] resolve_base%s\t %s, %s, %d, %d", location, isStrict ? 
"_strict" : "", registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo, putToBaseInfo); - dumpValueProfiling(out, it, hasPrintedProfiling); + int n0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "put_setter_by_id"); + out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data()); break; } - case op_resolve_with_base: { + case op_put_getter_setter_by_id: { int r0 = (++it)->u.operand; - int r1 = (++it)->u.operand; int id0 = (++it)->u.operand; - int resolveInfo = (++it)->u.operand; - int putToBaseInfo = (++it)->u.operand; - out.printf("[%4d] resolve_with_base %s, %s, %s, %d, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo, putToBaseInfo); - dumpValueProfiling(out, it, hasPrintedProfiling); + int n0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int r2 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "put_getter_setter_by_id"); + out.printf("%s, %s, %d, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data(), registerName(r2).data()); break; } - case op_resolve_with_this: { + case op_put_getter_by_val: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; - int id0 = (++it)->u.operand; - int resolveInfo = (++it)->u.operand; - out.printf("[%4d] resolve_with_this %s, %s, %s, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo); - dumpValueProfiling(out, it, hasPrintedProfiling); - break; - } - case op_get_by_id: - case op_get_by_id_out_of_line: - case op_get_by_id_self: - case op_get_by_id_proto: - case op_get_by_id_chain: - case op_get_by_id_getter_self: - case op_get_by_id_getter_proto: - case op_get_by_id_getter_chain: - case op_get_by_id_custom_self: - case op_get_by_id_custom_proto: - case op_get_by_id_custom_chain: - case op_get_by_id_generic: - case op_get_array_length: - case op_get_string_length: { - printGetByIdOp(out, exec, location, it); - printGetByIdCacheStatus(out, exec, location); - dumpValueProfiling(out, it, hasPrintedProfiling); - break; - } - case op_get_arguments_length: { - printUnaryOp(out, exec, location, it, "get_arguments_length"); - it++; - break; - } - case op_put_by_id: { - printPutByIdOp(out, exec, location, it, "put_by_id"); - break; - } - case op_put_by_id_out_of_line: { - printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line"); - break; - } - case op_put_by_id_replace: { - printPutByIdOp(out, exec, location, it, "put_by_id_replace"); - break; - } - case op_put_by_id_transition: { - printPutByIdOp(out, exec, location, it, "put_by_id_transition"); - break; - } - case op_put_by_id_transition_direct: { - printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct"); - break; - } - case op_put_by_id_transition_direct_out_of_line: { - printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line"); - break; - } - case op_put_by_id_transition_normal: { - printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal"); - break; - } - case op_put_by_id_transition_normal_out_of_line: { - printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line"); - break; - } - case op_put_by_id_generic: { - printPutByIdOp(out, exec, location, it, "put_by_id_generic"); + int n0 = (++it)->u.operand; + int r2 = (++it)->u.operand; + printLocationAndOp(out, exec, 
location, it, "put_getter_by_val"); + out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data()); break; } - case op_put_getter_setter: { + case op_put_setter_by_val: { int r0 = (++it)->u.operand; - int id0 = (++it)->u.operand; int r1 = (++it)->u.operand; + int n0 = (++it)->u.operand; int r2 = (++it)->u.operand; - out.printf("[%4d] put_getter_setter\t %s, %s, %s, %s", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + printLocationAndOp(out, exec, location, it, "put_setter_by_val"); + out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data()); break; } case op_del_by_id: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int id0 = (++it)->u.operand; - out.printf("[%4d] del_by_id\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data()); + printLocationAndOp(out, exec, location, it, "del_by_id"); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); break; } case op_get_by_val: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; - out.printf("[%4d] get_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + printLocationAndOp(out, exec, location, it, "get_by_val"); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); dumpArrayProfiling(out, it, hasPrintedProfiling); dumpValueProfiling(out, it, hasPrintedProfiling); break; } - case op_get_argument_by_val: { - int r0 = (++it)->u.operand; - int r1 = (++it)->u.operand; - int r2 = (++it)->u.operand; - out.printf("[%4d] get_argument_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); - ++it; - dumpValueProfiling(out, it, hasPrintedProfiling); - break; - } - case op_get_by_pname: { + case op_put_by_val: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; - int r3 = (++it)->u.operand; - int r4 = (++it)->u.operand; - int r5 = (++it)->u.operand; - out.printf("[%4d] get_by_pname\t %s, %s, %s, %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), registerName(exec, r4).data(), registerName(exec, r5).data()); + printLocationAndOp(out, exec, location, it, "put_by_val"); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); + dumpArrayProfiling(out, it, hasPrintedProfiling); break; } - case op_put_by_val: { + case op_put_by_val_direct: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; - out.printf("[%4d] put_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + printLocationAndOp(out, exec, location, it, "put_by_val_direct"); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); dumpArrayProfiling(out, it, hasPrintedProfiling); break; } @@ -1163,19 +1189,22 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; - out.printf("[%4d] del_by_val\t %s, %s, %s", location, 
registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + printLocationAndOp(out, exec, location, it, "del_by_val"); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); break; } case op_put_by_index: { int r0 = (++it)->u.operand; unsigned n0 = (++it)->u.operand; int r1 = (++it)->u.operand; - out.printf("[%4d] put_by_index\t %s, %u, %s", location, registerName(exec, r0).data(), n0, registerName(exec, r1).data()); + printLocationAndOp(out, exec, location, it, "put_by_index"); + out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data()); break; } case op_jmp: { int offset = (++it)->u.operand; - out.printf("[%4d] jmp\t\t %d(->%d)", location, offset, location + offset); + printLocationAndOp(out, exec, location, it, "jmp"); + out.printf("%d(->%d)", offset, location + offset); break; } case op_jtrue: { @@ -1198,258 +1227,452 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio int r0 = (++it)->u.operand; Special::Pointer pointer = (++it)->u.specialPointer; int offset = (++it)->u.operand; - out.printf("[%4d] jneq_ptr\t\t %s, %d (%p), %d(->%d)", location, registerName(exec, r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset); + printLocationAndOp(out, exec, location, it, "jneq_ptr"); + out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset); break; } case op_jless: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - out.printf("[%4d] jless\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + printLocationAndOp(out, exec, location, it, "jless"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); break; } case op_jlesseq: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - out.printf("[%4d] jlesseq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + printLocationAndOp(out, exec, location, it, "jlesseq"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); break; } case op_jgreater: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - out.printf("[%4d] jgreater\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + printLocationAndOp(out, exec, location, it, "jgreater"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); break; } case op_jgreatereq: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - out.printf("[%4d] jgreatereq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + printLocationAndOp(out, exec, location, it, "jgreatereq"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); break; } case op_jnless: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - out.printf("[%4d] jnless\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + 
printLocationAndOp(out, exec, location, it, "jnless"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); break; } case op_jnlesseq: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - out.printf("[%4d] jnlesseq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + printLocationAndOp(out, exec, location, it, "jnlesseq"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); break; } case op_jngreater: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - out.printf("[%4d] jngreater\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + printLocationAndOp(out, exec, location, it, "jngreater"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); break; } case op_jngreatereq: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - out.printf("[%4d] jngreatereq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + printLocationAndOp(out, exec, location, it, "jngreatereq"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); break; } case op_loop_hint: { - out.printf("[%4d] loop_hint", location); + printLocationAndOp(out, exec, location, it, "loop_hint"); + break; + } + case op_watchdog: { + printLocationAndOp(out, exec, location, it, "watchdog"); break; } case op_switch_imm: { int tableIndex = (++it)->u.operand; int defaultTarget = (++it)->u.operand; int scrutineeRegister = (++it)->u.operand; - out.printf("[%4d] switch_imm\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data()); + printLocationAndOp(out, exec, location, it, "switch_imm"); + out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data()); break; } case op_switch_char: { int tableIndex = (++it)->u.operand; int defaultTarget = (++it)->u.operand; int scrutineeRegister = (++it)->u.operand; - out.printf("[%4d] switch_char\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data()); + printLocationAndOp(out, exec, location, it, "switch_char"); + out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data()); break; } case op_switch_string: { int tableIndex = (++it)->u.operand; int defaultTarget = (++it)->u.operand; int scrutineeRegister = (++it)->u.operand; - out.printf("[%4d] switch_string\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data()); + printLocationAndOp(out, exec, location, it, "switch_string"); + out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data()); break; } case op_new_func: { int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; int f0 = (++it)->u.operand; - int shouldCheck = (++it)->u.operand; - out.printf("[%4d] new_func\t\t %s, f%d, %s", location, registerName(exec, r0).data(), f0, shouldCheck ? 
"<Checked>" : "<Unchecked>"); + printLocationAndOp(out, exec, location, it, "new_func"); + out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); break; } - case op_new_func_exp: { + case op_new_generator_func: { int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; int f0 = (++it)->u.operand; - out.printf("[%4d] new_func_exp\t %s, f%d", location, registerName(exec, r0).data(), f0); + printLocationAndOp(out, exec, location, it, "new_generator_func"); + out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); break; } - case op_call: { - printCallOp(out, exec, location, it, "call", DumpCaches); + case op_new_arrow_func_exp: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int f0 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "op_new_arrow_func_exp"); + out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); break; } - case op_call_eval: { - printCallOp(out, exec, location, it, "call_eval", DontDumpCaches); + case op_new_func_exp: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int f0 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "new_func_exp"); + out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); break; } - case op_call_varargs: { - int callee = (++it)->u.operand; - int thisValue = (++it)->u.operand; - int arguments = (++it)->u.operand; - int firstFreeRegister = (++it)->u.operand; - out.printf("[%4d] call_varargs\t %s, %s, %s, %d", location, registerName(exec, callee).data(), registerName(exec, thisValue).data(), registerName(exec, arguments).data(), firstFreeRegister); + case op_new_generator_func_exp: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int f0 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "new_generator_func_exp"); + out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); break; } - case op_tear_off_activation: { - int r0 = (++it)->u.operand; - out.printf("[%4d] tear_off_activation\t %s", location, registerName(exec, r0).data()); + case op_call: { + printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos); break; } - case op_tear_off_arguments: { - int r0 = (++it)->u.operand; - int r1 = (++it)->u.operand; - out.printf("[%4d] tear_off_arguments %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data()); + case op_tail_call: { + printCallOp(out, exec, location, it, "tail_call", DumpCaches, hasPrintedProfiling, callLinkInfos); break; } - case op_ret: { - int r0 = (++it)->u.operand; - out.printf("[%4d] ret\t\t %s", location, registerName(exec, r0).data()); + case op_call_eval: { + printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos); break; } - case op_call_put_result: { - int r0 = (++it)->u.operand; - out.printf("[%4d] call_put_result\t\t %s", location, registerName(exec, r0).data()); + + case op_construct_varargs: + case op_call_varargs: + case op_tail_call_varargs: { + int result = (++it)->u.operand; + int callee = (++it)->u.operand; + int thisValue = (++it)->u.operand; + int arguments = (++it)->u.operand; + int firstFreeRegister = (++it)->u.operand; + int varArgOffset = (++it)->u.operand; + ++it; + printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : opcode == op_construct_varargs ? 
"construct_varargs" : "tail_call_varargs"); + out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset); dumpValueProfiling(out, it, hasPrintedProfiling); break; } - case op_ret_object_or_this: { + + case op_ret: { int r0 = (++it)->u.operand; - int r1 = (++it)->u.operand; - out.printf("[%4d] constructor_ret\t\t %s %s", location, registerName(exec, r0).data(), registerName(exec, r1).data()); + printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0); break; } case op_construct: { - printCallOp(out, exec, location, it, "construct", DumpCaches); + printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos); break; } case op_strcat: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int count = (++it)->u.operand; - out.printf("[%4d] strcat\t\t %s, %s, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), count); + printLocationAndOp(out, exec, location, it, "strcat"); + out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count); break; } case op_to_primitive: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; - out.printf("[%4d] to_primitive\t %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data()); + printLocationAndOp(out, exec, location, it, "to_primitive"); + out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); break; } - case op_get_pnames: { - int r0 = it[1].u.operand; - int r1 = it[2].u.operand; - int r2 = it[3].u.operand; - int r3 = it[4].u.operand; - int offset = it[5].u.operand; - out.printf("[%4d] get_pnames\t %s, %s, %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), offset, location + offset); - it += OPCODE_LENGTH(op_get_pnames) - 1; + case op_get_enumerable_length: { + int dst = it[1].u.operand; + int base = it[2].u.operand; + printLocationAndOp(out, exec, location, it, "op_get_enumerable_length"); + out.printf("%s, %s", registerName(dst).data(), registerName(base).data()); + it += OPCODE_LENGTH(op_get_enumerable_length) - 1; break; } - case op_next_pname: { - int dest = it[1].u.operand; + case op_has_indexed_property: { + int dst = it[1].u.operand; int base = it[2].u.operand; - int i = it[3].u.operand; - int size = it[4].u.operand; - int iter = it[5].u.operand; - int offset = it[6].u.operand; - out.printf("[%4d] next_pname\t %s, %s, %s, %s, %s, %d(->%d)", location, registerName(exec, dest).data(), registerName(exec, base).data(), registerName(exec, i).data(), registerName(exec, size).data(), registerName(exec, iter).data(), offset, location + offset); - it += OPCODE_LENGTH(op_next_pname) - 1; + int propertyName = it[3].u.operand; + ArrayProfile* arrayProfile = it[4].u.arrayProfile; + printLocationAndOp(out, exec, location, it, "op_has_indexed_property"); + out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile); + it += OPCODE_LENGTH(op_has_indexed_property) - 1; + break; + } + case op_has_structure_property: { + int dst = it[1].u.operand; + int base = it[2].u.operand; + int propertyName = it[3].u.operand; + int enumerator = it[4].u.operand; + printLocationAndOp(out, exec, location, it, "op_has_structure_property"); + out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), 
registerName(propertyName).data(), registerName(enumerator).data()); + it += OPCODE_LENGTH(op_has_structure_property) - 1; + break; + } + case op_has_generic_property: { + int dst = it[1].u.operand; + int base = it[2].u.operand; + int propertyName = it[3].u.operand; + printLocationAndOp(out, exec, location, it, "op_has_generic_property"); + out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data()); + it += OPCODE_LENGTH(op_has_generic_property) - 1; + break; + } + case op_get_direct_pname: { + int dst = it[1].u.operand; + int base = it[2].u.operand; + int propertyName = it[3].u.operand; + int index = it[4].u.operand; + int enumerator = it[5].u.operand; + ValueProfile* profile = it[6].u.profile; + printLocationAndOp(out, exec, location, it, "op_get_direct_pname"); + out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile); + it += OPCODE_LENGTH(op_get_direct_pname) - 1; + break; + + } + case op_get_property_enumerator: { + int dst = it[1].u.operand; + int base = it[2].u.operand; + printLocationAndOp(out, exec, location, it, "op_get_property_enumerator"); + out.printf("%s, %s", registerName(dst).data(), registerName(base).data()); + it += OPCODE_LENGTH(op_get_property_enumerator) - 1; + break; + } + case op_enumerator_structure_pname: { + int dst = it[1].u.operand; + int enumerator = it[2].u.operand; + int index = it[3].u.operand; + printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname"); + out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data()); + it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1; + break; + } + case op_enumerator_generic_pname: { + int dst = it[1].u.operand; + int enumerator = it[2].u.operand; + int index = it[3].u.operand; + printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname"); + out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data()); + it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1; + break; + } + case op_to_index_string: { + int dst = it[1].u.operand; + int index = it[2].u.operand; + printLocationAndOp(out, exec, location, it, "op_to_index_string"); + out.printf("%s, %s", registerName(dst).data(), registerName(index).data()); + it += OPCODE_LENGTH(op_to_index_string) - 1; break; } case op_push_with_scope: { - int r0 = (++it)->u.operand; - out.printf("[%4d] push_with_scope\t %s", location, registerName(exec, r0).data()); + int dst = (++it)->u.operand; + int newScope = (++it)->u.operand; + int currentScope = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "push_with_scope"); + out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data()); break; } - case op_pop_scope: { - out.printf("[%4d] pop_scope", location); + case op_get_parent_scope: { + int dst = (++it)->u.operand; + int parentScope = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "get_parent_scope"); + out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data()); break; } - case op_push_name_scope: { - int id0 = (++it)->u.operand; - int r1 = (++it)->u.operand; - unsigned attributes = (++it)->u.operand; - out.printf("[%4d] push_name_scope \t%s, %s, %u", location, idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data(), attributes); + case 
op_create_lexical_environment: { + int dst = (++it)->u.operand; + int scope = (++it)->u.operand; + int symbolTable = (++it)->u.operand; + int initialValue = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "create_lexical_environment"); + out.printf("%s, %s, %s, %s", + registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data()); break; } case op_catch: { int r0 = (++it)->u.operand; - out.printf("[%4d] catch\t\t %s", location, registerName(exec, r0).data()); + int r1 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "catch"); + out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); break; } case op_throw: { int r0 = (++it)->u.operand; - out.printf("[%4d] throw\t\t %s", location, registerName(exec, r0).data()); + printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0); break; } case op_throw_static_error: { int k0 = (++it)->u.operand; int k1 = (++it)->u.operand; - out.printf("[%4d] throw_static_error\t %s, %s", location, constantName(exec, k0, getConstant(k0)).data(), k1 ? "true" : "false"); + printLocationAndOp(out, exec, location, it, "throw_static_error"); + out.printf("%s, %s", constantName(k0).data(), k1 ? "true" : "false"); break; } case op_debug: { int debugHookID = (++it)->u.operand; - int firstLine = (++it)->u.operand; - int lastLine = (++it)->u.operand; - int column = (++it)->u.operand; - out.printf("[%4d] debug\t\t %s, %d, %d, %d", location, debugHookName(debugHookID), firstLine, lastLine, column); + int hasBreakpointFlag = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "debug"); + out.printf("%s, %d", debugHookName(debugHookID), hasBreakpointFlag); + break; + } + case op_save: { + int generator = (++it)->u.operand; + unsigned liveCalleeLocalsIndex = (++it)->u.unsignedValue; + int offset = (++it)->u.operand; + const FastBitVector& liveness = m_rareData->m_liveCalleeLocalsAtYield[liveCalleeLocalsIndex]; + printLocationAndOp(out, exec, location, it, "save"); + out.printf("%s, ", registerName(generator).data()); + liveness.dump(out); + out.printf("(@live%1u), %d(->%d)", liveCalleeLocalsIndex, offset, location + offset); + break; + } + case op_resume: { + int generator = (++it)->u.operand; + unsigned liveCalleeLocalsIndex = (++it)->u.unsignedValue; + const FastBitVector& liveness = m_rareData->m_liveCalleeLocalsAtYield[liveCalleeLocalsIndex]; + printLocationAndOp(out, exec, location, it, "resume"); + out.printf("%s, ", registerName(generator).data()); + liveness.dump(out); + out.printf("(@live%1u)", liveCalleeLocalsIndex); + break; + } + case op_assert: { + int condition = (++it)->u.operand; + int line = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "assert"); + out.printf("%s, %d", registerName(condition).data(), line); break; } case op_profile_will_call: { int function = (++it)->u.operand; - out.printf("[%4d] profile_will_call %s", location, registerName(exec, function).data()); + printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function); break; } case op_profile_did_call: { int function = (++it)->u.operand; - out.printf("[%4d] profile_did_call\t %s", location, registerName(exec, function).data()); + printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function); break; } case op_end: { int r0 = (++it)->u.operand; - out.printf("[%4d] end\t\t %s", location, registerName(exec, r0).data()); + printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0); + 
        break;
+    }
+    case op_resolve_scope: {
+        int r0 = (++it)->u.operand;
+        int scope = (++it)->u.operand;
+        int id0 = (++it)->u.operand;
+        ResolveType resolveType = static_cast<ResolveType>((++it)->u.operand);
+        int depth = (++it)->u.operand;
+        void* pointer = (++it)->u.pointer;
+        printLocationAndOp(out, exec, location, it, "resolve_scope");
+        out.printf("%s, %s, %s, <%s>, %d, %p", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(), resolveTypeName(resolveType), depth, pointer);
+        break;
+    }
+    case op_get_from_scope: {
+        int r0 = (++it)->u.operand;
+        int r1 = (++it)->u.operand;
+        int id0 = (++it)->u.operand;
+        GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
+        ++it; // Structure
+        int operand = (++it)->u.operand; // Operand
+        printLocationAndOp(out, exec, location, it, "get_from_scope");
+        out.print(registerName(r0), ", ", registerName(r1));
+        if (static_cast<unsigned>(id0) == UINT_MAX)
+            out.print(", anonymous");
+        else
+            out.print(", ", idName(id0, identifier(id0)));
+        out.print(", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, ", operand);
+        dumpValueProfiling(out, it, hasPrintedProfiling);
+        break;
+    }
+    case op_put_to_scope: {
+        int r0 = (++it)->u.operand;
+        int id0 = (++it)->u.operand;
+        int r1 = (++it)->u.operand;
+        GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
+        ++it; // Structure
+        int operand = (++it)->u.operand; // Operand
+        printLocationAndOp(out, exec, location, it, "put_to_scope");
+        out.print(registerName(r0));
+        if (static_cast<unsigned>(id0) == UINT_MAX)
+            out.print(", anonymous");
+        else
+            out.print(", ", idName(id0, identifier(id0)));
+        out.print(", ", registerName(r1), ", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, <structure>, ", operand);
+        break;
+    }
+    case op_get_from_arguments: {
+        int r0 = (++it)->u.operand;
+        int r1 = (++it)->u.operand;
+        int offset = (++it)->u.operand;
+        printLocationAndOp(out, exec, location, it, "get_from_arguments");
+        out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
+        dumpValueProfiling(out, it, hasPrintedProfiling);
+        break;
+    }
+    case op_put_to_arguments: {
+        int r0 = (++it)->u.operand;
+        int offset = (++it)->u.operand;
+        int r1 = (++it)->u.operand;
+        printLocationAndOp(out, exec, location, it, "put_to_arguments");
+        out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
        break;
    }
-#if ENABLE(LLINT_C_LOOP)
    default:
        RELEASE_ASSERT_NOT_REACHED();
-#endif
    }

-#if ENABLE(VALUE_PROFILER)
    dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
-    dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
-#endif
+    dumpResultProfile(out, resultProfileForBytecodeOffset(location), hasPrintedProfiling);

#if ENABLE(DFG_JIT)
-    Vector<FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
+    Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
    if (!exitSites.isEmpty()) {
        out.print(" !!
frequent exits: "); CommaPrinter comma; for (unsigned i = 0; i < exitSites.size(); ++i) - out.print(comma, exitSites[i].kind()); + out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType()); } #else // ENABLE(DFG_JIT) UNUSED_PARAM(location); @@ -1457,21 +1680,17 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio out.print("\n"); } -void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset) +void CodeBlock::dumpBytecode( + PrintStream& out, unsigned bytecodeOffset, + const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos) { ExecState* exec = m_globalObject->globalExec(); const Instruction* it = instructions().begin() + bytecodeOffset; - dumpBytecode(out, exec, instructions().begin(), it); + dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos); } -#if DUMP_CODE_BLOCK_STATISTICS -static HashSet<CodeBlock*> liveCodeBlockSet; -#endif - #define FOR_EACH_MEMBER_VECTOR(macro) \ macro(instructions) \ - macro(globalResolveInfos) \ - macro(structureStubInfos) \ macro(callLinkInfos) \ macro(linkedCallerList) \ macro(identifiers) \ @@ -1482,8 +1701,7 @@ static HashSet<CodeBlock*> liveCodeBlockSet; macro(regexps) \ macro(functions) \ macro(exceptionHandlers) \ - macro(immediateSwitchJumpTables) \ - macro(characterSwitchJumpTables) \ + macro(switchJumpTables) \ macro(stringSwitchJumpTables) \ macro(evalCodeCache) \ macro(expressionInfo) \ @@ -1496,131 +1714,76 @@ static size_t sizeInBytes(const Vector<T>& vector) return vector.capacity() * sizeof(T); } -void CodeBlock::dumpStatistics() -{ -#if DUMP_CODE_BLOCK_STATISTICS - #define DEFINE_VARS(name) size_t name##IsNotEmpty = 0; size_t name##TotalSize = 0; - FOR_EACH_MEMBER_VECTOR(DEFINE_VARS) - FOR_EACH_MEMBER_VECTOR_RARE_DATA(DEFINE_VARS) - #undef DEFINE_VARS - - // Non-vector data members - size_t evalCodeCacheIsNotEmpty = 0; - - size_t symbolTableIsNotEmpty = 0; - size_t symbolTableTotalSize = 0; +namespace { - size_t hasRareData = 0; - - size_t isFunctionCode = 0; - size_t isGlobalCode = 0; - size_t isEvalCode = 0; - - HashSet<CodeBlock*>::const_iterator end = liveCodeBlockSet.end(); - for (HashSet<CodeBlock*>::const_iterator it = liveCodeBlockSet.begin(); it != end; ++it) { - CodeBlock* codeBlock = *it; - - #define GET_STATS(name) if (!codeBlock->m_##name.isEmpty()) { name##IsNotEmpty++; name##TotalSize += sizeInBytes(codeBlock->m_##name); } - FOR_EACH_MEMBER_VECTOR(GET_STATS) - #undef GET_STATS - - if (codeBlock->symbolTable() && !codeBlock->symbolTable()->isEmpty()) { - symbolTableIsNotEmpty++; - symbolTableTotalSize += (codeBlock->symbolTable()->capacity() * (sizeof(SymbolTable::KeyType) + sizeof(SymbolTable::MappedType))); - } - - if (codeBlock->m_rareData) { - hasRareData++; - #define GET_STATS(name) if (!codeBlock->m_rareData->m_##name.isEmpty()) { name##IsNotEmpty++; name##TotalSize += sizeInBytes(codeBlock->m_rareData->m_##name); } - FOR_EACH_MEMBER_VECTOR_RARE_DATA(GET_STATS) - #undef GET_STATS - - if (!codeBlock->m_rareData->m_evalCodeCache.isEmpty()) - evalCodeCacheIsNotEmpty++; - } - - switch (codeBlock->codeType()) { - case FunctionCode: - ++isFunctionCode; - break; - case GlobalCode: - ++isGlobalCode; - break; - case EvalCode: - ++isEvalCode; - break; - } +class PutToScopeFireDetail : public FireDetail { +public: + PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident) + : m_codeBlock(codeBlock) + , m_ident(ident) + { } + + virtual void dump(PrintStream& out) const override + { + out.print("Linking put_to_scope in ", 
FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident); + } + +private: + CodeBlock* m_codeBlock; + const Identifier& m_ident; +}; - size_t totalSize = 0; - - #define GET_TOTAL_SIZE(name) totalSize += name##TotalSize; - FOR_EACH_MEMBER_VECTOR(GET_TOTAL_SIZE) - FOR_EACH_MEMBER_VECTOR_RARE_DATA(GET_TOTAL_SIZE) - #undef GET_TOTAL_SIZE - - totalSize += symbolTableTotalSize; - totalSize += (liveCodeBlockSet.size() * sizeof(CodeBlock)); - - dataLogF("Number of live CodeBlocks: %d\n", liveCodeBlockSet.size()); - dataLogF("Size of a single CodeBlock [sizeof(CodeBlock)]: %zu\n", sizeof(CodeBlock)); - dataLogF("Size of all CodeBlocks: %zu\n", totalSize); - dataLogF("Average size of a CodeBlock: %zu\n", totalSize / liveCodeBlockSet.size()); - - dataLogF("Number of FunctionCode CodeBlocks: %zu (%.3f%%)\n", isFunctionCode, static_cast<double>(isFunctionCode) * 100.0 / liveCodeBlockSet.size()); - dataLogF("Number of GlobalCode CodeBlocks: %zu (%.3f%%)\n", isGlobalCode, static_cast<double>(isGlobalCode) * 100.0 / liveCodeBlockSet.size()); - dataLogF("Number of EvalCode CodeBlocks: %zu (%.3f%%)\n", isEvalCode, static_cast<double>(isEvalCode) * 100.0 / liveCodeBlockSet.size()); - - dataLogF("Number of CodeBlocks with rare data: %zu (%.3f%%)\n", hasRareData, static_cast<double>(hasRareData) * 100.0 / liveCodeBlockSet.size()); - - #define PRINT_STATS(name) dataLogF("Number of CodeBlocks with " #name ": %zu\n", name##IsNotEmpty); dataLogF("Size of all " #name ": %zu\n", name##TotalSize); - FOR_EACH_MEMBER_VECTOR(PRINT_STATS) - FOR_EACH_MEMBER_VECTOR_RARE_DATA(PRINT_STATS) - #undef PRINT_STATS - - dataLogF("Number of CodeBlocks with evalCodeCache: %zu\n", evalCodeCacheIsNotEmpty); - dataLogF("Number of CodeBlocks with symbolTable: %zu\n", symbolTableIsNotEmpty); - - dataLogF("Size of all symbolTables: %zu\n", symbolTableTotalSize); - -#else - dataLogF("Dumping CodeBlock statistics is not enabled.\n"); -#endif -} +} // anonymous namespace -CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other) - : m_globalObject(other.m_globalObject) - , m_heap(other.m_heap) - , m_numCalleeRegisters(other.m_numCalleeRegisters) +CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other) + : JSCell(*vm, structure) + , m_globalObject(other.m_globalObject) + , m_numCalleeLocals(other.m_numCalleeLocals) , m_numVars(other.m_numVars) + , m_shouldAlwaysBeInlined(true) +#if ENABLE(JIT) + , m_capabilityLevelState(DFG::CapabilityLevelNotSet) +#endif + , m_didFailFTLCompilation(false) + , m_hasBeenCompiledWithFTL(false) , m_isConstructor(other.m_isConstructor) - , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get()) - , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get()) + , m_isStrictMode(other.m_isStrictMode) + , m_codeType(other.m_codeType) + , m_unlinkedCode(*other.m_vm, this, other.m_unlinkedCode.get()) + , m_hasDebuggerStatement(false) + , m_steppingMode(SteppingModeDisabled) + , m_numBreakpoints(0) + , m_ownerExecutable(*other.m_vm, this, other.m_ownerExecutable.get()) , m_vm(other.m_vm) , m_instructions(other.m_instructions) , m_thisRegister(other.m_thisRegister) - , m_argumentsRegister(other.m_argumentsRegister) - , m_activationRegister(other.m_activationRegister) - , m_isStrictMode(other.m_isStrictMode) - , m_needsActivation(other.m_needsActivation) + , m_scopeRegister(other.m_scopeRegister) + , m_hash(other.m_hash) , m_source(other.m_source) , 
m_sourceOffset(other.m_sourceOffset) , m_firstLineColumnOffset(other.m_firstLineColumnOffset) - , m_codeType(other.m_codeType) - , m_identifiers(other.m_identifiers) , m_constantRegisters(other.m_constantRegisters) + , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation) , m_functionDecls(other.m_functionDecls) , m_functionExprs(other.m_functionExprs) , m_osrExitCounter(0) , m_optimizationDelayCounter(0) , m_reoptimizationRetryCounter(0) - , m_resolveOperations(other.m_resolveOperations) - , m_putToBaseOperations(other.m_putToBaseOperations) -#if ENABLE(JIT) - , m_canCompileWithDFGState(DFG::CapabilityLevelNotSet) -#endif + , m_creationTime(std::chrono::steady_clock::now()) { + m_visitWeaklyHasBeenCalled.store(false, std::memory_order_relaxed); + + ASSERT(heap()->isDeferred()); + ASSERT(m_scopeRegister.isLocal()); + setNumParameters(other.numParameters()); +} + +void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other) +{ + Base::finishCreation(vm); + optimizeAfterWarmUp(); jitAfterWarmUp(); @@ -1629,73 +1792,123 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other) m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers; m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers; - m_rareData->m_immediateSwitchJumpTables = other.m_rareData->m_immediateSwitchJumpTables; - m_rareData->m_characterSwitchJumpTables = other.m_rareData->m_characterSwitchJumpTables; + m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables; m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables; + m_rareData->m_liveCalleeLocalsAtYield = other.m_rareData->m_liveCalleeLocalsAtYield; } + + heap()->m_codeBlocks.add(this); } -CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, unsigned baseScopeDepth, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative) - : m_globalObject(globalObject->vm(), ownerExecutable, globalObject) - , m_heap(&m_globalObject->vm().heap) - , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters) +CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, + JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset) + : JSCell(*vm, structure) + , m_globalObject(scope->globalObject()->vm(), this, scope->globalObject()) + , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals) , m_numVars(unlinkedCodeBlock->m_numVars) + , m_shouldAlwaysBeInlined(true) +#if ENABLE(JIT) + , m_capabilityLevelState(DFG::CapabilityLevelNotSet) +#endif + , m_didFailFTLCompilation(false) + , m_hasBeenCompiledWithFTL(false) , m_isConstructor(unlinkedCodeBlock->isConstructor()) - , m_unlinkedCode(globalObject->vm(), ownerExecutable, unlinkedCodeBlock) - , m_ownerExecutable(globalObject->vm(), ownerExecutable, ownerExecutable) + , m_isStrictMode(unlinkedCodeBlock->isStrictMode()) + , m_codeType(unlinkedCodeBlock->codeType()) + , m_unlinkedCode(m_globalObject->vm(), this, unlinkedCodeBlock) + , m_hasDebuggerStatement(false) + , m_steppingMode(SteppingModeDisabled) + , m_numBreakpoints(0) + , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable) , m_vm(unlinkedCodeBlock->vm()) , m_thisRegister(unlinkedCodeBlock->thisRegister()) - , m_argumentsRegister(unlinkedCodeBlock->argumentsRegister()) - , 
m_activationRegister(unlinkedCodeBlock->activationRegister()) - , m_isStrictMode(unlinkedCodeBlock->isStrictMode()) - , m_needsActivation(unlinkedCodeBlock->needsFullScopeChain()) + , m_scopeRegister(unlinkedCodeBlock->scopeRegister()) , m_source(sourceProvider) , m_sourceOffset(sourceOffset) , m_firstLineColumnOffset(firstLineColumnOffset) - , m_codeType(unlinkedCodeBlock->codeType()) - , m_alternative(alternative) , m_osrExitCounter(0) , m_optimizationDelayCounter(0) , m_reoptimizationRetryCounter(0) + , m_creationTime(std::chrono::steady_clock::now()) { - m_vm->startedCompiling(this); + m_visitWeaklyHasBeenCalled.store(false, std::memory_order_relaxed); + + ASSERT(heap()->isDeferred()); + ASSERT(m_scopeRegister.isLocal()); ASSERT(m_source); setNumParameters(unlinkedCodeBlock->numParameters()); +} -#if DUMP_CODE_BLOCK_STATISTICS - liveCodeBlockSet.add(this); -#endif - setIdentifiers(unlinkedCodeBlock->identifiers()); - setConstantRegisters(unlinkedCodeBlock->constantRegisters()); +void CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, + JSScope* scope) +{ + Base::finishCreation(vm); + + if (vm.typeProfiler() || vm.controlFlowProfiler()) + vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset()); + + setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation()); if (unlinkedCodeBlock->usesGlobalObject()) - m_constantRegisters[unlinkedCodeBlock->globalObjectRegister()].set(*m_vm, ownerExecutable, globalObject); - m_functionDecls.grow(unlinkedCodeBlock->numberOfFunctionDecls()); + m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, this, m_globalObject.get()); + + for (unsigned i = 0; i < LinkTimeConstantCount; i++) { + LinkTimeConstant type = static_cast<LinkTimeConstant>(i); + if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type)) + m_constantRegisters[registerIndex].set(*m_vm, this, m_globalObject->jsCellForLinkTimeConstant(type)); + } + +#if !ASSERT_DISABLED + HashSet<int, WTF::IntHash<int>, WTF::UnsignedWithZeroKeyHashTraits<int>> clonedConstantSymbolTables; +#endif + { +#if !ASSERT_DISABLED + HashSet<SymbolTable*> clonedSymbolTables; +#endif + bool hasTypeProfiler = !!vm.typeProfiler(); + for (unsigned i = 0; i < m_constantRegisters.size(); i++) { + if (m_constantRegisters[i].get().isEmpty()) + continue; + if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(m_constantRegisters[i].get())) { + ASSERT(clonedSymbolTables.add(symbolTable).isNewEntry); + if (hasTypeProfiler) { + ConcurrentJITLocker locker(symbolTable->m_lock); + symbolTable->prepareForTypeProfiling(locker); + } + m_constantRegisters[i].set(*m_vm, this, symbolTable->cloneScopePart(*m_vm)); +#if !ASSERT_DISABLED + clonedConstantSymbolTables.add(i + FirstConstantRegisterIndex); +#endif + } + } + } + + // We already have the cloned symbol table for the module environment since we need to instantiate + // the module environments before linking the code block. We replace the stored symbol table with the already cloned one. 
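// The cloning just described can be pictured with a minimal stand-in (the names
// MiniSymbolTable and cloneConstantSymbolTables below are illustrative, not JSC API):
// the unlinked code block is shared by every function compiled from the same source,
// so each linked CodeBlock must clone the SymbolTables it finds in its constant pool
// before it may mutate them (e.g. to prepare them for type profiling).

#include <map>
#include <memory>
#include <string>
#include <vector>

struct MiniSymbolTable {
    std::map<std::string, int> offsets; // the "scope part": name -> slot
    std::unique_ptr<MiniSymbolTable> cloneScopePart() const
    {
        return std::make_unique<MiniSymbolTable>(*this);
    }
};

void cloneConstantSymbolTables(std::vector<MiniSymbolTable*>& constantPool,
    std::vector<std::unique_ptr<MiniSymbolTable>>& ownedClones)
{
    for (auto*& entry : constantPool) {
        if (!entry)
            continue; // non-symbol-table constants stay as they are
        ownedClones.push_back(entry->cloneScopePart());
        entry = ownedClones.back().get(); // this block now refers to its own copy
    }
}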
+ if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(unlinkedCodeBlock)) { + SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable(); + if (m_vm->typeProfiler()) { + ConcurrentJITLocker locker(clonedSymbolTable->m_lock); + clonedSymbolTable->prepareForTypeProfiling(locker); + } + replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable); + } + + bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler(); + m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls()); for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) { UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i); - unsigned lineCount = unlinkedExecutable->lineCount(); - unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset(); - unsigned startColumn = unlinkedExecutable->functionStartColumn(); - startColumn += (unlinkedExecutable->firstLineOffset() ? 1 : ownerExecutable->startColumn()); - unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset(); - unsigned sourceLength = unlinkedExecutable->sourceLength(); - SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn); - FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn); - m_functionDecls[i].set(*m_vm, ownerExecutable, executable); - } - - m_functionExprs.grow(unlinkedCodeBlock->numberOfFunctionExprs()); + if (shouldUpdateFunctionHasExecutedCache) + vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset()); + m_functionDecls[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source())); + } + + m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs()); for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) { UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i); - unsigned lineCount = unlinkedExecutable->lineCount(); - unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset(); - unsigned startColumn = unlinkedExecutable->functionStartColumn(); - startColumn += (unlinkedExecutable->firstLineOffset() ? 
1 : ownerExecutable->startColumn()); - unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset(); - unsigned sourceLength = unlinkedExecutable->sourceLength(); - SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn); - FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn); - m_functionExprs[i].set(*m_vm, ownerExecutable, executable); + if (shouldUpdateFunctionHasExecutedCache) + vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset()); + m_functionExprs[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source())); } if (unlinkedCodeBlock->hasRareData()) { @@ -1708,15 +1921,14 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin } } if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) { - m_rareData->m_exceptionHandlers.grow(count); + m_rareData->m_exceptionHandlers.resizeToFit(count); for (size_t i = 0; i < count; i++) { - const UnlinkedHandlerInfo& handler = unlinkedCodeBlock->exceptionHandler(i); - m_rareData->m_exceptionHandlers[i].start = handler.start; - m_rareData->m_exceptionHandlers[i].end = handler.end; - m_rareData->m_exceptionHandlers[i].target = handler.target; - m_rareData->m_exceptionHandlers[i].scopeDepth = handler.scopeDepth + baseScopeDepth; -#if ENABLE(JIT) && ENABLE(LLINT) - m_rareData->m_exceptionHandlers[i].nativeCode = CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(llint_op_catch))); + const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i); + HandlerInfo& handler = m_rareData->m_exceptionHandlers[i]; +#if ENABLE(JIT) + handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch)))); +#else + handler.initialize(unlinkedHandler); #endif } } @@ -1734,21 +1946,11 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin } } - if (size_t count = unlinkedCodeBlock->numberOfImmediateSwitchJumpTables()) { - m_rareData->m_immediateSwitchJumpTables.grow(count); - for (size_t i = 0; i < count; i++) { - UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->immediateSwitchJumpTable(i); - SimpleJumpTable& destTable = m_rareData->m_immediateSwitchJumpTables[i]; - destTable.branchOffsets = sourceTable.branchOffsets; - destTable.min = sourceTable.min; - } - } - - if (size_t count = unlinkedCodeBlock->numberOfCharacterSwitchJumpTables()) { - m_rareData->m_characterSwitchJumpTables.grow(count); + if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) { + m_rareData->m_switchJumpTables.grow(count); for (size_t i = 0; i < count; i++) { - UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->characterSwitchJumpTable(i); - SimpleJumpTable& destTable = m_rareData->m_characterSwitchJumpTables[i]; + UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i); + SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i]; destTable.branchOffsets = sourceTable.branchOffsets; destTable.min = sourceTable.min; } @@ -1756,62 +1958,79 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin } // Allocate metadata buffers for the bytecode -#if ENABLE(LLINT) if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos()) - m_llintCallLinkInfos.grow(size); 
-#endif -#if ENABLE(DFG_JIT) + m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size); if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles()) m_arrayProfiles.grow(size); if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles()) - m_arrayAllocationProfiles.grow(size); + m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size); if (size_t size = unlinkedCodeBlock->numberOfValueProfiles()) - m_valueProfiles.grow(size); -#endif + m_valueProfiles = RefCountedArray<ValueProfile>(size); if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles()) - m_objectAllocationProfiles.grow(size); - if (size_t size = unlinkedCodeBlock->numberOfResolveOperations()) - m_resolveOperations.grow(size); - if (size_t putToBaseCount = unlinkedCodeBlock->numberOfPutToBaseOperations()) { - m_putToBaseOperations.reserveInitialCapacity(putToBaseCount); - for (size_t i = 0; i < putToBaseCount; ++i) - m_putToBaseOperations.uncheckedAppend(PutToBaseOperation(isStrictMode())); - } + m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size); + +#if ENABLE(JIT) + setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters()); +#endif // Copy and translate the UnlinkedInstructions - size_t instructionCount = unlinkedCodeBlock->instructions().size(); - UnlinkedInstruction* pc = unlinkedCodeBlock->instructions().data(); - Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount); - for (size_t i = 0; i < unlinkedCodeBlock->instructions().size(); ) { - unsigned opLength = opcodeLength(pc[i].u.opcode); - instructions[i] = vm()->interpreter->getOpcode(pc[i].u.opcode); + unsigned instructionCount = unlinkedCodeBlock->instructions().count(); + UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions()); + + // Bookkeep the strongly referenced module environments. + HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments; + + // Bookkeep the merge point bytecode offsets. 
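// The bookkeeping announced above reduces to a small pattern (recordMergePoint is a
// hypothetical helper, not JSC code): while translating, each op_save names a yield
// point and the bytecode offset where control merges back in, and that offset is
// remembered so liveness can be computed there once the instruction stream is final.

#include <cstddef>
#include <vector>

void recordMergePoint(std::vector<std::size_t>& mergePointOffsets,
    std::size_t liveCalleeLocalsIndex, std::size_t bytecodeOffset)
{
    if (liveCalleeLocalsIndex >= mergePointOffsets.size())
        mergePointOffsets.resize(liveCalleeLocalsIndex + 1); // indices may arrive out of order
    mergePointOffsets[liveCalleeLocalsIndex] = bytecodeOffset;
}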
+ Vector<size_t> mergePointBytecodeOffsets; + + RefCountedArray<Instruction> instructions(instructionCount); + + for (unsigned i = 0; !instructionReader.atEnd(); ) { + const UnlinkedInstruction* pc = instructionReader.next(); + + unsigned opLength = opcodeLength(pc[0].u.opcode); + + instructions[i] = vm.interpreter->getOpcode(pc[0].u.opcode); for (size_t j = 1; j < opLength; ++j) { if (sizeof(int32_t) != sizeof(intptr_t)) instructions[i + j].u.pointer = 0; - instructions[i + j].u.operand = pc[i + j].u.operand; + instructions[i + j].u.operand = pc[j].u.operand; } - switch (pc[i].u.opcode) { -#if ENABLE(DFG_JIT) - case op_get_by_val: - case op_get_argument_by_val: { - int arrayProfileIndex = pc[i + opLength - 2].u.operand; + switch (pc[0].u.opcode) { + case op_has_indexed_property: { + int arrayProfileIndex = pc[opLength - 1].u.operand; + m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); + + instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; + break; + } + case op_call_varargs: + case op_tail_call_varargs: + case op_construct_varargs: + case op_get_by_val: { + int arrayProfileIndex = pc[opLength - 2].u.operand; m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; - // fallthrough + FALLTHROUGH; } - case op_convert_this: + case op_get_direct_pname: case op_get_by_id: - case op_call_put_result: - case op_get_callee: { - ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand]; + case op_get_from_arguments: { + ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand]; ASSERT(profile->m_bytecodeOffset == -1); profile->m_bytecodeOffset = i; instructions[i + opLength - 1] = profile; break; } case op_put_by_val: { - int arrayProfileIndex = pc[i + opLength - 1].u.operand; + int arrayProfileIndex = pc[opLength - 1].u.operand; + m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); + instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; + break; + } + case op_put_by_val_direct: { + int arrayProfileIndex = pc[opLength - 1].u.operand; m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; break; @@ -1820,138 +2039,246 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin case op_new_array: case op_new_array_buffer: case op_new_array_with_size: { - int arrayAllocationProfileIndex = pc[i + opLength - 1].u.operand; + int arrayAllocationProfileIndex = pc[opLength - 1].u.operand; instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex]; break; } -#endif - case op_resolve_base: - case op_resolve_base_to_global: - case op_resolve_base_to_global_dynamic: - case op_resolve_base_to_scope: - case op_resolve_base_to_scope_with_top_scope_check: { - instructions[i + 4].u.resolveOperations = &m_resolveOperations[pc[i + 4].u.operand]; - instructions[i + 5].u.putToBaseOperation = &m_putToBaseOperations[pc[i + 5].u.operand]; -#if ENABLE(DFG_JIT) - ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand]; - ASSERT(profile->m_bytecodeOffset == -1); - profile->m_bytecodeOffset = i; - ASSERT((opLength - 1) > 5); - instructions[i + opLength - 1] = profile; -#endif - break; - } - case op_resolve_global_property: - case op_resolve_global_var: - case op_resolve_scoped_var: - case op_resolve_scoped_var_on_top_scope: - case op_resolve_scoped_var_with_top_scope_check: { - instructions[i + 3].u.resolveOperations = &m_resolveOperations[pc[i + 
3].u.operand]; - break; - } - case op_put_to_base: - case op_put_to_base_variable: { - instructions[i + 4].u.putToBaseOperation = &m_putToBaseOperations[pc[i + 4].u.operand]; + case op_new_object: { + int objectAllocationProfileIndex = pc[opLength - 1].u.operand; + ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex]; + int inferredInlineCapacity = pc[opLength - 2].u.operand; + + instructions[i + opLength - 1] = objectAllocationProfile; + objectAllocationProfile->initialize(vm, + this, m_globalObject->objectPrototype(), inferredInlineCapacity); break; } - case op_resolve: { -#if ENABLE(DFG_JIT) - ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand]; + + case op_call: + case op_tail_call: + case op_call_eval: { + ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand]; ASSERT(profile->m_bytecodeOffset == -1); profile->m_bytecodeOffset = i; - ASSERT((opLength - 1) > 3); instructions[i + opLength - 1] = profile; -#endif - instructions[i + 3].u.resolveOperations = &m_resolveOperations[pc[i + 3].u.operand]; + int arrayProfileIndex = pc[opLength - 2].u.operand; + m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); + instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; + instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; break; } - case op_resolve_with_base: - case op_resolve_with_this: { - instructions[i + 4].u.resolveOperations = &m_resolveOperations[pc[i + 4].u.operand]; - if (pc[i].u.opcode != op_resolve_with_this) - instructions[i + 5].u.putToBaseOperation = &m_putToBaseOperations[pc[i + 5].u.operand]; -#if ENABLE(DFG_JIT) - ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand]; + case op_construct: { + instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; + ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand]; ASSERT(profile->m_bytecodeOffset == -1); profile->m_bytecodeOffset = i; instructions[i + opLength - 1] = profile; -#endif break; } - case op_new_object: { - int objectAllocationProfileIndex = pc[i + opLength - 1].u.operand; - ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex]; - int inferredInlineCapacity = pc[i + opLength - 2].u.operand; + case op_get_array_length: + CRASH(); - instructions[i + opLength - 1] = objectAllocationProfile; - objectAllocationProfile->initialize(*vm(), - m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity); +#if !ASSERT_DISABLED + case op_create_lexical_environment: { + int symbolTableIndex = pc[3].u.operand; + ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex)); break; } +#endif - case op_get_scoped_var: { -#if ENABLE(DFG_JIT) - ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand]; + case op_resolve_scope: { + const Identifier& ident = identifier(pc[3].u.operand); + ResolveType type = static_cast<ResolveType>(pc[4].u.operand); + RELEASE_ASSERT(type != LocalClosureVar); + int localScopeDepth = pc[5].u.operand; + + ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, NotInitialization); + instructions[i + 4].u.operand = op.type; + instructions[i + 5].u.operand = op.depth; + if (op.lexicalEnvironment) { + if (op.type == ModuleVar) { + // Keep the linked module environment strongly referenced. 
+ if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry) + addConstant(op.lexicalEnvironment); + instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment); + } else + instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable()); + } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) + instructions[i + 6].u.jsCell.set(vm, this, constantScope); + else + instructions[i + 6].u.pointer = nullptr; + break; + } + + case op_get_from_scope: { + ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand]; ASSERT(profile->m_bytecodeOffset == -1); profile->m_bytecodeOffset = i; instructions[i + opLength - 1] = profile; -#endif - break; - } - case op_call: - case op_call_eval: { -#if ENABLE(DFG_JIT) - int arrayProfileIndex = pc[i + opLength - 1].u.operand; - m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); - instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; -#endif -#if ENABLE(LLINT) - instructions[i + 4] = &m_llintCallLinkInfos[pc[i + 4].u.operand]; -#endif - break; - } - case op_construct: -#if ENABLE(LLINT) - instructions[i + 4] = &m_llintCallLinkInfos[pc[i + 4].u.operand]; -#endif - break; - case op_get_by_id_out_of_line: - case op_get_by_id_self: - case op_get_by_id_proto: - case op_get_by_id_chain: - case op_get_by_id_getter_self: - case op_get_by_id_getter_proto: - case op_get_by_id_getter_chain: - case op_get_by_id_custom_self: - case op_get_by_id_custom_proto: - case op_get_by_id_custom_chain: - case op_get_by_id_generic: - case op_get_array_length: - case op_get_string_length: - CRASH(); + // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand + + int localScopeDepth = pc[5].u.operand; + instructions[i + 5].u.pointer = nullptr; - case op_init_global_const_nop: { - ASSERT(codeType() == GlobalCode); - Identifier ident = identifier(pc[i + 4].u.operand); - SymbolTableEntry entry = globalObject->symbolTable()->get(ident.impl()); - if (entry.isNull()) + GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); + ASSERT(getPutInfo.initializationMode() == NotInitialization); + if (getPutInfo.resolveType() == LocalClosureVar) { + instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand(); break; + } - if (entry.couldBeWatched()) { - instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const_check); - instructions[i + 1] = &globalObject->registerAt(entry.getIndex()); - instructions[i + 3] = entry.addressOfIsWatched(); + const Identifier& ident = identifier(pc[3].u.operand); + ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), NotInitialization); + + instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand(); + if (op.type == ModuleVar) + instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand(); + if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) + instructions[i + 5].u.watchpointSet = op.watchpointSet; + else if (op.structure) + instructions[i + 5].u.structure.set(vm, this, op.structure); + instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand); + break; + } + + case op_put_to_scope: { + // put_to_scope scope, id, value, GetPutInfo, Structure, Operand + 
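// Both get_from_scope and put_to_scope squeeze three fields into the single GetPutInfo
// operand decoded here. A self-contained sketch of such a packed word follows; the bit
// layout and field widths are assumptions for illustration, not JSC's actual encoding.
// What matters is that one 32-bit operand round-trips all three fields.

#include <cstdint>

enum class ResolveMode : uint32_t { ThrowIfNotFound, DoNotThrowIfNotFound };
enum class ResolveType : uint32_t { GlobalVar, ClosureVar, ModuleVar, Dynamic };
enum class InitializationMode : uint32_t { Initialization, NotInitialization };

class PackedGetPutInfo {
public:
    PackedGetPutInfo(ResolveMode mode, ResolveType type, InitializationMode init)
        : m_operand((uint32_t(mode) << 16) | (uint32_t(type) << 8) | uint32_t(init)) { }
    explicit PackedGetPutInfo(uint32_t operand) : m_operand(operand) { }

    ResolveMode resolveMode() const { return ResolveMode(m_operand >> 16); }
    ResolveType resolveType() const { return ResolveType((m_operand >> 8) & 0xff); }
    InitializationMode initializationMode() const { return InitializationMode(m_operand & 0xff); }
    uint32_t operand() const { return m_operand; }

private:
    uint32_t m_operand;
};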
GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); + if (getPutInfo.resolveType() == LocalClosureVar) { + // Only do watching if the property we're putting to is not anonymous. + if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) { + int symbolTableIndex = pc[5].u.operand; + ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex)); + SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); + const Identifier& ident = identifier(pc[2].u.operand); + ConcurrentJITLocker locker(symbolTable->m_lock); + auto iter = symbolTable->find(locker, ident.impl()); + ASSERT(iter != symbolTable->end(locker)); + iter->value.prepareToWatch(); + instructions[i + 5].u.watchpointSet = iter->value.watchpointSet(); + } else + instructions[i + 5].u.watchpointSet = nullptr; break; } - instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const); - instructions[i + 1] = &globalObject->registerAt(entry.getIndex()); + const Identifier& ident = identifier(pc[2].u.operand); + int localScopeDepth = pc[5].u.operand; + instructions[i + 5].u.pointer = nullptr; + ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode()); + + instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand(); + if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) + instructions[i + 5].u.watchpointSet = op.watchpointSet; + else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) { + if (op.watchpointSet) + op.watchpointSet->invalidate(PutToScopeFireDetail(this, ident)); + } else if (op.structure) + instructions[i + 5].u.structure.set(vm, this, op.structure); + instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand); + + break; + } + + case op_profile_type: { + RELEASE_ASSERT(vm.typeProfiler()); + // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType? + size_t instructionOffset = i + opLength - 1; + unsigned divotStart, divotEnd; + GlobalVariableID globalVariableID = 0; + RefPtr<TypeSet> globalTypeSet; + bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd); + VirtualRegister profileRegister(pc[1].u.operand); + ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand); + SymbolTable* symbolTable = nullptr; + + switch (flag) { + case ProfileTypeBytecodeClosureVar: { + const Identifier& ident = identifier(pc[4].u.operand); + int localScopeDepth = pc[2].u.operand; + ResolveType type = static_cast<ResolveType>(pc[5].u.operand); + // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because + // we're abstractly "read"ing from a JSScope. + ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, NotInitialization); + + if (op.type == ClosureVar || op.type == ModuleVar) + symbolTable = op.lexicalEnvironment->symbolTable(); + else if (op.type == GlobalVar) + symbolTable = m_globalObject.get()->symbolTable(); + + UniquedStringImpl* impl = (op.type == ModuleVar) ? 
op.importedName.get() : ident.impl(); + if (symbolTable) { + ConcurrentJITLocker locker(symbolTable->m_lock); + // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. + symbolTable->prepareForTypeProfiling(locker); + globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm); + globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm); + } else + globalVariableID = TypeProfilerNoGlobalIDExists; + + break; + } + case ProfileTypeBytecodeLocallyResolved: { + int symbolTableIndex = pc[2].u.operand; + ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex)); + SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); + const Identifier& ident = identifier(pc[4].u.operand); + ConcurrentJITLocker locker(symbolTable->m_lock); + // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. + globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm); + globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm); + + break; + } + case ProfileTypeBytecodeDoesNotHaveGlobalID: + case ProfileTypeBytecodeFunctionArgument: { + globalVariableID = TypeProfilerNoGlobalIDExists; + break; + } + case ProfileTypeBytecodeFunctionReturnStatement: { + RELEASE_ASSERT(ownerExecutable->isFunctionExecutable()); + globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet(); + globalVariableID = TypeProfilerReturnStatement; + if (!shouldAnalyze) { + // Because a return statement can be added implicitly to return undefined at the end of a function, + // and these nodes don't emit expression ranges because they aren't in the actual source text of + // the user's program, give the type profiler some range to identify these return statements. + // Currently, the text offset that is used as identification is "f" in the function keyword + // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable. 
+ divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(); + shouldAnalyze = true; + } + break; + } + } + + std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID, + ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, &vm); + TypeLocation* location = locationPair.first; + bool isNewLocation = locationPair.second; + + if (flag == ProfileTypeBytecodeFunctionReturnStatement) + location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(); + + if (shouldAnalyze && isNewLocation) + vm.typeProfiler()->insertNewLocation(location); + + instructions[i + 2].u.location = location; break; } case op_debug: { - instructions[i + 4] = columnNumberForBytecodeOffset(i); + if (pc[1].u.index == DidReachBreakpoint) + m_hasDebuggerStatement = true; + break; + } + + case op_save: { + unsigned liveCalleeLocalsIndex = pc[2].u.index; + int offset = pc[3].u.operand; + if (liveCalleeLocalsIndex >= mergePointBytecodeOffsets.size()) + mergePointBytecodeOffsets.resize(liveCalleeLocalsIndex + 1); + mergePointBytecodeOffsets[liveCalleeLocalsIndex] = i + offset; break; } @@ -1960,7 +2287,25 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin } i += opLength; } - m_instructions = WTF::RefCountedArray<Instruction>(instructions); + + if (vm.controlFlowProfiler()) + insertBasicBlockBoundariesForControlFlowProfiler(instructions); + + m_instructions = WTFMove(instructions); + + // Perform bytecode liveness analysis to determine which locals are live and should be resumed when executing op_resume. + if (unlinkedCodeBlock->parseMode() == SourceParseMode::GeneratorBodyMode) { + if (size_t count = mergePointBytecodeOffsets.size()) { + createRareDataIfNecessary(); + BytecodeLivenessAnalysis liveness(this); + m_rareData->m_liveCalleeLocalsAtYield.grow(count); + size_t liveCalleeLocalsIndex = 0; + for (size_t bytecodeOffset : mergePointBytecodeOffsets) { + m_rareData->m_liveCalleeLocalsAtYield[liveCalleeLocalsIndex] = liveness.getLivenessInfoAtBytecodeOffset(bytecodeOffset); + ++liveCalleeLocalsIndex; + } + } + } // Set optimization thresholds only after m_instructions is initialized, since these // rely on the instruction count (and are in theory permitted to also inspect the @@ -1968,104 +2313,94 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin optimizeAfterWarmUp(); jitAfterWarmUp(); + // If the concurrent thread will want the code block's hash, then compute it here + // synchronously. 
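// The comment above is the whole story: the hash is otherwise computed lazily, so if a
// concurrent compiler thread may ask for it, it is forced once up front. A generic
// compute-once sketch, assuming std::hash as a stand-in for JSC's real CodeBlockHash
// (which is derived from the source code):

#include <functional>
#include <mutex>
#include <string>

class EagerlyComputableHash {
public:
    unsigned get(const std::string& source)
    {
        std::call_once(m_once, [&] {
            m_hash = static_cast<unsigned>(std::hash<std::string>{}(source));
        });
        return m_hash;
    }

private:
    std::once_flag m_once;
    unsigned m_hash { 0 };
};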
+ if (Options::alwaysComputeHash()) + hash(); + if (Options::dumpGeneratedBytecodes()) dumpBytecode(); - m_vm->finishedCompiling(this); + + heap()->m_codeBlocks.add(this); + heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction)); } +#if ENABLE(WEBASSEMBLY) +CodeBlock::CodeBlock(VM* vm, Structure* structure, WebAssemblyExecutable* ownerExecutable, JSGlobalObject* globalObject) + : JSCell(*vm, structure) + , m_globalObject(globalObject->vm(), this, globalObject) + , m_numCalleeLocals(0) + , m_numVars(0) + , m_shouldAlwaysBeInlined(false) +#if ENABLE(JIT) + , m_capabilityLevelState(DFG::CannotCompile) +#endif + , m_didFailFTLCompilation(false) + , m_hasBeenCompiledWithFTL(false) + , m_isConstructor(false) + , m_isStrictMode(false) + , m_codeType(FunctionCode) + , m_hasDebuggerStatement(false) + , m_steppingMode(SteppingModeDisabled) + , m_numBreakpoints(0) + , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable) + , m_vm(vm) + , m_osrExitCounter(0) + , m_optimizationDelayCounter(0) + , m_reoptimizationRetryCounter(0) + , m_creationTime(std::chrono::steady_clock::now()) +{ + ASSERT(heap()->isDeferred()); +} + +void CodeBlock::finishCreation(VM& vm, WebAssemblyExecutable*, JSGlobalObject*) +{ + Base::finishCreation(vm); + + heap()->m_codeBlocks.add(this); +} +#endif + CodeBlock::~CodeBlock() { if (m_vm->m_perBytecodeProfiler) m_vm->m_perBytecodeProfiler->notifyDestruction(this); -#if ENABLE(DFG_JIT) - // Remove myself from the set of DFG code blocks. Note that I may not be in this set - // (because I'm not a DFG code block), in which case this is a no-op anyway. - m_vm->heap.m_dfgCodeBlocks.m_set.remove(this); -#endif - #if ENABLE(VERBOSE_VALUE_PROFILE) dumpValueProfiles(); #endif -#if ENABLE(LLINT) - while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end()) - m_incomingLLIntCalls.begin()->remove(); -#endif // ENABLE(LLINT) -#if ENABLE(JIT) // We may be destroyed before any CodeBlocks that refer to us are destroyed. // Consider that two CodeBlocks become unreachable at the same time. There // is no guarantee about the order in which the CodeBlocks are destroyed. // So, if we don't remove incoming calls, and get destroyed before the // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's // destructor will try to remove nodes from our (no longer valid) linked list. - while (m_incomingCalls.begin() != m_incomingCalls.end()) - m_incomingCalls.begin()->remove(); + unlinkIncomingCalls(); // Note that our outgoing calls will be removed from other CodeBlocks' // m_incomingCalls linked lists through the execution of the ~CallLinkInfo // destructors. 
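// The destructor contract described above is the classic intrusive-list discipline:
// every call site registers itself with its callee, and whichever side dies first
// severs the link so the other side's destructor never walks freed memory. A
// simplified model follows; CallSite and Callee are stand-ins for JSC's CallLinkInfo
// machinery, not its actual types.

#include <list>

struct Callee;

struct CallSite {
    Callee* callee { nullptr };
    std::list<CallSite*>::iterator selfInCalleeList;
    void unlink();
    ~CallSite() { unlink(); }
};

struct Callee {
    std::list<CallSite*> incomingCalls;
    ~Callee()
    {
        // Sever every incoming link before the list itself dies.
        while (!incomingCalls.empty())
            incomingCalls.front()->unlink();
    }
};

inline void CallSite::unlink()
{
    if (!callee)
        return;
    callee->incomingCalls.erase(selfInCalleeList); // remove exactly once
    callee = nullptr;
}

inline void link(CallSite& site, Callee& callee)
{
    site.callee = &callee;
    site.selfInCalleeList = callee.incomingCalls.insert(callee.incomingCalls.end(), &site);
}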
- for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i) - m_structureStubInfos[i].deref(); +#if ENABLE(JIT) + for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) { + StructureStubInfo* stub = *iter; + stub->aboutToDie(); + stub->deref(); + } #endif // ENABLE(JIT) - -#if DUMP_CODE_BLOCK_STATISTICS - liveCodeBlockSet.remove(this); -#endif } -void CodeBlock::setNumParameters(int newValue) +void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative) { - m_numParameters = newValue; - -#if ENABLE(VALUE_PROFILER) - m_argumentValueProfiles.resizeToFit(newValue); -#endif + m_alternative.set(vm, this, alternative); } -void CodeBlock::visitStructures(SlotVisitor& visitor, Instruction* vPC) +void CodeBlock::setNumParameters(int newValue) { - Interpreter* interpreter = m_vm->interpreter; - - if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id) && vPC[4].u.structure) { - visitor.append(&vPC[4].u.structure); - return; - } - - if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_self) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_self) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_custom_self)) { - visitor.append(&vPC[4].u.structure); - return; - } - if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_proto) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_custom_proto)) { - visitor.append(&vPC[4].u.structure); - visitor.append(&vPC[5].u.structure); - return; - } - if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_chain) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_chain) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_custom_chain)) { - visitor.append(&vPC[4].u.structure); - if (vPC[5].u.structureChain) - visitor.append(&vPC[5].u.structureChain); - return; - } - if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_transition)) { - visitor.append(&vPC[4].u.structure); - visitor.append(&vPC[5].u.structure); - if (vPC[6].u.structureChain) - visitor.append(&vPC[6].u.structureChain); - return; - } - if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id) && vPC[4].u.structure) { - visitor.append(&vPC[4].u.structure); - return; - } - if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_replace)) { - visitor.append(&vPC[4].u.structure); - return; - } + m_numParameters = newValue; - // These instructions don't ref their Structures. - ASSERT(vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id) || vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_get_array_length) || vPC[0].u.opcode == interpreter->getOpcode(op_get_string_length)); + m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue); } void EvalCodeCache::visitAggregate(SlotVisitor& visitor) @@ -2075,52 +2410,50 @@ void EvalCodeCache::visitAggregate(SlotVisitor& visitor) visitor.append(&ptr->value); } -void CodeBlock::visitAggregate(SlotVisitor& visitor) +CodeBlock* CodeBlock::specialOSREntryBlockOrNull() { -#if ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT) - if (!!m_dfgData) { - // I may be asked to scan myself more than once, and it may even happen concurrently. - // To this end, use a CAS loop to check if I've been called already. Only one thread - // may proceed past this point - whichever one wins the CAS race. 
- unsigned oldValue; - do { - oldValue = m_dfgData->visitAggregateHasBeenCalled; - if (oldValue) { - // Looks like someone else won! Return immediately to ensure that we don't - // trace the same CodeBlock concurrently. Doing so is hazardous since we will - // be mutating the state of ValueProfiles, which contain JSValues, which can - // have word-tearing on 32-bit, leading to awesome timing-dependent crashes - // that are nearly impossible to track down. - - // Also note that it must be safe to return early as soon as we see the - // value true (well, (unsigned)1), since once a GC thread is in this method - // and has won the CAS race (i.e. was responsible for setting the value true) - // it will definitely complete the rest of this method before declaring - // termination. - return; - } - } while (!WTF::weakCompareAndSwap(&m_dfgData->visitAggregateHasBeenCalled, 0, 1)); - } -#endif // ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT) - - if (!!m_alternative) - m_alternative->visitAggregate(visitor); +#if ENABLE(FTL_JIT) + if (jitType() != JITCode::DFGJIT) + return 0; + DFG::JITCode* jitCode = m_jitCode->dfg(); + return jitCode->osrEntryBlock(); +#else // ENABLE(FTL_JIT) + return 0; +#endif // ENABLE(FTL_JIT) +} - visitor.append(&m_unlinkedCode); +void CodeBlock::visitWeakly(SlotVisitor& visitor) +{ + bool setByMe = m_visitWeaklyHasBeenCalled.compareExchangeStrong(false, true); + if (!setByMe) + return; - // There are three things that may use unconditional finalizers: lazy bytecode freeing, - // inline cache clearing, and jettisoning. The probability of us wanting to do at - // least one of those things is probably quite close to 1. So we add one no matter what - // and when it runs, it figures out whether it has any work to do. - visitor.addUnconditionalFinalizer(this); - - if (shouldImmediatelyAssumeLivenessDuringScan()) { - // This code block is live, so scan all references strongly and return. - stronglyVisitStrongReferences(visitor); - stronglyVisitWeakReferences(visitor); + if (Heap::isMarked(this)) + return; + + if (shouldVisitStrongly()) { + visitor.appendUnbarrieredReadOnlyPointer(this); return; } + + // There are two things that may use unconditional finalizers: inline cache clearing + // and jettisoning. The probability of us wanting to do at least one of those things + // is probably quite close to 1. So we add one no matter what and when it runs, it + // figures out whether it has any work to do. + visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer); + + if (!JITCode::isOptimizingJIT(jitType())) + return; + + // If we jettison ourselves we'll install our alternative, so make sure that it + // survives GC even if we don't. + visitor.append(&m_alternative); + // There are two things that we use weak reference harvesters for: DFG fixpoint for + // jettisoning, and trying to find structures that would be live based on some + // inline cache. So it makes sense to register them regardless. + visitor.addWeakReferenceHarvester(&m_weakReferenceHarvester); + #if ENABLE(DFG_JIT) // We get here if we're live in the sense that our owner executable is live, // but we're not yet live for sure in another sense: we may yet decide that this @@ -2130,69 +2463,222 @@ void CodeBlock::visitAggregate(SlotVisitor& visitor) // either us marking additional objects, or by other objects being marked for // other reasons, that this iteration should run again; it will notify us of this // decision by calling harvestWeakReferences(). 
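// The retry protocol described above is a marking fixpoint: one pass checks whether
// every precondition object has already been marked; if not, the harvester is
// re-registered so the pass runs again after more marking has happened, and the
// monotonicity of mark bits guarantees termination. Schematically (Obj and
// propagateOnce are illustrative, not JSC's API):

#include <vector>

struct Obj {
    bool marked { false };
    std::vector<Obj*> weakPreconditions; // objects that must be live for us to be live
};

// One harvester pass: returns true once liveness is proved.
bool propagateOnce(Obj& codeBlock)
{
    for (Obj* precondition : codeBlock.weakPreconditions) {
        if (!precondition->marked)
            return false; // not proved yet; the GC will run us again if marking progresses
    }
    codeBlock.marked = true; // proved: safe to scan strong references now
    return true;
}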
-
-    m_dfgData->livenessHasBeenProved = false;
-    m_dfgData->allTransitionsHaveBeenMarked = false;
-
-    performTracingFixpointIteration(visitor);
-    // GC doesn't have enough information yet for us to decide whether to keep our DFG
-    // data, so we need to register a handler to run again at the end of GC, when more
-    // information is available.
-    if (!(m_dfgData->livenessHasBeenProved && m_dfgData->allTransitionsHaveBeenMarked))
-        visitor.addWeakReferenceHarvester(this);
-
-#else // ENABLE(DFG_JIT)
-    RELEASE_ASSERT_NOT_REACHED();
+    m_allTransitionsHaveBeenMarked = false;
+    propagateTransitions(visitor);
+
+    m_jitCode->dfgCommon()->livenessHasBeenProved = false;
+    determineLiveness(visitor);
 #endif // ENABLE(DFG_JIT)
 }

-void CodeBlock::performTracingFixpointIteration(SlotVisitor& visitor)
+size_t CodeBlock::estimatedSize(JSCell* cell)
+{
+    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
+    size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
+    if (thisObject->m_jitCode)
+        extraMemoryAllocated += thisObject->m_jitCode->size();
+    return Base::estimatedSize(cell) + extraMemoryAllocated;
+}
+
+void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
+    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+    JSCell::visitChildren(thisObject, visitor);
+    thisObject->visitChildren(visitor);
+}
+
+void CodeBlock::visitChildren(SlotVisitor& visitor)
+{
+    // There are two things that may use unconditional finalizers: inline cache clearing
+    // and jettisoning. The probability of us wanting to do at least one of those things
+    // is probably quite close to 1. So we add one no matter what and when it runs, it
+    // figures out whether it has any work to do.
+    visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
+
+    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
+        visitor.appendUnbarrieredReadOnlyPointer(otherBlock);
+
+    if (m_jitCode)
+        visitor.reportExtraMemoryVisited(m_jitCode->size());
+    if (m_instructions.size())
+        visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
+
+    stronglyVisitStrongReferences(visitor);
+    stronglyVisitWeakReferences(visitor);
+
+    m_allTransitionsHaveBeenMarked = false;
+    propagateTransitions(visitor);
+}
+
+bool CodeBlock::shouldVisitStrongly()
+{
+    if (Options::forceCodeBlockLiveness())
+        return true;
+
+    if (shouldJettisonDueToOldAge())
+        return false;
+
+    // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
+    // their weak references go stale. So if a baseline JIT CodeBlock gets
+    // scanned, we can assume that it is live.
+    if (!JITCode::isOptimizingJIT(jitType()))
+        return true;
+
+    return false;
+}
+
+bool CodeBlock::shouldJettisonDueToWeakReference()
+{
+    if (!JITCode::isOptimizingJIT(jitType()))
+        return false;
+    return !Heap::isMarked(this);
+}
+
+bool CodeBlock::shouldJettisonDueToOldAge()
+{
+    return false;
+}
+
+#if ENABLE(DFG_JIT)
+static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
+{
+    if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
+        return false;
+
+    if (!Heap::isMarked(transition.m_from.get()))
+        return false;
+
+    return true;
+}
+#endif // ENABLE(DFG_JIT)
+
+void CodeBlock::propagateTransitions(SlotVisitor& visitor)
 {
     UNUSED_PARAM(visitor);
+
+    if (m_allTransitionsHaveBeenMarked)
+        return;
+
+    bool allAreMarkedSoFar = true;
+
+    Interpreter* interpreter = m_vm->interpreter;
+    if (jitType() == JITCode::InterpreterThunk) {
+        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
+        for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
+            Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
+            switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
+            case op_put_by_id: {
+                StructureID oldStructureID = instruction[4].u.structureID;
+                StructureID newStructureID = instruction[6].u.structureID;
+                if (!oldStructureID || !newStructureID)
+                    break;
+                Structure* oldStructure =
+                    m_vm->heap.structureIDTable().get(oldStructureID);
+                Structure* newStructure =
+                    m_vm->heap.structureIDTable().get(newStructureID);
+                if (Heap::isMarked(oldStructure))
+                    visitor.appendUnbarrieredReadOnlyPointer(newStructure);
+                else
+                    allAreMarkedSoFar = false;
+                break;
+            }
+            default:
+                break;
+            }
+        }
+    }
+
+#if ENABLE(JIT)
+    if (JITCode::isJIT(jitType())) {
+        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+            StructureStubInfo& stubInfo = **iter;
+            if (stubInfo.cacheType != CacheType::Stub)
+                continue;
+            PolymorphicAccess* list = stubInfo.u.stub;
+            JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
+            if (origin && !Heap::isMarked(origin)) {
+                allAreMarkedSoFar = false;
+                continue;
+            }
+            for (unsigned j = list->size(); j--;) {
+                const AccessCase& access = list->at(j);
+                if (access.type() != AccessCase::Transition)
+                    continue;
+                if (Heap::isMarked(access.structure()))
+                    visitor.appendUnbarrieredReadOnlyPointer(access.newStructure());
+                else
+                    allAreMarkedSoFar = false;
+            }
+        }
+    }
+#endif // ENABLE(JIT)

 #if ENABLE(DFG_JIT)
-    // Evaluate our weak reference transitions, if there are still some to evaluate.
-    if (!m_dfgData->allTransitionsHaveBeenMarked) {
-        bool allAreMarkedSoFar = true;
-        for (unsigned i = 0; i < m_dfgData->transitions.size(); ++i) {
-            if ((!m_dfgData->transitions[i].m_codeOrigin
-                || Heap::isMarked(m_dfgData->transitions[i].m_codeOrigin.get()))
-                && Heap::isMarked(m_dfgData->transitions[i].m_from.get())) {
+    if (JITCode::isOptimizingJIT(jitType())) {
+        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+
+        for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
+            if (shouldMarkTransition(dfgCommon->transitions[i])) {
                 // If the following three things are live, then the target of the
                 // transition is also live:
+                //
                 // - This code block. We know it's live already because otherwise
                 //   we wouldn't be scanning ourselves.
+                //
                 // - The code origin of the transition. Transitions may arise from
                 //   code that was inlined. They are not relevant if the user's
                 //   object that is required for the inlinee to run is no longer
                 //   live.
+ // // - The source of the transition. The transition checks if some // heap location holds the source, and if so, stores the target. // Hence the source must be live for the transition to be live. - visitor.append(&m_dfgData->transitions[i].m_to); + // + // We also short-circuit the liveness if the structure is harmless + // to mark (i.e. its global object and prototype are both already + // live). + + visitor.append(&dfgCommon->transitions[i].m_to); } else allAreMarkedSoFar = false; } - - if (allAreMarkedSoFar) - m_dfgData->allTransitionsHaveBeenMarked = true; } +#endif // ENABLE(DFG_JIT) + if (allAreMarkedSoFar) + m_allTransitionsHaveBeenMarked = true; +} + +void CodeBlock::determineLiveness(SlotVisitor& visitor) +{ + UNUSED_PARAM(visitor); + +#if ENABLE(DFG_JIT) // Check if we have any remaining work to do. - if (m_dfgData->livenessHasBeenProved) + DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); + if (dfgCommon->livenessHasBeenProved) return; // Now check all of our weak references. If all of them are live, then we // have proved liveness and so we scan our strong references. If at end of // GC we still have not proved liveness, then this code block is toast. bool allAreLiveSoFar = true; - for (unsigned i = 0; i < m_dfgData->weakReferences.size(); ++i) { - if (!Heap::isMarked(m_dfgData->weakReferences[i].get())) { + for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) { + if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) { allAreLiveSoFar = false; break; } } + if (allAreLiveSoFar) { + for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) { + if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) { + allAreLiveSoFar = false; + break; + } + } + } // If some weak references are dead, then this fixpoint iteration was // unsuccessful. @@ -2201,217 +2687,268 @@ void CodeBlock::performTracingFixpointIteration(SlotVisitor& visitor) // All weak references are live. Record this information so we don't // come back here again, and scan the strong references. 
- m_dfgData->livenessHasBeenProved = true; - stronglyVisitStrongReferences(visitor); + dfgCommon->livenessHasBeenProved = true; + visitor.appendUnbarrieredReadOnlyPointer(this); #endif // ENABLE(DFG_JIT) } -void CodeBlock::visitWeakReferences(SlotVisitor& visitor) +void CodeBlock::WeakReferenceHarvester::visitWeakReferences(SlotVisitor& visitor) { - performTracingFixpointIteration(visitor); + CodeBlock* codeBlock = + bitwise_cast<CodeBlock*>( + bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_weakReferenceHarvester)); + + codeBlock->propagateTransitions(visitor); + codeBlock->determineLiveness(visitor); } -#if ENABLE(JIT_VERBOSE_OSR) -static const bool verboseUnlinking = true; -#else -static const bool verboseUnlinking = false; +void CodeBlock::finalizeLLIntInlineCaches() +{ +#if ENABLE(WEBASSEMBLY) + if (m_ownerExecutable->isWebAssemblyExecutable()) + return; #endif -void CodeBlock::finalizeUnconditionally() -{ -#if ENABLE(LLINT) Interpreter* interpreter = m_vm->interpreter; - if (!!numberOfInstructions()) { - const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); - for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) { - Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]]; - switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) { - case op_get_by_id: - case op_get_by_id_out_of_line: - case op_put_by_id: - case op_put_by_id_out_of_line: - if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get())) - break; - if (verboseUnlinking) - dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get()); - curInstruction[4].u.structure.clear(); - curInstruction[5].u.operand = 0; + const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); + for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) { + Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]]; + switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) { + case op_get_by_id: { + StructureID oldStructureID = curInstruction[4].u.structureID; + if (!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID))) break; - case op_put_by_id_transition_direct: - case op_put_by_id_transition_normal: - case op_put_by_id_transition_direct_out_of_line: - case op_put_by_id_transition_normal_out_of_line: - if (Heap::isMarked(curInstruction[4].u.structure.get()) - && Heap::isMarked(curInstruction[6].u.structure.get()) - && Heap::isMarked(curInstruction[7].u.structureChain.get())) - break; - if (verboseUnlinking) { - dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n", - curInstruction[4].u.structure.get(), - curInstruction[6].u.structure.get(), - curInstruction[7].u.structureChain.get()); - } - curInstruction[4].u.structure.clear(); - curInstruction[6].u.structure.clear(); - curInstruction[7].u.structureChain.clear(); - curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id); + if (Options::verboseOSR()) + dataLogF("Clearing LLInt property access.\n"); + curInstruction[4].u.structureID = 0; + curInstruction[5].u.operand = 0; + break; + } + case op_put_by_id: { + StructureID oldStructureID = curInstruction[4].u.structureID; + StructureID newStructureID = curInstruction[6].u.structureID; + StructureChain* chain = curInstruction[7].u.structureChain.get(); + if ((!oldStructureID || 
Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID))) && + (!newStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(newStructureID))) && + (!chain || Heap::isMarked(chain))) break; - case op_get_array_length: + if (Options::verboseOSR()) + dataLogF("Clearing LLInt put transition.\n"); + curInstruction[4].u.structureID = 0; + curInstruction[5].u.operand = 0; + curInstruction[6].u.structureID = 0; + curInstruction[7].u.structureChain.clear(); + break; + } + case op_get_array_length: + break; + case op_to_this: + if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get())) break; - default: - RELEASE_ASSERT_NOT_REACHED(); - } + if (Options::verboseOSR()) + dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get()); + curInstruction[2].u.structure.clear(); + curInstruction[3].u.toThisStatus = merge( + curInstruction[3].u.toThisStatus, ToThisClearedByGC); + break; + case op_create_this: { + auto& cacheWriteBarrier = curInstruction[4].u.jsCell; + if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects()) + break; + JSCell* cachedFunction = cacheWriteBarrier.get(); + if (Heap::isMarked(cachedFunction)) + break; + if (Options::verboseOSR()) + dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction); + cacheWriteBarrier.clear(); + break; + } + case op_resolve_scope: { + // Right now this isn't strictly necessary. Any symbol tables that this will refer to + // are for outer functions, and we refer to those functions strongly, and they refer + // to the symbol table strongly. But it's nice to be on the safe side. + WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable; + if (!symbolTable || Heap::isMarked(symbolTable.get())) + break; + if (Options::verboseOSR()) + dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get()); + symbolTable.clear(); + break; } + case op_get_from_scope: + case op_put_to_scope: { + GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand); + if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks + || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks) + continue; + WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure; + if (!structure || Heap::isMarked(structure.get())) + break; + if (Options::verboseOSR()) + dataLogF("Clearing scope access with structure %p.\n", structure.get()); + structure.clear(); + break; + } + default: + OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode); + ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]); + } + } - for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) { - if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) { - if (verboseUnlinking) - dataLog("Clearing LLInt call from ", *this, "\n"); - m_llintCallLinkInfos[i].unlink(); - } - if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get())) - m_llintCallLinkInfos[i].lastSeenCallee.clear(); + for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) { + if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) { + if (Options::verboseOSR()) 
+ dataLog("Clearing LLInt call from ", *this, "\n"); + m_llintCallLinkInfos[i].unlink(); } + if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get())) + m_llintCallLinkInfos[i].lastSeenCallee.clear(); } -#endif // ENABLE(LLINT) +} -#if ENABLE(DFG_JIT) - // Check if we're not live. If we are, then jettison. - if (!(shouldImmediatelyAssumeLivenessDuringScan() || m_dfgData->livenessHasBeenProved)) { - if (verboseUnlinking) - dataLog(*this, " has dead weak references, jettisoning during GC.\n"); +void CodeBlock::finalizeBaselineJITInlineCaches() +{ +#if ENABLE(JIT) + for (auto iter = callLinkInfosBegin(); !!iter; ++iter) + (*iter)->visitWeak(*vm()); - if (DFG::shouldShowDisassembly()) { - dataLog(*this, " will be jettisoned because of the following dead references:\n"); - for (unsigned i = 0; i < m_dfgData->transitions.size(); ++i) { - WeakReferenceTransition& transition = m_dfgData->transitions[i]; - JSCell* origin = transition.m_codeOrigin.get(); - JSCell* from = transition.m_from.get(); - JSCell* to = transition.m_to.get(); - if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from)) - continue; - dataLog(" Transition under ", JSValue(origin), ", ", JSValue(from), " -> ", JSValue(to), ".\n"); - } - for (unsigned i = 0; i < m_dfgData->weakReferences.size(); ++i) { - JSCell* weak = m_dfgData->weakReferences[i].get(); - if (Heap::isMarked(weak)) - continue; - dataLog(" Weak reference ", JSValue(weak), ".\n"); - } - } - - jettison(); + for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) { + StructureStubInfo& stubInfo = **iter; + stubInfo.visitWeakReferences(this); + } +#endif +} + +void CodeBlock::UnconditionalFinalizer::finalizeUnconditionally() +{ + CodeBlock* codeBlock = bitwise_cast<CodeBlock*>( + bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_unconditionalFinalizer)); + +#if ENABLE(DFG_JIT) + if (codeBlock->shouldJettisonDueToWeakReference()) { + codeBlock->jettison(Profiler::JettisonDueToWeakReference); return; } #endif // ENABLE(DFG_JIT) - for (size_t size = m_putToBaseOperations.size(), i = 0; i < size; ++i) { - if (m_putToBaseOperations[i].m_structure && !Heap::isMarked(m_putToBaseOperations[i].m_structure.get())) { - if (verboseUnlinking) - dataLog("Clearing putToBase info in ", *this, "\n"); - m_putToBaseOperations[i].m_structure.clear(); - } + if (codeBlock->shouldJettisonDueToOldAge()) { + codeBlock->jettison(Profiler::JettisonDueToOldAge); + return; } - for (size_t size = m_resolveOperations.size(), i = 0; i < size; ++i) { - if (m_resolveOperations[i].isEmpty()) - continue; -#ifndef NDEBUG - for (size_t insnSize = m_resolveOperations[i].size() - 1, k = 0; k < insnSize; ++k) - ASSERT(!m_resolveOperations[i][k].m_structure); + + if (JITCode::couldBeInterpreted(codeBlock->jitType())) + codeBlock->finalizeLLIntInlineCaches(); + +#if ENABLE(JIT) + if (!!codeBlock->jitCode()) + codeBlock->finalizeBaselineJITInlineCaches(); #endif - m_resolveOperations[i].last().m_structure.clear(); - if (m_resolveOperations[i].last().m_structure && !Heap::isMarked(m_resolveOperations[i].last().m_structure.get())) { - if (verboseUnlinking) - dataLog("Clearing resolve info in ", *this, "\n"); - m_resolveOperations[i].last().m_structure.clear(); - } - } +} +void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result) +{ #if ENABLE(JIT) - // Handle inline caches. 
- if (!!getJITCode()) { - RepatchBuffer repatchBuffer(this); - for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) { - if (callLinkInfo(i).isLinked()) { - if (ClosureCallStubRoutine* stub = callLinkInfo(i).stub.get()) { - if (!Heap::isMarked(stub->structure()) - || !Heap::isMarked(stub->executable())) { - if (verboseUnlinking) { - dataLog( - "Clearing closure call from ", *this, " to ", - stub->executable()->hashFor(callLinkInfo(i).specializationKind()), - ", stub routine ", RawPointer(stub), ".\n"); - } - callLinkInfo(i).unlink(*m_vm, repatchBuffer); - } - } else if (!Heap::isMarked(callLinkInfo(i).callee.get())) { - if (verboseUnlinking) { - dataLog( - "Clearing call from ", *this, " to ", - RawPointer(callLinkInfo(i).callee.get()), " (", - callLinkInfo(i).callee.get()->executable()->hashFor( - callLinkInfo(i).specializationKind()), - ").\n"); - } - callLinkInfo(i).unlink(*m_vm, repatchBuffer); - } - } - if (!!callLinkInfo(i).lastSeenCallee - && !Heap::isMarked(callLinkInfo(i).lastSeenCallee.get())) - callLinkInfo(i).lastSeenCallee.clear(); - } - for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i) { - StructureStubInfo& stubInfo = m_structureStubInfos[i]; - - if (stubInfo.visitWeakReferences()) - continue; - - resetStubDuringGCInternal(repatchBuffer, stubInfo); - } - } + toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result); +#else + UNUSED_PARAM(result); #endif } +void CodeBlock::getStubInfoMap(StubInfoMap& result) +{ + ConcurrentJITLocker locker(m_lock); + getStubInfoMap(locker, result); +} + +void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result) +{ #if ENABLE(JIT) -void CodeBlock::resetStub(StructureStubInfo& stubInfo) + toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result); +#else + UNUSED_PARAM(result); +#endif +} + +void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result) { - if (stubInfo.accessType == access_unset) - return; - - RepatchBuffer repatchBuffer(this); - resetStubInternal(repatchBuffer, stubInfo); + ConcurrentJITLocker locker(m_lock); + getCallLinkInfoMap(locker, result); } -void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo) +void CodeBlock::getByValInfoMap(const ConcurrentJITLocker&, ByValInfoMap& result) { - AccessType accessType = static_cast<AccessType>(stubInfo.accessType); - - if (verboseUnlinking) - dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", *this, ".\n"); - - if (isGetByIdAccess(accessType)) { - if (getJITCode().jitType() == JITCode::DFGJIT) - DFG::dfgResetGetByID(repatchBuffer, stubInfo); - else - JIT::resetPatchGetById(repatchBuffer, &stubInfo); - } else { - ASSERT(isPutByIdAccess(accessType)); - if (getJITCode().jitType() == JITCode::DFGJIT) - DFG::dfgResetPutByID(repatchBuffer, stubInfo); - else - JIT::resetPatchPutById(repatchBuffer, &stubInfo); +#if ENABLE(JIT) + for (auto* byValInfo : m_byValInfos) + result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo); +#else + UNUSED_PARAM(result); +#endif +} + +void CodeBlock::getByValInfoMap(ByValInfoMap& result) +{ + ConcurrentJITLocker locker(m_lock); + getByValInfoMap(locker, result); +} + +#if ENABLE(JIT) +StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType) +{ + ConcurrentJITLocker locker(m_lock); + return m_stubInfos.add(accessType); +} + +StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin) +{ + for (StructureStubInfo* stubInfo : m_stubInfos) { + if (stubInfo->codeOrigin == codeOrigin) + return stubInfo; } - - 
stubInfo.reset(); + return nullptr; +} + +ByValInfo* CodeBlock::addByValInfo() +{ + ConcurrentJITLocker locker(m_lock); + return m_byValInfos.add(); +} + +CallLinkInfo* CodeBlock::addCallLinkInfo() +{ + ConcurrentJITLocker locker(m_lock); + return m_callLinkInfos.add(); } -void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo) +CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index) { - resetStubInternal(repatchBuffer, stubInfo); - stubInfo.resetByGC = true; + for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) { + if ((*iter)->codeOrigin() == CodeOrigin(index)) + return *iter; + } + return nullptr; } #endif +void CodeBlock::visitOSRExitTargets(SlotVisitor& visitor) +{ + // We strongly visit OSR exit targets because we don't want to deal with + // the complexity of generating an exit target CodeBlock on demand and + // guaranteeing that it matches the details of the CodeBlock we compiled + // the OSR exit against. + + visitor.append(&m_alternative); + +#if ENABLE(DFG_JIT) + DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); + if (dfgCommon->inlineCallFrames) { + for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) { + ASSERT(inlineCallFrame->baselineCodeBlock); + visitor.append(&inlineCallFrame->baselineCodeBlock); + } + } +#endif +} + void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor) { visitor.append(&m_globalObject); @@ -2427,7 +2964,12 @@ void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor) for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i) m_objectAllocationProfiles[i].visitAggregate(visitor); - updateAllPredictions(Collection); +#if ENABLE(DFG_JIT) + if (JITCode::isOptimizingJIT(jitType())) + visitOSRExitTargets(visitor); +#endif + + updateAllPredictions(); } void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor) @@ -2435,43 +2977,137 @@ void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor) UNUSED_PARAM(visitor); #if ENABLE(DFG_JIT) - if (!m_dfgData) + if (!JITCode::isOptimizingJIT(jitType())) return; + + DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); - for (unsigned i = 0; i < m_dfgData->transitions.size(); ++i) { - if (!!m_dfgData->transitions[i].m_codeOrigin) - visitor.append(&m_dfgData->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
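// [Annotation, not part of the patch] Each transition is the triple (m_codeOrigin, m_from, m_to):
// the optimized code may morph objects from Structure m_from to Structure m_to, so the appends
// that follow pin both Structures now that this CodeBlock is being kept alive.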
+ visitor.append(&dfgCommon->transitions[i].m_from); + visitor.append(&dfgCommon->transitions[i].m_to); } - for (unsigned i = 0; i < m_dfgData->weakReferences.size(); ++i) - visitor.append(&m_dfgData->weakReferences[i]); + for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) + visitor.append(&dfgCommon->weakReferences[i]); + + for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) + visitor.append(&dfgCommon->weakStructureReferences[i]); + + dfgCommon->livenessHasBeenProved = true; #endif } -HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset) +CodeBlock* CodeBlock::baselineAlternative() +{ +#if ENABLE(JIT) + CodeBlock* result = this; + while (result->alternative()) + result = result->alternative(); + RELEASE_ASSERT(result); + RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None); + return result; +#else + return this; +#endif +} + +CodeBlock* CodeBlock::baselineVersion() +{ +#if ENABLE(JIT) + if (JITCode::isBaselineCode(jitType())) + return this; + CodeBlock* result = replacement(); + if (!result) { + // This can happen if we're creating the original CodeBlock for an executable. + // Assume that we're the baseline CodeBlock. + RELEASE_ASSERT(jitType() == JITCode::None); + return this; + } + result = result->baselineAlternative(); + return result; +#else + return this; +#endif +} + +#if ENABLE(JIT) +bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace) +{ + return JITCode::isHigherTier(replacement()->jitType(), typeToReplace); +} + +bool CodeBlock::hasOptimizedReplacement() +{ + return hasOptimizedReplacement(jitType()); +} +#endif + +HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler) { RELEASE_ASSERT(bytecodeOffset < instructions().size()); + return handlerForIndex(bytecodeOffset, requiredHandler); +} +HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler) +{ if (!m_rareData) return 0; Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers; for (size_t i = 0; i < exceptionHandlers.size(); ++i) { + HandlerInfo& handler = exceptionHandlers[i]; + if ((requiredHandler == RequiredHandler::CatchHandler) && !handler.isCatchHandler()) + continue; + // Handlers are ordered innermost first, so the first handler we encounter // that contains the source address is the correct handler to use. - if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end > bytecodeOffset) - return &exceptionHandlers[i]; + // The index used is either a bytecode offset or a CallSiteIndex. + if (handler.start <= index && handler.end > index) + return &handler; } return 0; } +CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite) +{ +#if ENABLE(DFG_JIT) + RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType())); + RELEASE_ASSERT(canGetCodeOrigin(originalCallSite)); + ASSERT(!!handlerForIndex(originalCallSite.bits())); + CodeOrigin originalOrigin = codeOrigin(originalCallSite); + return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin); +#else + // We never create new on-the-fly exception handling + // call sites outside the DFG/FTL inline caches.
+ UNUSED_PARAM(originalCallSite); + RELEASE_ASSERT_NOT_REACHED(); + return CallSiteIndex(0u); +#endif +} + +void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex) +{ + RELEASE_ASSERT(m_rareData); + Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers; + unsigned index = callSiteIndex.bits(); + for (size_t i = 0; i < exceptionHandlers.size(); ++i) { + HandlerInfo& handler = exceptionHandlers[i]; + if (handler.start <= index && handler.end > index) { + exceptionHandlers.remove(i); + return; + } + } + + RELEASE_ASSERT_NOT_REACHED(); +} + unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset) { RELEASE_ASSERT(bytecodeOffset < instructions().size()); - return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset); + return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset); } unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset) @@ -2490,452 +3126,409 @@ void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& d m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column); divot += m_sourceOffset; column += line ? 1 : firstLineColumnOffset(); - line += m_ownerExecutable->lineNo(); + line += ownerScriptExecutable()->firstLine(); +} + +bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column) +{ + Interpreter* interpreter = vm()->interpreter; + const Instruction* begin = instructions().begin(); + const Instruction* end = instructions().end(); + for (const Instruction* it = begin; it != end;) { + OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode); + if (opcodeID == op_debug) { + unsigned bytecodeOffset = it - begin; + int unused; + unsigned opDebugLine; + unsigned opDebugColumn; + expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn); + if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn)) + return true; + } + it += opcodeLengths[opcodeID]; + } + return false; } void CodeBlock::shrinkToFit(ShrinkMode shrinkMode) { -#if ENABLE(LLINT) - m_llintCallLinkInfos.shrinkToFit(); -#endif -#if ENABLE(JIT) - m_structureStubInfos.shrinkToFit(); - m_callLinkInfos.shrinkToFit(); -#endif -#if ENABLE(VALUE_PROFILER) m_rareCaseProfiles.shrinkToFit(); - m_specialFastCaseProfiles.shrinkToFit(); -#endif + m_resultProfiles.shrinkToFit(); if (shrinkMode == EarlyShrink) { - m_identifiers.shrinkToFit(); - m_functionDecls.shrinkToFit(); - m_functionExprs.shrinkToFit(); m_constantRegisters.shrinkToFit(); + m_constantsSourceCodeRepresentation.shrinkToFit(); + + if (m_rareData) { + m_rareData->m_switchJumpTables.shrinkToFit(); + m_rareData->m_stringSwitchJumpTables.shrinkToFit(); + m_rareData->m_liveCalleeLocalsAtYield.shrinkToFit(); + } } // else don't shrink these, because we would have already pointed pointers into these tables. 
- - if (m_rareData) { - m_rareData->m_exceptionHandlers.shrinkToFit(); - m_rareData->m_immediateSwitchJumpTables.shrinkToFit(); - m_rareData->m_characterSwitchJumpTables.shrinkToFit(); - m_rareData->m_stringSwitchJumpTables.shrinkToFit(); -#if ENABLE(JIT) - m_rareData->m_callReturnIndexVector.shrinkToFit(); -#endif -#if ENABLE(DFG_JIT) - m_rareData->m_inlineCallFrames.shrinkToFit(); - m_rareData->m_codeOrigins.shrinkToFit(); -#endif - } - -#if ENABLE(DFG_JIT) - if (m_dfgData) { - m_dfgData->osrEntry.shrinkToFit(); - m_dfgData->osrExit.shrinkToFit(); - m_dfgData->speculationRecovery.shrinkToFit(); - m_dfgData->weakReferences.shrinkToFit(); - m_dfgData->transitions.shrinkToFit(); - m_dfgData->minifiedDFG.prepareAndShrink(); - m_dfgData->variableEventStream.shrinkToFit(); - } -#endif } -void CodeBlock::createActivation(CallFrame* callFrame) -{ - ASSERT(codeType() == FunctionCode); - ASSERT(needsFullScopeChain()); - ASSERT(!callFrame->uncheckedR(activationRegister()).jsValue()); - JSActivation* activation = JSActivation::create(callFrame->vm(), callFrame, this); - callFrame->uncheckedR(activationRegister()) = JSValue(activation); - callFrame->setScope(activation); -} - -unsigned CodeBlock::addOrFindConstant(JSValue v) +#if ENABLE(JIT) +void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming) { - unsigned numberOfConstants = numberOfConstantRegisters(); - for (unsigned i = 0; i < numberOfConstants; ++i) { - if (getConstant(FirstConstantRegisterIndex + i) == v) - return i; - } - return addConstant(v); + noticeIncomingCall(callerFrame); + m_incomingCalls.push(incoming); } -#if ENABLE(JIT) -void CodeBlock::unlinkCalls() +void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming) { - if (!!m_alternative) - m_alternative->unlinkCalls(); -#if ENABLE(LLINT) - for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) { - if (m_llintCallLinkInfos[i].isLinked()) - m_llintCallLinkInfos[i].unlink(); - } -#endif - if (!m_callLinkInfos.size()) - return; - if (!m_vm->canUseJIT()) - return; - RepatchBuffer repatchBuffer(this); - for (size_t i = 0; i < m_callLinkInfos.size(); i++) { - if (!m_callLinkInfos[i].isLinked()) - continue; - m_callLinkInfos[i].unlink(*m_vm, repatchBuffer); - } + noticeIncomingCall(callerFrame); + m_incomingPolymorphicCalls.push(incoming); } +#endif // ENABLE(JIT) void CodeBlock::unlinkIncomingCalls() { -#if ENABLE(LLINT) while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end()) m_incomingLLIntCalls.begin()->unlink(); -#endif - if (m_incomingCalls.isEmpty()) - return; - RepatchBuffer repatchBuffer(this); +#if ENABLE(JIT) while (m_incomingCalls.begin() != m_incomingCalls.end()) - m_incomingCalls.begin()->unlink(*m_vm, repatchBuffer); -} + m_incomingCalls.begin()->unlink(*vm()); + while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end()) + m_incomingPolymorphicCalls.begin()->unlink(*vm()); #endif // ENABLE(JIT) +} -#if ENABLE(LLINT) -Instruction* CodeBlock::adjustPCIfAtCallSite(Instruction* potentialReturnPC) +void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming) { - ASSERT(potentialReturnPC); - - unsigned returnPCOffset = potentialReturnPC - instructions().begin(); - Instruction* adjustedPC; - unsigned opcodeLength; + noticeIncomingCall(callerFrame); + m_incomingLLIntCalls.push(incoming); +} - // If we are at a callsite, the LLInt stores the PC after the call - // instruction rather than the PC of the call instruction. This requires - // some correcting. 
If so, we can rely on the fact that the preceding - // instruction must be one of the call instructions, so either it's a - // call_varargs or it's a call, construct, or eval. - // - // If we are not at a call site, then we need to guard against the - // possibility of peeking past the start of the bytecode range for this - // codeBlock. Hence, we do a bounds check before we peek at the - // potential "preceding" instruction. - // The bounds check is done by comparing the offset of the potential - // returnPC with the length of the opcode. If there is room for a call - // instruction before the returnPC, then the offset of the returnPC must - // be greater than the size of the call opcode we're looking for. - - // The determination of the call instruction present (if we are at a - // callsite) depends on the following assumptions. So, assert that - // they are still true: - ASSERT(OPCODE_LENGTH(op_call_varargs) <= OPCODE_LENGTH(op_call)); - ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct)); - ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval)); - - // Check for the case of a preceeding op_call_varargs: - opcodeLength = OPCODE_LENGTH(op_call_varargs); - adjustedPC = potentialReturnPC - opcodeLength; - if ((returnPCOffset >= opcodeLength) - && (adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_call_varargs))) { - return adjustedPC; - } - - // Check for the case of the other 3 call instructions: - opcodeLength = OPCODE_LENGTH(op_call); - adjustedPC = potentialReturnPC - opcodeLength; - if ((returnPCOffset >= opcodeLength) - && (adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_call) - || adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_construct) - || adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_call_eval))) { - return adjustedPC; - } - - // Not a call site. No need to adjust PC. Just return the original. - return potentialReturnPC; -} -#endif // ENABLE(LLINT) +CodeBlock* CodeBlock::newReplacement() +{ + return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind()); +} #if ENABLE(JIT) -ClosureCallStubRoutine* CodeBlock::findClosureCallForReturnPC(ReturnAddressPtr returnAddress) +CodeBlock* CodeBlock::replacement() { - for (unsigned i = m_callLinkInfos.size(); i--;) { - CallLinkInfo& info = m_callLinkInfos[i]; - if (!info.stub) - continue; - if (!info.stub->code().executableMemory()->contains(returnAddress.value())) - continue; + const ClassInfo* classInfo = this->classInfo(); - RELEASE_ASSERT(info.stub->codeOrigin().bytecodeIndex < CodeOrigin::maximumBytecodeIndex); - return info.stub.get(); - } - - // The stub routine may have been jettisoned. This is rare, but we have to handle it. - const JITStubRoutineSet& set = m_vm->heap.jitStubRoutines(); - for (unsigned i = set.size(); i--;) { - GCAwareJITStubRoutine* genericStub = set.at(i); - if (!genericStub->isClosureCall()) - continue; - ClosureCallStubRoutine* stub = static_cast<ClosureCallStubRoutine*>(genericStub); - if (!stub->code().executableMemory()->contains(returnAddress.value())) - continue; - RELEASE_ASSERT(stub->codeOrigin().bytecodeIndex < CodeOrigin::maximumBytecodeIndex); - return stub; - } - - return 0; -} + if (classInfo == FunctionCodeBlock::info()) + return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? 
CodeForConstruct : CodeForCall); + + if (classInfo == EvalCodeBlock::info()) + return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock(); + + if (classInfo == ProgramCodeBlock::info()) + return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock(); + + if (classInfo == ModuleProgramCodeBlock::info()) + return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock(); + +#if ENABLE(WEBASSEMBLY) + if (classInfo == WebAssemblyCodeBlock::info()) + return nullptr; #endif -unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddress) + RELEASE_ASSERT_NOT_REACHED(); + return nullptr; +} + +DFG::CapabilityLevel CodeBlock::computeCapabilityLevel() { - UNUSED_PARAM(exec); - UNUSED_PARAM(returnAddress); -#if ENABLE(LLINT) -#if !ENABLE(LLINT_C_LOOP) - // When using the JIT, we could have addresses that are not bytecode - // addresses. We check if the return address is in the LLint glue and - // opcode handlers range here to ensure that we are looking at bytecode - // before attempting to convert the return address into a bytecode offset. - // - // In the case of the C Loop LLInt, the JIT is disabled, and the only - // valid return addresses should be bytecode PCs. So, we can and need to - // forego this check because when we do not ENABLE(COMPUTED_GOTO_OPCODES), - // then the bytecode "PC"s are actually the opcodeIDs and are not bounded - // by llint_begin and llint_end. - if (returnAddress.value() >= LLInt::getCodePtr(llint_begin) - && returnAddress.value() <= LLInt::getCodePtr(llint_end)) -#endif - { - RELEASE_ASSERT(exec->codeBlock()); - RELEASE_ASSERT(exec->codeBlock() == this); - RELEASE_ASSERT(JITCode::isBaselineCode(getJITType())); - Instruction* instruction = exec->currentVPC(); - RELEASE_ASSERT(instruction); + const ClassInfo* classInfo = this->classInfo(); - instruction = adjustPCIfAtCallSite(instruction); - return bytecodeOffset(instruction); + if (classInfo == FunctionCodeBlock::info()) { + if (m_isConstructor) + return DFG::functionForConstructCapabilityLevel(this); + return DFG::functionForCallCapabilityLevel(this); } -#endif // !ENABLE(LLINT) -#if ENABLE(JIT) - if (!m_rareData) - return 1; - Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callIndices = m_rareData->m_callReturnIndexVector; - if (!callIndices.size()) - return 1; - - if (getJITCode().getExecutableMemory()->contains(returnAddress.value())) { - unsigned callReturnOffset = getJITCode().offsetOf(returnAddress.value()); - CallReturnOffsetToBytecodeOffset* result = - binarySearch<CallReturnOffsetToBytecodeOffset, unsigned>( - callIndices, callIndices.size(), callReturnOffset, getCallReturnOffset); - RELEASE_ASSERT(result->callReturnOffset == callReturnOffset); - RELEASE_ASSERT(result->bytecodeOffset < instructionCount()); - return result->bytecodeOffset; - } - ClosureCallStubRoutine* closureInfo = findClosureCallForReturnPC(returnAddress); - CodeOrigin origin = closureInfo->codeOrigin(); - while (InlineCallFrame* inlineCallFrame = origin.inlineCallFrame) { - if (inlineCallFrame->baselineCodeBlock() == this) - break; - origin = inlineCallFrame->caller; - RELEASE_ASSERT(origin.bytecodeIndex < CodeOrigin::maximumBytecodeIndex); - } - RELEASE_ASSERT(origin.bytecodeIndex < CodeOrigin::maximumBytecodeIndex); - unsigned bytecodeIndex = origin.bytecodeIndex; - RELEASE_ASSERT(bytecodeIndex < instructionCount()); - return bytecodeIndex; -#endif // ENABLE(JIT) + if (classInfo == EvalCodeBlock::info()) + return DFG::evalCapabilityLevel(this); -#if !ENABLE(LLINT) && !ENABLE(JIT) - 
return 1; + if (classInfo == ProgramCodeBlock::info()) + return DFG::programCapabilityLevel(this); + + if (classInfo == ModuleProgramCodeBlock::info()) + return DFG::programCapabilityLevel(this); + +#if ENABLE(WEBASSEMBLY) + if (classInfo == WebAssemblyCodeBlock::info()) + return DFG::CannotCompile; #endif + + RELEASE_ASSERT_NOT_REACHED(); + return DFG::CannotCompile; } -#if ENABLE(DFG_JIT) -bool CodeBlock::codeOriginForReturn(ReturnAddressPtr returnAddress, CodeOrigin& codeOrigin) +#endif // ENABLE(JIT) + +void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail) { - if (!hasCodeOrigins()) - return false; +#if !ENABLE(DFG_JIT) + UNUSED_PARAM(mode); + UNUSED_PARAM(detail); +#endif - if (!getJITCode().getExecutableMemory()->contains(returnAddress.value())) { - ClosureCallStubRoutine* stub = findClosureCallForReturnPC(returnAddress); - ASSERT(stub); - if (!stub) - return false; - codeOrigin = stub->codeOrigin(); - return true; + RELEASE_ASSERT(reason != Profiler::NotJettisoned); + +#if ENABLE(DFG_JIT) + if (DFG::shouldDumpDisassembly()) { + dataLog("Jettisoning ", *this); + if (mode == CountReoptimization) + dataLog(" and counting reoptimization"); + dataLog(" due to ", reason); + if (detail) + dataLog(", ", *detail); + dataLog(".\n"); } - unsigned offset = getJITCode().offsetOf(returnAddress.value()); - CodeOriginAtCallReturnOffset* entry = - tryBinarySearch<CodeOriginAtCallReturnOffset, unsigned>( - codeOrigins(), codeOrigins().size(), offset, - getCallReturnOffsetForCodeOrigin); - if (!entry) - return false; - codeOrigin = entry->codeOrigin; - return true; -} + if (reason == Profiler::JettisonDueToWeakReference) { + if (DFG::shouldDumpDisassembly()) { + dataLog(*this, " will be jettisoned because of the following dead references:\n"); + DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); + for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) { + DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i]; + JSCell* origin = transition.m_codeOrigin.get(); + JSCell* from = transition.m_from.get(); + JSCell* to = transition.m_to.get(); + if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from)) + continue; + dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n"); + } + for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) { + JSCell* weak = dfgCommon->weakReferences[i].get(); + if (Heap::isMarked(weak)) + continue; + dataLog(" Weak reference ", RawPointer(weak), ".\n"); + } + } + } #endif // ENABLE(DFG_JIT) -void CodeBlock::clearEvalCache() -{ - if (!!m_alternative) - m_alternative->clearEvalCache(); - if (!m_rareData) + DeferGCForAWhile deferGC(*heap()); + + // We want to accomplish two things here: + // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it + // we should OSR exit at the top of the next bytecode instruction after the return. + // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock. + +#if ENABLE(DFG_JIT) + if (reason != Profiler::JettisonDueToOldAge) { + if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get()) + compilation->setJettisonReason(reason, detail); + + // This accomplishes (1), and does its own book-keeping about whether it has already happened. + if (!jitCode()->dfgCommon()->invalidate()) { + // We've already been invalidated. 
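// [Annotation, not part of the patch] invalidate() returning false means an earlier
// jettison already unlinked this code's entry points, so only the sanity check below
// remains to be done.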
+ RELEASE_ASSERT(this != replacement()); + return; + } + } + + if (DFG::shouldDumpDisassembly()) + dataLog(" Did invalidate ", *this, "\n"); + + // Count the reoptimization if that's what the user wanted. + if (mode == CountReoptimization) { + // FIXME: Maybe this should call alternative(). + // https://bugs.webkit.org/show_bug.cgi?id=123677 + baselineAlternative()->countReoptimization(); + if (DFG::shouldDumpDisassembly()) + dataLog(" Did count reoptimization for ", *this, "\n"); + } + + if (this != replacement()) { + // This means that we were never the entrypoint. This can happen for OSR entry code + // blocks. return; - m_rareData->m_evalCodeCache.clear(); -} + } -template<typename T, size_t inlineCapacity, typename U, typename V> -inline void replaceExistingEntries(Vector<T, inlineCapacity, U>& target, Vector<T, inlineCapacity, V>& source) -{ - ASSERT(target.size() <= source.size()); - for (size_t i = 0; i < target.size(); ++i) - target[i] = source[i]; -} + if (alternative()) + alternative()->optimizeAfterWarmUp(); -void CodeBlock::copyPostParseDataFrom(CodeBlock* alternative) -{ - if (!alternative) - return; - - replaceExistingEntries(m_constantRegisters, alternative->m_constantRegisters); - replaceExistingEntries(m_functionDecls, alternative->m_functionDecls); - replaceExistingEntries(m_functionExprs, alternative->m_functionExprs); - if (!!m_rareData && !!alternative->m_rareData) - replaceExistingEntries(m_rareData->m_constantBuffers, alternative->m_rareData->m_constantBuffers); -} + if (reason != Profiler::JettisonDueToOldAge) + tallyFrequentExitSites(); +#endif // ENABLE(DFG_JIT) -void CodeBlock::copyPostParseDataFromAlternative() -{ - copyPostParseDataFrom(m_alternative.get()); -} + // This accomplishes (2). + ownerScriptExecutable()->installCode( + m_globalObject->vm(), alternative(), codeType(), specializationKind()); -#if ENABLE(JIT) -void CodeBlock::reoptimize() -{ - ASSERT(replacement() != this); - ASSERT(replacement()->alternative() == this); - if (DFG::shouldShowDisassembly()) - dataLog(*replacement(), " will be jettisoned due to reoptimization of ", *this, ".\n"); - replacement()->jettison(); - countReoptimization(); +#if ENABLE(DFG_JIT) + if (DFG::shouldDumpDisassembly()) + dataLog(" Did install baseline version of ", *this, "\n"); +#endif // ENABLE(DFG_JIT) } -CodeBlock* ProgramCodeBlock::replacement() +JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin) { - return &static_cast<ProgramExecutable*>(ownerExecutable())->generatedBytecode(); + if (!codeOrigin.inlineCallFrame) + return globalObject(); + return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject(); } -CodeBlock* EvalCodeBlock::replacement() -{ - return &static_cast<EvalExecutable*>(ownerExecutable())->generatedBytecode(); -} +class RecursionCheckFunctor { +public: + RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck) + : m_startCallFrame(startCallFrame) + , m_codeBlock(codeBlock) + , m_depthToCheck(depthToCheck) + , m_foundStartCallFrame(false) + , m_didRecurse(false) + { } + + StackVisitor::Status operator()(StackVisitor& visitor) + { + CallFrame* currentCallFrame = visitor->callFrame(); -CodeBlock* FunctionCodeBlock::replacement() -{ - return &static_cast<FunctionExecutable*>(ownerExecutable())->generatedBytecodeFor(m_isConstructor ? 
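// [Annotation, not part of the patch] The walk starts at the newest frame and proceeds
// toward older callers; nothing counts as recursion until the walk passes m_startCallFrame,
// after which any frame still executing m_codeBlock means the call being linked would
// re-enter code that is already on the stack.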
CodeForConstruct : CodeForCall); -} + if (currentCallFrame == m_startCallFrame) + m_foundStartCallFrame = true; -JSObject* ProgramCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, unsigned bytecodeIndex) -{ - if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType())) - return 0; - JSObject* error = static_cast<ProgramExecutable*>(ownerExecutable())->compileOptimized(exec, scope, bytecodeIndex); - return error; -} + if (m_foundStartCallFrame) { + if (visitor->callFrame()->codeBlock() == m_codeBlock) { + m_didRecurse = true; + return StackVisitor::Done; + } -JSObject* EvalCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, unsigned bytecodeIndex) -{ - if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType())) - return 0; - JSObject* error = static_cast<EvalExecutable*>(ownerExecutable())->compileOptimized(exec, scope, bytecodeIndex); - return error; -} + if (!m_depthToCheck--) + return StackVisitor::Done; + } -JSObject* FunctionCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, unsigned bytecodeIndex) -{ - if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType())) - return 0; - JSObject* error = static_cast<FunctionExecutable*>(ownerExecutable())->compileOptimizedFor(exec, scope, bytecodeIndex, m_isConstructor ? CodeForConstruct : CodeForCall); - return error; -} + return StackVisitor::Continue; + } -DFG::CapabilityLevel ProgramCodeBlock::canCompileWithDFGInternal() -{ - return DFG::canCompileProgram(this); -} + bool didRecurse() const { return m_didRecurse; } -DFG::CapabilityLevel EvalCodeBlock::canCompileWithDFGInternal() -{ - return DFG::canCompileEval(this); -} +private: + CallFrame* m_startCallFrame; + CodeBlock* m_codeBlock; + unsigned m_depthToCheck; + bool m_foundStartCallFrame; + bool m_didRecurse; +}; -DFG::CapabilityLevel FunctionCodeBlock::canCompileWithDFGInternal() +void CodeBlock::noticeIncomingCall(ExecState* callerFrame) { - if (m_isConstructor) - return DFG::canCompileFunctionForConstruct(this); - return DFG::canCompileFunctionForCall(this); -} + CodeBlock* callerCodeBlock = callerFrame->codeBlock(); + + if (Options::verboseCallLink()) + dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n"); + +#if ENABLE(DFG_JIT) + if (!m_shouldAlwaysBeInlined) + return; + + if (!callerCodeBlock) { + m_shouldAlwaysBeInlined = false; + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because caller is native.\n"); + return; + } -void CodeBlock::jettison() -{ - ASSERT(JITCode::isOptimizingJIT(getJITType())); - ASSERT(this == replacement()); - alternative()->optimizeAfterWarmUp(); - tallyFrequentExitSites(); - if (DFG::shouldShowDisassembly()) - dataLog("Jettisoning ", *this, ".\n"); - jettisonImpl(); -} + if (!hasBaselineJITProfiling()) + return; -void ProgramCodeBlock::jettisonImpl() -{ - static_cast<ProgramExecutable*>(ownerExecutable())->jettisonOptimizedCode(*vm()); -} + if (!DFG::mightInlineFunction(this)) + return; -void EvalCodeBlock::jettisonImpl() -{ - static_cast<EvalExecutable*>(ownerExecutable())->jettisonOptimizedCode(*vm()); + if (!canInline(capabilityLevelState())) + return; + + if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) { + m_shouldAlwaysBeInlined = false; + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because caller is too large.\n"); + return; + } + + if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) { + // If the caller is still in the interpreter, then we can't expect inlining to + // happen anytime soon. 
Assume it's profitable to optimize it separately. This + // ensures that a function is SABI only if it is called no more frequently than + // any of its callers. + m_shouldAlwaysBeInlined = false; + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because caller is in LLInt.\n"); + return; + } + + if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) { + m_shouldAlwaysBeInlined = false; + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because caller was already optimized.\n"); + return; + } + + if (callerCodeBlock->codeType() != FunctionCode) { + // If the caller is either eval or global code, assume that that won't be + // optimized anytime soon. For eval code this is particularly true since we + // delay eval optimization by a *lot*. + m_shouldAlwaysBeInlined = false; + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because caller is not a function.\n"); + return; + } + + // Recursive calls won't be inlined. + RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth()); + vm()->topCallFrame->iterate(functor); + + if (functor.didRecurse()) { + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because recursion was detected.\n"); + m_shouldAlwaysBeInlined = false; + return; + } + + if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) { + dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n"); + CRASH(); + } + + if (canCompile(callerCodeBlock->capabilityLevelState())) + return; + + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because the caller is not a DFG candidate.\n"); + + m_shouldAlwaysBeInlined = false; +#endif } -void FunctionCodeBlock::jettisonImpl() +unsigned CodeBlock::reoptimizationRetryCounter() const { - static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(*vm(), m_isConstructor ? CodeForConstruct : CodeForCall); +#if ENABLE(JIT) + ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax()); + return m_reoptimizationRetryCounter; +#else + return 0; +#endif // ENABLE(JIT) } -bool ProgramCodeBlock::jitCompileImpl(ExecState* exec) +#if ENABLE(JIT) +void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters) { - ASSERT(getJITType() == JITCode::InterpreterThunk); - ASSERT(this == replacement()); - return static_cast<ProgramExecutable*>(ownerExecutable())->jitCompile(exec); + m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters); } -bool EvalCodeBlock::jitCompileImpl(ExecState* exec) +void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList) { - ASSERT(getJITType() == JITCode::InterpreterThunk); - ASSERT(this == replacement()); - return static_cast<EvalExecutable*>(ownerExecutable())->jitCompile(exec); + m_calleeSaveRegisters = WTFMove(registerAtOffsetList); } - -bool FunctionCodeBlock::jitCompileImpl(ExecState* exec) + +static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters) { - ASSERT(getJITType() == JITCode::InterpreterThunk); - ASSERT(this == replacement()); - return static_cast<FunctionExecutable*>(ownerExecutable())->jitCompileFor(exec, m_isConstructor ?
CodeForConstruct : CodeForCall); + static const unsigned cpuRegisterSize = sizeof(void*); + return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register)); + } -#endif -JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin) +size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() { - if (!codeOrigin.inlineCallFrame) - return globalObject(); - return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->generatedBytecode().globalObject(); + return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters()); } -unsigned CodeBlock::reoptimizationRetryCounter() const +size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters() { - ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax()); - return m_reoptimizationRetryCounter; + return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size()); } void CodeBlock::countReoptimization() @@ -2947,12 +3540,13 @@ void CodeBlock::countReoptimization() unsigned CodeBlock::numberOfDFGCompiles() { -#if ENABLE(JIT) - ASSERT(JITCode::isBaselineCode(getJITType())); - return (JITCode::isOptimizingJIT(replacement()->getJITType()) ? 1 : 0) + m_reoptimizationRetryCounter; -#else - return 0; -#endif + ASSERT(JITCode::isBaselineCode(jitType())); + if (Options::testTheFTL()) { + if (m_didFailFTLCompilation) + return 1000000; + return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter; + } + return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter; } int32_t CodeBlock::codeTypeThresholdMultiplier() const @@ -3032,10 +3626,16 @@ double CodeBlock::optimizationThresholdScalingFactor() ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense. 
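// [Annotation, not part of the patch; it assumes a, b, c and d are the fitted tuning
// constants computed earlier in this function, outside this hunk] The curve below grows
// roughly with sqrt(instructionCount) plus a small linear term, so larger code blocks
// must execute more times before crossing the tier-up threshold.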
double result = d + a * sqrt(instructionCount + b) + c * instructionCount; -#if ENABLE(JIT_VERBOSE_OSR) - dataLog(*this, ": instruction count is ", instructionCount, ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(), "\n"); -#endif - return result * codeTypeThresholdMultiplier(); + + result *= codeTypeThresholdMultiplier(); + + if (Options::verboseOSR()) { + dataLog( + *this, ": instruction count is ", instructionCount, + ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(), + "\n"); + } + return result; } static int32_t clipThreshold(double threshold) @@ -3049,64 +3649,131 @@ static int32_t clipThreshold(double threshold) return static_cast<int32_t>(threshold); } -int32_t CodeBlock::counterValueForOptimizeAfterWarmUp() -{ - return clipThreshold( - Options::thresholdForOptimizeAfterWarmUp() * - optimizationThresholdScalingFactor() * - (1 << reoptimizationRetryCounter())); -} - -int32_t CodeBlock::counterValueForOptimizeAfterLongWarmUp() -{ - return clipThreshold( - Options::thresholdForOptimizeAfterLongWarmUp() * - optimizationThresholdScalingFactor() * - (1 << reoptimizationRetryCounter())); -} - -int32_t CodeBlock::counterValueForOptimizeSoon() +int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold) { return clipThreshold( - Options::thresholdForOptimizeSoon() * + static_cast<double>(desiredThreshold) * optimizationThresholdScalingFactor() * (1 << reoptimizationRetryCounter())); } bool CodeBlock::checkIfOptimizationThresholdReached() { +#if ENABLE(DFG_JIT) + if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) { + if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode)) + == DFG::Worklist::Compiled) { + optimizeNextInvocation(); + return true; + } + } +#endif + return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this); } void CodeBlock::optimizeNextInvocation() { + if (Options::verboseOSR()) + dataLog(*this, ": Optimizing next invocation.\n"); m_jitExecuteCounter.setNewThreshold(0, this); } void CodeBlock::dontOptimizeAnytimeSoon() { + if (Options::verboseOSR()) + dataLog(*this, ": Not optimizing anytime soon.\n"); m_jitExecuteCounter.deferIndefinitely(); } void CodeBlock::optimizeAfterWarmUp() { - m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterWarmUp(), this); + if (Options::verboseOSR()) + dataLog(*this, ": Optimizing after warm-up.\n"); +#if ENABLE(DFG_JIT) + m_jitExecuteCounter.setNewThreshold( + adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this); +#endif } void CodeBlock::optimizeAfterLongWarmUp() { - m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterLongWarmUp(), this); + if (Options::verboseOSR()) + dataLog(*this, ": Optimizing after long warm-up.\n"); +#if ENABLE(DFG_JIT) + m_jitExecuteCounter.setNewThreshold( + adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this); +#endif } void CodeBlock::optimizeSoon() { - m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeSoon(), this); + if (Options::verboseOSR()) + dataLog(*this, ": Optimizing soon.\n"); +#if ENABLE(DFG_JIT) + m_jitExecuteCounter.setNewThreshold( + adjustedCounterValue(Options::thresholdForOptimizeSoon()), this); +#endif } -#if ENABLE(JIT) +void CodeBlock::forceOptimizationSlowPathConcurrently() +{ + if (Options::verboseOSR()) + dataLog(*this, ": Forcing slow path concurrently.\n"); + m_jitExecuteCounter.forceSlowPathConcurrently(); +} + +#if ENABLE(DFG_JIT) +void 
CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result) +{ + JITCode::JITType type = jitType(); + if (type != JITCode::BaselineJIT) { + dataLog(*this, ": expected to have baseline code but have ", type, "\n"); + RELEASE_ASSERT_NOT_REACHED(); + } + + CodeBlock* theReplacement = replacement(); + if ((result == CompilationSuccessful) != (theReplacement != this)) { + dataLog(*this, ": we have result = ", result, " but "); + if (theReplacement == this) + dataLog("we are our own replacement.\n"); + else + dataLog("our replacement is ", pointerDump(theReplacement), "\n"); + RELEASE_ASSERT_NOT_REACHED(); + } + + switch (result) { + case CompilationSuccessful: + RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType())); + optimizeNextInvocation(); + return; + case CompilationFailed: + dontOptimizeAnytimeSoon(); + return; + case CompilationDeferred: + // We'd like to do dontOptimizeAnytimeSoon() but we cannot because + // forceOptimizationSlowPathConcurrently() is inherently racy. It won't + // necessarily guarantee anything. So, we make sure that even if that + // function ends up being a no-op, we still eventually retry and realize + // that we have optimized code ready. + optimizeAfterWarmUp(); + return; + case CompilationInvalidated: + // Retry with exponential backoff. + countReoptimization(); + optimizeAfterWarmUp(); + return; + } + + dataLog("Unrecognized result: ", static_cast<int>(result), "\n"); + RELEASE_ASSERT_NOT_REACHED(); +} + +#endif + uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold) { - ASSERT(getJITType() == JITCode::DFGJIT); + ASSERT(JITCode::isOptimizingJIT(jitType())); // Compute this the lame way so we don't saturate. This is called infrequently // enough that this loop won't hurt us. unsigned result = desiredThreshold; @@ -3140,7 +3807,6 @@ bool CodeBlock::shouldReoptimizeFromLoopNow() } #endif -#if ENABLE(VALUE_PROFILER) ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset) { for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) { @@ -3158,9 +3824,34 @@ ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset) return addArrayProfile(bytecodeOffset); } -void CodeBlock::updateAllPredictionsAndCountLiveness( - OperationInProgress operation, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles) +#if ENABLE(DFG_JIT) +Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins() +{ + return m_jitCode->dfgCommon()->codeOrigins; +} + +size_t CodeBlock::numberOfDFGIdentifiers() const +{ + if (!JITCode::isOptimizingJIT(jitType())) + return 0; + + return m_jitCode->dfgCommon()->dfgIdentifiers.size(); +} + +const Identifier& CodeBlock::identifier(int index) const { + size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers(); + if (static_cast<unsigned>(index) < unlinkedIdentifiers) + return m_unlinkedCode->identifier(index); + ASSERT(JITCode::isOptimizingJIT(jitType())); + return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers]; +} +#endif // ENABLE(DFG_JIT) + +void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles) +{ + ConcurrentJITLocker locker(m_lock); + numberOfLiveNonArgumentValueProfiles = 0; numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full. 
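// [Annotation with made-up numbers, not part of the patch] For example, with 10 value
// profiles of ValueProfile::numberOfBuckets buckets each, a final count of
// 10 * numberOfBuckets would mean every bucket holds a sample, i.e. profiling is saturated.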
for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) { @@ -3170,50 +3861,51 @@ void CodeBlock::updateAllPredictionsAndCountLiveness( numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight. numberOfSamplesInProfiles += numSamples; if (profile->m_bytecodeOffset < 0) { - profile->computeUpdatedPrediction(operation); + profile->computeUpdatedPrediction(locker); continue; } if (profile->numberOfSamples() || profile->m_prediction != SpecNone) numberOfLiveNonArgumentValueProfiles++; - profile->computeUpdatedPrediction(operation); + profile->computeUpdatedPrediction(locker); } #if ENABLE(DFG_JIT) - m_lazyOperandValueProfiles.computeUpdatedPredictions(operation); + m_lazyOperandValueProfiles.computeUpdatedPredictions(locker); #endif } -void CodeBlock::updateAllValueProfilePredictions(OperationInProgress operation) +void CodeBlock::updateAllValueProfilePredictions() { unsigned ignoredValue1, ignoredValue2; - updateAllPredictionsAndCountLiveness(operation, ignoredValue1, ignoredValue2); + updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2); } -void CodeBlock::updateAllArrayPredictions(OperationInProgress operation) +void CodeBlock::updateAllArrayPredictions() { + ConcurrentJITLocker locker(m_lock); + for (unsigned i = m_arrayProfiles.size(); i--;) - m_arrayProfiles[i].computeUpdatedPrediction(this, operation); + m_arrayProfiles[i].computeUpdatedPrediction(locker, this); // Don't count these either, for similar reasons. for (unsigned i = m_arrayAllocationProfiles.size(); i--;) m_arrayAllocationProfiles[i].updateIndexingType(); } -void CodeBlock::updateAllPredictions(OperationInProgress operation) +void CodeBlock::updateAllPredictions() { - updateAllValueProfilePredictions(operation); - updateAllArrayPredictions(operation); +#if ENABLE(WEBASSEMBLY) + if (m_ownerExecutable->isWebAssemblyExecutable()) + return; +#endif + updateAllValueProfilePredictions(); + updateAllArrayPredictions(); } bool CodeBlock::shouldOptimizeNow() { -#if ENABLE(JIT_VERBOSE_OSR) - dataLog("Considering optimizing ", *this, "...\n"); -#endif - -#if ENABLE(VERBOSE_VALUE_PROFILE) - dumpValueProfiles(); -#endif + if (Options::verboseOSR()) + dataLog("Considering optimizing ", *this, "...\n"); if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay()) return true; @@ -3222,11 +3914,16 @@ bool CodeBlock::shouldOptimizeNow() unsigned numberOfLiveNonArgumentValueProfiles; unsigned numberOfSamplesInProfiles; - updateAllPredictionsAndCountLiveness(NoOperation, numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles); - -#if ENABLE(JIT_VERBOSE_OSR) - dataLogF("Profile hotness: %lf (%u / %u), %lf (%u / %u)\n", (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(), numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(), (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(), numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles()); -#endif + updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles); + + if (Options::verboseOSR()) { + dataLogF( + "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n", + (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(), + numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(), + (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(), + numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles()); 
+ } if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate()) && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate()) @@ -3238,26 +3935,42 @@ bool CodeBlock::shouldOptimizeNow() optimizeAfterWarmUp(); return false; } -#endif #if ENABLE(DFG_JIT) void CodeBlock::tallyFrequentExitSites() { - ASSERT(getJITType() == JITCode::DFGJIT); - ASSERT(alternative()->getJITType() == JITCode::BaselineJIT); - ASSERT(!!m_dfgData); + ASSERT(JITCode::isOptimizingJIT(jitType())); + ASSERT(alternative()->jitType() == JITCode::BaselineJIT); CodeBlock* profiledBlock = alternative(); - for (unsigned i = 0; i < m_dfgData->osrExit.size(); ++i) { - DFG::OSRExit& exit = m_dfgData->osrExit[i]; - - if (!exit.considerAddingAsFrequentExitSite(profiledBlock)) - continue; - -#if DFG_ENABLE(DEBUG_VERBOSE) - dataLog("OSR exit #", i, " (bc#", exit.m_codeOrigin.bytecodeIndex, ", ", exit.m_kind, ") for ", *this, " occurred frequently: counting as frequent exit site.\n"); + switch (jitType()) { + case JITCode::DFGJIT: { + DFG::JITCode* jitCode = m_jitCode->dfg(); + for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) { + DFG::OSRExit& exit = jitCode->osrExit[i]; + exit.considerAddingAsFrequentExitSite(profiledBlock); + } + break; + } + +#if ENABLE(FTL_JIT) + case JITCode::FTLJIT: { + // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit + // vector contains a totally different type, that just so happens to behave like + // DFG::JITCode::osrExit. + FTL::JITCode* jitCode = m_jitCode->ftl(); + for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) { + FTL::OSRExit& exit = jitCode->osrExit[i]; + exit.considerAddingAsFrequentExitSite(profiledBlock); + } + break; + } #endif + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; } } #endif // ENABLE(DFG_JIT) @@ -3285,14 +3998,42 @@ void CodeBlock::dumpValueProfiles() RareCaseProfile* profile = rareCaseProfile(i); dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter); } - dataLog("SpecialFastCaseProfile for ", *this, ":\n"); - for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) { - RareCaseProfile* profile = specialFastCaseProfile(i); - dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter); + dataLog("ResultProfile for ", *this, ":\n"); + for (unsigned i = 0; i < numberOfResultProfiles(); ++i) { + const ResultProfile& profile = *resultProfile(i); + dataLog(" bc = ", profile.bytecodeOffset(), ": ", profile, "\n"); } } #endif // ENABLE(VERBOSE_VALUE_PROFILE) +unsigned CodeBlock::frameRegisterCount() +{ + switch (jitType()) { + case JITCode::InterpreterThunk: + return LLInt::frameRegisterCountFor(this); + +#if ENABLE(JIT) + case JITCode::BaselineJIT: + return JIT::frameRegisterCountFor(this); +#endif // ENABLE(JIT) + +#if ENABLE(DFG_JIT) + case JITCode::DFGJIT: + case JITCode::FTLJIT: + return jitCode()->dfgCommon()->frameRegisterCount; +#endif // ENABLE(DFG_JIT) + + default: + RELEASE_ASSERT_NOT_REACHED(); + return 0; + } +} + +int CodeBlock::stackPointerOffset() +{ + return virtualRegisterForLocal(frameRegisterCount() - 1).offset(); +} + size_t CodeBlock::predictedMachineCodeSize() { // This will be called from CodeBlock::CodeBlock before either m_vm or the @@ -3350,29 +4091,226 @@ bool CodeBlock::usesOpcode(OpcodeID opcodeID) return false; } -String CodeBlock::nameForRegister(int registerNumber) 
+String CodeBlock::nameForRegister(VirtualRegister virtualRegister) { - SymbolTable::iterator end = symbolTable()->end(); - for (SymbolTable::iterator ptr = symbolTable()->begin(); ptr != end; ++ptr) { - if (ptr->value.getIndex() == registerNumber) - return String(ptr->key); + for (unsigned i = 0; i < m_constantRegisters.size(); i++) { + if (m_constantRegisters[i].get().isEmpty()) + continue; + if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(m_constantRegisters[i].get())) { + ConcurrentJITLocker locker(symbolTable->m_lock); + auto end = symbolTable->end(locker); + for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) { + if (ptr->value.varOffset() == VarOffset(virtualRegister)) { + // FIXME: This won't work from the compilation thread. + // https://bugs.webkit.org/show_bug.cgi?id=115300 + return ptr->key.get(); + } + } + } } - if (needsActivation() && registerNumber == activationRegister()) - return ASCIILiteral("activation"); - if (registerNumber == thisRegister()) + if (virtualRegister == thisRegister()) return ASCIILiteral("this"); - if (usesArguments()) { - if (registerNumber == argumentsRegister()) - return ASCIILiteral("arguments"); - if (unmodifiedArgumentsRegister(argumentsRegister()) == registerNumber) - return ASCIILiteral("real arguments"); + if (virtualRegister.isArgument()) + return String::format("arguments[%3d]", virtualRegister.toArgument()); + + return ""; +} + +ValueProfile* CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset) +{ + ValueProfile* result = binarySearch<ValueProfile, int>( + m_valueProfiles, m_valueProfiles.size(), bytecodeOffset, + getValueProfileBytecodeOffset<ValueProfile>); + ASSERT(result->m_bytecodeOffset != -1); + ASSERT(instructions()[bytecodeOffset + opcodeLength( + m_vm->interpreter->getOpcodeID( + instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result); + return result; +} + +void CodeBlock::validate() +{ + BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
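// [Annotation, not part of the patch] validate() then checks that no callee local is
// live at bytecode offset 0: a local that is live at entry would mean the bytecode
// reads a register that was never written, which the checks below report as a failure.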
+ + FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0); + + if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) { + beginValidationDidFail(); + dataLog(" Wrong number of bits in result!\n"); + dataLog(" Result: ", liveAtHead, "\n"); + dataLog(" Bit count: ", liveAtHead.numBits(), "\n"); + endValidationDidFail(); } - if (registerNumber < 0) { - int argumentPosition = -registerNumber; - argumentPosition -= JSStack::CallFrameHeaderSize + 1; - return String::format("arguments[%3d]", argumentPosition - 1).impl(); + + for (unsigned i = m_numCalleeLocals; i--;) { + VirtualRegister reg = virtualRegisterForLocal(i); + + if (liveAtHead.get(i)) { + beginValidationDidFail(); + dataLog(" Variable ", reg, " is expected to be dead.\n"); + dataLog(" Result: ", liveAtHead, "\n"); + endValidationDidFail(); + } } - return ""; } +void CodeBlock::beginValidationDidFail() +{ + dataLog("Validation failure in ", *this, ":\n"); + dataLog("\n"); +} + +void CodeBlock::endValidationDidFail() +{ + dataLog("\n"); + dumpBytecode(); + dataLog("\n"); + dataLog("Validation failure.\n"); + RELEASE_ASSERT_NOT_REACHED(); +} + +void CodeBlock::addBreakpoint(unsigned numBreakpoints) +{ + m_numBreakpoints += numBreakpoints; + ASSERT(m_numBreakpoints); + if (JITCode::isOptimizingJIT(jitType())) + jettison(Profiler::JettisonDueToDebuggerBreakpoint); +} + +void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode) +{ + m_steppingMode = mode; + if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType())) + jettison(Profiler::JettisonDueToDebuggerStepping); +} + +RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset) +{ + return tryBinarySearch<RareCaseProfile, int>( + m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset, + getRareCaseProfileBytecodeOffset); +} + +unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset) +{ + RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset); + if (profile) + return profile->m_counter; + return 0; +} + +ResultProfile* CodeBlock::resultProfileForBytecodeOffset(int bytecodeOffset) +{ + if (!m_bytecodeOffsetToResultProfileIndexMap) + return nullptr; + auto iterator = m_bytecodeOffsetToResultProfileIndexMap->find(bytecodeOffset); + if (iterator == m_bytecodeOffsetToResultProfileIndexMap->end()) + return nullptr; + return &m_resultProfiles[iterator->value]; +} + +#if ENABLE(JIT) +DFG::CapabilityLevel CodeBlock::capabilityLevel() +{ + DFG::CapabilityLevel result = computeCapabilityLevel(); + m_capabilityLevelState = result; + return result; +} +#endif + +void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions) +{ + if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets()) + return; + const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets(); + for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) { + // Because op_profile_control_flow is emitted at the beginning of every basic block, finding + // the next op_profile_control_flow will give us the text range of a single basic block. 
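// [Annotation with hypothetical offsets, not part of the patch] If successive profile
// sites record text offsets 12, 57 and 90, the blocks' text ranges become [12, 56],
// [57, 89] and [90, end of source]: each block ends one character before the next begins.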
+ size_t startIdx = bytecodeOffsets[i]; + RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[startIdx].u.opcode) == op_profile_control_flow); + int basicBlockStartOffset = instructions[startIdx + 1].u.operand; + int basicBlockEndOffset; + if (i + 1 < offsetsLength) { + size_t endIdx = bytecodeOffsets[i + 1]; + RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[endIdx].u.opcode) == op_profile_control_flow); + basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1; + } else { + basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace. + basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before. + } + + // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more + // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than + // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node + // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different + // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript + // program. The condition: + // (basicBlockEndOffset < basicBlockStartOffset) + // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic + // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These + // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same + // internal data structure, so if any of them execute, it will record the same textual basic block in the + // JavaScript program as executing. + // At the bytecode level, this situation looks like: + // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset) + // ... + // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m). + // ... + // m: op_profile_control_flow + if (basicBlockEndOffset < basicBlockStartOffset) { + RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock. + instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock(); + continue; + } + + BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset); + + // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset] + // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation. + // This is necessary because in the original source text of a JavaScript program, + // function literals form new basic blocks boundaries, but they aren't represented + // inside the CodeBlock's instruction stream. 
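The insertFunctionGaps lambda that follows boils down to a containment test: a nested function literal punches a gap in a block's text range only when it lies entirely inside that range. A reduced sketch of just that test, with a hypothetical Range type:

struct Range { int start; int end; };

// A function literal is excluded from a basic block's text range only when
// it is fully contained in that range.
bool shouldInsertGap(const Range& basicBlock, const Range& function)
{
    return function.start >= basicBlock.start && function.end <= basicBlock.end;
}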
+ auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) { + const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable(); + int functionStart = executable->typeProfilingStartOffset(); + int functionEnd = executable->typeProfilingEndOffset(); + if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset) + basicBlockLocation->insertGap(functionStart, functionEnd); + }; + + for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls) + insertFunctionGaps(executable); + for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs) + insertFunctionGaps(executable); + + instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation; + } +} + +#if ENABLE(JIT) +void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) +{ + m_pcToCodeOriginMap = WTFMove(map); +} + +Optional<CodeOrigin> CodeBlock::findPC(void* pc) +{ + if (m_pcToCodeOriginMap) { + if (Optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc)) + return codeOrigin; + } + + for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) { + StructureStubInfo* stub = *iter; + if (stub->containsPC(pc)) + return Optional<CodeOrigin>(stub->codeOrigin); + } + + if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc)) + return codeOrigin; + + return Nullopt; +} +#endif // ENABLE(JIT) + } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h index 0b3d18e17..96cee40c7 100644 --- a/Source/JavaScriptCore/bytecode/CodeBlock.h +++ b/Source/JavaScriptCore/bytecode/CodeBlock.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2008-2015 Apple Inc. All rights reserved. * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> * * Redistribution and use in source and binary forms, with or without @@ -11,7 +11,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
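findPC() above consults progressively more expensive sources (the PC-to-CodeOrigin map, then a linear scan over the stub infos, then the JIT code itself) and returns as soon as one of them knows the answer. A standalone sketch of that fallback chain using std::optional; all names here are hypothetical stand-ins:

#include <optional>

struct Origin { unsigned callSiteBits; };

// Stand-ins for the three sources, cheapest first; the last one is the
// fallback of last resort.
std::optional<Origin> lookupInMap(void*) { return std::nullopt; }
std::optional<Origin> lookupInStubs(void*) { return std::nullopt; }
std::optional<Origin> lookupInJITCode(void*) { return Origin { 0 }; }

std::optional<Origin> findOrigin(void* pc)
{
    if (auto origin = lookupInMap(pc))
        return origin;
    if (auto origin = lookupInStubs(pc))
        return origin;
    return lookupInJITCode(pc);
}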
* @@ -33,45 +33,45 @@ #include "ArrayProfile.h" #include "ByValInfo.h" #include "BytecodeConventions.h" +#include "BytecodeLivenessAnalysis.h" #include "CallLinkInfo.h" #include "CallReturnOffsetToBytecodeOffset.h" #include "CodeBlockHash.h" +#include "CodeBlockSet.h" #include "CodeOrigin.h" #include "CodeType.h" #include "CompactJITCodeMap.h" -#include "DFGCodeBlocks.h" +#include "ConcurrentJITLock.h" #include "DFGCommon.h" #include "DFGExitProfile.h" -#include "DFGMinifiedGraph.h" -#include "DFGOSREntry.h" -#include "DFGOSRExit.h" -#include "DFGVariableEventStream.h" +#include "DeferredCompilationCallback.h" #include "EvalCodeCache.h" #include "ExecutionCounter.h" #include "ExpressionRangeInfo.h" #include "HandlerInfo.h" -#include "ObjectAllocationProfile.h" -#include "Options.h" #include "Instruction.h" #include "JITCode.h" #include "JITWriteBarrier.h" +#include "JSCell.h" #include "JSGlobalObject.h" -#include "JumpReplacementWatchpoint.h" #include "JumpTable.h" #include "LLIntCallLinkInfo.h" #include "LazyOperandValueProfile.h" -#include "LineInfo.h" +#include "ObjectAllocationProfile.h" +#include "Options.h" #include "ProfilerCompilation.h" +#include "ProfilerJettisonReason.h" +#include "PutPropertySlot.h" #include "RegExpObject.h" -#include "ResolveOperation.h" #include "StructureStubInfo.h" #include "UnconditionalFinalizer.h" #include "ValueProfile.h" +#include "VirtualRegister.h" #include "Watchpoint.h" +#include <wtf/Bag.h> +#include <wtf/FastBitVector.h> +#include <wtf/FastMalloc.h> #include <wtf/RefCountedArray.h> -#include <wtf/FastAllocBase.h> -#include <wtf/PassOwnPtr.h> -#include <wtf/Platform.h> #include <wtf/RefPtr.h> #include <wtf/SegmentedVector.h> #include <wtf/Vector.h> @@ -79,89 +79,130 @@ namespace JSC { -class DFGCodeBlocks; class ExecState; class LLIntOffsetsExtractor; -class RepatchBuffer; - -inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; } +class RegisterAtOffsetList; +class TypeLocation; +class JSModuleEnvironment; +class PCToCodeOriginMap; -static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); } +enum ReoptimizationMode { DontCountReoptimization, CountReoptimization }; -class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester { - WTF_MAKE_FAST_ALLOCATED; +class CodeBlock : public JSCell { + typedef JSCell Base; + friend class BytecodeLivenessAnalysis; friend class JIT; friend class LLIntOffsetsExtractor; + + class UnconditionalFinalizer : public JSC::UnconditionalFinalizer { + virtual void finalizeUnconditionally() override; + }; + + class WeakReferenceHarvester : public JSC::WeakReferenceHarvester { + virtual void visitWeakReferences(SlotVisitor&) override; + }; + public: enum CopyParsedBlockTag { CopyParsedBlock }; + + static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal; + + DECLARE_INFO; + protected: - CodeBlock(CopyParsedBlockTag, CodeBlock& other); - - CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSGlobalObject*, unsigned baseScopeDepth, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative); + CodeBlock(VM*, Structure*, CopyParsedBlockTag, CodeBlock& other); + CodeBlock(VM*, Structure*, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset); +#if ENABLE(WEBASSEMBLY) + CodeBlock(VM*, Structure*, WebAssemblyExecutable* ownerExecutable, 
JSGlobalObject*); +#endif + + void finishCreation(VM&, CopyParsedBlockTag, CodeBlock& other); + void finishCreation(VM&, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*); +#if ENABLE(WEBASSEMBLY) + void finishCreation(VM&, WebAssemblyExecutable* ownerExecutable, JSGlobalObject*); +#endif WriteBarrier<JSGlobalObject> m_globalObject; - Heap* m_heap; public: - JS_EXPORT_PRIVATE virtual ~CodeBlock(); - + JS_EXPORT_PRIVATE ~CodeBlock(); + UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); } - - String inferredName() const; + + CString inferredName() const; CodeBlockHash hash() const; - String sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature. - String sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space. + bool hasHash() const; + bool isSafeToComputeHash() const; + CString hashAsStringIfPossible() const; + CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature. + CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space. void dumpAssumingJITType(PrintStream&, JITCode::JITType) const; void dump(PrintStream&) const; - + int numParameters() const { return m_numParameters; } void setNumParameters(int newValue); - + + int numCalleeLocals() const { return m_numCalleeLocals; } + int* addressOfNumParameters() { return &m_numParameters; } static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); } - CodeBlock* alternative() { return m_alternative.get(); } - PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); } - void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; } - - CodeSpecializationKind specializationKind() const + CodeBlock* alternative() const { return static_cast<CodeBlock*>(m_alternative.get()); } + void setAlternative(VM&, CodeBlock*); + + template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor) { - return specializationFromIsConstruct(m_isConstructor); - } - -#if ENABLE(JIT) - CodeBlock* baselineVersion() - { - CodeBlock* result = replacement(); - if (!result) - return 0; // This can happen if we're in the process of creating the baseline version. - while (result->alternative()) - result = result->alternative(); - ASSERT(result); - ASSERT(JITCode::isBaselineCode(result->getJITType())); - return result; + Functor f(std::forward<Functor>(functor)); + Vector<CodeBlock*, 4> codeBlocks; + codeBlocks.append(this); + + while (!codeBlocks.isEmpty()) { + CodeBlock* currentCodeBlock = codeBlocks.takeLast(); + f(currentCodeBlock); + + if (CodeBlock* alternative = currentCodeBlock->alternative()) + codeBlocks.append(alternative); + if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull()) + codeBlocks.append(osrEntryBlock); + } } -#else - CodeBlock* baselineVersion() + + CodeSpecializationKind specializationKind() const { - return this; + return specializationFromIsConstruct(m_isConstructor); } -#endif - - void visitAggregate(SlotVisitor&); - - static void dumpStatistics(); - void dumpBytecode(PrintStream& = WTF::dataFile()); - void dumpBytecode(PrintStream&, unsigned bytecodeOffset); + CodeBlock* alternativeForJettison(); + JS_EXPORT_PRIVATE CodeBlock* baselineAlternative(); + + // FIXME: Get rid of this. 
+ // https://bugs.webkit.org/show_bug.cgi?id=123677 + CodeBlock* baselineVersion(); + + static size_t estimatedSize(JSCell*); + static void visitChildren(JSCell*, SlotVisitor&); + void visitChildren(SlotVisitor&); + void visitWeakly(SlotVisitor&); + void clearVisitWeaklyHasBeenCalled(); + + void dumpSource(); + void dumpSource(PrintStream&); + + void dumpBytecode(); + void dumpBytecode(PrintStream&); + void dumpBytecode( + PrintStream&, unsigned bytecodeOffset, + const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap()); + void dumpExceptionHandlers(PrintStream&); void printStructures(PrintStream&, const Instruction*); void printStructure(PrintStream&, const char* name, const Instruction*, int operand); bool isStrictMode() const { return m_isStrictMode; } + ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; } inline bool isKnownNotImmediate(int index) { - if (index == m_thisRegister && !m_isStrictMode) + if (index == m_thisRegister.offset() && !m_isStrictMode) return true; if (isConstantRegisterIndex(index)) @@ -175,235 +216,65 @@ public: return index >= m_numVars; } - HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset); + enum class RequiredHandler { + CatchHandler, + AnyHandler + }; + HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler); + HandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler); + void removeExceptionHandlerForCallSite(CallSiteIndex); unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset); unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset); void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, - int& startOffset, int& endOffset, unsigned& line, unsigned& column); + int& startOffset, int& endOffset, unsigned& line, unsigned& column); + void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result); + void getStubInfoMap(StubInfoMap& result); + + void getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result); + void getCallLinkInfoMap(CallLinkInfoMap& result); + + void getByValInfoMap(const ConcurrentJITLocker&, ByValInfoMap& result); + void getByValInfoMap(ByValInfoMap& result); + #if ENABLE(JIT) + StructureStubInfo* addStubInfo(AccessType); + Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); } + Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); } + + // O(n) operation. Use getStubInfoMap() unless you really only intend to get one + // stub info. 
+ StructureStubInfo* findStubInfo(CodeOrigin); - StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress) - { - return *(binarySearch<StructureStubInfo, void*>(m_structureStubInfos, m_structureStubInfos.size(), returnAddress.value(), getStructureStubInfoReturnLocation)); - } + ByValInfo* addByValInfo(); - StructureStubInfo& getStubInfo(unsigned bytecodeIndex) - { - return *(binarySearch<StructureStubInfo, unsigned>(m_structureStubInfos, m_structureStubInfos.size(), bytecodeIndex, getStructureStubInfoBytecodeIndex)); - } - - void resetStub(StructureStubInfo&); - - ByValInfo& getByValInfo(unsigned bytecodeIndex) - { - return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex)); - } + CallLinkInfo* addCallLinkInfo(); + Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); } + Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); } - CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress) - { - return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation)); - } - - CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex) - { - ASSERT(JITCode::isBaselineCode(getJITType())); - return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex)); - } + // This is a slow function call used primarily for compiling OSR exits in the case + // that there had been inlining. Chances are if you want to use this, you're really + // looking for a CallLinkInfoMap to amortize the cost of calling this. + CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex); #endif // ENABLE(JIT) -#if ENABLE(LLINT) - Instruction* adjustPCIfAtCallSite(Instruction*); -#endif - unsigned bytecodeOffset(ExecState*, ReturnAddressPtr); + void unlinkIncomingCalls(); #if ENABLE(JIT) - unsigned bytecodeOffsetForCallAtIndex(unsigned index) - { - if (!m_rareData) - return 1; - Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callIndices = m_rareData->m_callReturnIndexVector; - if (!callIndices.size()) - return 1; - // FIXME: Fix places in DFG that call out to C that don't set the CodeOrigin. 
https://bugs.webkit.org/show_bug.cgi?id=118315 - ASSERT(index < m_rareData->m_callReturnIndexVector.size()); - if (index >= m_rareData->m_callReturnIndexVector.size()) - return 1; - return m_rareData->m_callReturnIndexVector[index].bytecodeOffset; - } - - void unlinkCalls(); - - bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); } - - void linkIncomingCall(CallLinkInfo* incoming) - { - m_incomingCalls.push(incoming); - } - - bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming) - { - return m_incomingCalls.isOnList(incoming); - } + void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*); + void linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode*); #endif // ENABLE(JIT) -#if ENABLE(LLINT) - void linkIncomingCall(LLIntCallLinkInfo* incoming) - { - m_incomingLLIntCalls.push(incoming); - } -#endif // ENABLE(LLINT) - - void unlinkIncomingCalls(); + void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*); -#if ENABLE(DFG_JIT) || ENABLE(LLINT) - void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap) + void setJITCodeMap(std::unique_ptr<CompactJITCodeMap> jitCodeMap) { - m_jitCodeMap = jitCodeMap; + m_jitCodeMap = WTFMove(jitCodeMap); } CompactJITCodeMap* jitCodeMap() { return m_jitCodeMap.get(); } -#endif - -#if ENABLE(DFG_JIT) - void createDFGDataIfNecessary() - { - if (!!m_dfgData) - return; - - m_dfgData = adoptPtr(new DFGData); - } - - void saveCompilation(PassRefPtr<Profiler::Compilation> compilation) - { - createDFGDataIfNecessary(); - m_dfgData->compilation = compilation; - } - - Profiler::Compilation* compilation() - { - if (!m_dfgData) - return 0; - return m_dfgData->compilation.get(); - } - - DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset) - { - createDFGDataIfNecessary(); - DFG::OSREntryData entry; - entry.m_bytecodeIndex = bytecodeIndex; - entry.m_machineCodeOffset = machineCodeOffset; - m_dfgData->osrEntry.append(entry); - return &m_dfgData->osrEntry.last(); - } - unsigned numberOfDFGOSREntries() const - { - if (!m_dfgData) - return 0; - return m_dfgData->osrEntry.size(); - } - DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; } - DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex) - { - if (!m_dfgData) - return 0; - return tryBinarySearch<DFG::OSREntryData, unsigned>( - m_dfgData->osrEntry, m_dfgData->osrEntry.size(), bytecodeIndex, - DFG::getOSREntryDataBytecodeIndex); - } - - unsigned appendOSRExit(const DFG::OSRExit& osrExit) - { - createDFGDataIfNecessary(); - unsigned result = m_dfgData->osrExit.size(); - m_dfgData->osrExit.append(osrExit); - return result; - } - - DFG::OSRExit& lastOSRExit() - { - return m_dfgData->osrExit.last(); - } - - unsigned appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery) - { - createDFGDataIfNecessary(); - unsigned result = m_dfgData->speculationRecovery.size(); - m_dfgData->speculationRecovery.append(recovery); - return result; - } - - unsigned appendWatchpoint(const JumpReplacementWatchpoint& watchpoint) - { - createDFGDataIfNecessary(); - unsigned result = m_dfgData->watchpoints.size(); - m_dfgData->watchpoints.append(watchpoint); - return result; - } - - unsigned numberOfOSRExits() - { - if (!m_dfgData) - return 0; - return m_dfgData->osrExit.size(); - } - - unsigned numberOfSpeculationRecoveries() - { - if (!m_dfgData) - return 0; - return m_dfgData->speculationRecovery.size(); - } - - unsigned numberOfWatchpoints() - { - if (!m_dfgData) - return 0; - 
return m_dfgData->watchpoints.size(); - } - - DFG::OSRExit& osrExit(unsigned index) - { - return m_dfgData->osrExit[index]; - } - - DFG::SpeculationRecovery& speculationRecovery(unsigned index) - { - return m_dfgData->speculationRecovery[index]; - } - - JumpReplacementWatchpoint& watchpoint(unsigned index) - { - return m_dfgData->watchpoints[index]; - } - - void appendWeakReference(JSCell* target) - { - createDFGDataIfNecessary(); - m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*vm(), ownerExecutable(), target)); - } - - void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to) - { - createDFGDataIfNecessary(); - m_dfgData->transitions.append( - WeakReferenceTransition(*vm(), ownerExecutable(), codeOrigin, from, to)); - } - - DFG::MinifiedGraph& minifiedDFG() - { - createDFGDataIfNecessary(); - return m_dfgData->minifiedDFG; - } - - DFG::VariableEventStream& variableEventStream() - { - createDFGDataIfNecessary(); - return m_dfgData->variableEventStream; - } -#endif unsigned bytecodeOffset(Instruction* returnAddress) { @@ -411,164 +282,90 @@ public: return static_cast<Instruction*>(returnAddress) - instructions().begin(); } - bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); } - unsigned numberOfInstructions() const { return m_instructions.size(); } RefCountedArray<Instruction>& instructions() { return m_instructions; } const RefCountedArray<Instruction>& instructions() const { return m_instructions; } - + size_t predictedMachineCodeSize(); - - bool usesOpcode(OpcodeID); - unsigned instructionCount() { return m_instructions.size(); } + bool usesOpcode(OpcodeID); - int argumentIndexAfterCapture(size_t argument); + unsigned instructionCount() const { return m_instructions.size(); } -#if ENABLE(JIT) - void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck) + // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind()) + CodeBlock* newReplacement(); + + void setJITCode(PassRefPtr<JITCode> code) { + ASSERT(heap()->isDeferred()); + heap()->reportExtraMemoryAllocated(code->size()); + ConcurrentJITLocker locker(m_lock); + WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid. 
m_jitCode = code; - m_jitCodeWithArityCheck = codeWithArityCheck; -#if ENABLE(DFG_JIT) - if (m_jitCode.jitType() == JITCode::DFGJIT) { - createDFGDataIfNecessary(); - m_vm->heap.m_dfgCodeBlocks.m_set.add(this); - } -#endif - } - JITCode& getJITCode() { return m_jitCode; } - MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; } - JITCode::JITType getJITType() const { return m_jitCode.jitType(); } - ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); } - virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex) = 0; - void jettison(); - enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully }; - JITCompilationResult jitCompile(ExecState* exec) - { - if (getJITType() != JITCode::InterpreterThunk) { - ASSERT(getJITType() == JITCode::BaselineJIT); - return AlreadyCompiled; - } -#if ENABLE(JIT) - if (jitCompileImpl(exec)) - return CompiledSuccessfully; - return CouldNotCompile; -#else - UNUSED_PARAM(exec); - return CouldNotCompile; -#endif } - virtual CodeBlock* replacement() = 0; - - virtual DFG::CapabilityLevel canCompileWithDFGInternal() = 0; - DFG::CapabilityLevel canCompileWithDFG() + PassRefPtr<JITCode> jitCode() { return m_jitCode; } + static ptrdiff_t jitCodeOffset() { return OBJECT_OFFSETOF(CodeBlock, m_jitCode); } + JITCode::JITType jitType() const { - DFG::CapabilityLevel result = canCompileWithDFGInternal(); - m_canCompileWithDFGState = result; + JITCode* jitCode = m_jitCode.get(); + WTF::loadLoadFence(); + JITCode::JITType result = JITCode::jitTypeFor(jitCode); + WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good. return result; } - DFG::CapabilityLevel canCompileWithDFGState() { return m_canCompileWithDFGState; } - bool hasOptimizedReplacement() + bool hasBaselineJITProfiling() const { - ASSERT(JITCode::isBaselineCode(getJITType())); - bool result = replacement()->getJITType() > getJITType(); -#if !ASSERT_DISABLED - if (result) - ASSERT(replacement()->getJITType() == JITCode::DFGJIT); - else { - ASSERT(JITCode::isBaselineCode(replacement()->getJITType())); - ASSERT(replacement() == this); - } -#endif - return result; + return jitType() == JITCode::BaselineJIT; } -#else - JITCode::JITType getJITType() const { return JITCode::BaselineJIT; } + +#if ENABLE(JIT) + CodeBlock* replacement(); + + DFG::CapabilityLevel computeCapabilityLevel(); + DFG::CapabilityLevel capabilityLevel(); + DFG::CapabilityLevel capabilityLevelState() { return static_cast<DFG::CapabilityLevel>(m_capabilityLevelState); } + + bool hasOptimizedReplacement(JITCode::JITType typeToReplace); + bool hasOptimizedReplacement(); // the typeToReplace is my JITType #endif - ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); } + void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr); + + ExecutableBase* ownerExecutable() const { return m_ownerExecutable.get(); } + ScriptExecutable* ownerScriptExecutable() const { return jsCast<ScriptExecutable*>(m_ownerExecutable.get()); } void setVM(VM* vm) { m_vm = vm; } VM* vm() { return m_vm; } - void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; } - int thisRegister() const { return m_thisRegister; } + void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; } + VirtualRegister thisRegister() const { return m_thisRegister; } - bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); } bool 
usesEval() const { return m_unlinkedCode->usesEval(); } - - void setArgumentsRegister(int argumentsRegister) - { - ASSERT(argumentsRegister != -1); - m_argumentsRegister = argumentsRegister; - ASSERT(usesArguments()); - } - int argumentsRegister() const - { - ASSERT(usesArguments()); - return m_argumentsRegister; - } - int uncheckedArgumentsRegister() - { - if (!usesArguments()) - return InvalidVirtualRegister; - return argumentsRegister(); - } - void setActivationRegister(int activationRegister) - { - m_activationRegister = activationRegister; - } - int activationRegister() const + + void setScopeRegister(VirtualRegister scopeRegister) { - ASSERT(needsFullScopeChain()); - return m_activationRegister; + ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid()); + m_scopeRegister = scopeRegister; } - int uncheckedActivationRegister() + + VirtualRegister scopeRegister() const { - if (!needsFullScopeChain()) - return InvalidVirtualRegister; - return activationRegister(); + return m_scopeRegister; } - bool usesArguments() const { return m_argumentsRegister != -1; } - - bool needsActivation() const + + CodeType codeType() const { - return needsFullScopeChain() && codeType() != GlobalCode; + return static_cast<CodeType>(m_codeType); } - bool isCaptured(int operand, InlineCallFrame* inlineCallFrame = 0) const + PutPropertySlot::Context putByIdContext() const { - if (operandIsArgument(operand)) - return operandToArgument(operand) && usesArguments(); - - if (inlineCallFrame) - return inlineCallFrame->capturedVars.get(operand); - - // The activation object isn't in the captured region, but it's "captured" - // in the sense that stores to its location can be observed indirectly. - if (needsActivation() && operand == activationRegister()) - return true; - - // Ditto for the arguments object. - if (usesArguments() && operand == argumentsRegister()) - return true; - - // Ditto for the arguments object. 
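setJITCode() and jitType() earlier in this hunk pair a storeStoreFence on the writer side with loadLoadFences on the reader side, so a concurrent thread never observes the new JITCode pointer before the data written ahead of it. In portable C++ the same publish/consume shape is usually expressed with release/acquire atomics; a minimal sketch, not the WTF primitives:

#include <atomic>

struct Code { int kind { 0 }; };

std::atomic<Code*> g_code { nullptr };

void publish(Code* code)
{
    // Everything written to *code happens-before the release store, so a
    // reader that sees the pointer also sees the initialized payload.
    g_code.store(code, std::memory_order_release);
}

int currentKind()
{
    Code* code = g_code.load(std::memory_order_acquire);
    return code ? code->kind : -1;
}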
- if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister())) - return true; - - // We're in global code so there are no locals to capture - if (!symbolTable()) - return false; - - return operand >= symbolTable()->captureStart() - && operand < symbolTable()->captureEnd(); + if (codeType() == EvalCode) + return PutPropertySlot::PutByIdEval; + return PutPropertySlot::PutById; } - CodeType codeType() const { return m_unlinkedCode->codeType(); } - SourceProvider* source() const { return m_source.get(); } unsigned sourceOffset() const { return m_sourceOffset; } unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; } @@ -576,27 +373,8 @@ public: size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); } unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); } - void createActivation(CallFrame*); - - void clearEvalCache(); - - String nameForRegister(int registerNumber); + String nameForRegister(VirtualRegister); -#if ENABLE(JIT) - void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); } - size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); } - StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; } - - void setNumberOfByValInfos(size_t size) { m_byValInfos.grow(size); } - size_t numberOfByValInfos() const { return m_byValInfos.size(); } - ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; } - - void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); } - size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); } - CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; } -#endif - -#if ENABLE(VALUE_PROFILER) unsigned numberOfArgumentValueProfiles() { ASSERT(m_numParameters >= 0); @@ -612,23 +390,12 @@ public: unsigned numberOfValueProfiles() { return m_valueProfiles.size(); } ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; } - ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset) - { - ValueProfile* result = binarySearch<ValueProfile, int>( - m_valueProfiles, m_valueProfiles.size(), bytecodeOffset, - getValueProfileBytecodeOffset<ValueProfile>); - ASSERT(result->m_bytecodeOffset != -1); - ASSERT(instructions()[bytecodeOffset + opcodeLength( - m_vm->interpreter->getOpcodeID( - instructions()[ - bytecodeOffset].u.opcode)) - 1].u.profile == result); - return result; - } - SpeculatedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset) + ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset); + SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset) { - return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(); + return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker); } - + unsigned totalNumberOfValueProfiles() { return numberOfArgumentValueProfiles() + numberOfValueProfiles(); @@ -639,87 +406,64 @@ public: return valueProfileForArgument(index); return valueProfile(index - numberOfArgumentValueProfiles()); } - + RareCaseProfile* addRareCaseProfile(int bytecodeOffset) { m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset)); return &m_rareCaseProfiles.last(); } unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); } - RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; } - RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset) - { - return 
tryBinarySearch<RareCaseProfile, int>( - m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset, - getRareCaseProfileBytecodeOffset); - } - + RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset); + unsigned rareCaseProfileCountForBytecodeOffset(int bytecodeOffset); + bool likelyToTakeSlowCase(int bytecodeOffset) { - if (!numberOfRareCaseProfiles()) + if (!hasBaselineJITProfiling()) return false; - unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset); return value >= Options::likelyToTakeSlowCaseMinimumCount(); } - + bool couldTakeSlowCase(int bytecodeOffset) { - if (!numberOfRareCaseProfiles()) + if (!hasBaselineJITProfiling()) return false; - unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset); return value >= Options::couldTakeSlowCaseMinimumCount(); } - - RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset) - { - m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset)); - return &m_specialFastCaseProfiles.last(); - } - unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); } - RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; } - RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset) + + ResultProfile* ensureResultProfile(int bytecodeOffset) { - return tryBinarySearch<RareCaseProfile, int>( - m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset, - getRareCaseProfileBytecodeOffset); + ResultProfile* profile = resultProfileForBytecodeOffset(bytecodeOffset); + if (!profile) { + m_resultProfiles.append(ResultProfile(bytecodeOffset)); + profile = &m_resultProfiles.last(); + ASSERT(&m_resultProfiles.last() == &m_resultProfiles[m_resultProfiles.size() - 1]); + if (!m_bytecodeOffsetToResultProfileIndexMap) + m_bytecodeOffsetToResultProfileIndexMap = std::make_unique<BytecodeOffsetToResultProfileIndexMap>(); + m_bytecodeOffsetToResultProfileIndexMap->add(bytecodeOffset, m_resultProfiles.size() - 1); + } + return profile; } - - bool likelyToTakeSpecialFastCase(int bytecodeOffset) + unsigned numberOfResultProfiles() { return m_resultProfiles.size(); } + ResultProfile* resultProfileForBytecodeOffset(int bytecodeOffset); + + unsigned specialFastCaseProfileCountForBytecodeOffset(int bytecodeOffset) { - if (!numberOfRareCaseProfiles()) - return false; - unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; - return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount(); + ResultProfile* profile = resultProfileForBytecodeOffset(bytecodeOffset); + if (!profile) + return 0; + return profile->specialFastPathCount(); } - + bool couldTakeSpecialFastCase(int bytecodeOffset) { - if (!numberOfRareCaseProfiles()) + if (!hasBaselineJITProfiling()) return false; - unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned specialFastCaseCount = specialFastCaseProfileCountForBytecodeOffset(bytecodeOffset); return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount(); } - - bool likelyToTakeDeepestSlowCase(int bytecodeOffset) - { - if (!numberOfRareCaseProfiles()) - return false; - unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; - unsigned specialFastCaseCount = 
specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; - unsigned value = slowCaseCount - specialFastCaseCount; - return value >= Options::likelyToTakeSlowCaseMinimumCount(); - } - - bool likelyToTakeAnySlowCase(int bytecodeOffset) - { - if (!numberOfRareCaseProfiles()) - return false; - unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; - unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; - unsigned value = slowCaseCount + specialFastCaseCount; - return value >= Options::likelyToTakeSlowCaseMinimumCount(); - } - + unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); } const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; } ArrayProfile* addArrayProfile(unsigned bytecodeOffset) @@ -729,113 +473,98 @@ public: } ArrayProfile* getArrayProfile(unsigned bytecodeOffset); ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset); -#endif // Exception handling support size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; } - void allocateHandlers(const Vector<UnlinkedHandlerInfo>& unlinkedHandlers) - { - size_t count = unlinkedHandlers.size(); - if (!count) - return; - createRareDataIfNecessary(); - m_rareData->m_exceptionHandlers.resize(count); - for (size_t i = 0; i < count; ++i) { - m_rareData->m_exceptionHandlers[i].start = unlinkedHandlers[i].start; - m_rareData->m_exceptionHandlers[i].end = unlinkedHandlers[i].end; - m_rareData->m_exceptionHandlers[i].target = unlinkedHandlers[i].target; - m_rareData->m_exceptionHandlers[i].scopeDepth = unlinkedHandlers[i].scopeDepth; - } - - } HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; } bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); } -#if ENABLE(JIT) - Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callReturnIndexVector() - { - createRareDataIfNecessary(); - return m_rareData->m_callReturnIndexVector; - } -#endif - #if ENABLE(DFG_JIT) - SegmentedVector<InlineCallFrame, 4>& inlineCallFrames() - { - createRareDataIfNecessary(); - return m_rareData->m_inlineCallFrames; - } - - Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow>& codeOrigins() - { - createRareDataIfNecessary(); - return m_rareData->m_codeOrigins; - } - + Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins(); + // Having code origins implies that there has been some inlining. 
bool hasCodeOrigins() { - return m_rareData && !!m_rareData->m_codeOrigins.size(); + return JITCode::isOptimizingJIT(jitType()); } - bool codeOriginForReturn(ReturnAddressPtr, CodeOrigin&); - - bool canGetCodeOrigin(unsigned index) + bool canGetCodeOrigin(CallSiteIndex index) { - if (!m_rareData) + if (!hasCodeOrigins()) return false; - return m_rareData->m_codeOrigins.size() > index; + return index.bits() < codeOrigins().size(); } - - CodeOrigin codeOrigin(unsigned index) + + CodeOrigin codeOrigin(CallSiteIndex index) { - RELEASE_ASSERT(m_rareData); - return m_rareData->m_codeOrigins[index].codeOrigin; + return codeOrigins()[index.bits()]; } - + bool addFrequentExitSite(const DFG::FrequentExitSite& site) { - ASSERT(JITCode::isBaselineCode(getJITType())); - return m_exitProfile.add(site); + ASSERT(JITCode::isBaselineCode(jitType())); + ConcurrentJITLocker locker(m_lock); + return m_exitProfile.add(locker, site); + } + + bool hasExitSite(const ConcurrentJITLocker& locker, const DFG::FrequentExitSite& site) const + { + return m_exitProfile.hasExitSite(locker, site); + } + bool hasExitSite(const DFG::FrequentExitSite& site) const + { + ConcurrentJITLocker locker(m_lock); + return hasExitSite(locker, site); } - - bool hasExitSite(const DFG::FrequentExitSite& site) const { return m_exitProfile.hasExitSite(site); } DFG::ExitProfile& exitProfile() { return m_exitProfile; } - + CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles() { return m_lazyOperandValueProfiles; } -#endif +#endif // ENABLE(DFG_JIT) // Constant Pool +#if ENABLE(DFG_JIT) + size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); } + size_t numberOfDFGIdentifiers() const; + const Identifier& identifier(int index) const; +#else + size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); } + const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); } +#endif - size_t numberOfIdentifiers() const { return m_identifiers.size(); } - void addIdentifier(const Identifier& i) { return m_identifiers.append(i); } - Identifier& identifier(int index) { return m_identifiers[index]; } - - size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); } + Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; } + Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; } unsigned addConstant(JSValue v) { unsigned result = m_constantRegisters.size(); m_constantRegisters.append(WriteBarrier<Unknown>()); - m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v); + m_constantRegisters.last().set(m_globalObject->vm(), this, v); + m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other); return result; } + unsigned addConstantLazily() + { + unsigned result = m_constantRegisters.size(); + m_constantRegisters.append(WriteBarrier<Unknown>()); + m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other); + return result; + } - unsigned addOrFindConstant(JSValue); WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; } ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; } ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); } + ALWAYS_INLINE SourceCodeRepresentation 
constantSourceCodeRepresentation(int index) const { return m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; } FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); } int numberOfFunctionDecls() { return m_functionDecls.size(); } FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); } - + RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); } unsigned numberOfConstantBuffers() const @@ -862,71 +591,99 @@ public: return constantBufferAsVector(index).data(); } + Heap* heap() const { return &m_vm->heap; } JSGlobalObject* globalObject() { return m_globalObject.get(); } - + JSGlobalObject* globalObjectFor(CodeOrigin); - // Jump Tables + BytecodeLivenessAnalysis& livenessAnalysis() + { + { + ConcurrentJITLocker locker(m_lock); + if (!!m_livenessAnalysis) + return *m_livenessAnalysis; + } + std::unique_ptr<BytecodeLivenessAnalysis> analysis = + std::make_unique<BytecodeLivenessAnalysis>(this); + { + ConcurrentJITLocker locker(m_lock); + if (!m_livenessAnalysis) + m_livenessAnalysis = WTFMove(analysis); + return *m_livenessAnalysis; + } + } + + void validate(); - size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; } - SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); } - SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; } + // Jump Tables - size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; } - SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); } - SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; } + size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; } + SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); } + SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; } + void clearSwitchJumpTables() + { + if (!m_rareData) + return; + m_rareData->m_switchJumpTables.clear(); + } size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; } StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); } StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; } - - SharedSymbolTable* symbolTable() const { return m_unlinkedCode->symbolTable(); } + // Live callee registers at yield points. 
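livenessAnalysis() above is double-checked construction: check under the lock, build the analysis outside the lock because it is expensive, then retake the lock and install the result only if no other thread won the race. The same shape in standalone form, with std::mutex standing in for ConcurrentJITLock:

#include <memory>
#include <mutex>

struct Analysis { /* expensive to compute */ };

std::mutex g_lock;
std::unique_ptr<Analysis> g_analysis;

Analysis& ensureAnalysis()
{
    {
        std::lock_guard<std::mutex> locker(g_lock);
        if (g_analysis)
            return *g_analysis;
    }
    // Built outside the lock so other threads are not blocked on it.
    auto analysis = std::make_unique<Analysis>();
    {
        std::lock_guard<std::mutex> locker(g_lock);
        if (!g_analysis) // another thread may have installed one meanwhile
            g_analysis = std::move(analysis);
        return *g_analysis;
    }
}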
+ const FastBitVector& liveCalleeLocalsAtYield(unsigned index) const
+ {
+ RELEASE_ASSERT(m_rareData);
+ return m_rareData->m_liveCalleeLocalsAtYield[index];
+ }
+ FastBitVector& liveCalleeLocalsAtYield(unsigned index)
+ {
+ RELEASE_ASSERT(m_rareData);
+ return m_rareData->m_liveCalleeLocalsAtYield[index];
+ }
EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
enum ShrinkMode {
// Shrink prior to generating machine code that may point directly into vectors.
EarlyShrink,
-
+
// Shrink after generating machine code, and after possibly creating new vectors
// and appending to others. At this time it is not safe to shrink certain vectors
// because we would have generated machine code that references them directly.
LateShrink
};
void shrinkToFit(ShrinkMode);
-
- void copyPostParseDataFrom(CodeBlock* alternative);
- void copyPostParseDataFromAlternative();
-
+
// Functions for controlling when JITting kicks in, in a mixed mode
// execution world.
-
+
bool checkIfJITThresholdReached()
{
return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
}
-
+
void dontJITAnytimeSoon()
{
m_llintExecuteCounter.deferIndefinitely();
}
-
+
void jitAfterWarmUp()
{
m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
}
-
+
void jitSoon()
{
m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
}
-
- const ExecutionCounter& llintExecuteCounter() const
+
+ const BaselineExecutionCounter& llintExecuteCounter() const
{
return m_llintExecuteCounter;
}
-
+
// Functions for controlling when tiered compilation kicks in. This
// controls both when the optimizing compiler is invoked and when OSR
// entry happens. Two triggers exist: the loop trigger and the return
@@ -938,57 +695,59 @@ public:
// case of the loop trigger, if the optimized compilation succeeds
// (or has already succeeded in the past) then OSR is attempted to
// redirect program flow into the optimized code.
-
+
// These functions are called from within the optimization triggers,
// and are used as a single point at which we define the heuristics
// for how much warm-up is mandated before the next optimization
// trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
// as this is called from the CodeBlock constructor.
-
+
// When we observe a lot of speculation failures, we trigger a
// reoptimization. But each time, we increase the optimization trigger
// to avoid thrashing. 
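The counter scheme described in the comments above reduces to a small core: a counter crosses a threshold to trigger optimization, and each reoptimization resets the counter and raises the threshold so that repeated speculation failures cannot thrash the compiler. A toy version that ignores all of the actual heuristics:

#include <cstdint>

struct TierUpCounter {
    int32_t counter { 0 };
    int32_t activeThreshold { 100 };
    unsigned reoptimizationRetryCounter { 0 };

    // Called on hot-path events (loop back-edges, returns).
    bool checkIfThresholdReached()
    {
        return ++counter >= activeThreshold;
    }

    // Called when optimized code had to be jettisoned: back off
    // exponentially so more warm-up is demanded before the next attempt.
    void countReoptimization()
    {
        reoptimizationRetryCounter++;
        counter = 0;
        activeThreshold <<= 1;
    }
};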
- unsigned reoptimizationRetryCounter() const; + JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const; void countReoptimization(); - +#if ENABLE(JIT) + static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return RegisterSet::llintBaselineCalleeSaveRegisters().numberOfSetRegisters(); } + static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters(); + size_t calleeSaveSpaceAsVirtualRegisters(); + unsigned numberOfDFGCompiles(); int32_t codeTypeThresholdMultiplier() const; - - int32_t counterValueForOptimizeAfterWarmUp(); - int32_t counterValueForOptimizeAfterLongWarmUp(); - int32_t counterValueForOptimizeSoon(); - + + int32_t adjustedCounterValue(int32_t desiredThreshold); + int32_t* addressOfJITExecuteCounter() { return &m_jitExecuteCounter.m_counter; } - - static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); } - static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); } - static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); } - const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; } - + static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); } + static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); } + static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); } + + const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; } + unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; } - + // Check if the optimization threshold has been reached, and if not, // adjust the heuristics accordingly. Returns true if the threshold has // been reached. bool checkIfOptimizationThresholdReached(); - + // Call this to force the next optimization trigger to fire. This is // rarely wise, since optimization triggers are typically more // expensive than executing baseline code. void optimizeNextInvocation(); - + // Call this to prevent optimization from happening again. Note that // optimization will still happen after roughly 2^29 invocations, // so this is really meant to delay that as much as possible. This // is called if optimization failed, and we expect it to fail in // the future as well. void dontOptimizeAnytimeSoon(); - + // Call this to reinitialize the counter to its starting state, // forcing a warm-up to happen before the next optimization trigger // fires. This is called in the CodeBlock constructor. It also @@ -996,11 +755,11 @@ public: // OSR exit code is code generated, so the value of the execute // counter that this corresponds to is also available directly. void optimizeAfterWarmUp(); - + // Call this to force an optimization trigger to fire only after // a lot of warm-up. void optimizeAfterLongWarmUp(); - + // Call this to cause an optimization trigger to fire soon, but // not necessarily the next one. This makes sense if optimization // succeeds. 
Successful optimization means that all calls are
@@ -1020,56 +779,151 @@
// to trigger optimization if one of those functions becomes hot
// in the baseline code.
void optimizeSoon();
-
+
+ void forceOptimizationSlowPathConcurrently();
+
+ void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);
+
uint32_t osrExitCounter() const { return m_osrExitCounter; }
-
+
void countOSRExit() { m_osrExitCounter++; }
-
+
uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }
-
+
static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }
-#if ENABLE(JIT)
uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
uint32_t exitCountThresholdForReoptimization();
uint32_t exitCountThresholdForReoptimizationFromLoop();
bool shouldReoptimizeNow();
bool shouldReoptimizeFromLoopNow();
+
+ void setCalleeSaveRegisters(RegisterSet);
+ void setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList>);
+
+ RegisterAtOffsetList* calleeSaveRegisters() const { return m_calleeSaveRegisters.get(); }
+#else // No JIT
+ static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return 0; }
+ static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters() { return 0; };
+ void optimizeAfterWarmUp() { }
+ unsigned numberOfDFGCompiles() { return 0; }
#endif
-#if ENABLE(VALUE_PROFILER)
bool shouldOptimizeNow();
- void updateAllValueProfilePredictions(OperationInProgress = NoOperation);
- void updateAllArrayPredictions(OperationInProgress = NoOperation);
- void updateAllPredictions(OperationInProgress = NoOperation);
-#else
- bool shouldOptimizeNow() { return false; }
- void updateAllValueProfilePredictions(OperationInProgress = NoOperation) { }
- void updateAllArrayPredictions(OperationInProgress = NoOperation) { }
- void updateAllPredictions(OperationInProgress = NoOperation) { }
-#endif
-
-#if ENABLE(JIT)
- void reoptimize();
-#endif
+ void updateAllValueProfilePredictions();
+ void updateAllArrayPredictions();
+ void updateAllPredictions();
-#if ENABLE(VERBOSE_VALUE_PROFILE)
- void dumpValueProfiles();
-#endif
-
+ unsigned frameRegisterCount();
+ int stackPointerOffset();
+
+ bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);
+
+ bool hasDebuggerRequests() const { return m_debuggerRequests; }
+ void* debuggerRequestsAddress() { return &m_debuggerRequests; }
+
+ void addBreakpoint(unsigned numBreakpoints);
+ void removeBreakpoint(unsigned numBreakpoints)
+ {
+ ASSERT(m_numBreakpoints >= numBreakpoints);
+ m_numBreakpoints -= numBreakpoints;
+ }
+
+ enum SteppingMode {
+ SteppingModeDisabled,
+ SteppingModeEnabled
+ };
+ void setSteppingMode(SteppingMode);
+
+ void clearDebuggerRequests()
+ {
+ m_steppingMode = SteppingModeDisabled;
+ m_numBreakpoints = 0;
+ }
+
// FIXME: Make these remaining members private.
- int m_numCalleeRegisters;
+ int m_numCalleeLocals;
int m_numVars;
- bool m_isConstructor;
+
+ // This is intentionally public; it's the responsibility of anyone doing any
+ // of the following to hold the lock:
+ //
+ // - Modifying any inline cache in this code block.
+ //
+ // - Querying any inline cache in this code block, from a thread other than
+ // the main thread.
+ //
+ // Additionally, it's only legal to modify the inline cache on the main
+ // thread. This means that the main thread can query the inline cache without
+ // locking. This is crucial since executing the inline cache is effectively
+ // "querying" it. 
+ // + // Another exception to the rules is that the GC can do whatever it wants + // without holding any locks, because the GC is guaranteed to wait until any + // concurrent compilation threads finish what they're doing. + mutable ConcurrentJITLock m_lock; + + Atomic<bool> m_visitWeaklyHasBeenCalled; + + bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it. -protected: #if ENABLE(JIT) - virtual bool jitCompileImpl(ExecState*) = 0; - virtual void jettisonImpl() = 0; + unsigned m_capabilityLevelState : 2; // DFG::CapabilityLevel #endif - virtual void visitWeakReferences(SlotVisitor&); - virtual void finalizeUnconditionally(); + + bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC. + + bool m_didFailFTLCompilation : 1; + bool m_hasBeenCompiledWithFTL : 1; + bool m_isConstructor : 1; + bool m_isStrictMode : 1; + unsigned m_codeType : 2; // CodeType + + // Internal methods for use by validation code. It would be private if it wasn't + // for the fact that we use it from anonymous namespaces. + void beginValidationDidFail(); + NO_RETURN_DUE_TO_CRASH void endValidationDidFail(); + + struct RareData { + WTF_MAKE_FAST_ALLOCATED; + public: + Vector<HandlerInfo> m_exceptionHandlers; + + // Buffers used for large array literals + Vector<Vector<JSValue>> m_constantBuffers; + + // Jump Tables + Vector<SimpleJumpTable> m_switchJumpTables; + Vector<StringJumpTable> m_stringSwitchJumpTables; + + Vector<FastBitVector> m_liveCalleeLocalsAtYield; + + EvalCodeCache m_evalCodeCache; + }; + + void clearExceptionHandlers() + { + if (m_rareData) + m_rareData->m_exceptionHandlers.clear(); + } + + void appendExceptionHandler(const HandlerInfo& handler) + { + createRareDataIfNecessary(); // We may be handling the exception of an inlined call frame. 
+ m_rareData->m_exceptionHandlers.append(handler); + } + + CallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite); + +#if ENABLE(JIT) + void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&); + Optional<CodeOrigin> findPC(void* pc); +#endif + +protected: + void finalizeLLIntInlineCaches(); + void finalizeBaselineJITInlineCaches(); #if ENABLE(DFG_JIT) void tallyFrequentExitSites(); @@ -1078,297 +932,306 @@ protected: #endif private: - friend class DFGCodeBlocks; - + friend class CodeBlockSet; + + CodeBlock* specialOSREntryBlockOrNull(); + + void noticeIncomingCall(ExecState* callerFrame); + double optimizationThresholdScalingFactor(); -#if ENABLE(JIT) - ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr); -#endif - -#if ENABLE(VALUE_PROFILER) - void updateAllPredictionsAndCountLiveness(OperationInProgress, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles); -#endif + void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles); - void setIdentifiers(const Vector<Identifier>& identifiers) + void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation) { - RELEASE_ASSERT(m_identifiers.isEmpty()); - m_identifiers.appendVector(identifiers); + ASSERT(constants.size() == constantsSourceCodeRepresentation.size()); + size_t count = constants.size(); + m_constantRegisters.resizeToFit(count); + for (size_t i = 0; i < count; i++) + m_constantRegisters[i].set(*m_vm, this, constants[i].get()); + m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation; } - void setConstantRegisters(const Vector<WriteBarrier<Unknown> >& constants) + void replaceConstant(int index, JSValue value) { - size_t count = constants.size(); - m_constantRegisters.resize(count); - for (size_t i = 0; i < count; i++) - m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get()); + ASSERT(isConstantRegisterIndex(index) && static_cast<size_t>(index - FirstConstantRegisterIndex) < m_constantRegisters.size()); + m_constantRegisters[index - FirstConstantRegisterIndex].set(m_globalObject->vm(), this, value); } - void dumpBytecode(PrintStream&, ExecState*, const Instruction* begin, const Instruction*&); + void dumpBytecode( + PrintStream&, ExecState*, const Instruction* begin, const Instruction*&, + const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap()); - CString registerName(ExecState*, int r) const; + CString registerName(int r) const; + CString constantName(int index) const; void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op); void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&); - void printGetByIdCacheStatus(PrintStream&, ExecState*, int location); + void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&); enum CacheDumpMode { DumpCaches, DontDumpCaches }; - void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode); + void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&); void 
printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); + void printPutByIdCacheStatus(PrintStream&, int location, const StubInfoMap&); + void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); + void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand); + void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling); void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); -#if ENABLE(VALUE_PROFILER) void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling); -#endif + void dumpResultProfile(PrintStream&, ResultProfile*, bool& hasPrintedProfiling); - void visitStructures(SlotVisitor&, Instruction* vPC); - -#if ENABLE(DFG_JIT) - bool shouldImmediatelyAssumeLivenessDuringScan() - { - // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT - // CodeBlocks don't need to be jettisoned when their weak references go - // stale. So if a basline JIT CodeBlock gets scanned, we can assume that - // this means that it's live. - if (!m_dfgData) - return true; - - // For simplicity, we don't attempt to jettison code blocks during GC if - // they are executing. Instead we strongly mark their weak references to - // allow them to continue to execute soundly. - if (m_dfgData->mayBeExecuting) - return true; - - if (Options::forceDFGCodeBlockLiveness()) - return true; - - return false; - } -#else - bool shouldImmediatelyAssumeLivenessDuringScan() { return true; } -#endif - - void performTracingFixpointIteration(SlotVisitor&); + bool shouldVisitStrongly(); + bool shouldJettisonDueToWeakReference(); + bool shouldJettisonDueToOldAge(); + + void propagateTransitions(SlotVisitor&); + void determineLiveness(SlotVisitor&); void stronglyVisitStrongReferences(SlotVisitor&); void stronglyVisitWeakReferences(SlotVisitor&); + void visitOSRExitTargets(SlotVisitor&); + + std::chrono::milliseconds timeSinceCreation() + { + return std::chrono::duration_cast<std::chrono::milliseconds>( + std::chrono::steady_clock::now() - m_creationTime); + } void createRareDataIfNecessary() { if (!m_rareData) - m_rareData = adoptPtr(new RareData); + m_rareData = std::make_unique<RareData>(); } -#if ENABLE(JIT) - void resetStubInternal(RepatchBuffer&, StructureStubInfo&); - void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&); -#endif + void insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>&); + WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode; int m_numParameters; - WriteBarrier<ScriptExecutable> m_ownerExecutable; + union { + unsigned m_debuggerRequests; + struct { + unsigned m_hasDebuggerStatement : 1; + unsigned m_steppingMode : 1; + unsigned m_numBreakpoints : 30; + }; + }; + WriteBarrier<ExecutableBase> m_ownerExecutable; VM* m_vm; RefCountedArray<Instruction> m_instructions; - int m_thisRegister; - int m_argumentsRegister; - int m_activationRegister; - - bool m_isStrictMode; - bool m_needsActivation; + VirtualRegister m_thisRegister; + VirtualRegister m_scopeRegister; + mutable CodeBlockHash m_hash; RefPtr<SourceProvider> m_source; unsigned m_sourceOffset; unsigned m_firstLineColumnOffset; - unsigned m_codeType; -#if ENABLE(LLINT) - SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos; - SentinelLinkedList<LLIntCallLinkInfo, 
BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls; -#endif + RefCountedArray<LLIntCallLinkInfo> m_llintCallLinkInfos; + SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls; + RefPtr<JITCode> m_jitCode; #if ENABLE(JIT) - Vector<StructureStubInfo> m_structureStubInfos; - Vector<ByValInfo> m_byValInfos; - Vector<CallLinkInfo> m_callLinkInfos; - JITCode m_jitCode; - MacroAssemblerCodePtr m_jitCodeWithArityCheck; - SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls; -#endif -#if ENABLE(DFG_JIT) || ENABLE(LLINT) - OwnPtr<CompactJITCodeMap> m_jitCodeMap; + std::unique_ptr<RegisterAtOffsetList> m_calleeSaveRegisters; + Bag<StructureStubInfo> m_stubInfos; + Bag<ByValInfo> m_byValInfos; + Bag<CallLinkInfo> m_callLinkInfos; + SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls; + SentinelLinkedList<PolymorphicCallNode, BasicRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls; + std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap; #endif + std::unique_ptr<CompactJITCodeMap> m_jitCodeMap; #if ENABLE(DFG_JIT) - struct WeakReferenceTransition { - WeakReferenceTransition() { } - - WeakReferenceTransition(VM& vm, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to) - : m_from(vm, owner, from) - , m_to(vm, owner, to) - { - if (!!codeOrigin) - m_codeOrigin.set(vm, owner, codeOrigin); - } - - WriteBarrier<JSCell> m_codeOrigin; - WriteBarrier<JSCell> m_from; - WriteBarrier<JSCell> m_to; - }; - - struct DFGData { - DFGData() - : mayBeExecuting(false) - , isJettisoned(false) - { - } - - Vector<DFG::OSREntryData> osrEntry; - SegmentedVector<DFG::OSRExit, 8> osrExit; - Vector<DFG::SpeculationRecovery> speculationRecovery; - SegmentedVector<JumpReplacementWatchpoint, 1, 0> watchpoints; - Vector<WeakReferenceTransition> transitions; - Vector<WriteBarrier<JSCell> > weakReferences; - DFG::VariableEventStream variableEventStream; - DFG::MinifiedGraph minifiedDFG; - RefPtr<Profiler::Compilation> compilation; - bool mayBeExecuting; - bool isJettisoned; - bool livenessHasBeenProved; // Initialized and used on every GC. - bool allTransitionsHaveBeenMarked; // Initialized and used on every GC. - unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations. - }; - - OwnPtr<DFGData> m_dfgData; - // This is relevant to non-DFG code blocks that serve as the profiled code block // for DFG code blocks. 
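Background for the m_exitProfile member that follows: the baseline code block accumulates the OSR-exit sites of its optimized replacements so that the next DFG compile can avoid speculations that keep failing. A minimal standalone sketch of that bookkeeping, using hypothetical names rather than JSC's real types:

#include <cstdint>
#include <map>
#include <utility>

// Hypothetical stand-in for JSC's ExitKind enum, which is much larger.
enum class ExitKind : uint8_t { BadType, BadCache, Overflow };

// Counts OSR exits per (bytecodeOffset, kind) and reports a site as
// "frequent" once it crosses a threshold. This mirrors the role that
// DFG::ExitProfile plays for the profiled code block, not its actual API.
class ExitTracker {
public:
    void recordExit(unsigned bytecodeOffset, ExitKind kind)
    {
        ++m_counts[{ bytecodeOffset, kind }];
    }

    bool isFrequent(unsigned bytecodeOffset, ExitKind kind, unsigned threshold = 10) const
    {
        auto it = m_counts.find({ bytecodeOffset, kind });
        return it != m_counts.end() && it->second >= threshold;
    }

private:
    std::map<std::pair<unsigned, ExitKind>, unsigned> m_counts;
};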
DFG::ExitProfile m_exitProfile; CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles; #endif -#if ENABLE(VALUE_PROFILER) - Vector<ValueProfile> m_argumentValueProfiles; - SegmentedVector<ValueProfile, 8> m_valueProfiles; + RefCountedArray<ValueProfile> m_argumentValueProfiles; + RefCountedArray<ValueProfile> m_valueProfiles; SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles; - SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles; - SegmentedVector<ArrayAllocationProfile, 8> m_arrayAllocationProfiles; + SegmentedVector<ResultProfile, 8> m_resultProfiles; + typedef HashMap<unsigned, unsigned, IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> BytecodeOffsetToResultProfileIndexMap; + std::unique_ptr<BytecodeOffsetToResultProfileIndexMap> m_bytecodeOffsetToResultProfileIndexMap; + RefCountedArray<ArrayAllocationProfile> m_arrayAllocationProfiles; ArrayProfileVector m_arrayProfiles; -#endif - SegmentedVector<ObjectAllocationProfile, 8> m_objectAllocationProfiles; + RefCountedArray<ObjectAllocationProfile> m_objectAllocationProfiles; // Constant Pool - Vector<Identifier> m_identifiers; COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown); // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates // it, so we're stuck with it for now. - Vector<WriteBarrier<Unknown> > m_constantRegisters; - Vector<WriteBarrier<FunctionExecutable> > m_functionDecls; - Vector<WriteBarrier<FunctionExecutable> > m_functionExprs; + Vector<WriteBarrier<Unknown>> m_constantRegisters; + Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation; + RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionDecls; + RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionExprs; - OwnPtr<CodeBlock> m_alternative; - - ExecutionCounter m_llintExecuteCounter; - - ExecutionCounter m_jitExecuteCounter; - int32_t m_totalJITExecutions; + WriteBarrier<CodeBlock> m_alternative; + + BaselineExecutionCounter m_llintExecuteCounter; + + BaselineExecutionCounter m_jitExecuteCounter; uint32_t m_osrExitCounter; uint16_t m_optimizationDelayCounter; uint16_t m_reoptimizationRetryCounter; - Vector<ResolveOperations> m_resolveOperations; - Vector<PutToBaseOperation, 1> m_putToBaseOperations; - - struct RareData { - WTF_MAKE_FAST_ALLOCATED; - public: - Vector<HandlerInfo> m_exceptionHandlers; + std::chrono::steady_clock::time_point m_creationTime; - // Buffers used for large array literals - Vector<Vector<JSValue> > m_constantBuffers; - - // Jump Tables - Vector<SimpleJumpTable> m_immediateSwitchJumpTables; - Vector<SimpleJumpTable> m_characterSwitchJumpTables; - Vector<StringJumpTable> m_stringSwitchJumpTables; + std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis; - EvalCodeCache m_evalCodeCache; + std::unique_ptr<RareData> m_rareData; -#if ENABLE(JIT) - Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow> m_callReturnIndexVector; -#endif -#if ENABLE(DFG_JIT) - SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames; - Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow> m_codeOrigins; -#endif - }; -#if COMPILER(MSVC) - friend void WTF::deleteOwnedPtr<RareData>(RareData*); -#endif - OwnPtr<RareData> m_rareData; -#if ENABLE(JIT) - DFG::CapabilityLevel m_canCompileWithDFGState; -#endif + UnconditionalFinalizer m_unconditionalFinalizer; + WeakReferenceHarvester m_weakReferenceHarvester; }; // Program code is not marked by any function, so we make the global object // 
responsible for marking it. class GlobalCodeBlock : public CodeBlock { + typedef CodeBlock Base; + DECLARE_INFO; + protected: - GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other) - : CodeBlock(CopyParsedBlock, other) + GlobalCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, GlobalCodeBlock& other) + : CodeBlock(vm, structure, CopyParsedBlock, other) { } - - GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, unsigned baseScopeDepth, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative) - : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, sourceOffset, firstLineColumnOffset, alternative) + + GlobalCodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset) + : CodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset) { } }; class ProgramCodeBlock : public GlobalCodeBlock { public: - ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other) - : GlobalCodeBlock(CopyParsedBlock, other) + typedef GlobalCodeBlock Base; + DECLARE_INFO; + + static ProgramCodeBlock* create(VM* vm, CopyParsedBlockTag, ProgramCodeBlock& other) { + ProgramCodeBlock* instance = new (NotNull, allocateCell<ProgramCodeBlock>(vm->heap)) + ProgramCodeBlock(vm, vm->programCodeBlockStructure.get(), CopyParsedBlock, other); + instance->finishCreation(*vm, CopyParsedBlock, other); + return instance; } - ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative) - : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, 0, firstLineColumnOffset, alternative) + static ProgramCodeBlock* create(VM* vm, ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, + JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset) { + ProgramCodeBlock* instance = new (NotNull, allocateCell<ProgramCodeBlock>(vm->heap)) + ProgramCodeBlock(vm, vm->programCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, firstLineColumnOffset); + instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope); + return instance; } -#if ENABLE(JIT) -protected: - virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex); - virtual void jettisonImpl(); - virtual bool jitCompileImpl(ExecState*); - virtual CodeBlock* replacement(); - virtual DFG::CapabilityLevel canCompileWithDFGInternal(); -#endif + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype) + { + return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info()); + } + +private: + ProgramCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, ProgramCodeBlock& other) + : GlobalCodeBlock(vm, structure, CopyParsedBlock, other) + { + } + + ProgramCodeBlock(VM* vm, Structure* structure, ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, + JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset) + : GlobalCodeBlock(vm, 
structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset) + { + } + + static void destroy(JSCell*); +}; + +class ModuleProgramCodeBlock : public GlobalCodeBlock { +public: + typedef GlobalCodeBlock Base; + DECLARE_INFO; + + static ModuleProgramCodeBlock* create(VM* vm, CopyParsedBlockTag, ModuleProgramCodeBlock& other) + { + ModuleProgramCodeBlock* instance = new (NotNull, allocateCell<ModuleProgramCodeBlock>(vm->heap)) + ModuleProgramCodeBlock(vm, vm->moduleProgramCodeBlockStructure.get(), CopyParsedBlock, other); + instance->finishCreation(*vm, CopyParsedBlock, other); + return instance; + } + + static ModuleProgramCodeBlock* create(VM* vm, ModuleProgramExecutable* ownerExecutable, UnlinkedModuleProgramCodeBlock* unlinkedCodeBlock, + JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset) + { + ModuleProgramCodeBlock* instance = new (NotNull, allocateCell<ModuleProgramCodeBlock>(vm->heap)) + ModuleProgramCodeBlock(vm, vm->moduleProgramCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, firstLineColumnOffset); + instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope); + return instance; + } + + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype) + { + return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info()); + } + +private: + ModuleProgramCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, ModuleProgramCodeBlock& other) + : GlobalCodeBlock(vm, structure, CopyParsedBlock, other) + { + } + + ModuleProgramCodeBlock(VM* vm, Structure* structure, ModuleProgramExecutable* ownerExecutable, UnlinkedModuleProgramCodeBlock* unlinkedCodeBlock, + JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset) + : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset) + { + } + + static void destroy(JSCell*); }; class EvalCodeBlock : public GlobalCodeBlock { public: - EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other) - : GlobalCodeBlock(CopyParsedBlock, other) + typedef GlobalCodeBlock Base; + DECLARE_INFO; + + static EvalCodeBlock* create(VM* vm, CopyParsedBlockTag, EvalCodeBlock& other) { + EvalCodeBlock* instance = new (NotNull, allocateCell<EvalCodeBlock>(vm->heap)) + EvalCodeBlock(vm, vm->evalCodeBlockStructure.get(), CopyParsedBlock, other); + instance->finishCreation(*vm, CopyParsedBlock, other); + return instance; } - - EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative) - : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, 0, 1, alternative) + + static EvalCodeBlock* create(VM* vm, EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, + JSScope* scope, PassRefPtr<SourceProvider> sourceProvider) { + EvalCodeBlock* instance = new (NotNull, allocateCell<EvalCodeBlock>(vm->heap)) + EvalCodeBlock(vm, vm->evalCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, sourceProvider); + instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope); + return instance; + } + + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype) + { + return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, 
StructureFlags), info()); } const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); } unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); } + +private: + EvalCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, EvalCodeBlock& other) + : GlobalCodeBlock(vm, structure, CopyParsedBlock, other) + { + } -#if ENABLE(JIT) -protected: - virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex); - virtual void jettisonImpl(); - virtual bool jitCompileImpl(ExecState*); - virtual CodeBlock* replacement(); - virtual DFG::CapabilityLevel canCompileWithDFGInternal(); -#endif + EvalCodeBlock(VM* vm, Structure* structure, EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, + JSScope* scope, PassRefPtr<SourceProvider> sourceProvider) + : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1) + { + } + + static void destroy(JSCell*); private: UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); } @@ -1376,53 +1239,87 @@ private: class FunctionCodeBlock : public CodeBlock { public: - FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other) - : CodeBlock(CopyParsedBlock, other) + typedef CodeBlock Base; + DECLARE_INFO; + + static FunctionCodeBlock* create(VM* vm, CopyParsedBlockTag, FunctionCodeBlock& other) { + FunctionCodeBlock* instance = new (NotNull, allocateCell<FunctionCodeBlock>(vm->heap)) + FunctionCodeBlock(vm, vm->functionCodeBlockStructure.get(), CopyParsedBlock, other); + instance->finishCreation(*vm, CopyParsedBlock, other); + return instance; } - FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative = nullptr) - : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, sourceOffset, firstLineColumnOffset, alternative) + static FunctionCodeBlock* create(VM* vm, FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, + PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset) { + FunctionCodeBlock* instance = new (NotNull, allocateCell<FunctionCodeBlock>(vm->heap)) + FunctionCodeBlock(vm, vm->functionCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset); + instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope); + return instance; } - -#if ENABLE(JIT) -protected: - virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex); - virtual void jettisonImpl(); - virtual bool jitCompileImpl(ExecState*); - virtual CodeBlock* replacement(); - virtual DFG::CapabilityLevel canCompileWithDFGInternal(); -#endif -}; -inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame) -{ - RELEASE_ASSERT(inlineCallFrame); - ExecutableBase* executable = inlineCallFrame->executable.get(); - RELEASE_ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info); - return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? 
CodeForCall : CodeForConstruct); -} + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype) + { + return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info()); + } + +private: + FunctionCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, FunctionCodeBlock& other) + : CodeBlock(vm, structure, CopyParsedBlock, other) + { + } + + FunctionCodeBlock(VM* vm, Structure* structure, FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, + PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset) + : CodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset) + { + } -inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock) -{ - if (codeOrigin.inlineCallFrame) - return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame); - return baselineCodeBlock; -} + static void destroy(JSCell*); +}; -inline int CodeBlock::argumentIndexAfterCapture(size_t argument) -{ - if (argument >= static_cast<size_t>(symbolTable()->parameterCount())) - return CallFrame::argumentOffset(argument); +#if ENABLE(WEBASSEMBLY) +class WebAssemblyCodeBlock : public CodeBlock { +public: + typedef CodeBlock Base; + DECLARE_INFO; + + static WebAssemblyCodeBlock* create(VM* vm, CopyParsedBlockTag, WebAssemblyCodeBlock& other) + { + WebAssemblyCodeBlock* instance = new (NotNull, allocateCell<WebAssemblyCodeBlock>(vm->heap)) + WebAssemblyCodeBlock(vm, vm->webAssemblyCodeBlockStructure.get(), CopyParsedBlock, other); + instance->finishCreation(*vm, CopyParsedBlock, other); + return instance; + } - const SlowArgument* slowArguments = symbolTable()->slowArguments(); - if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal) - return CallFrame::argumentOffset(argument); + static WebAssemblyCodeBlock* create(VM* vm, WebAssemblyExecutable* ownerExecutable, JSGlobalObject* globalObject) + { + WebAssemblyCodeBlock* instance = new (NotNull, allocateCell<WebAssemblyCodeBlock>(vm->heap)) + WebAssemblyCodeBlock(vm, vm->webAssemblyCodeBlockStructure.get(), ownerExecutable, globalObject); + instance->finishCreation(*vm, ownerExecutable, globalObject); + return instance; + } - ASSERT(slowArguments[argument].status == SlowArgument::Captured); - return slowArguments[argument].index; -} + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype) + { + return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info()); + } + +private: + WebAssemblyCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, WebAssemblyCodeBlock& other) + : CodeBlock(vm, structure, CopyParsedBlock, other) + { + } + + WebAssemblyCodeBlock(VM* vm, Structure* structure, WebAssemblyExecutable* ownerExecutable, JSGlobalObject* globalObject) + : CodeBlock(vm, structure, ownerExecutable, globalObject) + { + } + + static void destroy(JSCell*); +}; +#endif inline Register& ExecState::r(int index) { @@ -1432,52 +1329,95 @@ inline Register& ExecState::r(int index) return this[index]; } +inline Register& ExecState::r(VirtualRegister reg) +{ + return r(reg.offset()); +} + inline Register& ExecState::uncheckedR(int index) { RELEASE_ASSERT(index < FirstConstantRegisterIndex); return this[index]; } -#if ENABLE(DFG_JIT) -inline bool ExecState::isInlineCallFrame() +inline Register& 
ExecState::uncheckedR(VirtualRegister reg) { - if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT)) - return false; - return isInlineCallFrameSlow(); + return uncheckedR(reg.offset()); } -#endif -inline JSValue ExecState::argumentAfterCapture(size_t argument) +inline void CodeBlock::clearVisitWeaklyHasBeenCalled() { - if (argument >= argumentCount()) - return jsUndefined(); - - if (!codeBlock()) - return this[argumentOffset(argument)].jsValue(); - - return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue(); + m_visitWeaklyHasBeenCalled.store(false, std::memory_order_relaxed); } -#if ENABLE(DFG_JIT) -inline void DFGCodeBlocks::mark(void* candidateCodeBlock) +inline void CodeBlockSet::mark(const LockHolder& locker, void* candidateCodeBlock) { + ASSERT(m_lock.isLocked()); // We have to check for 0 and -1 because those are used by the HashMap as markers. uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock); - + // This checks for both of those nasty cases in one go. // 0 + 1 = 1 // -1 + 1 = 0 if (value + 1 <= 1) return; - - HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock)); - if (iter == m_set.end()) + + CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock); + if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock)) return; - - (*iter)->m_dfgData->mayBeExecuting = true; + + mark(locker, codeBlock); } -#endif - + +inline void CodeBlockSet::mark(const LockHolder&, CodeBlock* codeBlock) +{ + if (!codeBlock) + return; + + // Try to recover gracefully if we forget to execute a barrier for a + // CodeBlock that does value profiling. This is probably overkill, but we + // have always done it. + Heap::heap(codeBlock)->writeBarrier(codeBlock); + + m_currentlyExecuting.add(codeBlock); +} + +template <typename Functor> inline void ScriptExecutable::forEachCodeBlock(Functor&& functor) +{ + switch (type()) { + case ProgramExecutableType: { + if (CodeBlock* codeBlock = static_cast<CodeBlock*>(jsCast<ProgramExecutable*>(this)->m_programCodeBlock.get())) + codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor)); + break; + } + + case EvalExecutableType: { + if (CodeBlock* codeBlock = static_cast<CodeBlock*>(jsCast<EvalExecutable*>(this)->m_evalCodeBlock.get())) + codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor)); + break; + } + + case FunctionExecutableType: { + Functor f(std::forward<Functor>(functor)); + FunctionExecutable* executable = jsCast<FunctionExecutable*>(this); + if (CodeBlock* codeBlock = static_cast<CodeBlock*>(executable->m_codeBlockForCall.get())) + codeBlock->forEachRelatedCodeBlock(f); + if (CodeBlock* codeBlock = static_cast<CodeBlock*>(executable->m_codeBlockForConstruct.get())) + codeBlock->forEachRelatedCodeBlock(f); + break; + } + + case ModuleProgramExecutableType: { + if (CodeBlock* codeBlock = static_cast<CodeBlock*>(jsCast<ModuleProgramExecutable*>(this)->m_moduleProgramCodeBlock.get())) + codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor)); + break; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + } +} + } // namespace JSC #endif // CodeBlock_h diff --git a/Source/JavaScriptCore/bytecode/CodeBlockHash.cpp b/Source/JavaScriptCore/bytecode/CodeBlockHash.cpp index 7c890cc88..87c092f64 100644 --- a/Source/JavaScriptCore/bytecode/CodeBlockHash.cpp +++ b/Source/JavaScriptCore/bytecode/CodeBlockHash.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013 Apple Inc. 
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,61 +28,40 @@ #include "SourceCode.h" #include <wtf/SHA1.h> +#include <wtf/SixCharacterHash.h> namespace JSC { -#define TABLE ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") - CodeBlockHash::CodeBlockHash(const char* string) - : m_hash(0) + : m_hash(sixCharacterHashStringToInteger(string)) { - RELEASE_ASSERT(strlen(string) == 6); - - for (unsigned i = 0; i < 6; ++i) { - m_hash *= 62; - unsigned c = string[i]; - if (c >= 'A' && c <= 'Z') { - m_hash += c - 'A'; - continue; - } - if (c >= 'a' && c <= 'z') { - m_hash += c - 'a' + 26; - continue; - } - ASSERT(c >= '0' && c <= '9'); - m_hash += c - '0' + 26 * 2; - } } CodeBlockHash::CodeBlockHash(const SourceCode& sourceCode, CodeSpecializationKind kind) : m_hash(0) { SHA1 sha1; - sha1.addBytes(sourceCode.toString().utf8()); - Vector<uint8_t, 20> digest; + sha1.addBytes(sourceCode.toUTF8()); + SHA1::Digest digest; sha1.computeHash(digest); m_hash += digest[0] | (digest[1] << 8) | (digest[2] << 16) | (digest[3] << 24); m_hash ^= static_cast<unsigned>(kind); + + // Ensure that 0 corresponds to the hash not having been computed. + if (!m_hash) + m_hash = 1; } void CodeBlockHash::dump(PrintStream& out) const { - ASSERT(strlen(TABLE) == 62); - - char buffer[7]; - unsigned accumulator = m_hash; - for (unsigned i = 6; i--;) { - buffer[i] = TABLE[accumulator % 62]; - accumulator /= 62; - } - buffer[6] = 0; + std::array<char, 7> buffer = integerToSixCharacterHashString(m_hash); #if !ASSERT_DISABLED - CodeBlockHash recompute(buffer); + CodeBlockHash recompute(buffer.data()); ASSERT(recompute == *this); #endif // !ASSERT_DISABLED - out.print(buffer); + out.print(buffer.data()); } } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/CodeBlockHash.h b/Source/JavaScriptCore/bytecode/CodeBlockHash.h index 20de8ed48..4e3398867 100644 --- a/Source/JavaScriptCore/bytecode/CodeBlockHash.h +++ b/Source/JavaScriptCore/bytecode/CodeBlockHash.h @@ -56,6 +56,9 @@ public: CodeBlockHash(const SourceCode&, CodeSpecializationKind); explicit CodeBlockHash(const char*); + + bool isSet() const { return !!m_hash; } + bool operator!() const { return !isSet(); } unsigned hash() const { return m_hash; } diff --git a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp index c02acb38d..50cf7378d 100644 --- a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp +++ b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -24,35 +24,20 @@ */ #include "config.h" -#include "ResolveGlobalStatus.h" +#include "CodeBlockJettisoningWatchpoint.h" #include "CodeBlock.h" -#include "JSCJSValue.h" -#include "Operations.h" -#include "Structure.h" +#include "DFGCommon.h" +#include "JSCInlines.h" namespace JSC { -static ResolveGlobalStatus computeForStructure(CodeBlock* codeBlock, Structure* structure, Identifier& identifier) +void CodeBlockJettisoningWatchpoint::fireInternal(const FireDetail& detail) { - unsigned attributesIgnored; - JSCell* specificValue; - PropertyOffset offset = structure->get(*codeBlock->vm(), identifier, attributesIgnored, specificValue); - if (structure->isDictionary()) - specificValue = 0; - if (!isValidOffset(offset)) - return ResolveGlobalStatus(); - - return ResolveGlobalStatus(ResolveGlobalStatus::Simple, structure, offset, specificValue); -} + if (DFG::shouldDumpDisassembly()) + dataLog("Firing watchpoint ", RawPointer(this), " on ", *m_codeBlock, "\n"); -ResolveGlobalStatus ResolveGlobalStatus::computeFor(CodeBlock* codeBlock, int, ResolveOperation* operation, Identifier& identifier) -{ - ASSERT(operation->m_operation == ResolveOperation::GetAndReturnGlobalProperty); - if (!operation->m_structure) - return ResolveGlobalStatus(); - - return computeForStructure(codeBlock, operation->m_structure.get(), identifier); + m_codeBlock->jettison(Profiler::JettisonDueToUnprofiledWatchpoint, CountReoptimization, &detail); } } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h new file mode 100644 index 000000000..b5e6dd330 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef CodeBlockJettisoningWatchpoint_h +#define CodeBlockJettisoningWatchpoint_h + +#include "Watchpoint.h" + +namespace JSC { + +class CodeBlock; + +class CodeBlockJettisoningWatchpoint : public Watchpoint { +public: + CodeBlockJettisoningWatchpoint(CodeBlock* codeBlock) + : m_codeBlock(codeBlock) + { + } + +protected: + virtual void fireInternal(const FireDetail&) override; + +private: + CodeBlock* m_codeBlock; +}; + +} // namespace JSC + +#endif // CodeBlockJettisoningWatchpoint_h + diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.cpp b/Source/JavaScriptCore/bytecode/CodeOrigin.cpp index 52bc2bf7f..d51695012 100644 --- a/Source/JavaScriptCore/bytecode/CodeOrigin.cpp +++ b/Source/JavaScriptCore/bytecode/CodeOrigin.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,14 +29,15 @@ #include "CallFrame.h" #include "CodeBlock.h" #include "Executable.h" -#include "Operations.h" +#include "InlineCallFrame.h" +#include "JSCInlines.h" namespace JSC { unsigned CodeOrigin::inlineDepthForCallFrame(InlineCallFrame* inlineCallFrame) { unsigned result = 1; - for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame) + for (InlineCallFrame* current = inlineCallFrame; current; current = current->directCaller.inlineCallFrame) result++; return result; } @@ -45,28 +46,105 @@ unsigned CodeOrigin::inlineDepth() const { return inlineDepthForCallFrame(inlineCallFrame); } + +bool CodeOrigin::isApproximatelyEqualTo(const CodeOrigin& other) const +{ + CodeOrigin a = *this; + CodeOrigin b = other; + + if (!a.isSet()) + return !b.isSet(); + if (!b.isSet()) + return false; + if (a.isHashTableDeletedValue()) + return b.isHashTableDeletedValue(); + if (b.isHashTableDeletedValue()) + return false; + + for (;;) { + ASSERT(a.isSet()); + ASSERT(b.isSet()); + + if (a.bytecodeIndex != b.bytecodeIndex) + return false; + + if ((!!a.inlineCallFrame) != (!!b.inlineCallFrame)) + return false; + + if (!a.inlineCallFrame) + return true; + + if (a.inlineCallFrame->baselineCodeBlock.get() != b.inlineCallFrame->baselineCodeBlock.get()) + return false; + + a = a.inlineCallFrame->directCaller; + b = b.inlineCallFrame->directCaller; + } +} + +unsigned CodeOrigin::approximateHash() const +{ + if (!isSet()) + return 0; + if (isHashTableDeletedValue()) + return 1; + + unsigned result = 2; + CodeOrigin codeOrigin = *this; + for (;;) { + result += codeOrigin.bytecodeIndex; + + if (!codeOrigin.inlineCallFrame) + return result; + + result += WTF::PtrHash<JSCell*>::hash(codeOrigin.inlineCallFrame->baselineCodeBlock.get()); + + codeOrigin = codeOrigin.inlineCallFrame->directCaller; + } +} + Vector<CodeOrigin> CodeOrigin::inlineStack() const { Vector<CodeOrigin> result(inlineDepth()); result.last() = *this; unsigned index = result.size() - 2; - for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame) - result[index--] = current->caller; + for (InlineCallFrame* current = inlineCallFrame; current; current = current->directCaller.inlineCallFrame) + result[index--] = current->directCaller; RELEASE_ASSERT(!result[0].inlineCallFrame); return result; } +CodeBlock* CodeOrigin::codeOriginOwner() const +{ + if (!inlineCallFrame) + return 0; + return inlineCallFrame->baselineCodeBlock.get(); +} + +int 
CodeOrigin::stackOffset() const +{ + if (!inlineCallFrame) + return 0; + + return inlineCallFrame->stackOffset; +} + void CodeOrigin::dump(PrintStream& out) const { + if (!isSet()) { + out.print("<none>"); + return; + } + Vector<CodeOrigin> stack = inlineStack(); for (unsigned i = 0; i < stack.size(); ++i) { if (i) out.print(" --> "); if (InlineCallFrame* frame = stack[i].inlineCallFrame) { - out.print(frame->briefFunctionInformation(), ":<", RawPointer(frame->executable.get()), "> "); - if (frame->isClosureCall()) + out.print(frame->briefFunctionInformation(), ":<", RawPointer(frame->baselineCodeBlock.get()), "> "); + if (frame->isClosureCall) out.print("(closure) "); } @@ -74,45 +152,9 @@ void CodeOrigin::dump(PrintStream& out) const } } -JSFunction* InlineCallFrame::calleeForCallFrame(ExecState* exec) const +void CodeOrigin::dumpInContext(PrintStream& out, DumpContext*) const { - if (!isClosureCall()) - return callee.get(); - - return jsCast<JSFunction*>((exec + stackOffset)->callee()); -} - -CodeBlockHash InlineCallFrame::hash() const -{ - return executable->hashFor(specializationKind()); -} - -String InlineCallFrame::inferredName() const -{ - return jsCast<FunctionExecutable*>(executable.get())->inferredName().string(); -} - -CodeBlock* InlineCallFrame::baselineCodeBlock() const -{ - return jsCast<FunctionExecutable*>(executable.get())->baselineCodeBlockFor(specializationKind()); -} - -void InlineCallFrame::dumpBriefFunctionInformation(PrintStream& out) const -{ - out.print(inferredName(), "#", hash()); -} - -void InlineCallFrame::dump(PrintStream& out) const -{ - out.print(briefFunctionInformation(), ":<", RawPointer(executable.get()), ", bc#", caller.bytecodeIndex, ", ", specializationKind()); - if (callee) - out.print(", known callee: ", JSValue(callee.get())); - else - out.print(", closure call"); - out.print(", numArgs+this = ", arguments.size()); - out.print(", stack >= r", stackOffset); - out.print(">"); + dump(out); } } // namespace JSC - diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.h b/Source/JavaScriptCore/bytecode/CodeOrigin.h index 5d9eaa041..66ab42724 100644 --- a/Source/JavaScriptCore/bytecode/CodeOrigin.h +++ b/Source/JavaScriptCore/bytecode/CodeOrigin.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,11 +26,12 @@ #ifndef CodeOrigin_h #define CodeOrigin_h +#include "CallMode.h" #include "CodeBlockHash.h" #include "CodeSpecializationKind.h" -#include "ValueRecovery.h" #include "WriteBarrier.h" #include <wtf/BitVector.h> +#include <wtf/HashMap.h> #include <wtf/PrintStream.h> #include <wtf/StdLibExtras.h> #include <wtf/Vector.h> @@ -39,41 +40,45 @@ namespace JSC { struct InlineCallFrame; class ExecState; -class ExecutableBase; +class ScriptExecutable; class JSFunction; struct CodeOrigin { - static const unsigned maximumBytecodeIndex = (1u << 29) - 1; + static const unsigned invalidBytecodeIndex = UINT_MAX; - // Bytecode offset that you'd use to re-execute this instruction. - unsigned bytecodeIndex : 29; - // Bytecode offset corresponding to the opcode that gives the result (needed to handle - // op_call/op_call_put_result and op_method_check/op_get_by_id). 
- unsigned valueProfileOffset : 3; + // Bytecode offset that you'd use to re-execute this instruction, and the + // bytecode index of the bytecode instruction that produces some result that + // you're interested in (used for mapping Nodes whose values you're using + // to bytecode instructions that have the appropriate value profile). + unsigned bytecodeIndex; InlineCallFrame* inlineCallFrame; CodeOrigin() - : bytecodeIndex(maximumBytecodeIndex) - , valueProfileOffset(0) + : bytecodeIndex(invalidBytecodeIndex) , inlineCallFrame(0) { } - explicit CodeOrigin(unsigned bytecodeIndex, InlineCallFrame* inlineCallFrame = 0, unsigned valueProfileOffset = 0) + CodeOrigin(WTF::HashTableDeletedValueType) + : bytecodeIndex(invalidBytecodeIndex) + , inlineCallFrame(deletedMarker()) + { + } + + explicit CodeOrigin(unsigned bytecodeIndex, InlineCallFrame* inlineCallFrame = 0) : bytecodeIndex(bytecodeIndex) - , valueProfileOffset(valueProfileOffset) , inlineCallFrame(inlineCallFrame) { - RELEASE_ASSERT(bytecodeIndex <= maximumBytecodeIndex); - RELEASE_ASSERT(valueProfileOffset < (1u << 3)); + ASSERT(bytecodeIndex < invalidBytecodeIndex); } - bool isSet() const { return bytecodeIndex != maximumBytecodeIndex; } + bool isSet() const { return bytecodeIndex != invalidBytecodeIndex; } + explicit operator bool() const { return isSet(); } - unsigned bytecodeIndexForValueProfile() const + bool isHashTableDeletedValue() const { - return bytecodeIndex + valueProfileOffset; + return bytecodeIndex == invalidBytecodeIndex && !!inlineCallFrame; } // The inline depth is the depth of the inline stack, so 1 = not inlined, @@ -82,60 +87,42 @@ struct CodeOrigin { // If the code origin corresponds to inlined code, gives you the heap object that // would have owned the code if it had not been inlined. Otherwise returns 0. - ExecutableBase* codeOriginOwner() const; + CodeBlock* codeOriginOwner() const; - unsigned stackOffset() const; + int stackOffset() const; static unsigned inlineDepthForCallFrame(InlineCallFrame*); + unsigned hash() const; bool operator==(const CodeOrigin& other) const; - bool operator!=(const CodeOrigin& other) const { return !(*this == other); } - // Get the inline stack. This is slow, and is intended for debugging only. - Vector<CodeOrigin> inlineStack() const; + // This checks if the two code origins correspond to the same stack trace snippets, + // but ignore whether the InlineCallFrame's are identical. + bool isApproximatelyEqualTo(const CodeOrigin& other) const; - void dump(PrintStream&) const; -}; + unsigned approximateHash() const; -struct InlineCallFrame { - Vector<ValueRecovery> arguments; - WriteBarrier<ExecutableBase> executable; - WriteBarrier<JSFunction> callee; // This may be null, indicating that this is a closure call and that the JSFunction and JSScope are already on the stack. - CodeOrigin caller; - BitVector capturedVars; // Indexed by the machine call frame's variable numbering. - unsigned stackOffset : 31; - bool isCall : 1; - - CodeSpecializationKind specializationKind() const { return specializationFromIsCall(isCall); } + template <typename Function> + void walkUpInlineStack(const Function&); - bool isClosureCall() const { return !callee; } - - // Get the callee given a machine call frame to which this InlineCallFrame belongs. - JSFunction* calleeForCallFrame(ExecState*) const; - - String inferredName() const; - CodeBlockHash hash() const; - - CodeBlock* baselineCodeBlock() const; + // Get the inline stack. This is slow, and is intended for debugging only. 
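For reference, inlineStack() (declared just below) produces the chain of origins from the outermost caller down to this origin by following directCaller links and then reversing. A minimal standalone sketch with simplified stand-in types, not the real CodeOrigin/InlineCallFrame:

#include <algorithm>
#include <vector>

struct InlineFrame;
struct Origin {
    unsigned bytecodeIndex;
    InlineFrame* inlineCallFrame; // null when the code was not inlined
};
struct InlineFrame {
    Origin directCaller;
};

// Walks the directCaller links, returning origins ordered outermost-first,
// with the origin we started from (the deepest one) last.
std::vector<Origin> inlineStack(Origin origin)
{
    std::vector<Origin> result;
    for (Origin o = origin;; o = o.inlineCallFrame->directCaller) {
        result.push_back(o);
        if (!o.inlineCallFrame)
            break; // reached the machine (non-inlined) frame
    }
    std::reverse(result.begin(), result.end());
    return result;
}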
+ Vector<CodeOrigin> inlineStack() const; - void dumpBriefFunctionInformation(PrintStream&) const; void dump(PrintStream&) const; + void dumpInContext(PrintStream&, DumpContext*) const; - MAKE_PRINT_METHOD(InlineCallFrame, dumpBriefFunctionInformation, briefFunctionInformation); -}; - -struct CodeOriginAtCallReturnOffset { - CodeOrigin codeOrigin; - unsigned callReturnOffset; +private: + static InlineCallFrame* deletedMarker() + { + return bitwise_cast<InlineCallFrame*>(static_cast<uintptr_t>(1)); + } }; -inline unsigned CodeOrigin::stackOffset() const +inline unsigned CodeOrigin::hash() const { - if (!inlineCallFrame) - return 0; - - return inlineCallFrame->stackOffset; + return WTF::IntHash<unsigned>::hash(bytecodeIndex) + + WTF::PtrHash<InlineCallFrame*>::hash(inlineCallFrame); } inline bool CodeOrigin::operator==(const CodeOrigin& other) const @@ -143,20 +130,34 @@ inline bool CodeOrigin::operator==(const CodeOrigin& other) const return bytecodeIndex == other.bytecodeIndex && inlineCallFrame == other.inlineCallFrame; } - -inline unsigned getCallReturnOffsetForCodeOrigin(CodeOriginAtCallReturnOffset* data) -{ - return data->callReturnOffset; -} -inline ExecutableBase* CodeOrigin::codeOriginOwner() const -{ - if (!inlineCallFrame) - return 0; - return inlineCallFrame->executable.get(); -} +struct CodeOriginHash { + static unsigned hash(const CodeOrigin& key) { return key.hash(); } + static bool equal(const CodeOrigin& a, const CodeOrigin& b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +struct CodeOriginApproximateHash { + static unsigned hash(const CodeOrigin& key) { return key.approximateHash(); } + static bool equal(const CodeOrigin& a, const CodeOrigin& b) { return a.isApproximatelyEqualTo(b); } + static const bool safeToCompareToEmptyOrDeleted = true; +}; } // namespace JSC +namespace WTF { + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::CodeOrigin> { + typedef JSC::CodeOriginHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::CodeOrigin> : SimpleClassHashTraits<JSC::CodeOrigin> { + static const bool emptyValueIsZero = false; +}; + +} // namespace WTF + #endif // CodeOrigin_h diff --git a/Source/JavaScriptCore/bytecode/CodeType.cpp b/Source/JavaScriptCore/bytecode/CodeType.cpp index 8b2cad56a..0c7043dfa 100644 --- a/Source/JavaScriptCore/bytecode/CodeType.cpp +++ b/Source/JavaScriptCore/bytecode/CodeType.cpp @@ -42,6 +42,9 @@ void printInternal(PrintStream& out, JSC::CodeType codeType) case JSC::FunctionCode: out.print("Function"); return; + case JSC::ModuleCode: + out.print("Module"); + return; default: CRASH(); return; diff --git a/Source/JavaScriptCore/bytecode/CodeType.h b/Source/JavaScriptCore/bytecode/CodeType.h index 04afc1109..9941d514c 100644 --- a/Source/JavaScriptCore/bytecode/CodeType.h +++ b/Source/JavaScriptCore/bytecode/CodeType.h @@ -26,11 +26,9 @@ #ifndef CodeType_h #define CodeType_h -#include <wtf/Platform.h> - namespace JSC { -enum CodeType { GlobalCode, EvalCode, FunctionCode }; +enum CodeType { GlobalCode, EvalCode, FunctionCode, ModuleCode }; } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp b/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp new file mode 100644 index 000000000..33663d057 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ComplexGetStatus.h" + +#include "JSCInlines.h" + +namespace JSC { + +ComplexGetStatus ComplexGetStatus::computeFor( + Structure* headStructure, const ObjectPropertyConditionSet& conditionSet, UniquedStringImpl* uid) +{ + // FIXME: We should assert that we never see a structure that + // getOwnPropertySlotIsImpure() but for which we don't + // newImpurePropertyFiresWatchpoints(). We're not at a point where we can do + // that, yet. + // https://bugs.webkit.org/show_bug.cgi?id=131810 + + ASSERT(conditionSet.isValid()); + + if (headStructure->takesSlowPathInDFGForImpureProperty()) + return takesSlowPath(); + + ComplexGetStatus result; + result.m_kind = Inlineable; + + if (!conditionSet.isEmpty()) { + result.m_conditionSet = conditionSet; + + if (!result.m_conditionSet.structuresEnsureValidity()) + return skip(); + + unsigned numberOfSlotBases = + result.m_conditionSet.numberOfConditionsWithKind(PropertyCondition::Presence); + RELEASE_ASSERT(numberOfSlotBases <= 1); + if (!numberOfSlotBases) { + // Currently we don't support misses. That's a bummer. + // FIXME: https://bugs.webkit.org/show_bug.cgi?id=133052 + return takesSlowPath(); + } + ObjectPropertyCondition base = result.m_conditionSet.slotBaseCondition(); + ASSERT(base.kind() == PropertyCondition::Presence); + + result.m_offset = base.offset(); + } else + result.m_offset = headStructure->getConcurrently(uid); + + if (!isValidOffset(result.m_offset)) + return takesSlowPath(); + + return result; +} + +} // namespace JSC + + diff --git a/Source/JavaScriptCore/bytecode/ComplexGetStatus.h b/Source/JavaScriptCore/bytecode/ComplexGetStatus.h new file mode 100644 index 000000000..a06e995d5 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ComplexGetStatus.h @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ComplexGetStatus_h +#define ComplexGetStatus_h + +#include "JSCJSValue.h" +#include "ObjectPropertyConditionSet.h" +#include "PropertyOffset.h" + +namespace JSC { + +class CodeBlock; +class StructureChain; + +// This class is useful for figuring out how to inline a cached get-like access. We +// say "get-like" because this is appropriate for loading the GetterSetter object in +// a put_by_id that hits a setter. Notably, this doesn't figure out how to call +// accessors, or even whether they should be called. What it gives us is a way of +// determining how to load the value from the requested property (identified by a +// StringImpl* uid) from an object of the given structure in the given CodeBlock, +// assuming that such an access had already been cached by Repatch (and so Repatch had +// already done a bunch of safety checks). This doesn't reexecute any checks that +// Repatch would have executed, and for prototype chain accesses, it doesn't ask the +// objects in the prototype chain whether their getOwnPropertySlot would attempt to +// intercept the access - so this really is only appropriate if you already know that +// one of the JITOperations had OK'd this for caching and that Repatch concurred. +// +// The typical use pattern is something like: +// +// ComplexGetStatus status = ComplexGetStatus::computeFor(...); +// switch (status.kind()) { +// case ComplexGetStatus::ShouldSkip: +// // Handle the case where this kind of access is possibly safe but wouldn't +// // pass the required safety checks. For example, if an IC gives us a list of +// // accesses and one of them is ShouldSkip, then we should pretend as if it +// // wasn't even there. +// break; +// case ComplexGetStatus::TakesSlowPath: +// // This kind of access is not safe to inline. Bail out of any attempts to +// // inline. +// break; +// case ComplexGetStatus::Inlineable: +// // The good stuff goes here. If it's Inlineable then the other properties of +// // the 'status' object will tell you everything you need to know about how +// // to execute the get-like operation.
+// break; +// } + +class ComplexGetStatus { +public: + enum Kind { + ShouldSkip, + TakesSlowPath, + Inlineable + }; + + ComplexGetStatus() + : m_kind(ShouldSkip) + , m_offset(invalidOffset) + { + } + + static ComplexGetStatus skip() + { + return ComplexGetStatus(); + } + + static ComplexGetStatus takesSlowPath() + { + ComplexGetStatus result; + result.m_kind = TakesSlowPath; + return result; + } + + static ComplexGetStatus computeFor( + Structure* headStructure, const ObjectPropertyConditionSet&, UniquedStringImpl* uid); + + Kind kind() const { return m_kind; } + PropertyOffset offset() const { return m_offset; } + const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; } + +private: + Kind m_kind; + PropertyOffset m_offset; + ObjectPropertyConditionSet m_conditionSet; +}; + +} // namespace JSC + +#endif // ComplexGetStatus_h + diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp index d36878fc9..40a25ced6 100644 --- a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp +++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp @@ -26,19 +26,21 @@ #include "config.h" #include "DFGExitProfile.h" -#include <wtf/PassOwnPtr.h> +#if ENABLE(DFG_JIT) namespace JSC { namespace DFG { ExitProfile::ExitProfile() { } ExitProfile::~ExitProfile() { } -bool ExitProfile::add(const FrequentExitSite& site) +bool ExitProfile::add(const ConcurrentJITLocker&, const FrequentExitSite& site) { + ASSERT(site.jitType() != ExitFromAnything); + // If we've never seen any frequent exits then create the list and put this site // into it. if (!m_frequentExitSites) { - m_frequentExitSites = adoptPtr(new Vector<FrequentExitSite>()); + m_frequentExitSites = std::make_unique<Vector<FrequentExitSite>>(); m_frequentExitSites->append(site); return true; } @@ -70,19 +72,22 @@ Vector<FrequentExitSite> ExitProfile::exitSitesFor(unsigned bytecodeIndex) return result; } -bool ExitProfile::hasExitSite(const FrequentExitSite& site) const +bool ExitProfile::hasExitSite(const ConcurrentJITLocker&, const FrequentExitSite& site) const { if (!m_frequentExitSites) return false; for (unsigned i = m_frequentExitSites->size(); i--;) { - if (m_frequentExitSites->at(i) == site) + if (site.subsumes(m_frequentExitSites->at(i))) return true; } return false; } -QueryableExitProfile::QueryableExitProfile(const ExitProfile& profile) +QueryableExitProfile::QueryableExitProfile() { } +QueryableExitProfile::~QueryableExitProfile() { } + +void QueryableExitProfile::initialize(const ConcurrentJITLocker&, const ExitProfile& profile) { if (!profile.m_frequentExitSites) return; @@ -91,6 +96,6 @@ QueryableExitProfile::QueryableExitProfile(const ExitProfile& profile) m_frequentExitSites.add(profile.m_frequentExitSites->at(i)); } -QueryableExitProfile::~QueryableExitProfile() { } - } } // namespace JSC::DFG + +#endif diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.h b/Source/JavaScriptCore/bytecode/DFGExitProfile.h index fe7b2f921..cdecbaf97 100644 --- a/Source/JavaScriptCore/bytecode/DFGExitProfile.h +++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,9 +26,12 @@ #ifndef DFGExitProfile_h #define DFGExitProfile_h +#if ENABLE(DFG_JIT) + +#include "ConcurrentJITLock.h" #include "ExitKind.h" +#include "ExitingJITType.h" #include <wtf/HashSet.h> -#include <wtf/OwnPtr.h> #include <wtf/Vector.h> namespace JSC { namespace DFG { @@ -38,29 +41,36 @@ public: FrequentExitSite() : m_bytecodeOffset(0) // 0 = empty value , m_kind(ExitKindUnset) + , m_jitType(ExitFromAnything) { } FrequentExitSite(WTF::HashTableDeletedValueType) : m_bytecodeOffset(1) // 1 = deleted value , m_kind(ExitKindUnset) + , m_jitType(ExitFromAnything) { } - explicit FrequentExitSite(unsigned bytecodeOffset, ExitKind kind) + explicit FrequentExitSite(unsigned bytecodeOffset, ExitKind kind, ExitingJITType jitType = ExitFromAnything) : m_bytecodeOffset(bytecodeOffset) , m_kind(kind) + , m_jitType(jitType) { - ASSERT(exitKindIsCountable(kind)); + if (m_kind == ArgumentsEscaped) { + // Count this one globally. It doesn't matter where in the code block the arguments escaped; + // the fact that they did is not associated with any particular instruction. + m_bytecodeOffset = 0; + } } // Use this constructor if you wish for the exit site to be counted globally within its // code block. - explicit FrequentExitSite(ExitKind kind) + explicit FrequentExitSite(ExitKind kind, ExitingJITType jitType = ExitFromAnything) : m_bytecodeOffset(0) , m_kind(kind) + , m_jitType(jitType) { - ASSERT(exitKindIsCountable(kind)); } bool operator!() const @@ -71,16 +81,36 @@ public: bool operator==(const FrequentExitSite& other) const { return m_bytecodeOffset == other.m_bytecodeOffset - && m_kind == other.m_kind; + && m_kind == other.m_kind + && m_jitType == other.m_jitType; + } + + bool subsumes(const FrequentExitSite& other) const + { + if (m_bytecodeOffset != other.m_bytecodeOffset) + return false; + if (m_kind != other.m_kind) + return false; + if (m_jitType == ExitFromAnything) + return true; + return m_jitType == other.m_jitType; } unsigned hash() const { - return WTF::intHash(m_bytecodeOffset) + m_kind; + return WTF::intHash(m_bytecodeOffset) + m_kind + m_jitType * 7; } unsigned bytecodeOffset() const { return m_bytecodeOffset; } ExitKind kind() const { return m_kind; } + ExitingJITType jitType() const { return m_jitType; } + + FrequentExitSite withJITType(ExitingJITType jitType) const + { + FrequentExitSite result = *this; + result.m_jitType = jitType; + return result; + } bool isHashTableDeletedValue() const { @@ -90,6 +120,7 @@ public: private: unsigned m_bytecodeOffset; ExitKind m_kind; + ExitingJITType m_jitType; }; struct FrequentExitSiteHash { @@ -100,6 +131,7 @@ struct FrequentExitSiteHash { } } // namespace JSC::DFG + namespace WTF { template<typename T> struct DefaultHash; @@ -127,7 +159,7 @@ public: // be called a fixed number of times per recompilation. Recompilation is // rare to begin with, and implies doing O(n) operations on the CodeBlock // anyway. - bool add(const FrequentExitSite&); + bool add(const ConcurrentJITLocker&, const FrequentExitSite&); // Get the frequent exit sites for a bytecode index. This is O(n), and is // meant to only be used from debugging/profiling code. @@ -137,29 +169,35 @@ public: // in the compiler. It should be strictly cheaper than building a // QueryableExitProfile, if you really expect this to be called infrequently // and you believe that there are few exit sites.
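The subsumes() relation introduced above is what hasExitSite() now queries: bytecode offset and exit kind must match exactly, while a site whose JIT type is ExitFromAnything acts as a wildcard over the exiting tier. Restated as a standalone sketch (simplified names, not JSC's API):

#include <cassert>

enum ExitingJITType { ExitFromAnything, ExitFromDFG, ExitFromFTL };

struct Site {
    unsigned bytecodeOffset;
    int kind;
    ExitingJITType jitType;

    // Matches the rule above: exact offset/kind, wildcard-or-equal JIT type.
    bool subsumes(const Site& other) const
    {
        if (bytecodeOffset != other.bytecodeOffset || kind != other.kind)
            return false;
        return jitType == ExitFromAnything || jitType == other.jitType;
    }
};

int main()
{
    Site query { 42, 1, ExitFromAnything };
    Site recorded { 42, 1, ExitFromDFG };
    assert(query.subsumes(recorded));  // the wildcard matches any tier
    assert(!recorded.subsumes(query)); // a concrete tier does not match the wildcard
    return 0;
}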
- bool hasExitSite(const FrequentExitSite&) const; - bool hasExitSite(ExitKind kind) const + bool hasExitSite(const ConcurrentJITLocker&, const FrequentExitSite&) const; + bool hasExitSite(const ConcurrentJITLocker& locker, ExitKind kind) const { - return hasExitSite(FrequentExitSite(kind)); + return hasExitSite(locker, FrequentExitSite(kind)); } - bool hasExitSite(unsigned bytecodeIndex, ExitKind kind) const + bool hasExitSite(const ConcurrentJITLocker& locker, unsigned bytecodeIndex, ExitKind kind) const { - return hasExitSite(FrequentExitSite(bytecodeIndex, kind)); + return hasExitSite(locker, FrequentExitSite(bytecodeIndex, kind)); } private: friend class QueryableExitProfile; - OwnPtr<Vector<FrequentExitSite> > m_frequentExitSites; + std::unique_ptr<Vector<FrequentExitSite>> m_frequentExitSites; }; class QueryableExitProfile { public: - explicit QueryableExitProfile(const ExitProfile&); + QueryableExitProfile(); ~QueryableExitProfile(); + void initialize(const ConcurrentJITLocker&, const ExitProfile&); + bool hasExitSite(const FrequentExitSite& site) const { + if (site.jitType() == ExitFromAnything) { + return hasExitSite(site.withJITType(ExitFromDFG)) + || hasExitSite(site.withJITType(ExitFromFTL)); + } return m_frequentExitSites.find(site) != m_frequentExitSites.end(); } @@ -178,4 +216,6 @@ private: } } // namespace JSC::DFG +#endif // ENABLE(DFG_JIT) + #endif // DFGExitProfile_h diff --git a/Source/JavaScriptCore/bytecode/DataFormat.cpp b/Source/JavaScriptCore/bytecode/DataFormat.cpp new file mode 100644 index 000000000..8bd42e100 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/DataFormat.cpp @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DataFormat.h" + +#include <wtf/Assertions.h> +#include <wtf/PrintStream.h> + +namespace WTF { + +void printInternal(PrintStream& out, JSC::DataFormat dataFormat) +{ + out.print(dataFormatToString(dataFormat)); +} + +} // namespace WTF diff --git a/Source/JavaScriptCore/bytecode/DataFormat.h b/Source/JavaScriptCore/bytecode/DataFormat.h index da8dacf49..81d6831ad 100644 --- a/Source/JavaScriptCore/bytecode/DataFormat.h +++ b/Source/JavaScriptCore/bytecode/DataFormat.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -38,13 +38,15 @@ namespace JSC { // (May also need bool, array, object, string types!) enum DataFormat { DataFormatNone = 0, - DataFormatInteger = 1, - DataFormatDouble = 2, - DataFormatBoolean = 3, - DataFormatCell = 4, - DataFormatStorage = 5, + DataFormatInt32 = 1, + DataFormatInt52 = 2, // Int52's are left-shifted by 16 by default. + DataFormatStrictInt52 = 3, // "Strict" Int52 means it's not shifted. + DataFormatDouble = 4, + DataFormatBoolean = 5, + DataFormatCell = 6, + DataFormatStorage = 7, DataFormatJS = 8, - DataFormatJSInteger = DataFormatJS | DataFormatInteger, + DataFormatJSInt32 = DataFormatJS | DataFormatInt32, DataFormatJSDouble = DataFormatJS | DataFormatDouble, DataFormatJSCell = DataFormatJS | DataFormatCell, DataFormatJSBoolean = DataFormatJS | DataFormatBoolean, @@ -54,7 +56,6 @@ enum DataFormat { // Special data formats used only for OSR. DataFormatDead = 33, // Implies jsUndefined(). - DataFormatArguments = 34 // Implies that the arguments object must be reified. }; inline const char* dataFormatToString(DataFormat dataFormat) @@ -62,8 +63,12 @@ inline const char* dataFormatToString(DataFormat dataFormat) switch (dataFormat) { case DataFormatNone: return "None"; - case DataFormatInteger: - return "Integer"; + case DataFormatInt32: + return "Int32"; + case DataFormatInt52: + return "Int52"; + case DataFormatStrictInt52: + return "StrictInt52"; case DataFormatDouble: return "Double"; case DataFormatCell: @@ -74,8 +79,8 @@ inline const char* dataFormatToString(DataFormat dataFormat) return "Storage"; case DataFormatJS: return "JS"; - case DataFormatJSInteger: - return "JSInteger"; + case DataFormatJSInt32: + return "JSInt32"; case DataFormatJSDouble: return "JSDouble"; case DataFormatJSCell: @@ -84,91 +89,21 @@ inline const char* dataFormatToString(DataFormat dataFormat) return "JSBoolean"; case DataFormatDead: return "Dead"; - case DataFormatArguments: - return "Arguments"; default: RELEASE_ASSERT_NOT_REACHED(); return "Unknown"; } } -#if USE(JSVALUE64) -inline bool needDataFormatConversion(DataFormat from, DataFormat to) -{ - ASSERT(from != DataFormatNone); - ASSERT(to != DataFormatNone); - switch (from) { - case DataFormatInteger: - case DataFormatDouble: - return to != from; - case DataFormatCell: - case DataFormatJS: - case DataFormatJSInteger: - case DataFormatJSDouble: - case DataFormatJSCell: - case DataFormatJSBoolean: - switch (to) { - case DataFormatInteger: - case DataFormatDouble: - return true; - case DataFormatCell: - case DataFormatJS: - case DataFormatJSInteger: - case DataFormatJSDouble: - case DataFormatJSCell: - case DataFormatJSBoolean: - return false; - default: - // This captures DataFormatBoolean, which is currently unused. 
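The two Int52 formats added to the enum above differ only in representation: per the comments, DataFormatInt52 keeps the 52-bit value left-shifted by 16 in a 64-bit register, while DataFormatStrictInt52 is the plain machine integer. A self-contained sketch of converting between the two; the shift distance is quoted from the comment, the helper names are illustrative:

    #include <cassert>
    #include <cstdint>

    // Shift distance quoted from the DataFormatInt52 comment above.
    constexpr int int52ShiftAmount = 16;

    // "Strict" (unshifted) value -> shifted representation.
    int64_t toShiftedInt52(int64_t strictValue)
    {
        // Shift in the unsigned domain to avoid signed-overflow pitfalls.
        return static_cast<int64_t>(static_cast<uint64_t>(strictValue) << int52ShiftAmount);
    }

    // Shifted representation -> strict value. Right shift of a negative signed
    // value is arithmetic on the platforms JSC targets, restoring the sign.
    int64_t toStrictInt52(int64_t shiftedValue)
    {
        return shiftedValue >> int52ShiftAmount;
    }

    int main()
    {
        int64_t value = -123456789;
        assert(toStrictInt52(toShiftedInt52(value)) == value);
    }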
- RELEASE_ASSERT_NOT_REACHED(); - } - case DataFormatStorage: - ASSERT(to == DataFormatStorage); - return false; - default: - // This captures DataFormatBoolean, which is currently unused. - RELEASE_ASSERT_NOT_REACHED(); - } - return true; -} - -#elif USE(JSVALUE32_64) -inline bool needDataFormatConversion(DataFormat from, DataFormat to) -{ - ASSERT(from != DataFormatNone); - ASSERT(to != DataFormatNone); - switch (from) { - case DataFormatInteger: - case DataFormatCell: - case DataFormatBoolean: - return ((to & DataFormatJS) || to == DataFormatDouble); - case DataFormatDouble: - case DataFormatJSDouble: - return (to != DataFormatDouble && to != DataFormatJSDouble); - case DataFormatJS: - case DataFormatJSInteger: - case DataFormatJSCell: - case DataFormatJSBoolean: - return (!(to & DataFormatJS) || to == DataFormatJSDouble); - case DataFormatStorage: - ASSERT(to == DataFormatStorage); - return false; - default: - RELEASE_ASSERT_NOT_REACHED(); - } - return true; -} -#endif - inline bool isJSFormat(DataFormat format, DataFormat expectedFormat) { ASSERT(expectedFormat & DataFormatJS); return (format | DataFormatJS) == expectedFormat; } -inline bool isJSInteger(DataFormat format) +inline bool isJSInt32(DataFormat format) { - return isJSFormat(format, DataFormatJSInteger); + return isJSFormat(format, DataFormatJSInt32); } inline bool isJSDouble(DataFormat format) @@ -186,6 +121,13 @@ inline bool isJSBoolean(DataFormat format) return isJSFormat(format, DataFormatJSBoolean); } -} +} // namespace JSC + +namespace WTF { + +class PrintStream; +void printInternal(PrintStream&, JSC::DataFormat); + +} // namespace WTF #endif // DataFormat_h diff --git a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp new file mode 100644 index 000000000..762387caf --- /dev/null +++ b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
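The printInternal(PrintStream&, JSC::DataFormat) declaration added at the bottom of DataFormat.h follows WTF's dumping convention: supplying a printInternal overload in namespace WTF is what lets the generic print machinery accept a new type. A hedged standalone sketch of the pattern, with iostreams and a local struct standing in for <wtf/PrintStream.h>:

    #include <iostream>

    enum class DataFormat { None, Int32, Double };

    const char* dataFormatToString(DataFormat format)
    {
        switch (format) {
        case DataFormat::None: return "None";
        case DataFormat::Int32: return "Int32";
        case DataFormat::Double: return "Double";
        }
        return "Unknown";
    }

    // Stand-in for WTF::PrintStream; the real class lives in <wtf/PrintStream.h>.
    struct PrintStream {
        template<typename T> void print(const T& value) { printInternal(*this, value); }
    };

    // The overload that print() finds: one printInternal per dumpable type.
    void printInternal(PrintStream&, DataFormat format)
    {
        std::cout << dataFormatToString(format) << '\n';
    }

    int main()
    {
        PrintStream out;
        out.print(DataFormat::Int32); // prints "Int32"
    }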
+ */ + +#include "config.h" +#include "DeferredCompilationCallback.h" + +#include "CodeBlock.h" + +namespace JSC { + +DeferredCompilationCallback::DeferredCompilationCallback() { } +DeferredCompilationCallback::~DeferredCompilationCallback() { } + +void DeferredCompilationCallback::compilationDidComplete(CodeBlock*, CodeBlock*, CompilationResult result) +{ + dumpCompiledSourcesIfNeeded(); + + switch (result) { + case CompilationFailed: + case CompilationInvalidated: + case CompilationSuccessful: + break; + case CompilationDeferred: + RELEASE_ASSERT_NOT_REACHED(); + } +} + +Vector<DeferredSourceDump>& DeferredCompilationCallback::ensureDeferredSourceDump() +{ + if (!m_deferredSourceDump) + m_deferredSourceDump = std::make_unique<Vector<DeferredSourceDump>>(); + return *m_deferredSourceDump; +} + +void DeferredCompilationCallback::dumpCompiledSourcesIfNeeded() +{ + if (!m_deferredSourceDump) + return; + + ASSERT(Options::dumpSourceAtDFGTime()); + unsigned index = 0; + for (auto& info : *m_deferredSourceDump) { + dataLog("[", ++index, "] "); + info.dump(); + } +} + +} // JSC + diff --git a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h new file mode 100644 index 000000000..adecb82bb --- /dev/null +++ b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DeferredCompilationCallback_h +#define DeferredCompilationCallback_h + +#include "CompilationResult.h" +#include "DeferredSourceDump.h" +#include <wtf/RefCounted.h> +#include <wtf/Vector.h> + +namespace JSC { + +class CodeBlock; + +class DeferredCompilationCallback : public RefCounted<DeferredCompilationCallback> { +protected: + DeferredCompilationCallback(); + +public: + virtual ~DeferredCompilationCallback(); + + virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*, CodeBlock* profiledDFGCodeBlock) = 0; + virtual void compilationDidComplete(CodeBlock*, CodeBlock* profiledDFGCodeBlock, CompilationResult); + + Vector<DeferredSourceDump>& ensureDeferredSourceDump(); + +private: + void dumpCompiledSourcesIfNeeded(); + + std::unique_ptr<Vector<DeferredSourceDump>> m_deferredSourceDump; +}; + +} // namespace JSC + +#endif // DeferredCompilationCallback_h + diff --git a/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp b/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp new file mode 100644 index 000000000..48079db66 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
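DeferredCompilationCallback, declared above, is an abstract notification interface: compilationDidBecomeReadyAsynchronously is the pure-virtual hook, while compilationDidComplete has a default body that flushes any deferred source dumps before returning. A minimal mock of a client subclass; LoggingCallback and every type here are local stand-ins, not part of the tree:

    #include <cstdio>

    struct CodeBlock; // opaque stand-in for JSC::CodeBlock
    enum CompilationResult { CompilationFailed, CompilationInvalidated, CompilationSuccessful, CompilationDeferred };

    struct DeferredCompilationCallback {
        virtual ~DeferredCompilationCallback() = default;
        virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*, CodeBlock* profiledDFGCodeBlock) = 0;
        // Non-pure: the real base implementation also dumps deferred sources.
        virtual void compilationDidComplete(CodeBlock*, CodeBlock*, CompilationResult) { }
    };

    // Hypothetical client: logs each notification, then defers to the base
    // implementation so the shared bookkeeping still runs.
    struct LoggingCallback : DeferredCompilationCallback {
        void compilationDidBecomeReadyAsynchronously(CodeBlock*, CodeBlock*) override
        {
            std::puts("plan ready; schedule installation");
        }
        void compilationDidComplete(CodeBlock* block, CodeBlock* profiled, CompilationResult result) override
        {
            std::printf("compilation finished with result %d\n", static_cast<int>(result));
            DeferredCompilationCallback::compilationDidComplete(block, profiled, result);
        }
    };

    int main()
    {
        LoggingCallback callback;
        callback.compilationDidBecomeReadyAsynchronously(nullptr, nullptr);
        callback.compilationDidComplete(nullptr, nullptr, CompilationSuccessful);
    }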
+ */ + +#include "config.h" +#include "DeferredSourceDump.h" + +#include "CodeBlock.h" +#include "CodeBlockWithJITType.h" + +namespace JSC { + +DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock) + : m_codeBlock(codeBlock) + , m_rootCodeBlock(nullptr) + , m_rootJITType(JITCode::None) +{ +} + +DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, CodeOrigin callerCodeOrigin) + : m_codeBlock(codeBlock) + , m_rootCodeBlock(rootCodeBlock) + , m_rootJITType(rootJITType) + , m_callerCodeOrigin(callerCodeOrigin) +{ +} + +void DeferredSourceDump::dump() +{ + bool isInlinedFrame = !!m_rootCodeBlock; + if (isInlinedFrame) + dataLog("Inlined "); + else + dataLog("Compiled "); + dataLog(*m_codeBlock); + + if (isInlinedFrame) + dataLog(" at ", CodeBlockWithJITType(m_rootCodeBlock, m_rootJITType), " ", m_callerCodeOrigin); + + dataLog("\n'''"); + m_codeBlock->dumpSource(); + dataLog("'''\n"); +} + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/DeferredSourceDump.h b/Source/JavaScriptCore/bytecode/DeferredSourceDump.h new file mode 100644 index 000000000..72cb6b3b8 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/DeferredSourceDump.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DeferredSourceDump_h +#define DeferredSourceDump_h + +#include "CodeOrigin.h" +#include "JITCode.h" + +namespace JSC { + +class CodeBlock; + +class DeferredSourceDump { +public: + DeferredSourceDump(CodeBlock*); + DeferredSourceDump(CodeBlock*, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, CodeOrigin callerCodeOrigin); + + void dump(); + +private: + CodeBlock* m_codeBlock; + CodeBlock* m_rootCodeBlock; + JITCode::JITType m_rootJITType; + CodeOrigin m_callerCodeOrigin; +}; + +} // namespace JSC + +#endif // DeferredSourceDump_h diff --git a/Source/JavaScriptCore/bytecode/EvalCodeCache.h b/Source/JavaScriptCore/bytecode/EvalCodeCache.h index 5d04637f4..e1c7b2b47 100644 --- a/Source/JavaScriptCore/bytecode/EvalCodeCache.h +++ b/Source/JavaScriptCore/bytecode/EvalCodeCache.h @@ -10,7 +10,7 @@ * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -31,6 +31,8 @@ #include "Executable.h" #include "JSGlobalObject.h" +#include "JSScope.h" +#include "Options.h" #include "SourceCode.h" #include <wtf/HashMap.h> #include <wtf/RefPtr.h> @@ -38,38 +40,79 @@ namespace JSC { - class CodeCache; class SlotVisitor; class EvalCodeCache { public: - EvalExecutable* tryGet(bool inStrictContext, const String& evalSource, JSScope* scope) + class CacheKey { + public: + CacheKey(const String& source, bool isArrowFunctionContext) + : m_source(source.impl()) + , m_isArrowFunctionContext(isArrowFunctionContext) + { + } + + CacheKey(WTF::HashTableDeletedValueType) + : m_source(WTF::HashTableDeletedValue) + { + } + + CacheKey() = default; + + unsigned hash() const { return m_source->hash(); } + + bool isEmptyValue() const { return !m_source; } + + bool operator==(const CacheKey& other) const + { + return m_source == other.m_source && m_isArrowFunctionContext == other.m_isArrowFunctionContext; + } + + bool isHashTableDeletedValue() const { return m_source.isHashTableDeletedValue(); } + + struct Hash { + static unsigned hash(const CacheKey& key) + { + return key.hash(); + } + static bool equal(const CacheKey& lhs, const CacheKey& rhs) + { + return StringHash::equal(lhs.m_source, rhs.m_source) && lhs.m_isArrowFunctionContext == rhs.m_isArrowFunctionContext; + } + static const bool safeToCompareToEmptyOrDeleted = false; + }; + + typedef SimpleClassHashTraits<CacheKey> HashTraits; + + private: + RefPtr<StringImpl> m_source; + bool m_isArrowFunctionContext { false }; + }; + + EvalExecutable* tryGet(bool inStrictContext, const String& evalSource, bool isArrowFunctionContext, JSScope* scope) { - if (!inStrictContext && evalSource.length() < maxCacheableSourceLength && scope->begin()->isVariableObject()) - return m_cacheMap.get(evalSource.impl()).get(); - return 0; + if (isCacheable(inStrictContext, evalSource, scope)) { + ASSERT(!inStrictContext); + return m_cacheMap.fastGet(CacheKey(evalSource, isArrowFunctionContext)).get(); + } + return nullptr; } - EvalExecutable* getSlow(ExecState* exec, CodeCache* codeCache, ScriptExecutable* owner, bool inStrictContext, const String& evalSource, JSScope* scope, JSValue& exceptionValue) + EvalExecutable* getSlow(ExecState* exec, JSCell* owner, bool inStrictContext, ThisTDZMode thisTDZMode, DerivedContextType derivedContextType, bool isArrowFunctionContext, const String& evalSource, JSScope* scope) { - EvalExecutable* evalExecutable = EvalExecutable::create(exec, codeCache, makeSource(evalSource), inStrictContext); - exceptionValue = evalExecutable->compile(exec, scope); - if (exceptionValue) - return 0; - - if (!inStrictContext && evalSource.length() < maxCacheableSourceLength && scope->begin()->isVariableObject() && m_cacheMap.size() < maxCacheEntries) - m_cacheMap.set(evalSource.impl(), WriteBarrier<EvalExecutable>(exec->vm(), owner, evalExecutable)); - - return evalExecutable; - } - - EvalExecutable* get(ExecState* exec, CodeCache* codeCache, ScriptExecutable* owner, bool inStrictContext, const String& evalSource, JSScope* scope, JSValue& 
exceptionValue) - { - EvalExecutable* evalExecutable = tryGet(inStrictContext, evalSource, scope); - + VariableEnvironment variablesUnderTDZ; + JSScope::collectVariablesUnderTDZ(scope, variablesUnderTDZ); + EvalExecutable* evalExecutable = EvalExecutable::create(exec, makeSource(evalSource), inStrictContext, thisTDZMode, derivedContextType, isArrowFunctionContext, &variablesUnderTDZ); if (!evalExecutable) - evalExecutable = getSlow(exec, codeCache, owner, inStrictContext, evalSource, scope, exceptionValue); + return nullptr; + if (isCacheable(inStrictContext, evalSource, scope) && m_cacheMap.size() < maxCacheEntries) { + ASSERT(!inStrictContext); + ASSERT_WITH_MESSAGE(thisTDZMode == ThisTDZMode::CheckIfNeeded, "Always CheckIfNeeded because the caching is enabled only in the sloppy mode."); + ASSERT_WITH_MESSAGE(derivedContextType == DerivedContextType::None, "derivedContextType is always None because class methods and class constructors are always evaluated as the strict code."); + m_cacheMap.set(CacheKey(evalSource, isArrowFunctionContext), WriteBarrier<EvalExecutable>(exec->vm(), owner, evalExecutable)); + } + return evalExecutable; } @@ -83,10 +126,22 @@ namespace JSC { } private: - static const unsigned maxCacheableSourceLength = 256; + ALWAYS_INLINE bool isCacheableScope(JSScope* scope) + { + return scope->isGlobalLexicalEnvironment() || scope->isFunctionNameScopeObject() || scope->isVarScope(); + } + + ALWAYS_INLINE bool isCacheable(bool inStrictContext, const String& evalSource, JSScope* scope) + { + // If eval() is called and it has access to a lexical scope, we can't soundly cache it. + // If the eval() only has access to the "var" scope, then we can cache it. + return !inStrictContext + && static_cast<size_t>(evalSource.length()) < Options::maximumEvalCacheableSourceLength() + && isCacheableScope(scope); + } static const int maxCacheEntries = 64; - typedef HashMap<RefPtr<StringImpl>, WriteBarrier<EvalExecutable> > EvalCacheMap; + typedef HashMap<CacheKey, WriteBarrier<EvalExecutable>, CacheKey::Hash, CacheKey::HashTraits> EvalCacheMap; EvalCacheMap m_cacheMap; }; diff --git a/Source/JavaScriptCore/bytecode/ExecutableInfo.h b/Source/JavaScriptCore/bytecode/ExecutableInfo.h new file mode 100644 index 000000000..a45d5039d --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ExecutableInfo.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ExecutableInfo_h +#define ExecutableInfo_h + +#include "ParserModes.h" + +namespace JSC { + +enum class DerivedContextType : uint8_t { None, DerivedConstructorContext, DerivedMethodContext }; + +// FIXME: These flags, ParserModes and propagation to XXXCodeBlocks should be reorganized. +// https://bugs.webkit.org/show_bug.cgi?id=151547 +struct ExecutableInfo { + ExecutableInfo(bool usesEval, bool isStrictMode, bool isConstructor, bool isBuiltinFunction, ConstructorKind constructorKind, SuperBinding superBinding, SourceParseMode parseMode, DerivedContextType derivedContextType, bool isArrowFunctionContext, bool isClassContext) + : m_usesEval(usesEval) + , m_isStrictMode(isStrictMode) + , m_isConstructor(isConstructor) + , m_isBuiltinFunction(isBuiltinFunction) + , m_constructorKind(static_cast<unsigned>(constructorKind)) + , m_superBinding(static_cast<unsigned>(superBinding)) + , m_parseMode(parseMode) + , m_derivedContextType(static_cast<unsigned>(derivedContextType)) + , m_isArrowFunctionContext(isArrowFunctionContext) + , m_isClassContext(isClassContext) + { + ASSERT(m_constructorKind == static_cast<unsigned>(constructorKind)); + ASSERT(m_superBinding == static_cast<unsigned>(superBinding)); + } + + bool usesEval() const { return m_usesEval; } + bool isStrictMode() const { return m_isStrictMode; } + bool isConstructor() const { return m_isConstructor; } + bool isBuiltinFunction() const { return m_isBuiltinFunction; } + ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); } + SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); } + SourceParseMode parseMode() const { return m_parseMode; } + DerivedContextType derivedContextType() const { return static_cast<DerivedContextType>(m_derivedContextType); } + bool isArrowFunctionContext() const { return m_isArrowFunctionContext; } + bool isClassContext() const { return m_isClassContext; } + +private: + unsigned m_usesEval : 1; + unsigned m_isStrictMode : 1; + unsigned m_isConstructor : 1; + unsigned m_isBuiltinFunction : 1; + unsigned m_constructorKind : 2; + unsigned m_superBinding : 1; + SourceParseMode m_parseMode; + unsigned m_derivedContextType : 2; + unsigned m_isArrowFunctionContext : 1; + unsigned m_isClassContext : 1; +}; + +} // namespace JSC + +#endif // ExecutableInfo_h diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp index dca9c5126..fe4e430f1 100644 --- a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp +++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. 
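ExecutableInfo above packs each enum into a one- or two-bit field and ASSERTs in the constructor that the narrowing cast round-trips, so a future enum value that no longer fits is caught at construction time rather than silently truncated. A standalone sketch of that idiom, using assert in place of the WTF macro:

    #include <cassert>
    #include <cstdint>

    enum class ConstructorKind : uint8_t { None, Base, Derived }; // values fit in 2 bits

    struct PackedInfo {
        explicit PackedInfo(bool strict, ConstructorKind kind)
            : m_isStrict(strict)
            , m_constructorKind(static_cast<unsigned>(kind))
        {
            // If ConstructorKind ever grows past 3, the 2-bit field silently
            // truncates; this assert catches that at construction time.
            assert(m_constructorKind == static_cast<unsigned>(kind));
        }

        ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }

        unsigned m_isStrict : 1;
        unsigned m_constructorKind : 2;
    };

    int main()
    {
        PackedInfo info(true, ConstructorKind::Derived);
        assert(info.constructorKind() == ConstructorKind::Derived);
    }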
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,16 +28,25 @@ #include "CodeBlock.h" #include "ExecutableAllocator.h" +#include "JSCInlines.h" #include <wtf/StringExtras.h> namespace JSC { -ExecutionCounter::ExecutionCounter() +template<CountingVariant countingVariant> +ExecutionCounter<countingVariant>::ExecutionCounter() { reset(); } -bool ExecutionCounter::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock) +template<CountingVariant countingVariant> +void ExecutionCounter<countingVariant>::forceSlowPathConcurrently() +{ + m_counter = 0; +} + +template<CountingVariant countingVariant> +bool ExecutionCounter<countingVariant>::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock) { if (hasCrossedThreshold(codeBlock)) return true; @@ -48,21 +57,23 @@ bool ExecutionCounter::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock) return false; } -void ExecutionCounter::setNewThreshold(int32_t threshold, CodeBlock* codeBlock) +template<CountingVariant countingVariant> +void ExecutionCounter<countingVariant>::setNewThreshold(int32_t threshold, CodeBlock* codeBlock) { reset(); m_activeThreshold = threshold; setThreshold(codeBlock); } -void ExecutionCounter::deferIndefinitely() +template<CountingVariant countingVariant> +void ExecutionCounter<countingVariant>::deferIndefinitely() { m_totalCount = 0; m_activeThreshold = std::numeric_limits<int32_t>::max(); m_counter = std::numeric_limits<int32_t>::min(); } -double ExecutionCounter::applyMemoryUsageHeuristics(int32_t value, CodeBlock* codeBlock) +double applyMemoryUsageHeuristics(int32_t value, CodeBlock* codeBlock) { #if ENABLE(JIT) double multiplier = @@ -77,8 +88,7 @@ double ExecutionCounter::applyMemoryUsageHeuristics(int32_t value, CodeBlock* co return multiplier * value; } -int32_t ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt( - int32_t value, CodeBlock* codeBlock) +int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock* codeBlock) { double doubleResult = applyMemoryUsageHeuristics(value, codeBlock); @@ -90,7 +100,8 @@ int32_t ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt( return static_cast<int32_t>(doubleResult); } -bool ExecutionCounter::hasCrossedThreshold(CodeBlock* codeBlock) const +template<CountingVariant countingVariant> +bool ExecutionCounter<countingVariant>::hasCrossedThreshold(CodeBlock* codeBlock) const { // This checks if the current count rounded up to the threshold we were targeting. // For example, if we are using half of available executable memory and have @@ -114,21 +125,20 @@ bool ExecutionCounter::hasCrossedThreshold(CodeBlock* codeBlock) const return static_cast<double>(m_totalCount) + m_counter >= modifiedThreshold - static_cast<double>( - std::min(m_activeThreshold, Options::maximumExecutionCountsBetweenCheckpoints())) / 2; + std::min(m_activeThreshold, maximumExecutionCountsBetweenCheckpoints())) / 2; } -bool ExecutionCounter::setThreshold(CodeBlock* codeBlock) +template<CountingVariant countingVariant> +bool ExecutionCounter<countingVariant>::setThreshold(CodeBlock* codeBlock) { if (m_activeThreshold == std::numeric_limits<int32_t>::max()) { deferIndefinitely(); return false; } - ASSERT(!hasCrossedThreshold(codeBlock)); - // Compute the true total count. double trueTotalCount = count(); - + // Correct the threshold for current memory usage. 
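The counting protocol these templated methods implement (the remainder of setThreshold follows below): m_counter is the only field machine code touches, it is biased negative so jitted code can use a single add-and-branch-on-sign, and the running total stays consistent when a new threshold is installed. A simplified single-tier sketch, ignoring the memory-usage correction and the float-bits encoding of m_totalCount:

    #include <cassert>
    #include <cstdint>

    // Simplified model of the biased execution counter.
    struct Counter {
        int32_t m_counter = 0;   // the only field (modeled) machine code touches
        double m_totalCount = 0; // executions accounted for by earlier thresholds

        double count() const { return m_totalCount + m_counter; }

        void setNewThreshold(int32_t threshold)
        {
            // Bias the counter negative; reaching zero means the threshold fired.
            // m_totalCount is adjusted so count() remains the true total.
            m_totalCount = count() + threshold;
            m_counter = -threshold;
        }

        // What the jitted add-and-branch amounts to.
        bool countAndCheck() { return ++m_counter >= 0; }
    };

    int main()
    {
        Counter counter;
        counter.setNewThreshold(3);
        assert(!counter.countAndCheck());
        assert(!counter.countAndCheck());
        assert(counter.countAndCheck()); // third execution crosses the threshold
        assert(counter.count() == 3);    // the true total survives the biasing
    }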
double threshold = applyMemoryUsageHeuristics(m_activeThreshold, codeBlock); @@ -154,17 +164,22 @@ bool ExecutionCounter::setThreshold(CodeBlock* codeBlock) return false; } -void ExecutionCounter::reset() +template<CountingVariant countingVariant> +void ExecutionCounter<countingVariant>::reset() { m_counter = 0; m_totalCount = 0; m_activeThreshold = 0; } -void ExecutionCounter::dump(PrintStream& out) const +template<CountingVariant countingVariant> +void ExecutionCounter<countingVariant>::dump(PrintStream& out) const { out.printf("%lf/%lf, %d", count(), static_cast<double>(m_activeThreshold), m_counter); } +template class ExecutionCounter<CountingForBaseline>; +template class ExecutionCounter<CountingForUpperTiers>; + } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.h b/Source/JavaScriptCore/bytecode/ExecutionCounter.h index c755c0445..5002c6c67 100644 --- a/Source/JavaScriptCore/bytecode/ExecutionCounter.h +++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -35,46 +35,67 @@ namespace JSC { class CodeBlock; +enum CountingVariant { + CountingForBaseline, + CountingForUpperTiers +}; + +double applyMemoryUsageHeuristics(int32_t value, CodeBlock*); +int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock*); + +inline int32_t formattedTotalExecutionCount(float value) +{ + union { + int32_t i; + float f; + } u; + u.f = value; + return u.i; +} + +template<CountingVariant countingVariant> class ExecutionCounter { public: ExecutionCounter(); + void forceSlowPathConcurrently(); // If you use this, checkIfThresholdCrossedAndSet() may still return false. bool checkIfThresholdCrossedAndSet(CodeBlock*); void setNewThreshold(int32_t threshold, CodeBlock*); void deferIndefinitely(); double count() const { return static_cast<double>(m_totalCount) + m_counter; } void dump(PrintStream&) const; - static double applyMemoryUsageHeuristics(int32_t value, CodeBlock*); - static int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock*); + + static int32_t maximumExecutionCountsBetweenCheckpoints() + { + switch (countingVariant) { + case CountingForBaseline: + return Options::maximumExecutionCountsBetweenCheckpointsForBaseline(); + case CountingForUpperTiers: + return Options::maximumExecutionCountsBetweenCheckpointsForUpperTiers(); + default: + RELEASE_ASSERT_NOT_REACHED(); + return 0; + } + } + template<typename T> static T clippedThreshold(JSGlobalObject* globalObject, T threshold) { int32_t maxThreshold; if (Options::randomizeExecutionCountsBetweenCheckpoints()) - maxThreshold = globalObject->weakRandomInteger() % Options::maximumExecutionCountsBetweenCheckpoints(); + maxThreshold = globalObject->weakRandomInteger() % maximumExecutionCountsBetweenCheckpoints(); else - maxThreshold = Options::maximumExecutionCountsBetweenCheckpoints(); + maxThreshold = maximumExecutionCountsBetweenCheckpoints(); if (threshold > maxThreshold) threshold = maxThreshold; return threshold; } - static int32_t formattedTotalCount(float value) - { - union { - int32_t i; - float f; - } u; - u.f = value; - return u.i; - } - private: bool hasCrossedThreshold(CodeBlock*) const; bool setThreshold(CodeBlock*); void reset(); public: - // NB. 
These are intentionally public because it will be modified from machine code. // This counter is incremented by the JIT or LLInt. It starts out negative and is @@ -89,11 +110,14 @@ public: // m_counter. float m_totalCount; - // This is the threshold we were originally targetting, without any correction for + // This is the threshold we were originally targeting, without any correction for // the memory usage heuristics. int32_t m_activeThreshold; }; +typedef ExecutionCounter<CountingForBaseline> BaselineExecutionCounter; +typedef ExecutionCounter<CountingForUpperTiers> UpperTierExecutionCounter; + } // namespace JSC #endif // ExecutionCounter_h diff --git a/Source/JavaScriptCore/bytecode/ExitKind.cpp b/Source/JavaScriptCore/bytecode/ExitKind.cpp index a8d904585..84ff57b59 100644 --- a/Source/JavaScriptCore/bytecode/ExitKind.cpp +++ b/Source/JavaScriptCore/bytecode/ExitKind.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -38,60 +38,73 @@ const char* exitKindToString(ExitKind kind) return "Unset"; case BadType: return "BadType"; - case BadFunction: - return "BadFunction"; + case BadCell: + return "BadCell"; + case BadIdent: + return "BadIdent"; case BadExecutable: return "BadExecutable"; case BadCache: return "BadCache"; - case BadWeakConstantCache: - return "BadWeakConstantCache"; + case BadConstantCache: + return "BadConstantCache"; case BadIndexingType: return "BadIndexingType"; + case BadTypeInfoFlags: + return "BadTypeInfoFlags"; case Overflow: return "Overflow"; case NegativeZero: return "NegativeZero"; + case Int52Overflow: + return "Int52Overflow"; case StoreToHole: return "StoreToHole"; case LoadFromHole: return "LoadFromHole"; case OutOfBounds: return "OutOfBounds"; - case StoreToHoleOrOutOfBounds: - return "StoreToHoleOrOutOfBounds"; case InadequateCoverage: return "InadequateCoverage"; case ArgumentsEscaped: return "ArgumentsEscaped"; + case ExoticObjectMode: + return "ExoticObjectMode"; case NotStringObject: return "NotStringObject"; + case VarargsOverflow: + return "VarargsOverflow"; + case TDZFailure: + return "TDZFailure"; case Uncountable: return "Uncountable"; - case UncountableWatchpoint: - return "UncountableWatchpoint"; - default: - RELEASE_ASSERT_NOT_REACHED(); - return "Unknown"; + case UncountableInvalidation: + return "UncountableInvalidation"; + case WatchdogTimerFired: + return "WatchdogTimerFired"; + case DebuggerEvent: + return "DebuggerEvent"; + case ExceptionCheck: + return "ExceptionCheck"; + case GenericUnwind: + return "GenericUnwind"; } + RELEASE_ASSERT_NOT_REACHED(); + return "Unknown"; } -bool exitKindIsCountable(ExitKind kind) +bool exitKindMayJettison(ExitKind kind) { switch (kind) { - case ExitKindUnset: - RELEASE_ASSERT_NOT_REACHED(); - case BadType: - case Uncountable: - case UncountableWatchpoint: - case LoadFromHole: // Already counted directly by the baseline JIT. - case StoreToHole: // Already counted directly by the baseline JIT. - case OutOfBounds: // Already counted directly by the baseline JIT. - case StoreToHoleOrOutOfBounds: // Already counted directly by the baseline JIT. 
+ case ExceptionCheck: + case GenericUnwind: return false; default: return true; } + + RELEASE_ASSERT_NOT_REACHED(); + return false; } } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/ExitKind.h b/Source/JavaScriptCore/bytecode/ExitKind.h index af918ace3..22a54a1a9 100644 --- a/Source/JavaScriptCore/bytecode/ExitKind.h +++ b/Source/JavaScriptCore/bytecode/ExitKind.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,30 +28,38 @@ namespace JSC { -enum ExitKind { +enum ExitKind : uint8_t { ExitKindUnset, BadType, // We exited because a type prediction was wrong. - BadFunction, // We exited because we made an incorrect assumption about what function we would see. + BadCell, // We exited because we made an incorrect assumption about what cell we would see. Usually used for function checks. + BadIdent, // We exited because we made an incorrect assumption about what identifier we would see. Usually used for cached Id check in get_by_val. BadExecutable, // We exited because we made an incorrect assumption about what executable we would see. BadCache, // We exited because an inline cache was wrong. - BadWeakConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong. + BadConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong. BadIndexingType, // We exited because an indexing type was wrong. + BadTypeInfoFlags, // We exited because we made an incorrect assumption about what TypeInfo flags we would see. Overflow, // We exited because of overflow. NegativeZero, // We exited because we encountered negative zero. + Int52Overflow, // We exited because of an Int52 overflow. StoreToHole, // We had a store to a hole. LoadFromHole, // We had a load from a hole. OutOfBounds, // We had an out-of-bounds access to an array. - StoreToHoleOrOutOfBounds, // We're simultaneously speculating that we're in bounds and not accessing a hole, and one of those things didn't pan out. InadequateCoverage, // We exited because we ended up in code that didn't have profiling coverage. ArgumentsEscaped, // We exited because arguments escaped but we didn't expect them to. + ExoticObjectMode, // We exited because some exotic object that we were accessing was in an exotic mode (like Arguments with slow arguments). NotStringObject, // We exited because we shouldn't have attempted to optimize string object access. + VarargsOverflow, // We exited because a varargs call passed more arguments than we expected. + TDZFailure, // We exited because we were in the TDZ and accessed the variable. Uncountable, // We exited for none of the above reasons, and we should not count it. Most uses of this should be viewed as a FIXME. - UncountableWatchpoint, // We exited because of a watchpoint, which isn't counted because watchpoints do tracking themselves. - WatchdogTimerFired // We exited because we need to service the watchdog timer. + UncountableInvalidation, // We exited because the code block was invalidated; this means that we've already counted the reasons why the code block was invalidated. + WatchdogTimerFired, // We exited because we need to service the watchdog timer. + DebuggerEvent, // We exited because we need to service the debugger. 
+ ExceptionCheck, // We exited because a direct exception check showed that we threw an exception from a C call. + GenericUnwind, // We exited because we arrived at this OSR exit from genericUnwind. +}; const char* exitKindToString(ExitKind); -bool exitKindIsCountable(ExitKind); +bool exitKindMayJettison(ExitKind); } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/ExitingJITType.cpp b/Source/JavaScriptCore/bytecode/ExitingJITType.cpp new file mode 100644 index 000000000..aa8f120b6 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ExitingJITType.cpp @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ExitingJITType.h" + +#include <wtf/PrintStream.h> + +namespace WTF { + +using namespace JSC; + +void printInternal(PrintStream& out, ExitingJITType type) +{ + switch (type) { + case ExitFromAnything: + out.print("FromAnything"); + return; + case ExitFromDFG: + out.print("FromDFG"); + return; + case ExitFromFTL: + out.print("FromFTL"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + diff --git a/Source/JavaScriptCore/bytecode/ExitingJITType.h b/Source/JavaScriptCore/bytecode/ExitingJITType.h new file mode 100644 index 000000000..e8ed03e41 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ExitingJITType.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC.
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ExitingJITType_h +#define ExitingJITType_h + +#include "JITCode.h" + +namespace JSC { + +enum ExitingJITType : uint8_t { + ExitFromAnything, + ExitFromDFG, + ExitFromFTL +}; + +inline ExitingJITType exitingJITTypeFor(JITCode::JITType type) +{ + switch (type) { + case JITCode::DFGJIT: + return ExitFromDFG; + case JITCode::FTLJIT: + return ExitFromFTL; + default: + RELEASE_ASSERT_NOT_REACHED(); + return ExitFromAnything; + } +} + +} // namespace JSC + +namespace WTF { + +class PrintStream; +void printInternal(PrintStream&, JSC::ExitingJITType); + +} // namespace WTF + +#endif // ExitingJITType_h + diff --git a/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h b/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h new file mode 100644 index 000000000..b22198a00 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
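ExitingJITType, introduced above, tags each frequent-exit record with the tier it exited from, and exitingJITTypeFor collapses the broader JITCode::JITType enum down to the two optimizing tiers. A sketch of the same mapping over local stand-in enums:

    #include <cassert>

    // Stand-ins for JITCode::JITType and JSC::ExitingJITType.
    enum class JITType { LLInt, Baseline, DFG, FTL };
    enum class ExitingJITType { FromAnything, FromDFG, FromFTL };

    ExitingJITType exitingJITTypeFor(JITType type)
    {
        switch (type) {
        case JITType::DFG:
            return ExitingJITType::FromDFG;
        case JITType::FTL:
            return ExitingJITType::FromFTL;
        default:
            // Only optimizing tiers record OSR exits; anything else is a logic error.
            assert(false);
            return ExitingJITType::FromAnything;
        }
    }

    int main()
    {
        assert(exitingJITTypeFor(JITType::DFG) == ExitingJITType::FromDFG);
        assert(exitingJITTypeFor(JITType::FTL) == ExitingJITType::FromFTL);
    }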
+ */ + +#ifndef FullBytecodeLiveness_h +#define FullBytecodeLiveness_h + +#include <wtf/FastBitVector.h> + +namespace JSC { + +class BytecodeLivenessAnalysis; + +typedef HashMap<unsigned, FastBitVector, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> BytecodeToBitmapMap; + +class FullBytecodeLiveness { + WTF_MAKE_FAST_ALLOCATED; +public: + const FastBitVector& getLiveness(unsigned bytecodeIndex) const + { + return m_map[bytecodeIndex]; + } + + bool operandIsLive(int operand, unsigned bytecodeIndex) const + { + return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(getLiveness(bytecodeIndex), operand); + } + +private: + friend class BytecodeLivenessAnalysis; + + Vector<FastBitVector, 0, UnsafeVectorOverflow> m_map; +}; + +} // namespace JSC + +#endif // FullBytecodeLiveness_h + diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp index db4aa9b99..66a4dd81d 100644 --- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp +++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,264 +27,348 @@ #include "GetByIdStatus.h" #include "CodeBlock.h" +#include "ComplexGetStatus.h" +#include "JSCInlines.h" #include "JSScope.h" #include "LLIntData.h" #include "LowLevelInterpreter.h" -#include "Operations.h" +#include "PolymorphicAccess.h" +#include <wtf/ListDump.h> namespace JSC { -GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident) +bool GetByIdStatus::appendVariant(const GetByIdVariant& variant) +{ + // Attempt to merge this variant with an already existing variant. + for (unsigned i = 0; i < m_variants.size(); ++i) { + if (m_variants[i].attemptToMerge(variant)) + return true; + } + + // Make sure there is no overlap. We should have pruned out opportunities for + // overlap but it's possible that an inline cache got into a weird state. We are + // defensive and bail if we detect crazy. 
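The appendVariant() strategy described in the comment above is merge-first, then verify disjointness (the overlap loop itself follows below): if the new variant cannot be folded into an existing one, any structure-set overlap means the cache state is inconsistent and the computation bails. The shape of that defensive pattern, sketched over plain integers rather than Structure sets, with an illustrative merge rule:

    #include <vector>

    struct Variant {
        int offset;                  // must match for a merge to be sound
        std::vector<int> structures; // the "structure set" this variant covers

        bool overlaps(const Variant& other) const
        {
            for (int s : structures) {
                for (int t : other.structures) {
                    if (s == t)
                        return true;
                }
            }
            return false;
        }

        // Illustrative rule: variants with the same payload merge by unioning
        // their sets. The real GetByIdVariant also compares condition sets.
        bool attemptToMerge(const Variant& other)
        {
            if (offset != other.offset)
                return false;
            structures.insert(structures.end(), other.structures.begin(), other.structures.end());
            return true;
        }
    };

    // Merge if possible; otherwise require disjoint coverage, else bail.
    bool appendVariant(std::vector<Variant>& variants, const Variant& variant)
    {
        for (Variant& existing : variants) {
            if (existing.attemptToMerge(variant))
                return true;
        }
        for (const Variant& existing : variants) {
            if (existing.overlaps(variant))
                return false; // inconsistent cache state: be defensive
        }
        variants.push_back(variant);
        return true;
    }

    int main()
    {
        std::vector<Variant> variants;
        appendVariant(variants, { 8, { 1 } });
        appendVariant(variants, { 8, { 2 } });            // merges: same offset
        bool ok = appendVariant(variants, { 16, { 1 } }); // overlaps structure 1, different offset
        return ok ? 1 : 0;                                // expect 0: the overlap was rejected
    }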
+ for (unsigned i = 0; i < m_variants.size(); ++i) { + if (m_variants[i].structureSet().overlaps(variant.structureSet())) + return false; + } + + m_variants.append(variant); + return true; +} + +#if ENABLE(DFG_JIT) +bool GetByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex) +{ + return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache)) + || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache)); +} +#endif + +GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid) { UNUSED_PARAM(profiledBlock); UNUSED_PARAM(bytecodeIndex); - UNUSED_PARAM(ident); -#if ENABLE(LLINT) + UNUSED_PARAM(uid); + + VM& vm = *profiledBlock->vm(); + Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex; - if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_get_array_length)) + if (instruction[0].u.opcode == LLInt::getOpcode(op_get_array_length)) return GetByIdStatus(NoInformation, false); - Structure* structure = instruction[4].u.structure.get(); - if (!structure) + StructureID structureID = instruction[4].u.structureID; + if (!structureID) return GetByIdStatus(NoInformation, false); - + + Structure* structure = vm.heap.structureIDTable().get(structureID); + + if (structure->takesSlowPathInDFGForImpureProperty()) + return GetByIdStatus(NoInformation, false); + unsigned attributesIgnored; - JSCell* specificValue; - PropertyOffset offset = structure->get( - *profiledBlock->vm(), ident, attributesIgnored, specificValue); - if (structure->isDictionary()) - specificValue = 0; + PropertyOffset offset = structure->getConcurrently(uid, attributesIgnored); if (!isValidOffset(offset)) return GetByIdStatus(NoInformation, false); - return GetByIdStatus(Simple, false, StructureSet(structure), offset, specificValue); -#else - return GetByIdStatus(NoInformation, false); -#endif + return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset)); } -void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, Identifier& ident, Structure* structure) +GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid) { -#if ENABLE(JIT) && ENABLE(VALUE_PROFILER) - // Validate the chain. If the chain is invalid, then currently the best thing - // we can do is to assume that TakesSlow is true. In the future, it might be - // worth exploring reifying the structure chain from the structure we've got - // instead of using the one from the cache, since that will do the right things - // if the structure chain has changed. But that may be harder, because we may - // then end up having a different type of access altogether. And it currently - // does not appear to be worth it to do so -- effectively, the heuristic we - // have now is that if the structure chain has changed between when it was - // cached on in the baseline JIT and when the DFG tried to inline the access, - // then we fall back on a polymorphic access. 
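computeFromLLInt above is the concurrency-sensitive path: it reads a raw StructureID out of the instruction stream, resolves it through the heap's structure table, and returns NoInformation on every early-out rather than guessing. A sketch of that validate-before-use shape, with all types mocked locally:

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    // Mocked stand-ins: a StructureID of 0 means "no cache yet".
    using StructureID = uint32_t;
    using PropertyOffset = int;
    constexpr PropertyOffset invalidOffset = -1;

    struct Structure {
        std::unordered_map<std::string, PropertyOffset> properties;

        PropertyOffset getConcurrently(const std::string& uid) const
        {
            auto it = properties.find(uid);
            return it == properties.end() ? invalidOffset : it->second;
        }
    };

    struct StructureTable {
        std::unordered_map<StructureID, Structure*> table;
        Structure* get(StructureID id) const { return table.at(id); }
    };

    enum class Status { NoInformation, Simple };

    // Every early-out reports NoInformation rather than guessing.
    Status computeFromLLInt(const StructureTable& table, StructureID cachedID,
        const std::string& uid, PropertyOffset& offsetOut)
    {
        if (!cachedID)
            return Status::NoInformation; // cache never populated

        Structure* structure = table.get(cachedID);
        PropertyOffset offset = structure->getConcurrently(uid);
        if (offset == invalidOffset)
            return Status::NoInformation; // property moved or vanished

        offsetOut = offset;
        return Status::Simple;
    }

    int main()
    {
        Structure s;
        s.properties["x"] = 3;
        StructureTable table;
        table.table[7] = &s;

        PropertyOffset offset = invalidOffset;
        return computeFromLLInt(table, 7, "x", offset) == Status::Simple && offset == 3 ? 0 : 1;
    }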
- Structure* currentStructure = structure; - JSObject* currentObject = 0; - for (unsigned i = 0; i < result.m_chain.size(); ++i) { - ASSERT(!currentStructure->isDictionary()); - currentObject = asObject(currentStructure->prototypeForLookup(profiledBlock)); - currentStructure = result.m_chain[i]; - if (currentObject->structure() != currentStructure) - return; - } + ConcurrentJITLocker locker(profiledBlock->m_lock); + + GetByIdStatus result; + +#if ENABLE(DFG_JIT) + result = computeForStubInfoWithoutExitSiteFeedback( + locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid, + CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex)); - ASSERT(currentObject); - - unsigned attributesIgnored; - JSCell* specificValue; - - result.m_offset = currentStructure->get( - *profiledBlock->vm(), ident, attributesIgnored, specificValue); - if (currentStructure->isDictionary()) - specificValue = 0; - if (!isValidOffset(result.m_offset)) - return; - - result.m_structureSet.add(structure); - result.m_specificValue = JSValue(specificValue); + if (!result.takesSlowPath() + && hasExitSite(locker, profiledBlock, bytecodeIndex)) + return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true); #else - UNUSED_PARAM(result); - UNUSED_PARAM(profiledBlock); - UNUSED_PARAM(ident); - UNUSED_PARAM(structure); - UNREACHABLE_FOR_PLATFORM(); + UNUSED_PARAM(map); #endif + + if (!result) + return computeFromLLInt(profiledBlock, bytecodeIndex, uid); + + return result; } -GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident) +#if ENABLE(DFG_JIT) +GetByIdStatus GetByIdStatus::computeForStubInfo(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid) { - UNUSED_PARAM(profiledBlock); - UNUSED_PARAM(bytecodeIndex); - UNUSED_PARAM(ident); -#if ENABLE(JIT) && ENABLE(VALUE_PROFILER) - if (!profiledBlock->numberOfStructureStubInfos()) - return computeFromLLInt(profiledBlock, bytecodeIndex, ident); - - // First check if it makes either calls, in which case we want to be super careful, or - // if it's not set at all, in which case we punt. - StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex); - if (!stubInfo.seen) - return computeFromLLInt(profiledBlock, bytecodeIndex, ident); - - if (stubInfo.resetByGC) - return GetByIdStatus(TakesSlowPath, true); + GetByIdStatus result = GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback( + locker, profiledBlock, stubInfo, uid, + CallLinkStatus::computeExitSiteData(locker, profiledBlock, codeOrigin.bytecodeIndex)); - PolymorphicAccessStructureList* list; - int listSize; - switch (stubInfo.accessType) { - case access_get_by_id_self_list: - list = stubInfo.u.getByIdSelfList.structureList; - listSize = stubInfo.u.getByIdSelfList.listSize; - break; - case access_get_by_id_proto_list: - list = stubInfo.u.getByIdProtoList.structureList; - listSize = stubInfo.u.getByIdProtoList.listSize; - break; - default: - list = 0; - listSize = 0; - break; - } - for (int i = 0; i < listSize; ++i) { - if (!list->list[i].isDirect) - return GetByIdStatus(MakesCalls, true); + if (!result.takesSlowPath() && GetByIdStatus::hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex)) + return GetByIdStatus(result.makesCalls() ? 
GetByIdStatus::MakesCalls : GetByIdStatus::TakesSlowPath, true); + return result; +} +#endif // ENABLE(DFG_JIT) + +#if ENABLE(JIT) +GetByIdStatus GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback( + const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, UniquedStringImpl* uid, + CallLinkStatus::ExitSiteData callExitSiteData) +{ + if (!stubInfo || !stubInfo->everConsidered) + return GetByIdStatus(NoInformation); + + PolymorphicAccess* list = 0; + State slowPathState = TakesSlowPath; + if (stubInfo->cacheType == CacheType::Stub) { + list = stubInfo->u.stub; + for (unsigned i = 0; i < list->size(); ++i) { + const AccessCase& access = list->at(i); + if (access.doesCalls()) + slowPathState = MakesCalls; + } } - // Next check if it takes slow case, in which case we want to be kind of careful. - if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex)) - return GetByIdStatus(TakesSlowPath, true); + if (stubInfo->tookSlowPath) + return GetByIdStatus(slowPathState); // Finally figure out if we can derive an access strategy. GetByIdStatus result; + result.m_state = Simple; result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only. - switch (stubInfo.accessType) { - case access_unset: - return computeFromLLInt(profiledBlock, bytecodeIndex, ident); + switch (stubInfo->cacheType) { + case CacheType::Unset: + return GetByIdStatus(NoInformation); - case access_get_by_id_self: { - Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get(); + case CacheType::GetByIdSelf: { + Structure* structure = stubInfo->u.byIdSelf.baseObjectStructure.get(); + if (structure->takesSlowPathInDFGForImpureProperty()) + return GetByIdStatus(slowPathState, true); unsigned attributesIgnored; - JSCell* specificValue; - result.m_offset = structure->get( - *profiledBlock->vm(), ident, attributesIgnored, specificValue); - if (structure->isDictionary()) - specificValue = 0; + GetByIdVariant variant; + variant.m_offset = structure->getConcurrently(uid, attributesIgnored); + if (!isValidOffset(variant.m_offset)) + return GetByIdStatus(slowPathState, true); - if (isValidOffset(result.m_offset)) { - result.m_structureSet.add(structure); - result.m_specificValue = JSValue(specificValue); - } - - if (isValidOffset(result.m_offset)) - ASSERT(result.m_structureSet.size()); - break; + variant.m_structureSet.add(structure); + bool didAppend = result.appendVariant(variant); + ASSERT_UNUSED(didAppend, didAppend); + return result; } - case access_get_by_id_self_list: { - for (int i = 0; i < listSize; ++i) { - ASSERT(list->list[i].isDirect); - - Structure* structure = list->list[i].base.get(); - if (result.m_structureSet.contains(structure)) - continue; - - unsigned attributesIgnored; - JSCell* specificValue; - PropertyOffset myOffset = structure->get( - *profiledBlock->vm(), ident, attributesIgnored, specificValue); - if (structure->isDictionary()) - specificValue = 0; + case CacheType::Stub: { + for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) { + const AccessCase& access = list->at(listIndex); + if (access.viaProxy()) + return GetByIdStatus(slowPathState, true); - if (!isValidOffset(myOffset)) { - result.m_offset = invalidOffset; - break; + Structure* structure = access.structure(); + if (!structure) { + // The null structure cases arise due to array.length and string.length. We have no way + // of creating a GetByIdVariant for those, and we don't really have to since the DFG + // handles those cases in FixupPhase using value profiling. 
That's a bit awkward - we + // shouldn't have to use value profiling to discover something that the AccessCase + // could have told us. But, it works well enough. So, our only concern here is to not + // crash on null structure. + return GetByIdStatus(slowPathState, true); } - - if (!i) { - result.m_offset = myOffset; - result.m_specificValue = JSValue(specificValue); - } else if (result.m_offset != myOffset) { - result.m_offset = invalidOffset; - break; - } else if (result.m_specificValue != JSValue(specificValue)) - result.m_specificValue = JSValue(); - result.m_structureSet.add(structure); + ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor( + structure, access.conditionSet(), uid); + + switch (complexGetStatus.kind()) { + case ComplexGetStatus::ShouldSkip: + continue; + + case ComplexGetStatus::TakesSlowPath: + return GetByIdStatus(slowPathState, true); + + case ComplexGetStatus::Inlineable: { + std::unique_ptr<CallLinkStatus> callLinkStatus; + JSFunction* intrinsicFunction = nullptr; + + switch (access.type()) { + case AccessCase::Load: { + break; + } + case AccessCase::IntrinsicGetter: { + intrinsicFunction = access.intrinsicFunction(); + break; + } + case AccessCase::Getter: { + CallLinkInfo* callLinkInfo = access.callLinkInfo(); + ASSERT(callLinkInfo); + callLinkStatus = std::make_unique<CallLinkStatus>( + CallLinkStatus::computeFor( + locker, profiledBlock, *callLinkInfo, callExitSiteData)); + break; + } + default: { + // FIXME: It would be totally sweet to support more of these at some point in the + // future. https://bugs.webkit.org/show_bug.cgi?id=133052 + return GetByIdStatus(slowPathState, true); + } } + + GetByIdVariant variant( + StructureSet(structure), complexGetStatus.offset(), + complexGetStatus.conditionSet(), WTFMove(callLinkStatus), + intrinsicFunction); + + if (!result.appendVariant(variant)) + return GetByIdStatus(slowPathState, true); + break; + } } } - - if (isValidOffset(result.m_offset)) - ASSERT(result.m_structureSet.size()); - break; - } - case access_get_by_id_proto: { - if (!stubInfo.u.getByIdProto.isDirect) - return GetByIdStatus(MakesCalls, true); - result.m_chain.append(stubInfo.u.getByIdProto.prototypeStructure.get()); - computeForChain( - result, profiledBlock, ident, - stubInfo.u.getByIdProto.baseObjectStructure.get()); - break; - } - - case access_get_by_id_chain: { - if (!stubInfo.u.getByIdChain.isDirect) - return GetByIdStatus(MakesCalls, true); - for (unsigned i = 0; i < stubInfo.u.getByIdChain.count; ++i) - result.m_chain.append(stubInfo.u.getByIdChain.chain->head()[i].get()); - computeForChain( - result, profiledBlock, ident, - stubInfo.u.getByIdChain.baseObjectStructure.get()); - break; + return result; } default: - ASSERT(!isValidOffset(result.m_offset)); - break; + return GetByIdStatus(slowPathState, true); } - if (!isValidOffset(result.m_offset)) { - result.m_state = TakesSlowPath; - result.m_structureSet.clear(); - result.m_chain.clear(); - result.m_specificValue = JSValue(); - } else - result.m_state = Simple; - - return result; -#else // ENABLE(JIT) - return GetByIdStatus(NoInformation, false); + RELEASE_ASSERT_NOT_REACHED(); + return GetByIdStatus(); +} #endif // ENABLE(JIT) + +GetByIdStatus GetByIdStatus::computeFor( + CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, + StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid) +{ +#if ENABLE(DFG_JIT) + if (dfgBlock) { + CallLinkStatus::ExitSiteData exitSiteData; + { + ConcurrentJITLocker locker(profiledBlock->m_lock); + exitSiteData = 
CallLinkStatus::computeExitSiteData( + locker, profiledBlock, codeOrigin.bytecodeIndex); + } + + GetByIdStatus result; + { + ConcurrentJITLocker locker(dfgBlock->m_lock); + result = computeForStubInfoWithoutExitSiteFeedback( + locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData); + } + + if (result.takesSlowPath()) + return result; + + { + ConcurrentJITLocker locker(profiledBlock->m_lock); + if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex)) + return GetByIdStatus(TakesSlowPath, true); + } + + if (result.isSet()) + return result; + } +#else + UNUSED_PARAM(dfgBlock); + UNUSED_PARAM(dfgMap); +#endif + + return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid); } -GetByIdStatus GetByIdStatus::computeFor(VM& vm, Structure* structure, Identifier& ident) +GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, UniquedStringImpl* uid) { // For now we only handle the super simple self access case. We could handle the // prototype case in the future. - if (PropertyName(ident).asIndex() != PropertyName::NotAnIndex) - return GetByIdStatus(TakesSlowPath); - - if (structure->typeInfo().overridesGetOwnPropertySlot()) - return GetByIdStatus(TakesSlowPath); - - if (!structure->propertyAccessesAreCacheable()) + if (set.isEmpty()) + return GetByIdStatus(); + + if (parseIndex(*uid)) return GetByIdStatus(TakesSlowPath); GetByIdStatus result; - result.m_wasSeenInJIT = false; // To my knowledge nobody that uses computeFor(VM&, Structure*, Identifier&) reads this field, but I might as well be honest: no, it wasn't seen in the JIT, since I computed it statically. - unsigned attributes; - JSCell* specificValue; - result.m_offset = structure->get(vm, ident, attributes, specificValue); - if (!isValidOffset(result.m_offset)) - return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it. - if (attributes & Accessor) - return GetByIdStatus(MakesCalls); - if (structure->isDictionary()) - specificValue = 0; - result.m_structureSet.add(structure); - result.m_specificValue = JSValue(specificValue); + result.m_state = Simple; + result.m_wasSeenInJIT = false; + for (unsigned i = 0; i < set.size(); ++i) { + Structure* structure = set[i]; + if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType) + return GetByIdStatus(TakesSlowPath); + + if (!structure->propertyAccessesAreCacheable()) + return GetByIdStatus(TakesSlowPath); + + unsigned attributes; + PropertyOffset offset = structure->getConcurrently(uid, attributes); + if (!isValidOffset(offset)) + return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it. + if (attributes & Accessor) + return GetByIdStatus(MakesCalls); // We could be smarter here, like strength-reducing this to a Call. 
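[Illustrative aside, not part of the commit.] At this point computeFor(const StructureSet&, ...) is acting as a per-structure classifier: a structure only contributes a Simple variant if the load is provably a plain, cacheable own-property read, and any structure that fails a check pessimizes the whole status immediately. A compressed sketch of that decision ladder, using invented field names in place of the real Structure queries:

    enum class Outcome { Simple, TakesSlowPath, MakesCalls };

    // Stand-in for the handful of Structure facts consulted above.
    struct StructureFacts {
        bool overridesGetOwnPropertySlot;
        bool isGlobalObject;
        bool propertyAccessesAreCacheable;
        bool hasValidOffset;   // i.e. isValidOffset(getConcurrently(uid, ...))
        bool offsetIsAccessor; // i.e. attributes & Accessor
    };

    Outcome classify(const StructureFacts& s)
    {
        if (s.overridesGetOwnPropertySlot && !s.isGlobalObject)
            return Outcome::TakesSlowPath; // Custom lookup could do anything.
        if (!s.propertyAccessesAreCacheable)
            return Outcome::TakesSlowPath;
        if (!s.hasValidOffset)
            return Outcome::TakesSlowPath; // Probably lives on the prototype chain.
        if (s.offsetIsAccessor)
            return Outcome::MakesCalls;    // A getter would have to run.
        return Outcome::Simple;
    }

Returning early on the first pessimistic structure is deliberate: one uncacheable member makes the merged status useless, so there is nothing to gain from classifying the rest of the set.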
+ + if (!result.appendVariant(GetByIdVariant(structure, offset))) + return GetByIdStatus(TakesSlowPath); + } + return result; } +bool GetByIdStatus::makesCalls() const +{ + switch (m_state) { + case NoInformation: + case TakesSlowPath: + return false; + case Simple: + for (unsigned i = m_variants.size(); i--;) { + if (m_variants[i].callLinkStatus()) + return true; + } + return false; + case MakesCalls: + return true; + } + RELEASE_ASSERT_NOT_REACHED(); + + return false; +} + +void GetByIdStatus::dump(PrintStream& out) const +{ + out.print("("); + switch (m_state) { + case NoInformation: + out.print("NoInformation"); + break; + case Simple: + out.print("Simple"); + break; + case TakesSlowPath: + out.print("TakesSlowPath"); + break; + case MakesCalls: + out.print("MakesCalls"); + break; + } + out.print(", ", listDump(m_variants), ", seenInJIT = ", m_wasSeenInJIT, ")"); +} + } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.h b/Source/JavaScriptCore/bytecode/GetByIdStatus.h index 117766646..6afac5400 100644 --- a/Source/JavaScriptCore/bytecode/GetByIdStatus.h +++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,14 +26,16 @@ #ifndef GetByIdStatus_h #define GetByIdStatus_h -#include "PropertyOffset.h" -#include "StructureSet.h" -#include <wtf/NotFound.h> +#include "CallLinkStatus.h" +#include "CodeOrigin.h" +#include "ConcurrentJITLock.h" +#include "ExitingJITType.h" +#include "GetByIdVariant.h" +#include "StructureStubInfo.h" namespace JSC { class CodeBlock; -class Identifier; class GetByIdStatus { public: @@ -47,57 +49,66 @@ public: GetByIdStatus() : m_state(NoInformation) - , m_offset(invalidOffset) { } explicit GetByIdStatus(State state) : m_state(state) - , m_offset(invalidOffset) { ASSERT(state == NoInformation || state == TakesSlowPath || state == MakesCalls); } GetByIdStatus( - State state, bool wasSeenInJIT, const StructureSet& structureSet = StructureSet(), - PropertyOffset offset = invalidOffset, JSValue specificValue = JSValue(), Vector<Structure*> chain = Vector<Structure*>()) + State state, bool wasSeenInJIT, const GetByIdVariant& variant = GetByIdVariant()) : m_state(state) - , m_structureSet(structureSet) - , m_chain(chain) - , m_specificValue(specificValue) - , m_offset(offset) , m_wasSeenInJIT(wasSeenInJIT) { - ASSERT((state == Simple) == (offset != invalidOffset)); + ASSERT((state == Simple) == variant.isSet()); + m_variants.append(variant); } - static GetByIdStatus computeFor(CodeBlock*, unsigned bytecodeIndex, Identifier&); - static GetByIdStatus computeFor(VM&, Structure*, Identifier&); + static GetByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, UniquedStringImpl* uid); + static GetByIdStatus computeFor(const StructureSet&, UniquedStringImpl* uid); + static GetByIdStatus computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin, UniquedStringImpl* uid); + +#if ENABLE(DFG_JIT) + static GetByIdStatus computeForStubInfo(const ConcurrentJITLocker&, CodeBlock* baselineBlock, StructureStubInfo*, CodeOrigin, UniquedStringImpl* uid); +#endif + State state() const { return m_state; } bool isSet() const { return m_state != NoInformation; } bool operator!() const { return !isSet(); } bool 
isSimple() const { return m_state == Simple; } + + size_t numVariants() const { return m_variants.size(); } + const Vector<GetByIdVariant, 1>& variants() const { return m_variants; } + const GetByIdVariant& at(size_t index) const { return m_variants[index]; } + const GetByIdVariant& operator[](size_t index) const { return at(index); } + bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls; } - bool makesCalls() const { return m_state == MakesCalls; } - - const StructureSet& structureSet() const { return m_structureSet; } - const Vector<Structure*>& chain() const { return m_chain; } // Returns empty vector if this is a direct access. - JSValue specificValue() const { return m_specificValue; } // Returns JSValue() if there is no specific value. - PropertyOffset offset() const { return m_offset; } + bool makesCalls() const; bool wasSeenInJIT() const { return m_wasSeenInJIT; } + void dump(PrintStream&) const; + private: - static void computeForChain(GetByIdStatus& result, CodeBlock*, Identifier&, Structure*); - static GetByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, Identifier&); +#if ENABLE(DFG_JIT) + static bool hasExitSite(const ConcurrentJITLocker&, CodeBlock*, unsigned bytecodeIndex); +#endif +#if ENABLE(JIT) + static GetByIdStatus computeForStubInfoWithoutExitSiteFeedback( + const ConcurrentJITLocker&, CodeBlock* profiledBlock, StructureStubInfo*, + UniquedStringImpl* uid, CallLinkStatus::ExitSiteData); +#endif + static GetByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid); + + bool appendVariant(const GetByIdVariant&); State m_state; - StructureSet m_structureSet; - Vector<Structure*> m_chain; - JSValue m_specificValue; - PropertyOffset m_offset; + Vector<GetByIdVariant, 1> m_variants; bool m_wasSeenInJIT; }; diff --git a/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp b/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp new file mode 100644 index 000000000..a869d4a1c --- /dev/null +++ b/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "GetByIdVariant.h" + +#include "CallLinkStatus.h" +#include "JSCInlines.h" +#include <wtf/ListDump.h> + +namespace JSC { + +GetByIdVariant::GetByIdVariant( + const StructureSet& structureSet, PropertyOffset offset, + const ObjectPropertyConditionSet& conditionSet, + std::unique_ptr<CallLinkStatus> callLinkStatus, + JSFunction* intrinsicFunction) + : m_structureSet(structureSet) + , m_conditionSet(conditionSet) + , m_offset(offset) + , m_callLinkStatus(WTFMove(callLinkStatus)) + , m_intrinsicFunction(intrinsicFunction) +{ + if (!structureSet.size()) { + ASSERT(offset == invalidOffset); + ASSERT(conditionSet.isEmpty()); + } + if (intrinsicFunction) + ASSERT(intrinsic() != NoIntrinsic); +} + +GetByIdVariant::~GetByIdVariant() { } + +GetByIdVariant::GetByIdVariant(const GetByIdVariant& other) + : GetByIdVariant() +{ + *this = other; +} + +GetByIdVariant& GetByIdVariant::operator=(const GetByIdVariant& other) +{ + m_structureSet = other.m_structureSet; + m_conditionSet = other.m_conditionSet; + m_offset = other.m_offset; + m_intrinsicFunction = other.m_intrinsicFunction; + if (other.m_callLinkStatus) + m_callLinkStatus = std::make_unique<CallLinkStatus>(*other.m_callLinkStatus); + else + m_callLinkStatus = nullptr; + return *this; +} + +inline bool GetByIdVariant::canMergeIntrinsicStructures(const GetByIdVariant& other) const +{ + if (m_intrinsicFunction != other.m_intrinsicFunction) + return false; + switch (intrinsic()) { + case TypedArrayByteLengthIntrinsic: { + // We can merge these sets as long as the element size of the two sets is the same. + TypedArrayType thisType = (*m_structureSet.begin())->classInfo()->typedArrayStorageType; + TypedArrayType otherType = (*other.m_structureSet.begin())->classInfo()->typedArrayStorageType; + + ASSERT(isTypedView(thisType) && isTypedView(otherType)); + + return logElementSize(thisType) == logElementSize(otherType); + } + + default: + return true; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +bool GetByIdVariant::attemptToMerge(const GetByIdVariant& other) +{ + if (m_offset != other.m_offset) + return false; + if (m_callLinkStatus || other.m_callLinkStatus) + return false; + + if (!canMergeIntrinsicStructures(other)) + return false; + + if (m_conditionSet.isEmpty() != other.m_conditionSet.isEmpty()) + return false; + + ObjectPropertyConditionSet mergedConditionSet; + if (!m_conditionSet.isEmpty()) { + mergedConditionSet = m_conditionSet.mergedWith(other.m_conditionSet); + if (!mergedConditionSet.isValid() || !mergedConditionSet.hasOneSlotBaseCondition()) + return false; + } + m_conditionSet = mergedConditionSet; + + m_structureSet.merge(other.m_structureSet); + + return true; +} + +void GetByIdVariant::dump(PrintStream& out) const +{ + dumpInContext(out, 0); +} + +void GetByIdVariant::dumpInContext(PrintStream& out, DumpContext* context) const +{ + if (!isSet()) { + out.print("<empty>"); + return; + } + + out.print( + "<", inContext(structureSet(), context), ", ", inContext(m_conditionSet, context)); + out.print(", offset = ", offset()); + if (m_callLinkStatus) + out.print(", call = ", *m_callLinkStatus); + if (m_intrinsicFunction) + out.print(", intrinsic = ", *m_intrinsicFunction); + out.print(">"); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/GetByIdVariant.h b/Source/JavaScriptCore/bytecode/GetByIdVariant.h new file mode 100644 index 000000000..03a1e566f --- /dev/null +++ b/Source/JavaScriptCore/bytecode/GetByIdVariant.h @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef GetByIdVariant_h +#define GetByIdVariant_h + +#include "CallLinkStatus.h" +#include "JSCJSValue.h" +#include "ObjectPropertyConditionSet.h" +#include "PropertyOffset.h" +#include "StructureSet.h" + +namespace JSC { + +class CallLinkStatus; +class GetByIdStatus; +struct DumpContext; + +class GetByIdVariant { +public: + GetByIdVariant( + const StructureSet& structureSet = StructureSet(), PropertyOffset offset = invalidOffset, + const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(), + std::unique_ptr<CallLinkStatus> = nullptr, + JSFunction* = nullptr); + + ~GetByIdVariant(); + + GetByIdVariant(const GetByIdVariant&); + GetByIdVariant& operator=(const GetByIdVariant&); + + bool isSet() const { return !!m_structureSet.size(); } + bool operator!() const { return !isSet(); } + const StructureSet& structureSet() const { return m_structureSet; } + StructureSet& structureSet() { return m_structureSet; } + + // A non-empty condition set means that this is a prototype load. + const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; } + + PropertyOffset offset() const { return m_offset; } + CallLinkStatus* callLinkStatus() const { return m_callLinkStatus.get(); } + JSFunction* intrinsicFunction() const { return m_intrinsicFunction; } + Intrinsic intrinsic() const { return m_intrinsicFunction ? 
m_intrinsicFunction->intrinsic() : NoIntrinsic; } + + bool attemptToMerge(const GetByIdVariant& other); + + void dump(PrintStream&) const; + void dumpInContext(PrintStream&, DumpContext*) const; + +private: + friend class GetByIdStatus; + + bool canMergeIntrinsicStructures(const GetByIdVariant&) const; + + StructureSet m_structureSet; + ObjectPropertyConditionSet m_conditionSet; + PropertyOffset m_offset; + std::unique_ptr<CallLinkStatus> m_callLinkStatus; + JSFunction* m_intrinsicFunction; +}; + +} // namespace JSC + +#endif // GetByIdVariant_h + diff --git a/Source/JavaScriptCore/bytecode/HandlerInfo.h b/Source/JavaScriptCore/bytecode/HandlerInfo.h index 8396c9607..acdda08ed 100644 --- a/Source/JavaScriptCore/bytecode/HandlerInfo.h +++ b/Source/JavaScriptCore/bytecode/HandlerInfo.h @@ -27,16 +27,70 @@ #define HandlerInfo_h #include "CodeLocation.h" -#include <wtf/Platform.h> namespace JSC { -struct HandlerInfo { +enum class HandlerType { + Illegal = 0, + Catch = 1, + Finally = 2, + SynthesizedFinally = 3 +}; + +struct HandlerInfoBase { + HandlerType type() const { return static_cast<HandlerType>(typeBits); } + void setType(HandlerType type) { typeBits = static_cast<uint32_t>(type); } + + const char* typeName() + { + switch (type()) { + case HandlerType::Catch: + return "catch"; + case HandlerType::Finally: + return "finally"; + case HandlerType::SynthesizedFinally: + return "synthesized finally"; + default: + ASSERT_NOT_REACHED(); + } + return nullptr; + } + + bool isCatchHandler() const { return type() == HandlerType::Catch; } + uint32_t start; uint32_t end; uint32_t target; - uint32_t scopeDepth; + uint32_t typeBits : 2; // HandlerType +}; + +struct UnlinkedHandlerInfo : public HandlerInfoBase { + UnlinkedHandlerInfo(uint32_t start, uint32_t end, uint32_t target, HandlerType handlerType) + { + this->start = start; + this->end = end; + this->target = target; + setType(handlerType); + ASSERT(type() == handlerType); + } +}; + +struct HandlerInfo : public HandlerInfoBase { + void initialize(const UnlinkedHandlerInfo& unlinkedInfo) + { + start = unlinkedInfo.start; + end = unlinkedInfo.end; + target = unlinkedInfo.target; + typeBits = unlinkedInfo.typeBits; + } + #if ENABLE(JIT) + void initialize(const UnlinkedHandlerInfo& unlinkedInfo, CodeLocationLabel label) + { + initialize(unlinkedInfo); + nativeCode = label; + } + CodeLocationLabel nativeCode; #endif }; diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp b/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp new file mode 100644 index 000000000..447bc7e73 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "InlineCallFrame.h" + +#include "CallFrame.h" +#include "CodeBlock.h" +#include "Executable.h" +#include "JSCInlines.h" + +namespace JSC { + +JSFunction* InlineCallFrame::calleeConstant() const +{ + if (calleeRecovery.isConstant()) + return jsCast<JSFunction*>(calleeRecovery.constant()); + return nullptr; +} + +JSFunction* InlineCallFrame::calleeForCallFrame(ExecState* exec) const +{ + return jsCast<JSFunction*>(calleeRecovery.recover(exec)); +} + +CodeBlockHash InlineCallFrame::hash() const +{ + return baselineCodeBlock->hash(); +} + +CString InlineCallFrame::hashAsStringIfPossible() const +{ + return baselineCodeBlock->hashAsStringIfPossible(); +} + +CString InlineCallFrame::inferredName() const +{ + return jsCast<FunctionExecutable*>(baselineCodeBlock->ownerExecutable())->inferredName().utf8(); +} + +void InlineCallFrame::dumpBriefFunctionInformation(PrintStream& out) const +{ + out.print(inferredName(), "#", hashAsStringIfPossible()); +} + +void InlineCallFrame::dumpInContext(PrintStream& out, DumpContext* context) const +{ + out.print(briefFunctionInformation(), ":<", RawPointer(baselineCodeBlock.get())); + if (isStrictMode()) + out.print(" (StrictMode)"); + out.print(", bc#", directCaller.bytecodeIndex, ", ", static_cast<Kind>(kind)); + if (isClosureCall) + out.print(", closure call"); + else + out.print(", known callee: ", inContext(calleeRecovery.constant(), context)); + out.print(", numArgs+this = ", arguments.size()); + out.print(", stackOffset = ", stackOffset); + out.print(" (", virtualRegisterForLocal(0), " maps to ", virtualRegisterForLocal(0) + stackOffset, ")>"); +} + +void InlineCallFrame::dump(PrintStream& out) const +{ + dumpInContext(out, 0); +} + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream& out, JSC::InlineCallFrame::Kind kind) +{ + switch (kind) { + case JSC::InlineCallFrame::Call: + out.print("Call"); + return; + case JSC::InlineCallFrame::Construct: + out.print("Construct"); + return; + case JSC::InlineCallFrame::TailCall: + out.print("TailCall"); + return; + case JSC::InlineCallFrame::CallVarargs: + out.print("CallVarargs"); + return; + case JSC::InlineCallFrame::ConstructVarargs: + out.print("ConstructVarargs"); + return; + case JSC::InlineCallFrame::TailCallVarargs: + out.print("TailCallVarargs"); + return; + case JSC::InlineCallFrame::GetterCall: + out.print("GetterCall"); + return; + case JSC::InlineCallFrame::SetterCall: + out.print("SetterCall"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrame.h b/Source/JavaScriptCore/bytecode/InlineCallFrame.h new file mode 100644 index 000000000..eaa943cfe --- /dev/null +++ b/Source/JavaScriptCore/bytecode/InlineCallFrame.h @@ -0,0 +1,269 @@ +/* + * Copyright (C) 2011-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef InlineCallFrame_h +#define InlineCallFrame_h + +#include "CodeBlock.h" +#include "CodeBlockHash.h" +#include "CodeOrigin.h" +#include "ValueRecovery.h" +#include "WriteBarrier.h" +#include <wtf/BitVector.h> +#include <wtf/HashMap.h> +#include <wtf/PrintStream.h> +#include <wtf/StdLibExtras.h> +#include <wtf/Vector.h> + +namespace JSC { + +struct InlineCallFrame; +class ExecState; +class JSFunction; + +struct InlineCallFrame { + enum Kind { + Call, + Construct, + TailCall, + CallVarargs, + ConstructVarargs, + TailCallVarargs, + + // For these, the stackOffset incorporates the argument count plus the true return PC + // slot. + GetterCall, + SetterCall + }; + + static CallMode callModeFor(Kind kind) + { + switch (kind) { + case Call: + case CallVarargs: + case GetterCall: + case SetterCall: + return CallMode::Regular; + case TailCall: + case TailCallVarargs: + return CallMode::Tail; + case Construct: + case ConstructVarargs: + return CallMode::Construct; + } + RELEASE_ASSERT_NOT_REACHED(); + } + + static Kind kindFor(CallMode callMode) + { + switch (callMode) { + case CallMode::Regular: + return Call; + case CallMode::Construct: + return Construct; + case CallMode::Tail: + return TailCall; + } + RELEASE_ASSERT_NOT_REACHED(); + } + + static Kind varargsKindFor(CallMode callMode) + { + switch (callMode) { + case CallMode::Regular: + return CallVarargs; + case CallMode::Construct: + return ConstructVarargs; + case CallMode::Tail: + return TailCallVarargs; + } + RELEASE_ASSERT_NOT_REACHED(); + } + + static CodeSpecializationKind specializationKindFor(Kind kind) + { + switch (kind) { + case Call: + case CallVarargs: + case TailCall: + case TailCallVarargs: + case GetterCall: + case SetterCall: + return CodeForCall; + case Construct: + case ConstructVarargs: + return CodeForConstruct; + } + RELEASE_ASSERT_NOT_REACHED(); + } + + static bool isVarargs(Kind kind) + { + switch (kind) { + case CallVarargs: + case TailCallVarargs: + case ConstructVarargs: + return true; + default: + return false; + } + } + + static bool isTail(Kind kind) + { + switch (kind) { + case TailCall: + case TailCallVarargs: + return true; + default: + return false; + } + } + bool isTail() const + { + return isTail(static_cast<Kind>(kind)); + } + + static CodeOrigin* computeCallerSkippingTailCalls(InlineCallFrame* inlineCallFrame, Kind* callerCallKind = nullptr) + { + CodeOrigin* codeOrigin; + bool tailCallee; + int 
callKind; + do { + tailCallee = inlineCallFrame->isTail(); + callKind = inlineCallFrame->kind; + codeOrigin = &inlineCallFrame->directCaller; + inlineCallFrame = codeOrigin->inlineCallFrame; + } while (inlineCallFrame && tailCallee); + + if (tailCallee) + return nullptr; + + if (callerCallKind) + *callerCallKind = static_cast<Kind>(callKind); + + return codeOrigin; + } + + CodeOrigin* getCallerSkippingTailCalls(Kind* callerCallKind = nullptr) + { + return computeCallerSkippingTailCalls(this, callerCallKind); + } + + InlineCallFrame* getCallerInlineFrameSkippingTailCalls() + { + CodeOrigin* caller = getCallerSkippingTailCalls(); + return caller ? caller->inlineCallFrame : nullptr; + } + + Vector<ValueRecovery> arguments; // Includes 'this'. + WriteBarrier<CodeBlock> baselineCodeBlock; + ValueRecovery calleeRecovery; + CodeOrigin directCaller; + + signed stackOffset : 28; + unsigned kind : 3; // real type is Kind + bool isClosureCall : 1; // If false then we know that callee/scope are constants and the DFG won't treat them as variables, i.e. they have to be recovered manually. + VirtualRegister argumentCountRegister; // Only set when we inline a varargs call. + + // There is really no good notion of a "default" set of values for + // InlineCallFrame's fields. This constructor is here just to reduce confusion if + // we forgot to initialize explicitly. + InlineCallFrame() + : stackOffset(0) + , kind(Call) + , isClosureCall(false) + { + } + + bool isVarargs() const + { + return isVarargs(static_cast<Kind>(kind)); + } + + CodeSpecializationKind specializationKind() const { return specializationKindFor(static_cast<Kind>(kind)); } + + JSFunction* calleeConstant() const; + + // Get the callee given a machine call frame to which this InlineCallFrame belongs. 
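[Illustrative aside, not part of the commit.] computeCallerSkippingTailCalls, just above, encodes the one subtlety of tail calls in an inline stack: a tail call reuses and thereby destroys its caller's frame, so the walk must keep hopping upward for as long as the hop it just took was a tail call, and if it runs out of inline frames while still skipping, there is no recoverable caller at all. A toy model of the same walk with simplified types (CodeOrigin and the Kind plumbing omitted):

    struct Frame {
        bool isTailCall;     // Was this frame entered via a tail call?
        Frame* directCaller; // nullptr once we reach the machine (non-inlined) frame.
    };

    struct WalkResult {
        bool hasCaller; // false: tail calls all the way out, the caller frame is gone.
        Frame* caller;  // nullptr here means the caller is the machine frame itself.
    };

    // Mirrors the loop in InlineCallFrame::computeCallerSkippingTailCalls:
    // keep hopping to directCaller while the hop we just took was a tail call.
    WalkResult callerSkippingTailCalls(Frame* frame)
    {
        bool tailCallee;
        Frame* caller;
        do {
            tailCallee = frame->isTailCall;
            caller = frame->directCaller;
            frame = caller;
        } while (frame && tailCallee);

        if (tailCallee)
            return { false, nullptr }; // The tail call destroyed its caller's frame.
        return { true, caller };
    }

The real function additionally reports the caller's call Kind through an out-parameter, which the toy omits.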
+ JSFunction* calleeForCallFrame(ExecState*) const; + + CString inferredName() const; + CodeBlockHash hash() const; + CString hashAsStringIfPossible() const; + + void setStackOffset(signed offset) + { + stackOffset = offset; + RELEASE_ASSERT(static_cast<signed>(stackOffset) == offset); + } + + ptrdiff_t callerFrameOffset() const { return stackOffset * sizeof(Register) + CallFrame::callerFrameOffset(); } + ptrdiff_t returnPCOffset() const { return stackOffset * sizeof(Register) + CallFrame::returnPCOffset(); } + + bool isStrictMode() const { return baselineCodeBlock->isStrictMode(); } + + void dumpBriefFunctionInformation(PrintStream&) const; + void dump(PrintStream&) const; + void dumpInContext(PrintStream&, DumpContext*) const; + + MAKE_PRINT_METHOD(InlineCallFrame, dumpBriefFunctionInformation, briefFunctionInformation); + +}; + +inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame) +{ + RELEASE_ASSERT(inlineCallFrame); + return inlineCallFrame->baselineCodeBlock.get(); +} + +inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock) +{ + if (codeOrigin.inlineCallFrame) + return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame); + return baselineCodeBlock; +} + +template <typename Function> +inline void CodeOrigin::walkUpInlineStack(const Function& function) +{ + CodeOrigin codeOrigin = *this; + while (true) { + function(codeOrigin); + if (!codeOrigin.inlineCallFrame) + break; + codeOrigin = codeOrigin.inlineCallFrame->directCaller; + } +} + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream&, JSC::InlineCallFrame::Kind); + +} // namespace WTF + +#endif // InlineCallFrame_h diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp new file mode 100644 index 000000000..402cfd06d --- /dev/null +++ b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "InlineCallFrameSet.h" + +#include "InlineCallFrame.h" +#include "JSCInlines.h" + +namespace JSC { + +InlineCallFrameSet::InlineCallFrameSet() { } +InlineCallFrameSet::~InlineCallFrameSet() { } + +InlineCallFrame* InlineCallFrameSet::add() +{ + return m_frames.add(); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h new file mode 100644 index 000000000..6dae56db9 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef InlineCallFrameSet_h +#define InlineCallFrameSet_h + +#include "CodeOrigin.h" +#include <wtf/Bag.h> +#include <wtf/RefCounted.h> + +namespace JSC { + +class InlineCallFrameSet : public RefCounted<InlineCallFrameSet> { +public: + InlineCallFrameSet(); + ~InlineCallFrameSet(); + + bool isEmpty() const { return m_frames.isEmpty(); } + + InlineCallFrame* add(); + + typedef Bag<InlineCallFrame>::iterator iterator; + iterator begin() { return m_frames.begin(); } + iterator end() { return m_frames.end(); } + +private: + Bag<InlineCallFrame> m_frames; +}; + +} // namespace JSC + +#endif // InlineCallFrameSet_h + diff --git a/Source/JavaScriptCore/bytecode/Instruction.h b/Source/JavaScriptCore/bytecode/Instruction.h index 988b1ddf1..494b00044 100644 --- a/Source/JavaScriptCore/bytecode/Instruction.h +++ b/Source/JavaScriptCore/bytecode/Instruction.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. 
("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -29,13 +29,18 @@ #ifndef Instruction_h #define Instruction_h +#include "BasicBlockLocation.h" #include "MacroAssembler.h" #include "Opcode.h" +#include "PutByIdFlags.h" +#include "SymbolTable.h" +#include "TypeLocation.h" #include "PropertySlot.h" -#include "ResolveOperation.h" #include "SpecialPointer.h" #include "Structure.h" #include "StructureChain.h" +#include "ToThisStatus.h" +#include "VirtualRegister.h" #include <wtf/VectorTraits.h> namespace JSC { @@ -43,6 +48,7 @@ namespace JSC { class ArrayAllocationProfile; class ArrayProfile; class ObjectAllocationProfile; +class WatchpointSet; struct LLIntCallLinkInfo; struct ValueProfile; @@ -69,6 +75,18 @@ struct Instruction { u.jsCell.clear(); u.operand = operand; } + Instruction(unsigned unsignedValue) + { + // We have to initialize one of the pointer members to ensure that + // the entire struct is initialized in 64-bit. + u.jsCell.clear(); + u.unsignedValue = unsignedValue; + } + + Instruction(PutByIdFlags flags) + { + u.putByIdFlags = flags; + } Instruction(VM& vm, JSCell* owner, Structure* structure) { @@ -89,36 +107,40 @@ struct Instruction { Instruction(PropertySlot::GetValueFunc getterFunc) { u.getterFunc = getterFunc; } Instruction(LLIntCallLinkInfo* callLinkInfo) { u.callLinkInfo = callLinkInfo; } - Instruction(ValueProfile* profile) { u.profile = profile; } Instruction(ArrayProfile* profile) { u.arrayProfile = profile; } Instruction(ArrayAllocationProfile* profile) { u.arrayAllocationProfile = profile; } Instruction(ObjectAllocationProfile* profile) { u.objectAllocationProfile = profile; } - - Instruction(WriteBarrier<Unknown>* registerPointer) { u.registerPointer = registerPointer; } - + Instruction(WriteBarrier<Unknown>* variablePointer) { u.variablePointer = variablePointer; } Instruction(Special::Pointer pointer) { u.specialPointer = pointer; } - + Instruction(UniquedStringImpl* uid) { u.uid = uid; } Instruction(bool* predicatePointer) { u.predicatePointer = predicatePointer; } union { Opcode opcode; int operand; + unsigned unsignedValue; WriteBarrierBase<Structure> structure; + StructureID structureID; + WriteBarrierBase<SymbolTable> symbolTable; WriteBarrierBase<StructureChain> structureChain; WriteBarrierBase<JSCell> jsCell; - WriteBarrier<Unknown>* registerPointer; + WriteBarrier<Unknown>* variablePointer; Special::Pointer specialPointer; PropertySlot::GetValueFunc getterFunc; LLIntCallLinkInfo* callLinkInfo; + UniquedStringImpl* uid; ValueProfile* profile; ArrayProfile* arrayProfile; ArrayAllocationProfile* arrayAllocationProfile; ObjectAllocationProfile* objectAllocationProfile; + WatchpointSet* watchpointSet; void* pointer; bool* predicatePointer; - ResolveOperations* resolveOperations; - PutToBaseOperation* putToBaseOperation; + ToThisStatus toThisStatus; + TypeLocation* location; + BasicBlockLocation* basicBlockLocation; + PutByIdFlags putByIdFlags; } u; private: diff --git a/Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h b/Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h new file mode 100644 index 000000000..1926e93f2 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef InternalFunctionAllocationProfile_h +#define InternalFunctionAllocationProfile_h + +#include "JSGlobalObject.h" +#include "ObjectPrototype.h" +#include "SlotVisitor.h" +#include "WriteBarrier.h" + +namespace JSC { + +class InternalFunctionAllocationProfile { +public: + Structure* structure() { return m_structure.get(); } + Structure* createAllocationStructureFromBase(VM&, JSCell* owner, JSObject* prototype, Structure* base); + + void clear() { m_structure.clear(); } + void visitAggregate(SlotVisitor& visitor) { visitor.append(&m_structure); } + +private: + WriteBarrier<Structure> m_structure; +}; + +inline Structure* InternalFunctionAllocationProfile::createAllocationStructureFromBase(VM& vm, JSCell* owner, JSObject* prototype, Structure* baseStructure) +{ + ASSERT(prototype != baseStructure->storedPrototype()); + ASSERT(!m_structure || m_structure.get()->classInfo() != baseStructure->classInfo()); + + Structure* structure = vm.prototypeMap.emptyStructureForPrototypeFromBaseStructure(prototype, baseStructure); + + // Ensure that if another thread sees the structure, it will see it properly created. + WTF::storeStoreFence(); + + m_structure.set(vm, owner, structure); + return m_structure.get(); +} + +} // namespace JSC + +#endif /* InternalFunctionAllocationProfile_h */ diff --git a/Source/JavaScriptCore/bytecode/JumpTable.cpp b/Source/JavaScriptCore/bytecode/JumpTable.cpp index ef7098b65..e22ad03c9 100644 --- a/Source/JavaScriptCore/bytecode/JumpTable.cpp +++ b/Source/JavaScriptCore/bytecode/JumpTable.cpp @@ -11,7 +11,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* diff --git a/Source/JavaScriptCore/bytecode/JumpTable.h b/Source/JavaScriptCore/bytecode/JumpTable.h index f54a3718f..b83e842cb 100644 --- a/Source/JavaScriptCore/bytecode/JumpTable.h +++ b/Source/JavaScriptCore/bytecode/JumpTable.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2013 Apple Inc. All rights reserved. * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> * * Redistribution and use in source and binary forms, with or without @@ -11,7 +11,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -70,6 +70,11 @@ namespace JSC { return loc->value.ctiOffset; } #endif + + void clear() + { + offsetTable.clear(); + } }; struct SimpleJumpTable { @@ -89,6 +94,12 @@ namespace JSC { } #if ENABLE(JIT) + void ensureCTITable() + { + ASSERT(ctiOffsets.isEmpty() || ctiOffsets.size() == branchOffsets.size()); + ctiOffsets.grow(branchOffsets.size()); + } + inline CodeLocationLabel ctiForValue(int32_t value) { if (value >= min && static_cast<uint32_t>(value - min) < ctiOffsets.size()) @@ -96,6 +107,14 @@ namespace JSC { return ctiDefault; } #endif + + void clear() + { + branchOffsets.clear(); +#if ENABLE(JIT) + ctiOffsets.clear(); +#endif + } }; } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h b/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h index bfb951018..2645dd5be 100644 --- a/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h +++ b/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h @@ -45,7 +45,7 @@ struct LLIntCallLinkInfo : public BasicRawSentinelNode<LLIntCallLinkInfo> { remove(); } - bool isLinked() { return callee; } + bool isLinked() { return !!callee; } void unlink() { diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp index 97b8f3bcd..de654db68 100644 --- a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp +++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,29 +26,27 @@ #include "config.h" #include "LazyOperandValueProfile.h" -#if ENABLE(VALUE_PROFILER) - -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { CompressedLazyOperandValueProfileHolder::CompressedLazyOperandValueProfileHolder() { } CompressedLazyOperandValueProfileHolder::~CompressedLazyOperandValueProfileHolder() { } -void CompressedLazyOperandValueProfileHolder::computeUpdatedPredictions(OperationInProgress operation) +void CompressedLazyOperandValueProfileHolder::computeUpdatedPredictions(const ConcurrentJITLocker& locker) { if (!m_data) return; for (unsigned i = 0; i < m_data->size(); ++i) - m_data->at(i).computeUpdatedPrediction(operation); + m_data->at(i).computeUpdatedPrediction(locker); } LazyOperandValueProfile* CompressedLazyOperandValueProfileHolder::add( - const LazyOperandValueProfileKey& key) + const ConcurrentJITLocker&, const LazyOperandValueProfileKey& key) { if (!m_data) - m_data = adoptPtr(new LazyOperandValueProfile::List()); + m_data = std::make_unique<LazyOperandValueProfile::List>(); else { for (unsigned i = 0; i < m_data->size(); ++i) { if (m_data->at(i).key() == key) @@ -60,20 +58,22 @@ LazyOperandValueProfile* CompressedLazyOperandValueProfileHolder::add( return &m_data->last(); } -LazyOperandValueProfileParser::LazyOperandValueProfileParser( - CompressedLazyOperandValueProfileHolder& holder) - : m_holder(holder) +LazyOperandValueProfileParser::LazyOperandValueProfileParser() { } +LazyOperandValueProfileParser::~LazyOperandValueProfileParser() { } + +void LazyOperandValueProfileParser::initialize( + const ConcurrentJITLocker&, CompressedLazyOperandValueProfileHolder& holder) { - if (!m_holder.m_data) + ASSERT(m_map.isEmpty()); + + if (!holder.m_data) return; - LazyOperandValueProfile::List& data = *m_holder.m_data; + LazyOperandValueProfile::List& data = *holder.m_data; for (unsigned i = 0; i < data.size(); ++i) m_map.add(data[i].key(), &data[i]); } -LazyOperandValueProfileParser::~LazyOperandValueProfileParser() { } - LazyOperandValueProfile* LazyOperandValueProfileParser::getIfPresent( const LazyOperandValueProfileKey& key) const { @@ -87,16 +87,14 @@ LazyOperandValueProfile* LazyOperandValueProfileParser::getIfPresent( } SpeculatedType LazyOperandValueProfileParser::prediction( - const LazyOperandValueProfileKey& key) const + const ConcurrentJITLocker& locker, const LazyOperandValueProfileKey& key) const { LazyOperandValueProfile* profile = getIfPresent(key); if (!profile) return SpecNone; - return profile->computeUpdatedPrediction(); + return profile->computeUpdatedPrediction(locker); } } // namespace JSC -#endif // ENABLE(VALUE_PROFILER) - diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h index a117db64f..74e4f3318 100644 --- a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h +++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,14 +26,11 @@ #ifndef LazyOperandValueProfile_h #define LazyOperandValueProfile_h -#include <wtf/Platform.h> - -#if ENABLE(VALUE_PROFILER) - +#include "ConcurrentJITLock.h" #include "ValueProfile.h" +#include "VirtualRegister.h" #include <wtf/HashMap.h> #include <wtf/Noncopyable.h> -#include <wtf/OwnPtr.h> #include <wtf/SegmentedVector.h> namespace JSC { @@ -44,26 +41,26 @@ class LazyOperandValueProfileKey { public: LazyOperandValueProfileKey() : m_bytecodeOffset(0) // 0 = empty value - , m_operand(-1) // not a valid operand index in our current scheme + , m_operand(VirtualRegister()) // not a valid operand index in our current scheme { } LazyOperandValueProfileKey(WTF::HashTableDeletedValueType) : m_bytecodeOffset(1) // 1 = deleted value - , m_operand(-1) // not a valid operand index in our current scheme + , m_operand(VirtualRegister()) // not a valid operand index in our current scheme { } - LazyOperandValueProfileKey(unsigned bytecodeOffset, int operand) + LazyOperandValueProfileKey(unsigned bytecodeOffset, VirtualRegister operand) : m_bytecodeOffset(bytecodeOffset) , m_operand(operand) { - ASSERT(operand != -1); + ASSERT(m_operand.isValid()); } bool operator!() const { - return m_operand == -1; + return !m_operand.isValid(); } bool operator==(const LazyOperandValueProfileKey& other) const @@ -74,7 +71,7 @@ public: unsigned hash() const { - return WTF::intHash(m_bytecodeOffset) + m_operand; + return WTF::intHash(m_bytecodeOffset) + m_operand.offset(); } unsigned bytecodeOffset() const @@ -82,7 +79,8 @@ public: ASSERT(!!*this); return m_bytecodeOffset; } - int operand() const + + VirtualRegister operand() const { ASSERT(!!*this); return m_operand; @@ -90,11 +88,11 @@ public: bool isHashTableDeletedValue() const { - return m_operand == -1 && m_bytecodeOffset; + return !m_operand.isValid() && m_bytecodeOffset; } private: unsigned m_bytecodeOffset; - int m_operand; + VirtualRegister m_operand; }; struct LazyOperandValueProfileKeyHash { @@ -127,7 +125,7 @@ namespace JSC { struct LazyOperandValueProfile : public MinimalValueProfile { LazyOperandValueProfile() : MinimalValueProfile() - , m_operand(-1) + , m_operand(VirtualRegister()) { } @@ -142,7 +140,7 @@ struct LazyOperandValueProfile : public MinimalValueProfile { return LazyOperandValueProfileKey(m_bytecodeOffset, m_operand); } - int m_operand; + VirtualRegister m_operand; typedef SegmentedVector<LazyOperandValueProfile, 8> List; }; @@ -155,35 +153,36 @@ public: CompressedLazyOperandValueProfileHolder(); ~CompressedLazyOperandValueProfileHolder(); - void computeUpdatedPredictions(OperationInProgress); + void computeUpdatedPredictions(const ConcurrentJITLocker&); - LazyOperandValueProfile* add(const LazyOperandValueProfileKey& key); + LazyOperandValueProfile* add( + const ConcurrentJITLocker&, const LazyOperandValueProfileKey& key); private: friend class LazyOperandValueProfileParser; - OwnPtr<LazyOperandValueProfile::List> m_data; + std::unique_ptr<LazyOperandValueProfile::List> m_data; }; class LazyOperandValueProfileParser { WTF_MAKE_NONCOPYABLE(LazyOperandValueProfileParser); public: - explicit LazyOperandValueProfileParser( - CompressedLazyOperandValueProfileHolder& holder); + explicit LazyOperandValueProfileParser(); ~LazyOperandValueProfileParser(); + void initialize( + const ConcurrentJITLocker&, CompressedLazyOperandValueProfileHolder& holder); + LazyOperandValueProfile* getIfPresent( 
const LazyOperandValueProfileKey& key) const; - SpeculatedType prediction(const LazyOperandValueProfileKey& key) const; + SpeculatedType prediction( + const ConcurrentJITLocker&, const LazyOperandValueProfileKey& key) const; private: - CompressedLazyOperandValueProfileHolder& m_holder; HashMap<LazyOperandValueProfileKey, LazyOperandValueProfile*> m_map; }; } // namespace JSC -#endif // ENABLE(VALUE_PROFILER) - #endif // LazyOperandValueProfile_h diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp index a0f301a0c..bec692ef7 100644 --- a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp +++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,6 +29,7 @@ #if ENABLE(DFG_JIT) #include "CodeBlock.h" +#include "JSCInlines.h" namespace JSC { @@ -39,7 +40,7 @@ MethodOfGettingAValueProfile MethodOfGettingAValueProfile::fromLazyOperand( result.m_kind = LazyOperand; result.u.lazyOperand.codeBlock = codeBlock; result.u.lazyOperand.bytecodeOffset = key.bytecodeOffset(); - result.u.lazyOperand.operand = key.operand(); + result.u.lazyOperand.operand = key.operand().offset(); return result; } @@ -52,10 +53,14 @@ EncodedJSValue* MethodOfGettingAValueProfile::getSpecFailBucket(unsigned index) case Ready: return u.profile->specFailBucket(index); - case LazyOperand: - return u.lazyOperand.codeBlock->lazyOperandValueProfiles().add( - LazyOperandValueProfileKey( - u.lazyOperand.bytecodeOffset, u.lazyOperand.operand))->specFailBucket(index); + case LazyOperand: { + LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, VirtualRegister(u.lazyOperand.operand)); + + ConcurrentJITLocker locker(u.lazyOperand.codeBlock->m_lock); + LazyOperandValueProfile* profile = + u.lazyOperand.codeBlock->lazyOperandValueProfiles().add(locker, key); + return profile->specFailBucket(index); + } default: RELEASE_ASSERT_NOT_REACHED(); diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h index c6fe6c5f0..846f8cf7a 100644 --- a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h +++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h @@ -26,8 +26,6 @@ #ifndef MethodOfGettingAValueProfile_h #define MethodOfGettingAValueProfile_h -#include <wtf/Platform.h> - // This is guarded by ENABLE_DFG_JIT only because it uses some value profiles // that are currently only used if the DFG is enabled (i.e. they are not // available in the profile-only configuration). 
Hopefully someday all of diff --git a/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h b/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h index 9a9db0bc7..5fa706d25 100644 --- a/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h +++ b/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h @@ -89,13 +89,23 @@ public: if (inlineCapacity > JSFinalObject::maxInlineCapacity()) inlineCapacity = JSFinalObject::maxInlineCapacity(); + Structure* structure = vm.prototypeMap.emptyObjectStructureForPrototype(prototype, inlineCapacity); + + // Ensure that if another thread sees the structure, it will see it properly created + WTF::storeStoreFence(); + m_allocator = allocator; - m_structure.set(vm, owner, - vm.prototypeMap.emptyObjectStructureForPrototype(prototype, inlineCapacity)); + m_structure.set(vm, owner, structure); } - Structure* structure() { return m_structure.get(); } - unsigned inlineCapacity() { return m_structure->inlineCapacity(); } + Structure* structure() + { + Structure* structure = m_structure.get(); + // Ensure that if we see the structure, it has been properly created + WTF::loadLoadFence(); + return structure; + } + unsigned inlineCapacity() { return structure()->inlineCapacity(); } void clear() { @@ -117,8 +127,8 @@ private: return 0; size_t count = 0; - PropertyNameArray propertyNameArray(&vm); - prototype->structure()->getPropertyNamesFromStructure(vm, propertyNameArray, ExcludeDontEnumProperties); + PropertyNameArray propertyNameArray(&vm, PropertyNameMode::StringsAndSymbols); + prototype->structure()->getPropertyNamesFromStructure(vm, propertyNameArray, EnumerationMode()); PropertyNameArrayData::PropertyNameVector& propertyNameVector = propertyNameArray.data()->propertyNameVector(); for (size_t i = 0; i < propertyNameVector.size(); ++i) { JSValue value = prototype->getDirect(vm, propertyNameVector[i]); diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp new file mode 100644 index 000000000..1f153b956 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp @@ -0,0 +1,160 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
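The storeStoreFence()/loadLoadFence() pair added to ObjectAllocationProfile above is the standard publish/consume idiom: the writer finishes building the Structure, fences, then publishes the pointer; the reader loads the pointer, fences, then dereferences. A self-contained sketch of the same idiom in portable C++, using release/acquire atomics as a stand-in for WTF's explicit fences:

    #include <atomic>

    struct Widget { int payload = 0; };

    std::atomic<Widget*> g_published { nullptr };

    void publish(Widget* w)
    {
        w->payload = 42;                                 // initialize everything first
        g_published.store(w, std::memory_order_release); // plays the storeStoreFence role
    }

    int consume()
    {
        Widget* w = g_published.load(std::memory_order_acquire); // plays the loadLoadFence role
        return w ? w->payload : -1; // field loads are ordered after the pointer load
    }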
+ */ + +#include "config.h" +#include "ObjectPropertyCondition.h" + +#include "JSCInlines.h" +#include "TrackedReferences.h" + +namespace JSC { + +void ObjectPropertyCondition::dumpInContext(PrintStream& out, DumpContext* context) const +{ + if (!*this) { + out.print("<invalid>"); + return; + } + + out.print("<", inContext(JSValue(m_object), context), ": ", inContext(m_condition, context), ">"); +} + +void ObjectPropertyCondition::dump(PrintStream& out) const +{ + dumpInContext(out, nullptr); +} + +bool ObjectPropertyCondition::structureEnsuresValidityAssumingImpurePropertyWatchpoint( + Structure* structure) const +{ + return m_condition.isStillValidAssumingImpurePropertyWatchpoint(structure); +} + +bool ObjectPropertyCondition::structureEnsuresValidityAssumingImpurePropertyWatchpoint() const +{ + if (!*this) + return false; + + return structureEnsuresValidityAssumingImpurePropertyWatchpoint(m_object->structure()); +} + +bool ObjectPropertyCondition::validityRequiresImpurePropertyWatchpoint(Structure* structure) const +{ + return m_condition.validityRequiresImpurePropertyWatchpoint(structure); +} + +bool ObjectPropertyCondition::validityRequiresImpurePropertyWatchpoint() const +{ + if (!*this) + return false; + + return validityRequiresImpurePropertyWatchpoint(m_object->structure()); +} + +bool ObjectPropertyCondition::isStillValid(Structure* structure) const +{ + return m_condition.isStillValid(structure, m_object); +} + +bool ObjectPropertyCondition::isStillValid() const +{ + if (!*this) + return false; + + return isStillValid(m_object->structure()); +} + +bool ObjectPropertyCondition::structureEnsuresValidity(Structure* structure) const +{ + return m_condition.isStillValid(structure); +} + +bool ObjectPropertyCondition::structureEnsuresValidity() const +{ + if (!*this) + return false; + + return structureEnsuresValidity(m_object->structure()); +} + +bool ObjectPropertyCondition::isWatchableAssumingImpurePropertyWatchpoint( + Structure* structure, PropertyCondition::WatchabilityEffort effort) const +{ + return m_condition.isWatchableAssumingImpurePropertyWatchpoint(structure, m_object, effort); +} + +bool ObjectPropertyCondition::isWatchableAssumingImpurePropertyWatchpoint( + PropertyCondition::WatchabilityEffort effort) const +{ + if (!*this) + return false; + + return isWatchableAssumingImpurePropertyWatchpoint(m_object->structure(), effort); +} + +bool ObjectPropertyCondition::isWatchable( + Structure* structure, PropertyCondition::WatchabilityEffort effort) const +{ + return m_condition.isWatchable(structure, m_object, effort); +} + +bool ObjectPropertyCondition::isWatchable(PropertyCondition::WatchabilityEffort effort) const +{ + if (!*this) + return false; + + return isWatchable(m_object->structure(), effort); +} + +bool ObjectPropertyCondition::isStillLive() const +{ + if (!*this) + return false; + + if (!Heap::isMarked(m_object)) + return false; + + return m_condition.isStillLive(); +} + +void ObjectPropertyCondition::validateReferences(const TrackedReferences& tracked) const +{ + if (!*this) + return; + + tracked.check(m_object); + m_condition.validateReferences(tracked); +} + +ObjectPropertyCondition ObjectPropertyCondition::attemptToMakeEquivalenceWithoutBarrier() const +{ + PropertyCondition result = condition().attemptToMakeEquivalenceWithoutBarrier(object()); + if (!result) + return ObjectPropertyCondition(); + return ObjectPropertyCondition(object(), result); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h 
b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h new file mode 100644 index 000000000..4c2a9bd1f --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h @@ -0,0 +1,268 @@ +/* + * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ObjectPropertyCondition_h +#define ObjectPropertyCondition_h + +#include "JSObject.h" +#include "PropertyCondition.h" +#include <wtf/HashMap.h> + +namespace JSC { + +class TrackedReferences; + +class ObjectPropertyCondition { +public: + ObjectPropertyCondition() + : m_object(nullptr) + { + } + + ObjectPropertyCondition(WTF::HashTableDeletedValueType token) + : m_object(nullptr) + , m_condition(token) + { + } + + ObjectPropertyCondition(JSObject* object, const PropertyCondition& condition) + : m_object(object) + , m_condition(condition) + { + } + + static ObjectPropertyCondition presenceWithoutBarrier( + JSObject* object, UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes) + { + ObjectPropertyCondition result; + result.m_object = object; + result.m_condition = PropertyCondition::presenceWithoutBarrier(uid, offset, attributes); + return result; + } + + static ObjectPropertyCondition presence( + VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, PropertyOffset offset, + unsigned attributes) + { + if (owner) + vm.heap.writeBarrier(owner); + return presenceWithoutBarrier(object, uid, offset, attributes); + } + + // NOTE: The prototype is the storedPrototype, not the prototypeForLookup. 
+ static ObjectPropertyCondition absenceWithoutBarrier( + JSObject* object, UniquedStringImpl* uid, JSObject* prototype) + { + ObjectPropertyCondition result; + result.m_object = object; + result.m_condition = PropertyCondition::absenceWithoutBarrier(uid, prototype); + return result; + } + + static ObjectPropertyCondition absence( + VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSObject* prototype) + { + if (owner) + vm.heap.writeBarrier(owner); + return absenceWithoutBarrier(object, uid, prototype); + } + + static ObjectPropertyCondition absenceOfSetterWithoutBarrier( + JSObject* object, UniquedStringImpl* uid, JSObject* prototype) + { + ObjectPropertyCondition result; + result.m_object = object; + result.m_condition = PropertyCondition::absenceOfSetterWithoutBarrier(uid, prototype); + return result; + } + + static ObjectPropertyCondition absenceOfSetter( + VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSObject* prototype) + { + if (owner) + vm.heap.writeBarrier(owner); + return absenceOfSetterWithoutBarrier(object, uid, prototype); + } + + static ObjectPropertyCondition equivalenceWithoutBarrier( + JSObject* object, UniquedStringImpl* uid, JSValue value) + { + ObjectPropertyCondition result; + result.m_object = object; + result.m_condition = PropertyCondition::equivalenceWithoutBarrier(uid, value); + return result; + } + + static ObjectPropertyCondition equivalence( + VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSValue value) + { + if (owner) + vm.heap.writeBarrier(owner); + return equivalenceWithoutBarrier(object, uid, value); + } + + explicit operator bool() const { return !!m_condition; } + + JSObject* object() const { return m_object; } + PropertyCondition condition() const { return m_condition; } + + PropertyCondition::Kind kind() const { return condition().kind(); } + UniquedStringImpl* uid() const { return condition().uid(); } + bool hasOffset() const { return condition().hasOffset(); } + PropertyOffset offset() const { return condition().offset(); } + unsigned hasAttributes() const { return condition().hasAttributes(); } + unsigned attributes() const { return condition().attributes(); } + bool hasPrototype() const { return condition().hasPrototype(); } + JSObject* prototype() const { return condition().prototype(); } + bool hasRequiredValue() const { return condition().hasRequiredValue(); } + JSValue requiredValue() const { return condition().requiredValue(); } + + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; + + unsigned hash() const + { + return WTF::PtrHash<JSObject*>::hash(m_object) ^ m_condition.hash(); + } + + bool operator==(const ObjectPropertyCondition& other) const + { + return m_object == other.m_object + && m_condition == other.m_condition; + } + + bool isHashTableDeletedValue() const + { + return !m_object && m_condition.isHashTableDeletedValue(); + } + + // Two conditions are compatible if they are identical or if they speak of different uids or + // different objects. If false is returned, you have to decide how to resolve the conflict - + // for example if there is a Presence and an Equivalence then in some cases you'll want the + // more general of the two while in other cases you'll want the more specific of the two. This + // will also return false for contradictions, like Presence and Absence on the same + // object/uid. By convention, invalid conditions aren't compatible with anything. 
+ bool isCompatibleWith(const ObjectPropertyCondition& other) const + { + if (!*this || !other) + return false; + return *this == other || uid() != other.uid() || object() != other.object(); + } + + // These validity-checking methods can optionally take a Structure* instead of loading the + // Structure* from the object. If you're in the concurrent JIT, then you must use the forms + // that take an explicit Structure* because you want the compiler to optimize for the same + // structure that you validated (i.e. avoid a TOCTOU race). + + // Checks if the object's structure claims that the property won't be intercepted. Validity + // does not require watchpoints on the object. + bool structureEnsuresValidityAssumingImpurePropertyWatchpoint(Structure*) const; + bool structureEnsuresValidityAssumingImpurePropertyWatchpoint() const; + + // Returns true if we need an impure property watchpoint to ensure validity even if + // structureEnsuresValidityAssumingImpurePropertyWatchpoint() returned true. + bool validityRequiresImpurePropertyWatchpoint(Structure*) const; + bool validityRequiresImpurePropertyWatchpoint() const; + + // Checks if the condition still holds. May conservatively return false if the object and + // structure alone don't guarantee the condition. Note that this may return true if the + // condition still requires some watchpoints on the object in addition to checking the + // structure. If you want to check if the condition holds by using the structure alone, + // use structureEnsuresValidity(). + bool isStillValid(Structure*) const; + bool isStillValid() const; + + // Shorthand for condition().isStillValid(structure). + bool structureEnsuresValidity(Structure*) const; + bool structureEnsuresValidity() const; + + // This means that it's still valid and we could enforce validity by setting a transition + // watchpoint on the structure and possibly an impure property watchpoint. + bool isWatchableAssumingImpurePropertyWatchpoint( + Structure*, + PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const; + bool isWatchableAssumingImpurePropertyWatchpoint( + PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const; + + // This means that it's still valid and we could enforce validity by setting a transition + // watchpoint on the structure. + bool isWatchable( + Structure*, + PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const; + bool isWatchable( + PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const; + + bool watchingRequiresStructureTransitionWatchpoint() const + { + return condition().watchingRequiresStructureTransitionWatchpoint(); + } + bool watchingRequiresReplacementWatchpoint() const + { + return condition().watchingRequiresReplacementWatchpoint(); + } + + // This means that the objects involved in this are still live.
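The comment about taking an explicit Structure* is worth making concrete: in the concurrent JIT, re-reading object()->structure() after validating it is a time-of-check-to-time-of-use race. A hedged sketch, where compileAssuming stands in for whatever the compiler does with the validated structure:

    // Racy: validates one load of the structure, then compiles against a
    // second load that may observe a different structure.
    if (condition.structureEnsuresValidity())
        compileAssuming(condition.object()->structure());

    // Safe: load once, validate that load, compile against that same load.
    Structure* structure = condition.object()->structure();
    if (condition.structureEnsuresValidity(structure))
        compileAssuming(structure);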
+ bool isStillLive() const; + + void validateReferences(const TrackedReferences&) const; + + bool isValidValueForPresence(JSValue value) const + { + return condition().isValidValueForPresence(value); + } + + ObjectPropertyCondition attemptToMakeEquivalenceWithoutBarrier() const; + +private: + JSObject* m_object; + PropertyCondition m_condition; +}; + +struct ObjectPropertyConditionHash { + static unsigned hash(const ObjectPropertyCondition& key) { return key.hash(); } + static bool equal( + const ObjectPropertyCondition& a, const ObjectPropertyCondition& b) + { + return a == b; + } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +} // namespace JSC + +namespace WTF { + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::ObjectPropertyCondition> { + typedef JSC::ObjectPropertyConditionHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::ObjectPropertyCondition> : SimpleClassHashTraits<JSC::ObjectPropertyCondition> { }; + +} // namespace WTF + +#endif // ObjectPropertyCondition_h + diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp new file mode 100644 index 000000000..1b92412af --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp @@ -0,0 +1,368 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "ObjectPropertyConditionSet.h" + +#include "JSCInlines.h" +#include <wtf/ListDump.h> + +namespace JSC { + +ObjectPropertyCondition ObjectPropertyConditionSet::forObject(JSObject* object) const +{ + for (const ObjectPropertyCondition& condition : *this) { + if (condition.object() == object) + return condition; + } + return ObjectPropertyCondition(); +} + +ObjectPropertyCondition ObjectPropertyConditionSet::forConditionKind( + PropertyCondition::Kind kind) const +{ + for (const ObjectPropertyCondition& condition : *this) { + if (condition.kind() == kind) + return condition; + } + return ObjectPropertyCondition(); +} + +unsigned ObjectPropertyConditionSet::numberOfConditionsWithKind(PropertyCondition::Kind kind) const +{ + unsigned result = 0; + for (const ObjectPropertyCondition& condition : *this) { + if (condition.kind() == kind) + result++; + } + return result; +} + +bool ObjectPropertyConditionSet::hasOneSlotBaseCondition() const +{ + return numberOfConditionsWithKind(PropertyCondition::Presence) == 1; +} + +ObjectPropertyCondition ObjectPropertyConditionSet::slotBaseCondition() const +{ + ObjectPropertyCondition result; + unsigned numFound = 0; + for (const ObjectPropertyCondition& condition : *this) { + if (condition.kind() == PropertyCondition::Presence) { + result = condition; + numFound++; + } + } + RELEASE_ASSERT(numFound == 1); + return result; +} + +ObjectPropertyConditionSet ObjectPropertyConditionSet::mergedWith( + const ObjectPropertyConditionSet& other) const +{ + if (!isValid() || !other.isValid()) + return invalid(); + + Vector<ObjectPropertyCondition> result; + + if (!isEmpty()) + result.appendVector(m_data->vector); + + for (const ObjectPropertyCondition& newCondition : other) { + bool foundMatch = false; + for (const ObjectPropertyCondition& existingCondition : *this) { + if (newCondition == existingCondition) { + foundMatch = true; + continue; + } + if (!newCondition.isCompatibleWith(existingCondition)) + return invalid(); + } + if (!foundMatch) + result.append(newCondition); + } + + return create(result); +} + +bool ObjectPropertyConditionSet::structuresEnsureValidity() const +{ + if (!isValid()) + return false; + + for (const ObjectPropertyCondition& condition : *this) { + if (!condition.structureEnsuresValidity()) + return false; + } + return true; +} + +bool ObjectPropertyConditionSet::structuresEnsureValidityAssumingImpurePropertyWatchpoint() const +{ + if (!isValid()) + return false; + + for (const ObjectPropertyCondition& condition : *this) { + if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint()) + return false; + } + return true; +} + +bool ObjectPropertyConditionSet::needImpurePropertyWatchpoint() const +{ + for (const ObjectPropertyCondition& condition : *this) { + if (condition.validityRequiresImpurePropertyWatchpoint()) + return true; + } + return false; +} + +bool ObjectPropertyConditionSet::areStillLive() const +{ + for (const ObjectPropertyCondition& condition : *this) { + if (!condition.isStillLive()) + return false; + } + return true; +} + +void ObjectPropertyConditionSet::dumpInContext(PrintStream& out, DumpContext* context) const +{ + if (!isValid()) { + out.print("<invalid>"); + return; + } + + out.print("["); + if (m_data) + out.print(listDumpInContext(m_data->vector, context)); + out.print("]"); +} + +void ObjectPropertyConditionSet::dump(PrintStream& out) const +{ + dumpInContext(out, nullptr); +} + +namespace { + +bool verbose = false; + +ObjectPropertyCondition generateCondition( + VM& 
vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, PropertyCondition::Kind conditionKind) +{ + Structure* structure = object->structure(); + if (verbose) + dataLog("Creating condition ", conditionKind, " for ", pointerDump(structure), "\n"); + + ObjectPropertyCondition result; + switch (conditionKind) { + case PropertyCondition::Presence: { + unsigned attributes; + PropertyOffset offset = structure->getConcurrently(uid, attributes); + if (offset == invalidOffset) + return ObjectPropertyCondition(); + result = ObjectPropertyCondition::presence(vm, owner, object, uid, offset, attributes); + break; + } + case PropertyCondition::Absence: { + result = ObjectPropertyCondition::absence( + vm, owner, object, uid, object->structure()->storedPrototypeObject()); + break; + } + case PropertyCondition::AbsenceOfSetter: { + result = ObjectPropertyCondition::absenceOfSetter( + vm, owner, object, uid, object->structure()->storedPrototypeObject()); + break; + } + default: + RELEASE_ASSERT_NOT_REACHED(); + return ObjectPropertyCondition(); + } + + if (!result.structureEnsuresValidityAssumingImpurePropertyWatchpoint()) { + if (verbose) + dataLog("Failed to create condition: ", result, "\n"); + return ObjectPropertyCondition(); + } + + if (verbose) + dataLog("New condition: ", result, "\n"); + return result; +} + +enum Concurrency { + MainThread, + Concurrent +}; +template<typename Functor> +ObjectPropertyConditionSet generateConditions( + VM& vm, JSGlobalObject* globalObject, Structure* structure, JSObject* prototype, const Functor& functor, + Concurrency concurrency = MainThread) +{ + Vector<ObjectPropertyCondition> conditions; + + for (;;) { + if (verbose) + dataLog("Considering structure: ", pointerDump(structure), "\n"); + + if (structure->isProxy()) { + if (verbose) + dataLog("It's a proxy, so invalid.\n"); + return ObjectPropertyConditionSet::invalid(); + } + + JSValue value = structure->prototypeForLookup(globalObject); + + if (value.isNull()) { + if (!prototype) { + if (verbose) + dataLog("Reached end of prototype chain as expected, done.\n"); + break; + } + if (verbose) + dataLog("Unexpectedly reached end of prototype chain, so invalid.\n"); + return ObjectPropertyConditionSet::invalid(); + } + + JSObject* object = jsCast<JSObject*>(value); + structure = object->structure(vm); + + // Since we're accessing a prototype repeatedly, it's a good bet that it should not be + // treated as a dictionary.
+ if (structure->isDictionary()) { + if (concurrency == MainThread) + structure->flattenDictionaryStructure(vm, object); + else { + if (verbose) + dataLog("Cannot flatten dictionary when not on main thread, so invalid.\n"); + return ObjectPropertyConditionSet::invalid(); + } + } + + if (!functor(conditions, object)) { + if (verbose) + dataLog("Functor failed, invalid.\n"); + return ObjectPropertyConditionSet::invalid(); + } + + if (object == prototype) { + if (verbose) + dataLog("Reached desired prototype, done.\n"); + break; + } + } + + if (verbose) + dataLog("Returning conditions: ", listDump(conditions), "\n"); + return ObjectPropertyConditionSet::create(conditions); +} + +} // anonymous namespace + +ObjectPropertyConditionSet generateConditionsForPropertyMiss( + VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, UniquedStringImpl* uid) +{ + return generateConditions( + vm, exec->lexicalGlobalObject(), headStructure, nullptr, + [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool { + ObjectPropertyCondition result = + generateCondition(vm, owner, object, uid, PropertyCondition::Absence); + if (!result) + return false; + conditions.append(result); + return true; + }); +} + +ObjectPropertyConditionSet generateConditionsForPropertySetterMiss( + VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, UniquedStringImpl* uid) +{ + return generateConditions( + vm, exec->lexicalGlobalObject(), headStructure, nullptr, + [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool { + ObjectPropertyCondition result = + generateCondition(vm, owner, object, uid, PropertyCondition::AbsenceOfSetter); + if (!result) + return false; + conditions.append(result); + return true; + }); +} + +ObjectPropertyConditionSet generateConditionsForPrototypePropertyHit( + VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, JSObject* prototype, + UniquedStringImpl* uid) +{ + return generateConditions( + vm, exec->lexicalGlobalObject(), headStructure, prototype, + [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool { + PropertyCondition::Kind kind = + object == prototype ? 
PropertyCondition::Presence : PropertyCondition::Absence; + ObjectPropertyCondition result = + generateCondition(vm, owner, object, uid, kind); + if (!result) + return false; + conditions.append(result); + return true; + }); +} + +ObjectPropertyConditionSet generateConditionsForPrototypePropertyHitCustom( + VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, JSObject* prototype, + UniquedStringImpl* uid) +{ + return generateConditions( + vm, exec->lexicalGlobalObject(), headStructure, prototype, + [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool { + if (object == prototype) + return true; + ObjectPropertyCondition result = + generateCondition(vm, owner, object, uid, PropertyCondition::Absence); + if (!result) + return false; + conditions.append(result); + return true; + }); +} + +ObjectPropertyConditionSet generateConditionsForPropertySetterMissConcurrently( + VM& vm, JSGlobalObject* globalObject, Structure* headStructure, UniquedStringImpl* uid) +{ + return generateConditions( + vm, globalObject, headStructure, nullptr, + [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool { + ObjectPropertyCondition result = + generateCondition(vm, nullptr, object, uid, PropertyCondition::AbsenceOfSetter); + if (!result) + return false; + conditions.append(result); + return true; + }, Concurrent); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h new file mode 100644 index 000000000..957eaac25 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ObjectPropertyConditionSet_h +#define ObjectPropertyConditionSet_h + +#include "ObjectPropertyCondition.h" +#include <wtf/FastMalloc.h> +#include <wtf/RefCounted.h> +#include <wtf/Vector.h> + +namespace JSC { + +// An object property condition set is used to represent the set of additional conditions +// that need to be met for some heap access to be valid. The set can have the following +// interesting states: +// +// Empty: There are no special conditions that need to be met. 
+// Invalid: The heap access is never valid. +// Non-empty: The heap access is valid if all the ObjectPropertyConditions in the set are valid. + +class ObjectPropertyConditionSet { +public: + ObjectPropertyConditionSet() { } + + static ObjectPropertyConditionSet invalid() + { + ObjectPropertyConditionSet result; + result.m_data = adoptRef(new Data()); + return result; + } + + static ObjectPropertyConditionSet create(const Vector<ObjectPropertyCondition>& vector) + { + if (vector.isEmpty()) + return ObjectPropertyConditionSet(); + + ObjectPropertyConditionSet result; + result.m_data = adoptRef(new Data()); + result.m_data->vector = vector; + return result; + } + + bool isValid() const + { + return !m_data || !m_data->vector.isEmpty(); + } + + bool isEmpty() const + { + return !m_data; + } + + typedef const ObjectPropertyCondition* iterator; + + iterator begin() const + { + if (!m_data) + return nullptr; + return m_data->vector.begin(); + } + iterator end() const + { + if (!m_data) + return nullptr; + return m_data->vector.end(); + } + + ObjectPropertyCondition forObject(JSObject*) const; + ObjectPropertyCondition forConditionKind(PropertyCondition::Kind) const; + + unsigned numberOfConditionsWithKind(PropertyCondition::Kind) const; + + bool hasOneSlotBaseCondition() const; + + // If this is a condition set for a prototype hit, then this is guaranteed to return the + // condition on the prototype itself. This allows you to get the object, offset, and + // attributes for the prototype. This will RELEASE_ASSERT that there is exactly one Presence + // in the set, and it will return that presence. + ObjectPropertyCondition slotBaseCondition() const; + + // Attempt to create a new condition set by merging this one with the other one. This will + // fail if any of the conditions are incompatible with each other. When it fails, it returns + // invalid(). + ObjectPropertyConditionSet mergedWith(const ObjectPropertyConditionSet& other) const; + + bool structuresEnsureValidity() const; + bool structuresEnsureValidityAssumingImpurePropertyWatchpoint() const; + + bool needImpurePropertyWatchpoint() const; + bool areStillLive() const; + + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; + + // Helpers for using this in a union. + void* releaseRawPointer() + { + return static_cast<void*>(m_data.leakRef()); + } + static ObjectPropertyConditionSet adoptRawPointer(void* rawPointer) + { + ObjectPropertyConditionSet result; + result.m_data = adoptRef(static_cast<Data*>(rawPointer)); + return result; + } + static ObjectPropertyConditionSet fromRawPointer(void* rawPointer) + { + ObjectPropertyConditionSet result; + result.m_data = static_cast<Data*>(rawPointer); + return result; + } + + // FIXME: Everything below here should be private, but cannot be because of a bug in VS. + + // Internally, this represents Invalid using a pointer to a Data that has an empty vector. + + // FIXME: This could be made more compact by having it internally use a vector that just has + // the non-uid portion of ObjectPropertyCondition, and then requiring that the callers of all + // of the APIs supply the uid.
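Taken together with the generators in ObjectPropertyConditionSet.cpp above, the intended flow is: generate a set for the access you want to prove, bail on invalid(), then either re-verify against the current structures or install watchpoints. A hedged sketch of a prototype-hit client, where giveUp and emitFastPath are hypothetical stand-ins for the caller's slow path and code generator:

    ObjectPropertyConditionSet set = generateConditionsForPrototypePropertyHit(
        vm, owner, exec, headStructure, prototype, uid);
    if (!set.isValid())
        return giveUp(); // contradiction, proxy, or unexpected end of chain
    if (!set.structuresEnsureValidity())
        return giveUp(); // a structure changed since the set was generated
    // The unique Presence condition identifies where the slot lives.
    ObjectPropertyCondition slotBase = set.slotBaseCondition();
    emitFastPath(slotBase.object(), slotBase.offset());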
+ + class Data : public ThreadSafeRefCounted<Data> { + WTF_MAKE_NONCOPYABLE(Data); + WTF_MAKE_FAST_ALLOCATED; + + public: + Data() { } + + Vector<ObjectPropertyCondition> vector; + }; + +private: + RefPtr<Data> m_data; +}; + +ObjectPropertyConditionSet generateConditionsForPropertyMiss( + VM&, JSCell* owner, ExecState*, Structure* headStructure, UniquedStringImpl* uid); +ObjectPropertyConditionSet generateConditionsForPropertySetterMiss( + VM&, JSCell* owner, ExecState*, Structure* headStructure, UniquedStringImpl* uid); +ObjectPropertyConditionSet generateConditionsForPrototypePropertyHit( + VM&, JSCell* owner, ExecState*, Structure* headStructure, JSObject* prototype, + UniquedStringImpl* uid); +ObjectPropertyConditionSet generateConditionsForPrototypePropertyHitCustom( + VM&, JSCell* owner, ExecState*, Structure* headStructure, JSObject* prototype, + UniquedStringImpl* uid); + +ObjectPropertyConditionSet generateConditionsForPropertySetterMissConcurrently( + VM&, JSGlobalObject*, Structure* headStructure, UniquedStringImpl* uid); + +} // namespace JSC + +#endif // ObjectPropertyConditionSet_h + diff --git a/Source/JavaScriptCore/bytecode/Opcode.cpp b/Source/JavaScriptCore/bytecode/Opcode.cpp index 0adc76b28..0d16dfc2f 100644 --- a/Source/JavaScriptCore/bytecode/Opcode.cpp +++ b/Source/JavaScriptCore/bytecode/Opcode.cpp @@ -11,7 +11,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* @@ -30,9 +30,11 @@ #include "config.h" #include "Opcode.h" +#include <wtf/PrintStream.h> + #if ENABLE(OPCODE_STATS) -#include <stdio.h> -#include <wtf/FixedArray.h> +#include <array> +#include <wtf/DataLog.h> #endif using namespace std; @@ -78,9 +80,9 @@ static int compareOpcodeIndices(const void* left, const void* right) static int compareOpcodePairIndices(const void* left, const void* right) { - pair<int, int> leftPair = *(pair<int, int>*) left; + std::pair<int, int> leftPair = *(pair<int, int>*) left; long long leftValue = OpcodeStats::opcodePairCounts[leftPair.first][leftPair.second]; - pair<int, int> rightPair = *(pair<int, int>*) right; + std::pair<int, int> rightPair = *(pair<int, int>*) right; long long rightValue = OpcodeStats::opcodePairCounts[rightPair.first][rightPair.second]; if (leftValue < rightValue) @@ -102,17 +104,17 @@ OpcodeStats::~OpcodeStats() for (int j = 0; j < numOpcodeIDs; ++j) totalInstructionPairs += opcodePairCounts[i][j]; - FixedArray<int, numOpcodeIDs> sortedIndices; + std::array<int, numOpcodeIDs> sortedIndices; for (int i = 0; i < numOpcodeIDs; ++i) sortedIndices[i] = i; qsort(sortedIndices.data(), numOpcodeIDs, sizeof(int), compareOpcodeIndices); - pair<int, int> sortedPairIndices[numOpcodeIDs * numOpcodeIDs]; - pair<int, int>* currentPairIndex = sortedPairIndices; + std::pair<int, int> sortedPairIndices[numOpcodeIDs * numOpcodeIDs]; + std::pair<int, int>* currentPairIndex = sortedPairIndices; for (int i = 0; i < numOpcodeIDs; ++i) for (int j = 0; j < numOpcodeIDs; ++j) - *(currentPairIndex++) = make_pair(i, j); - qsort(sortedPairIndices, numOpcodeIDs * numOpcodeIDs, sizeof(pair<int, int>), compareOpcodePairIndices); + *(currentPairIndex++) = std::make_pair(i, j); + qsort(sortedPairIndices, numOpcodeIDs * numOpcodeIDs, sizeof(std::pair<int, int>), compareOpcodePairIndices); dataLogF("\nExecuted opcode statistics\n"); @@ -129,7 +131,7 @@ OpcodeStats::~OpcodeStats() dataLogF("2-opcode sequences by frequency: %lld\n\n", totalInstructions); for (int i = 0; i < numOpcodeIDs * numOpcodeIDs; ++i) { - pair<int, int> indexPair = sortedPairIndices[i]; + std::pair<int, int> indexPair = sortedPairIndices[i]; long long count = opcodePairCounts[indexPair.first][indexPair.second]; if (!count) @@ -150,7 +152,7 @@ OpcodeStats::~OpcodeStats() dataLogF("\n%s:%s %lld - %.2f%%\n", opcodeNames[index], padOpcodeName((OpcodeID)index, 28), opcodeCount, opcodeProportion * 100.0); for (int j = 0; j < numOpcodeIDs * numOpcodeIDs; ++j) { - pair<int, int> indexPair = sortedPairIndices[j]; + std::pair<int, int> indexPair = sortedPairIndices[j]; long long pairCount = opcodePairCounts[indexPair.first][indexPair.second]; double pairProportion = ((double) pairCount) / ((double) totalInstructionPairs); @@ -185,3 +187,14 @@ void OpcodeStats::resetLastInstruction() #endif } // namespace JSC + +namespace WTF { + +using namespace JSC; + +void printInternal(PrintStream& out, OpcodeID opcode) +{ + out.print(opcodeNames[opcode]); +} + +} // namespace WTF diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h index 72ac51b4f..ee667c84f 100644 --- a/Source/JavaScriptCore/bytecode/Opcode.h +++ b/Source/JavaScriptCore/bytecode/Opcode.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2009, 2013, 2014 Apple Inc. All rights reserved. * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> * * Redistribution and use in source and binary forms, with or without @@ -11,7 +11,7 @@ * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -30,6 +30,7 @@ #ifndef Opcode_h #define Opcode_h +#include "Bytecodes.h" #include "LLIntOpcode.h" #include <algorithm> @@ -40,174 +41,8 @@ namespace JSC { #define FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, extension__) \ - macro(op_enter, 1) \ - macro(op_create_activation, 2) \ - macro(op_init_lazy_reg, 2) \ - macro(op_create_arguments, 2) \ - macro(op_create_this, 4) \ - macro(op_get_callee, 3) \ - macro(op_convert_this, 3) \ - \ - macro(op_new_object, 4) \ - macro(op_new_array, 5) \ - macro(op_new_array_with_size, 4) \ - macro(op_new_array_buffer, 5) \ - macro(op_new_regexp, 3) \ - macro(op_mov, 3) \ - \ - macro(op_not, 3) \ - macro(op_eq, 4) \ - macro(op_eq_null, 3) \ - macro(op_neq, 4) \ - macro(op_neq_null, 3) \ - macro(op_stricteq, 4) \ - macro(op_nstricteq, 4) \ - macro(op_less, 4) \ - macro(op_lesseq, 4) \ - macro(op_greater, 4) \ - macro(op_greatereq, 4) \ - \ - macro(op_inc, 2) \ - macro(op_dec, 2) \ - macro(op_to_number, 3) \ - macro(op_negate, 3) \ - macro(op_add, 5) \ - macro(op_mul, 5) \ - macro(op_div, 5) \ - macro(op_mod, 4) \ - macro(op_sub, 5) \ - \ - macro(op_lshift, 4) \ - macro(op_rshift, 4) \ - macro(op_urshift, 4) \ - macro(op_bitand, 5) \ - macro(op_bitxor, 5) \ - macro(op_bitor, 5) \ - \ - macro(op_check_has_instance, 5) \ - macro(op_instanceof, 4) \ - macro(op_typeof, 3) \ - macro(op_is_undefined, 3) \ - macro(op_is_boolean, 3) \ - macro(op_is_number, 3) \ - macro(op_is_string, 3) \ - macro(op_is_object, 3) \ - macro(op_is_function, 3) \ - macro(op_in, 4) \ - \ - macro(op_get_scoped_var, 5) /* has value profiling */ \ - macro(op_put_scoped_var, 4) \ - \ - macro(op_resolve, 5) /* has value profiling */ \ - macro(op_resolve_global_property, 5) /* has value profiling */ \ - macro(op_resolve_global_var, 5) /* has value profiling */ \ - macro(op_resolve_scoped_var, 5) /* has value profiling */ \ - macro(op_resolve_scoped_var_on_top_scope, 5) /* has value profiling */ \ - macro(op_resolve_scoped_var_with_top_scope_check, 5) /* has value profiling */ \ - \ - macro(op_resolve_base_to_global, 7) /* has value profiling */ \ - macro(op_resolve_base_to_global_dynamic, 7) /* has value profiling */ \ - macro(op_resolve_base_to_scope, 7) /* has value profiling */ \ - macro(op_resolve_base_to_scope_with_top_scope_check, 7) /* has value profiling */ \ - macro(op_resolve_base, 7) /* has value profiling */ \ - \ - macro(op_resolve_with_base, 7) /* has value profiling */ \ - \ - macro(op_resolve_with_this, 6) /* has value profiling */ \ - \ - macro(op_put_to_base, 5) \ - macro(op_put_to_base_variable, 5) \ - \ - macro(op_init_global_const_nop, 5) \ - macro(op_init_global_const, 5) \ - macro(op_init_global_const_check, 5) \ - macro(op_get_by_id, 9) /* has value profiling */ \ - macro(op_get_by_id_out_of_line, 9) /* has value profiling */ \ - macro(op_get_by_id_self, 9) /* has value profiling */ \ - macro(op_get_by_id_proto, 9) /* has value profiling */ \ - macro(op_get_by_id_chain, 9) /* has value profiling */ \ - macro(op_get_by_id_getter_self, 9) /* has value profiling */ \ - 
macro(op_get_by_id_getter_proto, 9) /* has value profiling */ \ - macro(op_get_by_id_getter_chain, 9) /* has value profiling */ \ - macro(op_get_by_id_custom_self, 9) /* has value profiling */ \ - macro(op_get_by_id_custom_proto, 9) /* has value profiling */ \ - macro(op_get_by_id_custom_chain, 9) /* has value profiling */ \ - macro(op_get_by_id_generic, 9) /* has value profiling */ \ - macro(op_get_array_length, 9) /* has value profiling */ \ - macro(op_get_string_length, 9) /* has value profiling */ \ - macro(op_get_arguments_length, 4) \ - macro(op_put_by_id, 9) \ - macro(op_put_by_id_out_of_line, 9) \ - macro(op_put_by_id_transition, 9) \ - macro(op_put_by_id_transition_direct, 9) \ - macro(op_put_by_id_transition_direct_out_of_line, 9) \ - macro(op_put_by_id_transition_normal, 9) \ - macro(op_put_by_id_transition_normal_out_of_line, 9) \ - macro(op_put_by_id_replace, 9) \ - macro(op_put_by_id_generic, 9) \ - macro(op_del_by_id, 4) \ - macro(op_get_by_val, 6) /* has value profiling */ \ - macro(op_get_argument_by_val, 6) /* must be the same size as op_get_by_val */ \ - macro(op_get_by_pname, 7) \ - macro(op_put_by_val, 5) \ - macro(op_del_by_val, 4) \ - macro(op_put_by_index, 4) \ - macro(op_put_getter_setter, 5) \ - \ - macro(op_jmp, 2) \ - macro(op_jtrue, 3) \ - macro(op_jfalse, 3) \ - macro(op_jeq_null, 3) \ - macro(op_jneq_null, 3) \ - macro(op_jneq_ptr, 4) \ - macro(op_jless, 4) \ - macro(op_jlesseq, 4) \ - macro(op_jgreater, 4) \ - macro(op_jgreatereq, 4) \ - macro(op_jnless, 4) \ - macro(op_jnlesseq, 4) \ - macro(op_jngreater, 4) \ - macro(op_jngreatereq, 4) \ - \ - macro(op_loop_hint, 1) \ - \ - macro(op_switch_imm, 4) \ - macro(op_switch_char, 4) \ - macro(op_switch_string, 4) \ - \ - macro(op_new_func, 4) \ - macro(op_new_func_exp, 3) \ - macro(op_call, 6) \ - macro(op_call_eval, 6) \ - macro(op_call_varargs, 5) \ - macro(op_tear_off_activation, 2) \ - macro(op_tear_off_arguments, 3) \ - macro(op_ret, 2) \ - macro(op_call_put_result, 3) /* has value profiling */ \ - macro(op_ret_object_or_this, 3) \ - \ - macro(op_construct, 6) \ - macro(op_strcat, 4) \ - macro(op_to_primitive, 3) \ - \ - macro(op_get_pnames, 6) \ - macro(op_next_pname, 7) \ - \ - macro(op_push_with_scope, 2) \ - macro(op_pop_scope, 1) \ - macro(op_push_name_scope, 4) \ - \ - macro(op_catch, 2) \ - macro(op_throw, 2) \ - macro(op_throw_static_error, 3) \ - \ - macro(op_debug, 5) \ - macro(op_profile_will_call, 2) \ - macro(op_profile_did_call, 2) \ - \ - extension__ \ - \ - macro(op_end, 2) // end must be the last opcode in the list + FOR_EACH_BYTECODE_ID(macro) \ + extension__ #define FOR_EACH_CORE_OPCODE_ID(macro) \ FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, /* No extension */ ) @@ -224,7 +59,11 @@ namespace JSC { #undef OPCODE_ID_ENUM const int maxOpcodeLength = 9; -const int numOpcodeIDs = op_end + 1; +#if !ENABLE(JIT) +const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_CLOOP_BYTECODE_HELPER_IDS + NUMBER_OF_BYTECODE_HELPER_IDS; +#else +const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_BYTECODE_HELPER_IDS; +#endif #define OPCODE_ID_LENGTHS(id, length) const int id##_length = length; FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS); @@ -236,7 +75,7 @@ const int numOpcodeIDs = op_end + 1; const int opcodeLengths[numOpcodeIDs] = { FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTH_MAP) }; #undef OPCODE_ID_LENGTH_MAP -#define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= op_end, ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID); +#define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= numOpcodeIDs, 
ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID); FOR_EACH_OPCODE_ID(VERIFY_OPCODE_ID); #undef VERIFY_OPCODE_ID @@ -289,4 +128,12 @@ inline size_t opcodeLength(OpcodeID opcode) } // namespace JSC +namespace WTF { + +class PrintStream; + +void printInternal(PrintStream&, JSC::OpcodeID); + +} // namespace WTF + #endif // Opcode_h diff --git a/Source/JavaScriptCore/bytecode/Operands.h b/Source/JavaScriptCore/bytecode/Operands.h index e7b3e241f..78ddaa525 100644 --- a/Source/JavaScriptCore/bytecode/Operands.h +++ b/Source/JavaScriptCore/bytecode/Operands.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2012, 2013, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,35 +28,41 @@ #include "CallFrame.h" #include "JSObject.h" +#include "VirtualRegister.h" + #include <wtf/PrintStream.h> #include <wtf/Vector.h> namespace JSC { -// argument 0 is 'this'. -inline bool operandIsArgument(int operand) { return operand < 0; } -inline int operandToArgument(int operand) { return -operand + CallFrame::thisArgumentOffset(); } -inline int argumentToOperand(int argument) { return -argument + CallFrame::thisArgumentOffset(); } - template<typename T> struct OperandValueTraits; template<typename T> struct OperandValueTraits { static T defaultValue() { return T(); } - static void dump(const T& value, PrintStream& out) { value.dump(out); } + static bool isEmptyForDump(const T& value) { return !value; } }; enum OperandKind { ArgumentOperand, LocalOperand }; -template<typename T, typename Traits = OperandValueTraits<T> > +enum OperandsLikeTag { OperandsLike }; + +template<typename T, typename Traits = OperandValueTraits<T>> class Operands { public: Operands() { } - explicit Operands(size_t numArguments, size_t numLocals) + explicit Operands(size_t numArguments, size_t numLocals, const T& initialValue = Traits::defaultValue()) { - m_arguments.fill(Traits::defaultValue(), numArguments); - m_locals.fill(Traits::defaultValue(), numLocals); + m_arguments.fill(initialValue, numArguments); + m_locals.fill(initialValue, numLocals); + } + + template<typename U, typename OtherTraits> + explicit Operands(OperandsLikeTag, const Operands<U, OtherTraits>& other) + { + m_arguments.fill(Traits::defaultValue(), other.numberOfArguments()); + m_locals.fill(Traits::defaultValue(), other.numberOfLocals()); } size_t numberOfArguments() const { return m_arguments.size(); } @@ -90,7 +96,7 @@ public: return local(idx); } - void ensureLocals(size_t size) + void ensureLocals(size_t size, const T& ensuredValue = Traits::defaultValue()) { if (size <= m_locals.size()) return; @@ -98,7 +104,7 @@ public: size_t oldSize = m_locals.size(); m_locals.resize(size); for (size_t i = oldSize; i < m_locals.size(); ++i) - m_locals[i] = Traits::defaultValue(); + m_locals[i] = ensuredValue; } void setLocal(size_t idx, const T& value) @@ -130,33 +136,48 @@ public: T& operand(int operand) { if (operandIsArgument(operand)) { - int argument = operandToArgument(operand); + int argument = VirtualRegister(operand).toArgument(); return m_arguments[argument]; } - - return m_locals[operand]; + + return m_locals[VirtualRegister(operand).toLocal()]; } - + + T& operand(VirtualRegister virtualRegister) + { + return operand(virtualRegister.offset()); + } + const T& operand(int operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); } + const T& 
operand(VirtualRegister operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); } bool hasOperand(int operand) const { if (operandIsArgument(operand)) return true; - return static_cast<size_t>(operand) < numberOfLocals(); + return static_cast<size_t>(VirtualRegister(operand).toLocal()) < numberOfLocals(); + } + bool hasOperand(VirtualRegister reg) const + { + return hasOperand(reg.offset()); } void setOperand(int operand, const T& value) { if (operandIsArgument(operand)) { - int argument = operandToArgument(operand); + int argument = VirtualRegister(operand).toArgument(); m_arguments[argument] = value; return; } - setLocal(operand, value); + setLocal(VirtualRegister(operand).toLocal(), value); } + void setOperand(VirtualRegister virtualRegister, const T& value) + { + setOperand(virtualRegister.offset(), value); + } + size_t size() const { return numberOfArguments() + numberOfLocals(); } const T& at(size_t index) const { @@ -186,51 +207,63 @@ public: int operandForIndex(size_t index) const { if (index < numberOfArguments()) - return argumentToOperand(index); - return index - numberOfArguments(); + return virtualRegisterForArgument(index).offset(); + return virtualRegisterForLocal(index - numberOfArguments()).offset(); + } + VirtualRegister virtualRegisterForIndex(size_t index) const + { + return VirtualRegister(operandForIndex(index)); + } + size_t indexForOperand(int operand) const + { + if (operandIsArgument(operand)) + return static_cast<size_t>(VirtualRegister(operand).toArgument()); + return static_cast<size_t>(VirtualRegister(operand).toLocal()) + numberOfArguments(); + } + size_t indexForOperand(VirtualRegister reg) const + { + return indexForOperand(reg.offset()); } void setOperandFirstTime(int operand, const T& value) { if (operandIsArgument(operand)) { - setArgumentFirstTime(operandToArgument(operand), value); + setArgumentFirstTime(VirtualRegister(operand).toArgument(), value); return; } - setLocalFirstTime(operand, value); + setLocalFirstTime(VirtualRegister(operand).toLocal(), value); } - void clear() + void fill(T value) { for (size_t i = 0; i < m_arguments.size(); ++i) - m_arguments[i] = Traits::defaultValue(); + m_arguments[i] = value; for (size_t i = 0; i < m_locals.size(); ++i) - m_locals[i] = Traits::defaultValue(); + m_locals[i] = value; + } + + void clear() + { + fill(Traits::defaultValue()); } + bool operator==(const Operands& other) const + { + ASSERT(numberOfArguments() == other.numberOfArguments()); + ASSERT(numberOfLocals() == other.numberOfLocals()); + + return m_arguments == other.m_arguments && m_locals == other.m_locals; + } + + void dumpInContext(PrintStream& out, DumpContext* context) const; + void dump(PrintStream& out) const; + private: Vector<T, 8> m_arguments; Vector<T, 16> m_locals; }; -template<typename T, typename Traits> -void dumpOperands(const Operands<T, Traits>& operands, PrintStream& out) -{ - for (size_t argument = operands.numberOfArguments(); argument--;) { - if (argument != operands.numberOfArguments() - 1) - out.printf(" "); - out.print("arg", argument, ":"); - Traits::dump(operands.argument(argument), out); - } - out.printf(" : "); - for (size_t local = 0; local < operands.numberOfLocals(); ++local) { - if (local) - out.printf(" "); - out.print("r", local, ":"); - Traits::dump(operands.local(local), out); - } -} - } // namespace JSC #endif // Operands_h diff --git a/Source/JavaScriptCore/bytecode/OperandsInlines.h b/Source/JavaScriptCore/bytecode/OperandsInlines.h new file mode 100644 index 
000000000..c9dee88c7 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/OperandsInlines.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef OperandsInlines_h +#define OperandsInlines_h + +#include "Operands.h" +#include <wtf/CommaPrinter.h> + +namespace JSC { + +template<typename T, typename Traits> +void Operands<T, Traits>::dumpInContext(PrintStream& out, DumpContext* context) const +{ + CommaPrinter comma(" "); + for (size_t argumentIndex = numberOfArguments(); argumentIndex--;) { + if (Traits::isEmptyForDump(argument(argumentIndex))) + continue; + out.print(comma, "arg", argumentIndex, ":", inContext(argument(argumentIndex), context)); + } + for (size_t localIndex = 0; localIndex < numberOfLocals(); ++localIndex) { + if (Traits::isEmptyForDump(local(localIndex))) + continue; + out.print(comma, "loc", localIndex, ":", inContext(local(localIndex), context)); + } +} + +template<typename T, typename Traits> +void Operands<T, Traits>::dump(PrintStream& out) const +{ + CommaPrinter comma(" "); + for (size_t argumentIndex = numberOfArguments(); argumentIndex--;) { + if (Traits::isEmptyForDump(argument(argumentIndex))) + continue; + out.print(comma, "arg", argumentIndex, ":", argument(argumentIndex)); + } + for (size_t localIndex = 0; localIndex < numberOfLocals(); ++localIndex) { + if (Traits::isEmptyForDump(local(localIndex))) + continue; + out.print(comma, "loc", localIndex, ":", local(localIndex)); + } +} + +} // namespace JSC + +#endif // OperandsInlines_h + diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp b/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp new file mode 100644 index 000000000..3a59f8db4 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp @@ -0,0 +1,1469 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "PolymorphicAccess.h" + +#if ENABLE(JIT) + +#include "BinarySwitch.h" +#include "CCallHelpers.h" +#include "CodeBlock.h" +#include "GetterSetter.h" +#include "Heap.h" +#include "JITOperations.h" +#include "JSCInlines.h" +#include "LinkBuffer.h" +#include "ScratchRegisterAllocator.h" +#include "StructureStubClearingWatchpoint.h" +#include "StructureStubInfo.h" +#include <wtf/CommaPrinter.h> +#include <wtf/ListDump.h> + +namespace JSC { + +static const bool verbose = false; + +Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition) +{ + return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint( + watchpoints, jit->codeBlock(), stubInfo, condition); +} + +void AccessGenerationState::restoreScratch() +{ + allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState); +} + +void AccessGenerationState::succeed() +{ + restoreScratch(); + success.append(jit->jump()); +} + +void AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling() +{ + if (!m_calculatedRegistersForCallAndExceptionHandling) { + m_calculatedRegistersForCallAndExceptionHandling = true; + + m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex); + m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0; + if (m_needsToRestoreRegistersIfException) + RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType())); + + m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters()); + m_liveRegistersForCall.exclude(RegisterSet::registersToNotSaveForJSCall()); + } +} + +void AccessGenerationState::preserveLiveRegistersToStackForCall() +{ + unsigned extraStackPadding = 0; + unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegistersForCall(), extraStackPadding); + if (m_numberOfStackBytesUsedForRegisterPreservation != std::numeric_limits<unsigned>::max()) + RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == m_numberOfStackBytesUsedForRegisterPreservation); + m_numberOfStackBytesUsedForRegisterPreservation = numberOfStackBytesUsedForRegisterPreservation; +} + +void AccessGenerationState::restoreLiveRegistersFromStackForCall(bool isGetter) +{ + RegisterSet dontRestore; + if (isGetter) { + // This is the result value. 
We don't want to overwrite the result with what we stored to the stack. + // We sometimes have to store it to the stack just in case we throw an exception and need the original value. + dontRestore.set(valueRegs); + } + restoreLiveRegistersFromStackForCall(dontRestore); +} + +void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException() +{ + // Even if we're a getter, we don't want to ignore the result value like we normally do + // because the getter threw, and therefore, didn't return a value that means anything. + // Instead, we want to restore that register to what it was upon entering the getter + // inline cache. The subtlety here is if the base and the result are the same register, + // and the getter threw, we want OSR exit to see the original base value, not the result + // of the getter call. + RegisterSet dontRestore = liveRegistersForCall(); + // As an optimization here, we only need to restore what is live for exception handling. + // We can construct the dontRestore set to accomplish this goal by having it contain only + // what is live for call but not live for exception handling. By ignoring things that are + // only live at the call but not the exception handler, we will only restore things live + // at the exception handler. + dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite()); + restoreLiveRegistersFromStackForCall(dontRestore); +} + +void AccessGenerationState::restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore) +{ + unsigned extraStackPadding = 0; + ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, liveRegistersForCall(), dontRestore, m_numberOfStackBytesUsedForRegisterPreservation, extraStackPadding); +} + +CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal() +{ + RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling); + + if (!m_calculatedCallSiteIndex) { + m_calculatedCallSiteIndex = true; + + if (m_needsToRestoreRegistersIfException) + m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex); + else + m_callSiteIndex = originalCallSiteIndex(); + } + + return m_callSiteIndex; +} + +const HandlerInfo& AccessGenerationState::originalExceptionHandler() const +{ + RELEASE_ASSERT(m_needsToRestoreRegistersIfException); + HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits()); + RELEASE_ASSERT(exceptionHandler); + return *exceptionHandler; +} + +CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; } + +AccessCase::AccessCase() +{ +} + +std::unique_ptr<AccessCase> AccessCase::get( + VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure, + const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet, + PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase) +{ + std::unique_ptr<AccessCase> result(new AccessCase()); + + result->m_type = type; + result->m_offset = offset; + result->m_structure.set(vm, owner, structure); + result->m_conditionSet = conditionSet; + + if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase) { + result->m_rareData = std::make_unique<RareData>(); + result->m_rareData->viaProxy = viaProxy; + result->m_rareData->additionalSet = additionalSet; + result->m_rareData->customAccessor.getter = customGetter; + result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase); + } + + return result; +} + 
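+ // A note on usage (illustrative only, not part of this change): AccessCase's constructor is
+ // private, so cases are built through static factories like get() above and handed to
+ // PolymorphicAccess::regenerateWithCase(), defined later in this file. Assuming locals vm,
+ // codeBlock, stubInfo, ident, offset and structure from the repatching code, and a
+ // PolymorphicAccess 'access':
+ //
+ //     std::unique_ptr<AccessCase> accessCase = AccessCase::get(
+ //         vm, codeBlock, AccessCase::Load, offset, structure);
+ //     MacroAssemblerCodePtr stubCode = access.regenerateWithCase(
+ //         vm, codeBlock, stubInfo, ident, WTFMove(accessCase));
+ //     if (!stubCode) {
+ //         // Null result: the old stub is left intact and the caller keeps
+ //         // taking the slow path.
+ //     }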
+std::unique_ptr<AccessCase> AccessCase::replace( + VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset) +{ + std::unique_ptr<AccessCase> result(new AccessCase()); + + result->m_type = Replace; + result->m_offset = offset; + result->m_structure.set(vm, owner, structure); + + return result; +} + +std::unique_ptr<AccessCase> AccessCase::transition( + VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset, + const ObjectPropertyConditionSet& conditionSet) +{ + RELEASE_ASSERT(oldStructure == newStructure->previousID()); + + // Skip optimizing the case where we need a realloc, if we don't have + // enough registers to make it happen. + if (GPRInfo::numberOfRegisters < 6 + && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity() + && oldStructure->outOfLineCapacity()) { + return nullptr; + } + + // Skip optimizing the case where we need realloc, and the structure has + // indexing storage. + // FIXME: We shouldn't skip this! Implement it! + // https://bugs.webkit.org/show_bug.cgi?id=130914 + if (oldStructure->couldHaveIndexingHeader()) + return nullptr; + + std::unique_ptr<AccessCase> result(new AccessCase()); + + result->m_type = Transition; + result->m_offset = offset; + result->m_structure.set(vm, owner, newStructure); + result->m_conditionSet = conditionSet; + + return result; +} + +std::unique_ptr<AccessCase> AccessCase::setter( + VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset, + const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter, + JSObject* customSlotBase) +{ + std::unique_ptr<AccessCase> result(new AccessCase()); + + result->m_type = type; + result->m_offset = offset; + result->m_structure.set(vm, owner, structure); + result->m_conditionSet = conditionSet; + result->m_rareData = std::make_unique<RareData>(); + result->m_rareData->customAccessor.setter = customSetter; + result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase); + + return result; +} + +std::unique_ptr<AccessCase> AccessCase::in( + VM& vm, JSCell* owner, AccessType type, Structure* structure, + const ObjectPropertyConditionSet& conditionSet) +{ + std::unique_ptr<AccessCase> result(new AccessCase()); + + result->m_type = type; + result->m_structure.set(vm, owner, structure); + result->m_conditionSet = conditionSet; + + return result; +} + +std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type) +{ + std::unique_ptr<AccessCase> result(new AccessCase()); + + result->m_type = type; + + return result; +} + +std::unique_ptr<AccessCase> AccessCase::getIntrinsic( + VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset, + Structure* structure, const ObjectPropertyConditionSet& conditionSet) +{ + std::unique_ptr<AccessCase> result(new AccessCase()); + + result->m_type = IntrinsicGetter; + result->m_structure.set(vm, owner, structure); + result->m_conditionSet = conditionSet; + result->m_offset = offset; + + result->m_rareData = std::make_unique<RareData>(); + result->m_rareData->intrinsicFunction.set(vm, owner, getter); + + return result; +} + +AccessCase::~AccessCase() +{ +} + +std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo( + VM& vm, JSCell* owner, StructureStubInfo& stubInfo) +{ + switch (stubInfo.cacheType) { + case CacheType::GetByIdSelf: + return get( + vm, owner, Load, stubInfo.u.byIdSelf.offset, + stubInfo.u.byIdSelf.baseObjectStructure.get()); + + case CacheType::PutByIdReplace: + return replace( + vm, owner, 
stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset); + + default: + return nullptr; + } +} + +std::unique_ptr<AccessCase> AccessCase::clone() const +{ + std::unique_ptr<AccessCase> result(new AccessCase()); + result->m_type = m_type; + result->m_offset = m_offset; + result->m_structure = m_structure; + result->m_conditionSet = m_conditionSet; + if (RareData* rareData = m_rareData.get()) { + result->m_rareData = std::make_unique<RareData>(); + result->m_rareData->viaProxy = rareData->viaProxy; + result->m_rareData->additionalSet = rareData->additionalSet; + // NOTE: We don't copy the callLinkInfo, since that's created during code generation. + result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque; + result->m_rareData->customSlotBase = rareData->customSlotBase; + result->m_rareData->intrinsicFunction = rareData->intrinsicFunction; + } + return result; +} + +bool AccessCase::guardedByStructureCheck() const +{ + if (viaProxy()) + return false; + + switch (m_type) { + case ArrayLength: + case StringLength: + return false; + default: + return true; + } +} + +JSObject* AccessCase::alternateBase() const +{ + if (customSlotBase()) + return customSlotBase(); + return conditionSet().slotBaseCondition().object(); +} + +bool AccessCase::couldStillSucceed() const +{ + return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint(); +} + +bool AccessCase::canReplace(const AccessCase& other) +{ + // We could do a lot better here, but for now we just do something obvious. + + if (!guardedByStructureCheck() || !other.guardedByStructureCheck()) { + // FIXME: Implement this! + return false; + } + + return structure() == other.structure(); +} + +void AccessCase::dump(PrintStream& out) const +{ + out.print(m_type, ":("); + + CommaPrinter comma; + + if (m_type == Transition) + out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure())); + else if (m_structure) + out.print(comma, "structure = ", pointerDump(m_structure.get())); + + if (isValidOffset(m_offset)) + out.print(comma, "offset = ", m_offset); + if (!m_conditionSet.isEmpty()) + out.print(comma, "conditions = ", m_conditionSet); + + if (RareData* rareData = m_rareData.get()) { + if (rareData->viaProxy) + out.print(comma, "viaProxy = ", rareData->viaProxy); + if (rareData->additionalSet) + out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get())); + if (rareData->callLinkInfo) + out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get())); + if (rareData->customAccessor.opaque) + out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque)); + if (rareData->customSlotBase) + out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get())); + } + + out.print(")"); +} + +bool AccessCase::visitWeak(VM& vm) const +{ + if (m_structure && !Heap::isMarked(m_structure.get())) + return false; + if (!m_conditionSet.areStillLive()) + return false; + if (m_rareData) { + if (m_rareData->callLinkInfo) + m_rareData->callLinkInfo->visitWeak(vm); + if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get())) + return false; + if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get())) + return false; + } + return true; +} + +void AccessCase::generateWithGuard( + AccessGenerationState& state, CCallHelpers::JumpList& fallThrough) +{ + CCallHelpers& jit = *state.jit; + + switch (m_type) { + case ArrayLength: { + ASSERT(!viaProxy()); + 
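+ // ArrayLength is not guarded by a structure check (guardedByStructureCheck() returns
+ // false for it); instead we test the indexing type byte directly.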
jit.load8(CCallHelpers::Address(state.baseGPR, JSCell::indexingTypeOffset()), state.scratchGPR); + fallThrough.append( + jit.branchTest32( + CCallHelpers::Zero, state.scratchGPR, CCallHelpers::TrustedImm32(IsArray))); + fallThrough.append( + jit.branchTest32( + CCallHelpers::Zero, state.scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask))); + break; + } + + case StringLength: { + ASSERT(!viaProxy()); + fallThrough.append( + jit.branch8( + CCallHelpers::NotEqual, + CCallHelpers::Address(state.baseGPR, JSCell::typeInfoTypeOffset()), + CCallHelpers::TrustedImm32(StringType))); + break; + } + + default: { + if (viaProxy()) { + fallThrough.append( + jit.branch8( + CCallHelpers::NotEqual, + CCallHelpers::Address(state.baseGPR, JSCell::typeInfoTypeOffset()), + CCallHelpers::TrustedImm32(PureForwardingProxyType))); + + jit.loadPtr( + CCallHelpers::Address(state.baseGPR, JSProxy::targetOffset()), + state.scratchGPR); + + fallThrough.append( + jit.branchStructure( + CCallHelpers::NotEqual, + CCallHelpers::Address(state.scratchGPR, JSCell::structureIDOffset()), + structure())); + } else { + fallThrough.append( + jit.branchStructure( + CCallHelpers::NotEqual, + CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()), + structure())); + } + break; + } }; + + generate(state); +} + +// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned on an even-numbered register (r0, r2 or [sp]). +// To prevent the assembler from using wrong registers, let's occupy r1 or r3 with a dummy argument when necessary. +#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS) +#define EABI_32BIT_DUMMY_ARG CCallHelpers::TrustedImm32(0), +#else +#define EABI_32BIT_DUMMY_ARG +#endif + +void AccessCase::generate(AccessGenerationState& state) +{ + if (verbose) + dataLog("Generating code for: ", *this, "\n"); + + CCallHelpers& jit = *state.jit; + VM& vm = *jit.vm(); + CodeBlock* codeBlock = jit.codeBlock(); + StructureStubInfo& stubInfo = *state.stubInfo; + const Identifier& ident = *state.ident; + JSValueRegs valueRegs = state.valueRegs; + GPRReg baseGPR = state.baseGPR; + GPRReg scratchGPR = state.scratchGPR; + + ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint()); + + if ((structure() && structure()->needImpurePropertyWatchpoint()) + || m_conditionSet.needImpurePropertyWatchpoint()) + vm.registerWatchpointForImpureProperty(ident, state.addWatchpoint()); + + if (additionalSet()) + additionalSet()->add(state.addWatchpoint()); + + for (const ObjectPropertyCondition& condition : m_conditionSet) { + Structure* structure = condition.object()->structure(); + + if (condition.isWatchableAssumingImpurePropertyWatchpoint()) { + structure->addTransitionWatchpoint(state.addWatchpoint(condition)); + continue; + } + + if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) { + dataLog("This condition is no longer met: ", condition, "\n"); + RELEASE_ASSERT_NOT_REACHED(); + } + + // We will emit code that has a weak reference that isn't otherwise listed anywhere. 
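+ // Appending it to state.weakReferences keeps the GC aware of it: regenerate() moves that
+ // vector into PolymorphicAccess::m_weakReferences, which visitWeak() checks.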
+ state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure)); + + jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR); + state.failAndRepatch.append( + jit.branchStructure( + CCallHelpers::NotEqual, + CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()), + structure)); + } + + switch (m_type) { + case InHit: + case InMiss: + jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR()); + state.succeed(); + return; + + case Miss: + jit.moveTrustedValue(jsUndefined(), valueRegs); + state.succeed(); + return; + + case Load: + case Getter: + case Setter: + case CustomValueGetter: + case CustomAccessorGetter: + case CustomValueSetter: + case CustomAccessorSetter: { + if (isValidOffset(m_offset)) { + Structure* currStructure; + if (m_conditionSet.isEmpty()) + currStructure = structure(); + else + currStructure = m_conditionSet.slotBaseCondition().object()->structure(); + currStructure->startWatchingPropertyForReplacements(vm, offset()); + } + + GPRReg baseForGetGPR; + if (viaProxy()) { + baseForGetGPR = valueRegs.payloadGPR(); + jit.loadPtr( + CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), + baseForGetGPR); + } else + baseForGetGPR = baseGPR; + + GPRReg baseForAccessGPR; + if (!m_conditionSet.isEmpty()) { + jit.move( + CCallHelpers::TrustedImmPtr(alternateBase()), + scratchGPR); + baseForAccessGPR = scratchGPR; + } else + baseForAccessGPR = baseForGetGPR; + + GPRReg loadedValueGPR = InvalidGPRReg; + if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) { + if (m_type == Load) + loadedValueGPR = valueRegs.payloadGPR(); + else + loadedValueGPR = scratchGPR; + + GPRReg storageGPR; + if (isInlineOffset(m_offset)) + storageGPR = baseForAccessGPR; + else { + jit.loadPtr( + CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()), + loadedValueGPR); + jit.removeSpaceBits(loadedValueGPR); + storageGPR = loadedValueGPR; + } + +#if USE(JSVALUE64) + jit.load64( + CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR); +#else + if (m_type == Load) { + jit.load32( + CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset), + valueRegs.tagGPR()); + } + jit.load32( + CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset), + loadedValueGPR); +#endif + } + + if (m_type == Load) { + state.succeed(); + return; + } + + // Stuff for custom getters/setters. + CCallHelpers::Call operationCall; + CCallHelpers::Call lookupExceptionHandlerCall; + + // Stuff for JS getters/setters. + CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck; + CCallHelpers::Call fastPathCall; + CCallHelpers::Call slowPathCall; + + CCallHelpers::Jump success; + CCallHelpers::Jump fail; + + // This also does the necessary calculations of whether or not we're an + // exception handling call site. + state.calculateLiveRegistersForCallAndExceptionHandling(); + state.preserveLiveRegistersToStackForCall(); + + // Need to make sure that whenever this call is made in the future, we remember the + // place that we made it from. + jit.store32( + CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()), + CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount))); + + if (m_type == Getter || m_type == Setter) { + // Create a JS call using a JS call inline cache. Assume that: + // + // - SP is aligned and represents the extent of the calling compiler's stack usage. 
+ // + // - FP is set correctly (i.e. it points to the caller's call frame header). + // + // - SP - FP is an aligned difference. + // + // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling + // code. + // + // Therefore, we temporarily grow the stack for the purpose of the call and then + // shrink it after. + + RELEASE_ASSERT(!m_rareData->callLinkInfo); + m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>(); + + // FIXME: If we generated a polymorphic call stub that jumped back to the getter + // stub, which then jumped back to the main code, then we'd have a reachability + // situation that the GC doesn't know about. The GC would ensure that the polymorphic + // call stub stayed alive, and it would ensure that the main code stayed alive, but + // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would + // be GC objects, and then we'd be able to say that the polymorphic call stub has a + // reference to the getter stub. + // https://bugs.webkit.org/show_bug.cgi?id=148914 + m_rareData->callLinkInfo->disallowStubs(); + + m_rareData->callLinkInfo->setUpCall( + CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR); + + CCallHelpers::JumpList done; + + // There is a "this" argument. + unsigned numberOfParameters = 1; + // ... and a value argument if we're calling a setter. + if (m_type == Setter) + numberOfParameters++; + + // Get the accessor; if there ain't one then the result is jsUndefined(). + if (m_type == Setter) { + jit.loadPtr( + CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()), + loadedValueGPR); + } else { + jit.loadPtr( + CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()), + loadedValueGPR); + } + + CCallHelpers::Jump returnUndefined = jit.branchTestPtr( + CCallHelpers::Zero, loadedValueGPR); + + unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters; + + unsigned numberOfBytesForCall = + numberOfRegsForCall * sizeof(Register) + sizeof(CallerFrameAndPC); + + unsigned alignedNumberOfBytesForCall = + WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall); + + jit.subPtr( + CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall), + CCallHelpers::stackPointerRegister); + + CCallHelpers::Address calleeFrame = CCallHelpers::Address( + CCallHelpers::stackPointerRegister, + -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))); + + jit.store32( + CCallHelpers::TrustedImm32(numberOfParameters), + calleeFrame.withOffset(JSStack::ArgumentCount * sizeof(Register) + PayloadOffset)); + + jit.storeCell( + loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register))); + + jit.storeCell( + baseForGetGPR, + calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register))); + + if (m_type == Setter) { + jit.storeValue( + valueRegs, + calleeFrame.withOffset( + virtualRegisterForArgument(1).offset() * sizeof(Register))); + } + + CCallHelpers::Jump slowCase = jit.branchPtrWithPatch( + CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck, + CCallHelpers::TrustedImmPtr(0)); + + fastPathCall = jit.nearCall(); + if (m_type == Getter) + jit.setupResults(valueRegs); + done.append(jit.jump()); + + slowCase.link(&jit); + jit.move(loadedValueGPR, GPRInfo::regT0); +#if USE(JSVALUE32_64) + // We *always* know that the getter/setter, if non-null, is a cell. 
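+ // (So it is safe to materialize CellTag for it unconditionally here.)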
+ jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1); +#endif + jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2); + slowPathCall = jit.nearCall(); + if (m_type == Getter) + jit.setupResults(valueRegs); + done.append(jit.jump()); + + returnUndefined.link(&jit); + if (m_type == Getter) + jit.moveTrustedValue(jsUndefined(), valueRegs); + + done.link(&jit); + + jit.addPtr(CCallHelpers::TrustedImm32((jit.codeBlock()->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - state.numberOfStackBytesUsedForRegisterPreservation()), + GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + state.restoreLiveRegistersFromStackForCall(isGetter()); + + state.callbacks.append( + [=, &vm] (LinkBuffer& linkBuffer) { + m_rareData->callLinkInfo->setCallLocations( + linkBuffer.locationOfNearCall(slowPathCall), + linkBuffer.locationOf(addressOfLinkFunctionCheck), + linkBuffer.locationOfNearCall(fastPathCall)); + + linkBuffer.link( + slowPathCall, + CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code())); + }); + } else { + // Need to make room for the C call so any of our stack spillage isn't overwritten. + // We also need to make room because we may be an inline cache in the FTL and not + // have a JIT call frame. + bool needsToMakeRoomOnStackForCCall = state.numberOfStackBytesUsedForRegisterPreservation() || codeBlock->jitType() == JITCode::FTLJIT; + if (needsToMakeRoomOnStackForCCall) + jit.makeSpaceOnStackForCCall(); + + // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName); + // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value); + // Custom values are passed the slotBase (the property holder), custom accessors are passed the thisValue (receiver). + GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR; +#if USE(JSVALUE64) + if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) { + jit.setupArgumentsWithExecState( + baseForCustomValue, + CCallHelpers::TrustedImmPtr(ident.impl())); + } else + jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr()); +#else + if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) { + jit.setupArgumentsWithExecState( + EABI_32BIT_DUMMY_ARG baseForCustomValue, + CCallHelpers::TrustedImm32(JSValue::CellTag), + CCallHelpers::TrustedImmPtr(ident.impl())); + } else { + jit.setupArgumentsWithExecState( + EABI_32BIT_DUMMY_ARG baseForCustomValue, + CCallHelpers::TrustedImm32(JSValue::CellTag), + valueRegs.payloadGPR(), valueRegs.tagGPR()); + } +#endif + jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame); + + operationCall = jit.call(); + if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) + jit.setupResults(valueRegs); + if (needsToMakeRoomOnStackForCCall) + jit.reclaimSpaceOnStackForCCall(); + + CCallHelpers::Jump noException = + jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck); + + bool didSetLookupExceptionHandler = false; + state.restoreLiveRegistersFromStackForCallWithThrownException(); + state.restoreScratch(); + jit.copyCalleeSavesToVMCalleeSavesBuffer(); + if (state.needsToRestoreRegistersIfException()) { + // The JIT that produced the original exception handling call site + // expects the OSR exit to be arrived at from genericUnwind. Therefore + // we must model what genericUnwind does here.
I.e., set callFrameForCatch and copy callee saves. + + jit.storePtr(GPRInfo::callFrameRegister, vm.addressOfCallFrameForCatch()); + CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump(); + + // We don't need to insert a new exception handler in the table + // because we're doing a manual exception check here. I.e., we'll + // never arrive here from genericUnwind(). + HandlerInfo originalHandler = state.originalExceptionHandler(); + state.callbacks.append( + [=] (LinkBuffer& linkBuffer) { + linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode); + }); + } else { + jit.setupArguments(CCallHelpers::TrustedImmPtr(&vm), GPRInfo::callFrameRegister); + lookupExceptionHandlerCall = jit.call(); + didSetLookupExceptionHandler = true; + jit.jumpToExceptionHandler(); + } + + noException.link(&jit); + state.restoreLiveRegistersFromStackForCall(isGetter()); + + state.callbacks.append( + [=] (LinkBuffer& linkBuffer) { + linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque)); + if (didSetLookupExceptionHandler) + linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler); + }); + } + state.succeed(); + return; + } + + case Replace: { + if (InferredType* type = structure()->inferredTypeFor(ident.impl())) { + if (verbose) + dataLog("Have type: ", type->descriptor(), "\n"); + state.failAndRepatch.append( + jit.branchIfNotType( + valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters)); + } else if (verbose) + dataLog("Don't have type.\n"); + + if (isInlineOffset(m_offset)) { + jit.storeValue( + valueRegs, + CCallHelpers::Address( + baseGPR, + JSObject::offsetOfInlineStorage() + + offsetInInlineStorage(m_offset) * sizeof(JSValue))); + } else { + jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR); + state.failAndIgnore.append(jit.branchIfNotToSpace(scratchGPR)); + jit.storeValue( + valueRegs, + CCallHelpers::Address( + scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue))); + } + state.succeed(); + return; + } + + case Transition: { + // AccessCase::transition() should have returned null.
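+ // That is, the RELEASE_ASSERTs below re-check the two conditions under which transition()
+ // bails out: a register-starved out-of-line reallocation, and a possible indexing header.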
+ RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity()); + RELEASE_ASSERT(!structure()->couldHaveIndexingHeader()); + + if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) { + if (verbose) + dataLog("Have type: ", type->descriptor(), "\n"); + state.failAndRepatch.append( + jit.branchIfNotType( + valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters)); + } else if (verbose) + dataLog("Don't have type.\n"); + + CCallHelpers::JumpList slowPath; + + ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters); + allocator.lock(baseGPR); +#if USE(JSVALUE32_64) + allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR)); +#endif + allocator.lock(valueRegs); + allocator.lock(scratchGPR); + + GPRReg scratchGPR2 = allocator.allocateScratchGPR(); + GPRReg scratchGPR3; + if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity() + && structure()->outOfLineCapacity()) + scratchGPR3 = allocator.allocateScratchGPR(); + else + scratchGPR3 = InvalidGPRReg; + + ScratchRegisterAllocator::PreservedState preservedState = + allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall); + + ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated()); + + bool scratchGPRHasStorage = false; + bool needsToMakeRoomOnStackForCCall = !preservedState.numberOfBytesPreserved && codeBlock->jitType() == JITCode::FTLJIT; + + if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) { + size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue); + CopiedAllocator* copiedAllocator = &vm.heap.storageAllocator(); + + if (!structure()->outOfLineCapacity()) { + jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR); + slowPath.append( + jit.branchSubPtr( + CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR)); + jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining); + jit.negPtr(scratchGPR); + jit.addPtr( + CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR); + jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR); + } else { + size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue); + ASSERT(newSize > oldSize); + + jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3); + slowPath.append(jit.branchIfNotToSpace(scratchGPR3)); + jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR); + slowPath.append( + jit.branchSubPtr( + CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR)); + jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining); + jit.negPtr(scratchGPR); + jit.addPtr( + CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR); + jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR); + // We have scratchGPR = new storage, scratchGPR3 = old storage, + // scratchGPR2 = available + for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) { + jit.loadPtr( + CCallHelpers::Address( + scratchGPR3, + -static_cast<ptrdiff_t>( + offset + sizeof(JSValue) + sizeof(void*))), + scratchGPR2); + jit.storePtr( + scratchGPR2, + CCallHelpers::Address( + scratchGPR, + -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*)))); + } + } + + jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset())); + scratchGPRHasStorage = true; + } + + uint32_t structureBits 
= bitwise_cast<uint32_t>(newStructure()->id()); + jit.store32( + CCallHelpers::TrustedImm32(structureBits), + CCallHelpers::Address(baseGPR, JSCell::structureIDOffset())); + + if (isInlineOffset(m_offset)) { + jit.storeValue( + valueRegs, + CCallHelpers::Address( + baseGPR, + JSObject::offsetOfInlineStorage() + + offsetInInlineStorage(m_offset) * sizeof(JSValue))); + } else { + if (!scratchGPRHasStorage) { + jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR); + state.failAndIgnore.append(jit.branchIfNotToSpace(scratchGPR)); + } + jit.storeValue( + valueRegs, + CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue))); + } + + ScratchBuffer* scratchBuffer = nullptr; + if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) + scratchBuffer = vm.scratchBufferForSize(allocator.desiredScratchBufferSizeForCall()); + + if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) { + CCallHelpers::Call callFlushWriteBarrierBuffer; + CCallHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(baseGPR); + WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer(); + jit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2); + CCallHelpers::Jump needToFlush = + jit.branch32( + CCallHelpers::AboveOrEqual, scratchGPR2, + CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity())); + + jit.add32(CCallHelpers::TrustedImm32(1), scratchGPR2); + jit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress()); + + jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR); + // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2. + jit.storePtr( + baseGPR, + CCallHelpers::BaseIndex( + scratchGPR, scratchGPR2, CCallHelpers::ScalePtr, + static_cast<int32_t>(-sizeof(void*)))); + + CCallHelpers::Jump doneWithBarrier = jit.jump(); + needToFlush.link(&jit); + + // FIXME: We should restoreReusedRegistersByPopping() before this. Then, we wouldn't need + // padding in preserveReusedRegistersByPushing(). Or, maybe it would be even better if the + // barrier slow path was just the normal slow path, below. 
+ // https://bugs.webkit.org/show_bug.cgi?id=149030 + allocator.preserveUsedRegistersToScratchBufferForCall(jit, scratchBuffer, scratchGPR2); + if (needsToMakeRoomOnStackForCCall) + jit.makeSpaceOnStackForCCall(); + jit.setupArgumentsWithExecState(baseGPR); + callFlushWriteBarrierBuffer = jit.call(); + if (needsToMakeRoomOnStackForCCall) + jit.reclaimSpaceOnStackForCCall(); + allocator.restoreUsedRegistersFromScratchBufferForCall( + jit, scratchBuffer, scratchGPR2); + + doneWithBarrier.link(&jit); + ownerIsRememberedOrInEden.link(&jit); + + state.callbacks.append( + [=] (LinkBuffer& linkBuffer) { + linkBuffer.link(callFlushWriteBarrierBuffer, operationFlushWriteBarrierBuffer); + }); + } + + allocator.restoreReusedRegistersByPopping(jit, preservedState); + state.succeed(); + + if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) { + slowPath.link(&jit); + allocator.restoreReusedRegistersByPopping(jit, preservedState); + allocator.preserveUsedRegistersToScratchBufferForCall(jit, scratchBuffer, scratchGPR); + if (needsToMakeRoomOnStackForCCall) + jit.makeSpaceOnStackForCCall(); +#if USE(JSVALUE64) + jit.setupArgumentsWithExecState( + baseGPR, + CCallHelpers::TrustedImmPtr(newStructure()), + CCallHelpers::TrustedImm32(m_offset), + valueRegs.gpr()); +#else + jit.setupArgumentsWithExecState( + baseGPR, + CCallHelpers::TrustedImmPtr(newStructure()), + CCallHelpers::TrustedImm32(m_offset), + valueRegs.payloadGPR(), valueRegs.tagGPR()); +#endif + CCallHelpers::Call operationCall = jit.call(); + if (needsToMakeRoomOnStackForCCall) + jit.reclaimSpaceOnStackForCCall(); + allocator.restoreUsedRegistersFromScratchBufferForCall(jit, scratchBuffer, scratchGPR); + state.succeed(); + + state.callbacks.append( + [=] (LinkBuffer& linkBuffer) { + linkBuffer.link(operationCall, operationReallocateStorageAndFinishPut); + }); + } + return; + } + + case ArrayLength: { + jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR); + jit.removeSpaceBits(scratchGPR); + jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR); + state.failAndIgnore.append( + jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0))); + jit.boxInt32(scratchGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters); + state.succeed(); + return; + } + + case StringLength: { + jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR()); + jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters); + state.succeed(); + return; + } + + case IntrinsicGetter: { + RELEASE_ASSERT(isValidOffset(offset())); + + // We need to ensure the getter value does not move from under us. Note that GetterSetters + // are immutable so we just need to watch the property not any value inside it. 
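+ // startWatchingPropertyForReplacements() below does exactly that, via the structure's
+ // property replacement watchpoints.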
+ Structure* currStructure; + if (m_conditionSet.isEmpty()) + currStructure = structure(); + else + currStructure = m_conditionSet.slotBaseCondition().object()->structure(); + currStructure->startWatchingPropertyForReplacements(vm, offset()); + + emitIntrinsicGetter(state); + return; + } } + + RELEASE_ASSERT_NOT_REACHED(); +} + +PolymorphicAccess::PolymorphicAccess() { } +PolymorphicAccess::~PolymorphicAccess() { } + +MacroAssemblerCodePtr PolymorphicAccess::regenerateWithCases( + VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident, + Vector<std::unique_ptr<AccessCase>> originalCasesToAdd) +{ + // This method will add the originalCasesToAdd to the list one at a time while preserving the + // invariants: + // - If a newly added case canReplace() any existing case, then the existing case is removed before + // the new case is added. Removal doesn't change order of the list. Any number of existing cases + // can be removed via the canReplace() rule. + // - Cases in the list always appear in ascending order of time of addition. Therefore, if you + // cascade through the cases in reverse order, you will get the most recent cases first. + // - If this method fails (returns null, doesn't add the cases), then both the previous case list + // and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to + // add more things after failure. + + // First, verify that we can generate code for all of the new cases while eliminating any of the + // new cases that replace each other. + Vector<std::unique_ptr<AccessCase>> casesToAdd; + for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) { + std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]); + + // Add it only if it is not replaced by the subsequent cases in the list. + bool found = false; + for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) { + if (originalCasesToAdd[j]->canReplace(*myCase)) { + found = true; + break; + } + } + + if (found) + continue; + + casesToAdd.append(WTFMove(myCase)); + } + + if (verbose) + dataLog("casesToAdd: ", listDump(casesToAdd), "\n"); + + // If there aren't any cases to add, then fail on the grounds that there's no point to generating a + // new stub that will be identical to the old one. Returning null should tell the caller to just + // keep doing what they were doing before. + if (casesToAdd.isEmpty()) + return MacroAssemblerCodePtr(); + + // Now construct the list of cases as they should appear if we are successful. This means putting + // all of the previous cases in this list in order but excluding those that can be replaced, and + // then adding the new cases. + ListType newCases; + for (auto& oldCase : m_list) { + // Ignore old cases that cannot possibly succeed anymore. + if (!oldCase->couldStillSucceed()) + continue; + + // Figure out if this is replaced by any new cases. 
+ bool found = false; + for (auto& caseToAdd : casesToAdd) { + if (caseToAdd->canReplace(*oldCase)) { + found = true; + break; + } + } + if (found) + continue; + + newCases.append(oldCase->clone()); + } + for (auto& caseToAdd : casesToAdd) + newCases.append(WTFMove(caseToAdd)); + + if (verbose) + dataLog("newCases: ", listDump(newCases), "\n"); + + if (newCases.size() > Options::maxAccessVariantListSize()) { + if (verbose) + dataLog("Too many cases.\n"); + return MacroAssemblerCodePtr(); + } + + MacroAssemblerCodePtr result = regenerate(vm, codeBlock, stubInfo, ident, newCases); + if (!result) + return MacroAssemblerCodePtr(); + + m_list = WTFMove(newCases); + return result; +} + +MacroAssemblerCodePtr PolymorphicAccess::regenerateWithCase( + VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident, + std::unique_ptr<AccessCase> newAccess) +{ + Vector<std::unique_ptr<AccessCase>> newAccesses; + newAccesses.append(WTFMove(newAccess)); + return regenerateWithCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses)); +} + +bool PolymorphicAccess::visitWeak(VM& vm) const +{ + for (unsigned i = 0; i < size(); ++i) { + if (!at(i).visitWeak(vm)) + return false; + } + if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) { + for (WriteBarrier<JSCell>& weakReference : *weakReferences) { + if (!Heap::isMarked(weakReference.get())) + return false; + } + } + return true; +} + +void PolymorphicAccess::dump(PrintStream& out) const +{ + out.print(RawPointer(this), ":["); + CommaPrinter comma; + for (auto& entry : m_list) + out.print(comma, *entry); + out.print("]"); +} + +MacroAssemblerCodePtr PolymorphicAccess::regenerate( + VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident, + PolymorphicAccess::ListType& cases) +{ + if (verbose) + dataLog("Generating code for cases: ", listDump(cases), "\n"); + + AccessGenerationState state; + + state.access = this; + state.stubInfo = &stubInfo; + state.ident = &ident; + + state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR); + state.valueRegs = JSValueRegs( +#if USE(JSVALUE32_64) + static_cast<GPRReg>(stubInfo.patch.valueTagGPR), +#endif + static_cast<GPRReg>(stubInfo.patch.valueGPR)); + + ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters); + state.allocator = &allocator; + allocator.lock(state.baseGPR); + allocator.lock(state.valueRegs); +#if USE(JSVALUE32_64) + allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR)); +#endif + + state.scratchGPR = allocator.allocateScratchGPR(); + + CCallHelpers jit(&vm, codeBlock); + state.jit = &jit; + + state.preservedReusedRegisterState = + allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace); + + bool allGuardedByStructureCheck = true; + bool hasJSGetterSetterCall = false; + for (auto& entry : cases) { + allGuardedByStructureCheck &= entry->guardedByStructureCheck(); + if (entry->type() == AccessCase::Getter || entry->type() == AccessCase::Setter) + hasJSGetterSetterCall = true; + } + + if (cases.isEmpty()) { + // This is super unlikely, but we make it legal anyway. + state.failAndRepatch.append(jit.jump()); + } else if (!allGuardedByStructureCheck || cases.size() == 1) { + // If there are any proxies in the list, we cannot just use a binary switch over the structure. + // We need to resort to a cascade. A cascade also happens to be optimal if we only have just + // one case. 
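+ // (Otherwise, when every case is guarded by a structure check, we dispatch with the
+ // BinarySwitch over structure IDs in the else branch below.)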
+ CCallHelpers::JumpList fallThrough; + + // Cascade through the list, preferring newer entries. + for (unsigned i = cases.size(); i--;) { + fallThrough.link(&jit); + cases[i]->generateWithGuard(state, fallThrough); + } + state.failAndRepatch.append(fallThrough); + } else { + jit.load32( + CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()), + state.scratchGPR); + + Vector<int64_t> caseValues(cases.size()); + for (unsigned i = 0; i < cases.size(); ++i) + caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id()); + + BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32); + while (binarySwitch.advance(jit)) + cases[binarySwitch.caseIndex()]->generate(state); + state.failAndRepatch.append(binarySwitch.fallThrough()); + } + + if (!state.failAndIgnore.empty()) { + state.failAndIgnore.link(&jit); + + // Make sure that the inline cache optimization code knows that we are taking the slow path because + // of something that isn't patchable. The slow path will decrement "countdown" and will only + // patch things if the countdown reaches zero. We increment the slow path count here to ensure + // that the slow path does not try to patch. + jit.load8(&stubInfo.countdown, state.scratchGPR); + jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR); + jit.store8(state.scratchGPR, &stubInfo.countdown); + } + + CCallHelpers::JumpList failure; + if (allocator.didReuseRegisters()) { + state.failAndRepatch.link(&jit); + state.restoreScratch(); + } else + failure = state.failAndRepatch; + failure.append(jit.jump()); + + CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr; + CallSiteIndex callSiteIndexForExceptionHandling; + if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) { + // Emit the exception handler. + // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter. + // Note also that this is not reachable from a custom getter/setter. Custom getters/setters will have + // their own exception handling logic that doesn't go through genericUnwind. + MacroAssembler::Label makeshiftCatchHandler = jit.label(); + + int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue); + stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved; + stackPointerOffset -= state.numberOfStackBytesUsedForRegisterPreservation(); + + jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister); + jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + + state.restoreLiveRegistersFromStackForCallWithThrownException(); + state.restoreScratch(); + CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump(); + + HandlerInfo oldHandler = state.originalExceptionHandler(); + CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling(); + state.callbacks.append( + [=] (LinkBuffer& linkBuffer) { + linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode); + + HandlerInfo handlerToRegister = oldHandler; + handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler); + handlerToRegister.start = newExceptionHandlingCallSite.bits(); + handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1; + codeBlock->appendExceptionHandler(handlerToRegister); + }); + + // We set these to indicate to the stub to remove itself from the CodeBlock's + // exception handler table when it is deallocated.
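+ // createJITStubRoutine() receives both values below for exactly that purpose.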
+ codeBlockThatOwnsExceptionHandlers = codeBlock; + ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType())); + callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling(); + } + + LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail); + if (linkBuffer.didFailToAllocate()) { + if (verbose) + dataLog("Did fail to allocate.\n"); + return MacroAssemblerCodePtr(); + } + + CodeLocationLabel successLabel = + stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone); + + linkBuffer.link(state.success, successLabel); + + linkBuffer.link( + failure, + stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase)); + + for (auto callback : state.callbacks) + callback(linkBuffer); + + if (verbose) + dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n"); + + MacroAssemblerCodeRef code = FINALIZE_CODE_FOR( + codeBlock, linkBuffer, + ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data())); + + bool doesCalls = false; + for (auto& entry : cases) + doesCalls |= entry->doesCalls(); + + m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, nullptr, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling); + m_watchpoints = WTFMove(state.watchpoints); + if (!state.weakReferences.isEmpty()) + m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences)); + if (verbose) + dataLog("Returning: ", code.code(), "\n"); + return code.code(); +} + +void PolymorphicAccess::aboutToDie() +{ + m_stubRoutine->aboutToDie(); +} + +} // namespace JSC + +namespace WTF { + +using namespace JSC; + +void printInternal(PrintStream& out, AccessCase::AccessType type) +{ + switch (type) { + case AccessCase::Load: + out.print("Load"); + return; + case AccessCase::Transition: + out.print("Transition"); + return; + case AccessCase::Replace: + out.print("Replace"); + return; + case AccessCase::Miss: + out.print("Miss"); + return; + case AccessCase::Getter: + out.print("Getter"); + return; + case AccessCase::Setter: + out.print("Setter"); + return; + case AccessCase::CustomValueGetter: + out.print("CustomValueGetter"); + return; + case AccessCase::CustomAccessorGetter: + out.print("CustomAccessorGetter"); + return; + case AccessCase::CustomValueSetter: + out.print("CustomValueSetter"); + return; + case AccessCase::CustomAccessorSetter: + out.print("CustomAccessorSetter"); + return; + case AccessCase::IntrinsicGetter: + out.print("IntrinsicGetter"); + return; + case AccessCase::InHit: + out.print("InHit"); + return; + case AccessCase::InMiss: + out.print("InMiss"); + return; + case AccessCase::ArrayLength: + out.print("ArrayLength"); + return; + case AccessCase::StringLength: + out.print("StringLength"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + +#endif // ENABLE(JIT) + + diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccess.h b/Source/JavaScriptCore/bytecode/PolymorphicAccess.h new file mode 100644 index 000000000..bb1ea0a4e --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PolymorphicAccess.h @@ -0,0 +1,451 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PolymorphicAccess_h +#define PolymorphicAccess_h + +#if ENABLE(JIT) + +#include "CodeOrigin.h" +#include "JSFunctionInlines.h" +#include "MacroAssembler.h" +#include "ObjectPropertyConditionSet.h" +#include "Opcode.h" +#include "ScratchRegisterAllocator.h" +#include "Structure.h" +#include <wtf/Vector.h> + +namespace JSC { + +class CodeBlock; +class PolymorphicAccess; +class StructureStubInfo; +class WatchpointsOnStructureStubInfo; +class ScratchRegisterAllocator; + +struct AccessGenerationState; + +class AccessCase { + WTF_MAKE_NONCOPYABLE(AccessCase); + WTF_MAKE_FAST_ALLOCATED; +public: + enum AccessType { + Load, + Transition, + Replace, + Miss, + Getter, + Setter, + CustomValueGetter, + CustomAccessorGetter, + CustomValueSetter, + CustomAccessorSetter, + IntrinsicGetter, + InHit, + InMiss, + ArrayLength, + StringLength + }; + + static bool isGet(AccessType type) + { + switch (type) { + case Transition: + case Replace: + case Setter: + case CustomValueSetter: + case CustomAccessorSetter: + case InHit: + case InMiss: + return false; + case Load: + case Miss: + case Getter: + case CustomValueGetter: + case CustomAccessorGetter: + case IntrinsicGetter: + case ArrayLength: + case StringLength: + return true; + } + } + + static bool isPut(AccessType type) + { + switch (type) { + case Load: + case Miss: + case Getter: + case CustomValueGetter: + case CustomAccessorGetter: + case IntrinsicGetter: + case InHit: + case InMiss: + case ArrayLength: + case StringLength: + return false; + case Transition: + case Replace: + case Setter: + case CustomValueSetter: + case CustomAccessorSetter: + return true; + } + } + + static bool isIn(AccessType type) + { + switch (type) { + case Load: + case Miss: + case Getter: + case CustomValueGetter: + case CustomAccessorGetter: + case IntrinsicGetter: + case Transition: + case Replace: + case Setter: + case CustomValueSetter: + case CustomAccessorSetter: + case ArrayLength: + case StringLength: + return false; + case InHit: + case InMiss: + return true; + } + } + + static std::unique_ptr<AccessCase> get( + VM&, JSCell* owner, AccessType, PropertyOffset, Structure*, + const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(), + bool viaProxy = false, + WatchpointSet* additionalSet = nullptr, + PropertySlot::GetValueFunc = nullptr, + JSObject* customSlotBase = nullptr); + + static std::unique_ptr<AccessCase> replace(VM&, JSCell* owner, Structure*, 
PropertyOffset); + + static std::unique_ptr<AccessCase> transition( + VM&, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset, + const ObjectPropertyConditionSet& = ObjectPropertyConditionSet()); + + static std::unique_ptr<AccessCase> setter( + VM&, JSCell* owner, AccessType, Structure*, PropertyOffset, + const ObjectPropertyConditionSet&, PutPropertySlot::PutValueFunc = nullptr, + JSObject* customSlotBase = nullptr); + + static std::unique_ptr<AccessCase> in( + VM&, JSCell* owner, AccessType, Structure*, + const ObjectPropertyConditionSet& = ObjectPropertyConditionSet()); + + static std::unique_ptr<AccessCase> getLength(VM&, JSCell* owner, AccessType); + static std::unique_ptr<AccessCase> getIntrinsic(VM&, JSCell* owner, JSFunction* intrinsic, PropertyOffset, Structure*, const ObjectPropertyConditionSet&); + + static std::unique_ptr<AccessCase> fromStructureStubInfo(VM&, JSCell* owner, StructureStubInfo&); + + ~AccessCase(); + + std::unique_ptr<AccessCase> clone() const; + + AccessType type() const { return m_type; } + PropertyOffset offset() const { return m_offset; } + bool viaProxy() const { return m_rareData ? m_rareData->viaProxy : false; } + + Structure* structure() const + { + if (m_type == Transition) + return m_structure->previousID(); + return m_structure.get(); + } + bool guardedByStructureCheck() const; + + Structure* newStructure() const + { + ASSERT(m_type == Transition); + return m_structure.get(); + } + + ObjectPropertyConditionSet conditionSet() const { return m_conditionSet; } + JSFunction* intrinsicFunction() const + { + ASSERT(type() == IntrinsicGetter && m_rareData); + return m_rareData->intrinsicFunction.get(); + } + Intrinsic intrinsic() const + { + return intrinsicFunction()->intrinsic(); + } + + WatchpointSet* additionalSet() const + { + return m_rareData ? m_rareData->additionalSet.get() : nullptr; + } + + JSObject* customSlotBase() const + { + return m_rareData ? m_rareData->customSlotBase.get() : nullptr; + } + + JSObject* alternateBase() const; + + bool doesCalls() const + { + switch (type()) { + case Getter: + case Setter: + case CustomValueGetter: + case CustomAccessorGetter: + case CustomValueSetter: + case CustomAccessorSetter: + return true; + default: + return false; + } + } + + bool isGetter() const + { + switch (type()) { + case Getter: + case CustomValueGetter: + case CustomAccessorGetter: + return true; + default: + return false; + } + } + + CallLinkInfo* callLinkInfo() const + { + if (!m_rareData) + return nullptr; + return m_rareData->callLinkInfo.get(); + } + + // Is it still possible for this case to ever be taken? + bool couldStillSucceed() const; + + static bool canEmitIntrinsicGetter(JSFunction*, Structure*); + + // If this method returns true, then it's a good idea to remove 'other' from the access once 'this' + // is added. This method assumes that in case of contradictions, 'this' represents a newer, and so + // more useful, truth. This method can be conservative; it will return false when in doubt. + bool canReplace(const AccessCase& other); + + void dump(PrintStream& out) const; + +private: + friend class CodeBlock; + friend class PolymorphicAccess; + + AccessCase(); + + bool visitWeak(VM&) const; + + // Fall through on success. Two kinds of failures are supported: fall-through, which means that we + // should try a different case; and failure, which means that this was the right case but it needs + // help from the slow path.
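+ // Guard failures are appended to fallThrough; slow-path failures go to the
+ // AccessGenerationState's failAndRepatch / failAndIgnore lists.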
+ void generateWithGuard(AccessGenerationState&, MacroAssembler::JumpList& fallThrough); + + // Fall through on success, add a jump to the failure list on failure. + void generate(AccessGenerationState&); + void emitIntrinsicGetter(AccessGenerationState&); + + AccessType m_type { Load }; + PropertyOffset m_offset { invalidOffset }; + + // Usually this is the structure that we expect the base object to have. But, this is the *new* + // structure for a transition and we rely on the fact that it has a strong reference to the old + // structure. For proxies, this is the structure of the object behind the proxy. + WriteBarrier<Structure> m_structure; + + ObjectPropertyConditionSet m_conditionSet; + + class RareData { + WTF_MAKE_FAST_ALLOCATED; + public: + RareData() + : viaProxy(false) + { + customAccessor.opaque = nullptr; + } + + bool viaProxy; + RefPtr<WatchpointSet> additionalSet; + std::unique_ptr<CallLinkInfo> callLinkInfo; + union { + PropertySlot::GetValueFunc getter; + PutPropertySlot::PutValueFunc setter; + void* opaque; + } customAccessor; + WriteBarrier<JSObject> customSlotBase; + WriteBarrier<JSFunction> intrinsicFunction; + }; + + std::unique_ptr<RareData> m_rareData; +}; + +class PolymorphicAccess { + WTF_MAKE_NONCOPYABLE(PolymorphicAccess); + WTF_MAKE_FAST_ALLOCATED; +public: + PolymorphicAccess(); + ~PolymorphicAccess(); + + // This may return null, in which case the old stub routine is left intact. You are required to + // pass a vector of non-null access cases. This will prune the access cases by rejecting any case + // in the list that is subsumed by a later case in the list. + MacroAssemblerCodePtr regenerateWithCases( + VM&, CodeBlock*, StructureStubInfo&, const Identifier&, Vector<std::unique_ptr<AccessCase>>); + + MacroAssemblerCodePtr regenerateWithCase( + VM&, CodeBlock*, StructureStubInfo&, const Identifier&, std::unique_ptr<AccessCase>); + + bool isEmpty() const { return m_list.isEmpty(); } + unsigned size() const { return m_list.size(); } + const AccessCase& at(unsigned i) const { return *m_list[i]; } + const AccessCase& operator[](unsigned i) const { return *m_list[i]; } + + // If this returns false then we are requesting a reset of the owning StructureStubInfo. 
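+    // (Illustrative caller, with assumed plumbing: a GC-time sweep over a CodeBlock's stub
+    // infos might do
+    //     if (!stubInfo->u.stub->visitWeak(vm))
+    //         stubInfo->reset(codeBlock);
+    // the actual reset mechanics live in StructureStubInfo and CodeBlock.)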
+ bool visitWeak(VM&) const; + + void aboutToDie(); + + void dump(PrintStream& out) const; + bool containsPC(void* pc) const + { + if (!m_stubRoutine) + return false; + + uintptr_t pcAsInt = bitwise_cast<uintptr_t>(pc); + return m_stubRoutine->startAddress() <= pcAsInt && pcAsInt <= m_stubRoutine->endAddress(); + } + +private: + friend class AccessCase; + friend class CodeBlock; + friend struct AccessGenerationState; + + typedef Vector<std::unique_ptr<AccessCase>, 2> ListType; + + MacroAssemblerCodePtr regenerate( + VM&, CodeBlock*, StructureStubInfo&, const Identifier&, ListType& cases); + + ListType m_list; + RefPtr<JITStubRoutine> m_stubRoutine; + std::unique_ptr<WatchpointsOnStructureStubInfo> m_watchpoints; + std::unique_ptr<Vector<WriteBarrier<JSCell>>> m_weakReferences; +}; + +struct AccessGenerationState { + AccessGenerationState() + : m_calculatedRegistersForCallAndExceptionHandling(false) + , m_needsToRestoreRegistersIfException(false) + , m_calculatedCallSiteIndex(false) + { + } + CCallHelpers* jit { nullptr }; + ScratchRegisterAllocator* allocator; + ScratchRegisterAllocator::PreservedState preservedReusedRegisterState; + PolymorphicAccess* access { nullptr }; + StructureStubInfo* stubInfo { nullptr }; + MacroAssembler::JumpList success; + MacroAssembler::JumpList failAndRepatch; + MacroAssembler::JumpList failAndIgnore; + GPRReg baseGPR { InvalidGPRReg }; + JSValueRegs valueRegs; + GPRReg scratchGPR { InvalidGPRReg }; + Vector<std::function<void(LinkBuffer&)>> callbacks; + const Identifier* ident; + std::unique_ptr<WatchpointsOnStructureStubInfo> watchpoints; + Vector<WriteBarrier<JSCell>> weakReferences; + + Watchpoint* addWatchpoint(const ObjectPropertyCondition& = ObjectPropertyCondition()); + + void restoreScratch(); + void succeed(); + + void calculateLiveRegistersForCallAndExceptionHandling(); + + void preserveLiveRegistersToStackForCall(); + + void restoreLiveRegistersFromStackForCall(bool isGetter); + void restoreLiveRegistersFromStackForCallWithThrownException(); + void restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore); + + const RegisterSet& liveRegistersForCall() + { + RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling); + return m_liveRegistersForCall; + } + + CallSiteIndex callSiteIndexForExceptionHandlingOrOriginal(); + CallSiteIndex callSiteIndexForExceptionHandling() + { + RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling); + RELEASE_ASSERT(m_needsToRestoreRegistersIfException); + RELEASE_ASSERT(m_calculatedCallSiteIndex); + return m_callSiteIndex; + } + + const HandlerInfo& originalExceptionHandler() const; + unsigned numberOfStackBytesUsedForRegisterPreservation() const + { + RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling); + return m_numberOfStackBytesUsedForRegisterPreservation; + } + + bool needsToRestoreRegistersIfException() const { return m_needsToRestoreRegistersIfException; } + CallSiteIndex originalCallSiteIndex() const; + +private: + const RegisterSet& liveRegistersToPreserveAtExceptionHandlingCallSite() + { + RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling); + return m_liveRegistersToPreserveAtExceptionHandlingCallSite; + } + + RegisterSet m_liveRegistersToPreserveAtExceptionHandlingCallSite; + RegisterSet m_liveRegistersForCall; + CallSiteIndex m_callSiteIndex { CallSiteIndex(std::numeric_limits<unsigned>::max()) }; + unsigned m_numberOfStackBytesUsedForRegisterPreservation { std::numeric_limits<unsigned>::max() }; + bool m_calculatedRegistersForCallAndExceptionHandling 
: 1; + bool m_needsToRestoreRegistersIfException : 1; + bool m_calculatedCallSiteIndex : 1; +}; + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream&, JSC::AccessCase::AccessType); + +} // namespace WTF + +#endif // ENABLE(JIT) + +#endif // PolymorphicAccess_h + diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h b/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h deleted file mode 100644 index d1da89d77..000000000 --- a/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (C) 2013 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef PolymorphicAccessStructureList_h -#define PolymorphicAccessStructureList_h - -#include "JITStubRoutine.h" -#include "Structure.h" -#include "StructureChain.h" -#include <wtf/Platform.h> - -#define POLYMORPHIC_LIST_CACHE_SIZE 8 - -namespace JSC { - -// *Sigh*. If the JIT is enabled we need to track the stubRoutine (of type CodeLocationLabel); -// if the JIT is not in use we don't actually need the variable (that said, if the JIT is not in use we don't -// currently actually use PolymorphicAccessStructureLists, which we should). Anyway, this seems like the best -// solution for now - will need to do something smarter if/when we actually want mixed-mode operation. - -#if ENABLE(JIT) -// Structure used by op_get_by_id_self_list and op_get_by_id_proto_list instructions to hold data off the main opcode stream.
-struct PolymorphicAccessStructureList { - WTF_MAKE_FAST_ALLOCATED; -public: - struct PolymorphicStubInfo { - bool isChain; - bool isDirect; - RefPtr<JITStubRoutine> stubRoutine; - WriteBarrier<Structure> base; - union { - WriteBarrierBase<Structure> proto; - WriteBarrierBase<StructureChain> chain; - } u; - - PolymorphicStubInfo() - { - u.proto.clear(); - } - - void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, bool isDirect) - { - stubRoutine = _stubRoutine; - base.set(vm, owner, _base); - u.proto.clear(); - isChain = false; - this->isDirect = isDirect; - } - - void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, Structure* _proto, bool isDirect) - { - stubRoutine = _stubRoutine; - base.set(vm, owner, _base); - u.proto.set(vm, owner, _proto); - isChain = false; - this->isDirect = isDirect; - } - - void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, StructureChain* _chain, bool isDirect) - { - stubRoutine = _stubRoutine; - base.set(vm, owner, _base); - u.chain.set(vm, owner, _chain); - isChain = true; - this->isDirect = isDirect; - } - } list[POLYMORPHIC_LIST_CACHE_SIZE]; - - PolymorphicAccessStructureList() - { - } - - PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, bool isDirect) - { - list[0].set(vm, owner, stubRoutine, firstBase, isDirect); - } - - PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, Structure* firstProto, bool isDirect) - { - list[0].set(vm, owner, stubRoutine, firstBase, firstProto, isDirect); - } - - PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, StructureChain* firstChain, bool isDirect) - { - list[0].set(vm, owner, stubRoutine, firstBase, firstChain, isDirect); - } - - bool visitWeak(int count) - { - for (int i = 0; i < count; ++i) { - PolymorphicStubInfo& info = list[i]; - if (!info.base) { - // We're being marked during initialisation of an entry - ASSERT(!info.u.proto); - continue; - } - - if (!Heap::isMarked(info.base.get())) - return false; - if (info.u.proto && !info.isChain - && !Heap::isMarked(info.u.proto.get())) - return false; - if (info.u.chain && info.isChain - && !Heap::isMarked(info.u.chain.get())) - return false; - } - - return true; - } -}; - -#endif // ENABLE(JIT) - -} // namespace JSC - -#endif // PolymorphicAccessStructureList_h - diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp deleted file mode 100644 index 6a6ec8141..000000000 --- a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "PolymorphicPutByIdList.h" - -#if ENABLE(JIT) - -#include "StructureStubInfo.h" - -namespace JSC { - -PutByIdAccess PutByIdAccess::fromStructureStubInfo( - StructureStubInfo& stubInfo, - MacroAssemblerCodePtr initialSlowPath) -{ - PutByIdAccess result; - - switch (stubInfo.accessType) { - case access_put_by_id_replace: - result.m_type = Replace; - result.m_oldStructure.copyFrom(stubInfo.u.putByIdReplace.baseObjectStructure); - result.m_stubRoutine = JITStubRoutine::createSelfManagedRoutine(initialSlowPath); - break; - - case access_put_by_id_transition_direct: - case access_put_by_id_transition_normal: - result.m_type = Transition; - result.m_oldStructure.copyFrom(stubInfo.u.putByIdTransition.previousStructure); - result.m_newStructure.copyFrom(stubInfo.u.putByIdTransition.structure); - result.m_chain.copyFrom(stubInfo.u.putByIdTransition.chain); - result.m_stubRoutine = stubInfo.stubRoutine; - break; - - default: - RELEASE_ASSERT_NOT_REACHED(); - } - - return result; -} - -bool PutByIdAccess::visitWeak() const -{ - switch (m_type) { - case Replace: - if (!Heap::isMarked(m_oldStructure.get())) - return false; - break; - case Transition: - if (!Heap::isMarked(m_oldStructure.get())) - return false; - if (!Heap::isMarked(m_newStructure.get())) - return false; - if (!Heap::isMarked(m_chain.get())) - return false; - break; - default: - RELEASE_ASSERT_NOT_REACHED(); - return false; - } - return true; -} - -PolymorphicPutByIdList::PolymorphicPutByIdList( - PutKind putKind, - StructureStubInfo& stubInfo, - MacroAssemblerCodePtr initialSlowPath) - : m_kind(putKind) -{ - m_list.append(PutByIdAccess::fromStructureStubInfo(stubInfo, initialSlowPath)); -} - -PolymorphicPutByIdList* PolymorphicPutByIdList::from( - PutKind putKind, - StructureStubInfo& stubInfo, - MacroAssemblerCodePtr initialSlowPath) -{ - if (stubInfo.accessType == access_put_by_id_list) - return stubInfo.u.putByIdList.list; - - ASSERT(stubInfo.accessType == access_put_by_id_replace - || stubInfo.accessType == access_put_by_id_transition_normal - || stubInfo.accessType == access_put_by_id_transition_direct); - - PolymorphicPutByIdList* result = - new PolymorphicPutByIdList(putKind, stubInfo, initialSlowPath); - - stubInfo.initPutByIdList(result); - - return result; -} - -PolymorphicPutByIdList::~PolymorphicPutByIdList() { } - -bool PolymorphicPutByIdList::isFull() const -{ - ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE); - return size() == POLYMORPHIC_LIST_CACHE_SIZE; -} - -bool PolymorphicPutByIdList::isAlmostFull() const -{ - ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE); - return size() >= POLYMORPHIC_LIST_CACHE_SIZE - 1; -} - -void PolymorphicPutByIdList::addAccess(const PutByIdAccess& putByIdAccess) -{ - ASSERT(!isFull()); - // Make sure that the resizing optimizes for space, not time. 
- m_list.resize(m_list.size() + 1); - m_list.last() = putByIdAccess; -} - -bool PolymorphicPutByIdList::visitWeak() const -{ - for (unsigned i = 0; i < size(); ++i) { - if (!at(i).visitWeak()) - return false; - } - return true; -} - -} // namespace JSC - -#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h deleted file mode 100644 index 6e88e7062..000000000 --- a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef PolymorphicPutByIdList_h -#define PolymorphicPutByIdList_h - -#include <wtf/Platform.h> - -#if ENABLE(JIT) - -#include "CodeOrigin.h" -#include "MacroAssembler.h" -#include "Opcode.h" -#include "PutKind.h" -#include "Structure.h" -#include <wtf/Vector.h> - -namespace JSC { - -struct StructureStubInfo; - -class PutByIdAccess { -public: - enum AccessType { - Invalid, - Transition, - Replace - }; - - PutByIdAccess() - : m_type(Invalid) - { - } - - static PutByIdAccess transition( - VM& vm, - JSCell* owner, - Structure* oldStructure, - Structure* newStructure, - StructureChain* chain, - PassRefPtr<JITStubRoutine> stubRoutine) - { - PutByIdAccess result; - result.m_type = Transition; - result.m_oldStructure.set(vm, owner, oldStructure); - result.m_newStructure.set(vm, owner, newStructure); - result.m_chain.set(vm, owner, chain); - result.m_stubRoutine = stubRoutine; - return result; - } - - static PutByIdAccess replace( - VM& vm, - JSCell* owner, - Structure* structure, - PassRefPtr<JITStubRoutine> stubRoutine) - { - PutByIdAccess result; - result.m_type = Replace; - result.m_oldStructure.set(vm, owner, structure); - result.m_stubRoutine = stubRoutine; - return result; - } - - static PutByIdAccess fromStructureStubInfo( - StructureStubInfo&, - MacroAssemblerCodePtr initialSlowPath); - - bool isSet() const { return m_type != Invalid; } - bool operator!() const { return !isSet(); } - - AccessType type() const { return m_type; } - - bool isTransition() const { return m_type == Transition; } - bool isReplace() const { return m_type == Replace; } - - Structure* oldStructure() const - { - // Using this instead of isSet() to make this assertion robust against the possibility - // of additional access types being added. - ASSERT(isTransition() || isReplace()); - - return m_oldStructure.get(); - } - - Structure* structure() const - { - ASSERT(isReplace()); - return m_oldStructure.get(); - } - - Structure* newStructure() const - { - ASSERT(isTransition()); - return m_newStructure.get(); - } - - StructureChain* chain() const - { - ASSERT(isTransition()); - return m_chain.get(); - } - - PassRefPtr<JITStubRoutine> stubRoutine() const - { - ASSERT(isTransition() || isReplace()); - return m_stubRoutine; - } - - bool visitWeak() const; - -private: - AccessType m_type; - WriteBarrier<Structure> m_oldStructure; - WriteBarrier<Structure> m_newStructure; - WriteBarrier<StructureChain> m_chain; - RefPtr<JITStubRoutine> m_stubRoutine; -}; - -class PolymorphicPutByIdList { - WTF_MAKE_FAST_ALLOCATED; -public: - // Initialize from a stub info; this will place one element in the list and it will - // be created by converting the stub info's put by id access information into our - // PutByIdAccess. - PolymorphicPutByIdList( - PutKind, - StructureStubInfo&, - MacroAssemblerCodePtr initialSlowPath); - - // Either creates a new polymorphic put list, or returns the one that is already - // in place. - static PolymorphicPutByIdList* from( - PutKind, - StructureStubInfo&, - MacroAssemblerCodePtr initialSlowPath); - - ~PolymorphicPutByIdList(); - - MacroAssemblerCodePtr currentSlowPathTarget() const - { - return m_list.last().stubRoutine()->code().code(); - } - - void addAccess(const PutByIdAccess&); - - bool isEmpty() const { return m_list.isEmpty(); } - unsigned size() const { return m_list.size(); } - bool isFull() const; - bool isAlmostFull() const; // True if adding an element would make isFull() true. 
- const PutByIdAccess& at(unsigned i) const { return m_list[i]; } - const PutByIdAccess& operator[](unsigned i) const { return m_list[i]; } - - PutKind kind() const { return m_kind; } - - bool visitWeak() const; - -private: - Vector<PutByIdAccess, 2> m_list; - PutKind m_kind; -}; - -} // namespace JSC - -#endif // ENABLE(JIT) - -#endif // PolymorphicPutByIdList_h - diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp index b789da104..e2cb00048 100644 --- a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp +++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp @@ -24,15 +24,62 @@ */ #include "config.h" -#include "JSCellInlines.h" #include "PreciseJumpTargets.h" +#include "JSCInlines.h" + namespace JSC { -static void addSimpleSwitchTargets(SimpleJumpTable& jumpTable, unsigned bytecodeOffset, Vector<unsigned, 32>& out) +template <size_t vectorSize> +static void getJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, Interpreter* interpreter, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, vectorSize>& out) { - for (unsigned i = jumpTable.branchOffsets.size(); i--;) - out.append(bytecodeOffset + jumpTable.branchOffsets[i]); + OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode); + Instruction* current = instructionsBegin + bytecodeOffset; + switch (opcodeID) { + case op_jmp: + out.append(bytecodeOffset + current[1].u.operand); + break; + case op_jtrue: + case op_jfalse: + case op_jeq_null: + case op_jneq_null: + out.append(bytecodeOffset + current[2].u.operand); + break; + case op_jneq_ptr: + case op_jless: + case op_jlesseq: + case op_jgreater: + case op_jgreatereq: + case op_jnless: + case op_jnlesseq: + case op_jngreater: + case op_jngreatereq: + case op_save: // The jump of op_save is purely for calculating liveness. 
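+    // (Every opcode in the group above stores its jump target as a relative offset in
+    // operand 3, so the shared append below turns it into an absolute bytecode index.)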
+ out.append(bytecodeOffset + current[3].u.operand); + break; + case op_switch_imm: + case op_switch_char: { + SimpleJumpTable& table = codeBlock->switchJumpTable(current[1].u.operand); + for (unsigned i = table.branchOffsets.size(); i--;) + out.append(bytecodeOffset + table.branchOffsets[i]); + out.append(bytecodeOffset + current[2].u.operand); + break; + } + case op_switch_string: { + StringJumpTable& table = codeBlock->stringSwitchJumpTable(current[1].u.operand); + StringJumpTable::StringOffsetTable::iterator iter = table.offsetTable.begin(); + StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end(); + for (; iter != end; ++iter) + out.append(bytecodeOffset + iter->value.branchOffset); + out.append(bytecodeOffset + current[2].u.operand); + break; + } + case op_loop_hint: + out.append(bytecodeOffset); + break; + default: + break; + } } void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out) @@ -44,68 +91,18 @@ void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out) if (!codeBlock->numberOfJumpTargets()) return; - for (unsigned i = codeBlock->numberOfExceptionHandlers(); i--;) + for (unsigned i = codeBlock->numberOfExceptionHandlers(); i--;) { out.append(codeBlock->exceptionHandler(i).target); - + out.append(codeBlock->exceptionHandler(i).start); + out.append(codeBlock->exceptionHandler(i).end); + } + Interpreter* interpreter = codeBlock->vm()->interpreter; Instruction* instructionsBegin = codeBlock->instructions().begin(); unsigned instructionCount = codeBlock->instructions().size(); for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) { OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode); - Instruction* current = instructionsBegin + bytecodeOffset; - switch (opcodeID) { - case op_jmp: - out.append(bytecodeOffset + current[1].u.operand); - break; - case op_jtrue: - case op_jfalse: - case op_jeq_null: - case op_jneq_null: - out.append(bytecodeOffset + current[2].u.operand); - break; - case op_jneq_ptr: - case op_jless: - case op_jlesseq: - case op_jgreater: - case op_jgreatereq: - case op_jnless: - case op_jnlesseq: - case op_jngreater: - case op_jngreatereq: - out.append(bytecodeOffset + current[3].u.operand); - break; - case op_switch_imm: - addSimpleSwitchTargets(codeBlock->immediateSwitchJumpTable(current[1].u.operand), bytecodeOffset, out); - out.append(bytecodeOffset + current[2].u.operand); - break; - case op_switch_char: - addSimpleSwitchTargets(codeBlock->characterSwitchJumpTable(current[1].u.operand), bytecodeOffset, out); - out.append(bytecodeOffset + current[2].u.operand); - break; - case op_switch_string: { - StringJumpTable& table = codeBlock->stringSwitchJumpTable(current[1].u.operand); - StringJumpTable::StringOffsetTable::iterator iter = table.offsetTable.begin(); - StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end(); - for (; iter != end; ++iter) - out.append(bytecodeOffset + iter->value.branchOffset); - out.append(bytecodeOffset + current[2].u.operand); - break; - } - case op_get_pnames: - out.append(bytecodeOffset + current[5].u.operand); - break; - case op_next_pname: - out.append(bytecodeOffset + current[6].u.operand); - break; - case op_check_has_instance: - out.append(bytecodeOffset + current[4].u.operand); - break; - case op_loop_hint: - out.append(bytecodeOffset); - break; - default: - break; - } + getJumpTargetsForBytecodeOffset(codeBlock, interpreter, instructionsBegin, bytecodeOffset, out); bytecodeOffset += 
opcodeLengths[opcodeID]; } @@ -123,6 +120,14 @@ void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out) lastValue = value; } out.resize(toIndex); + out.shrinkToFit(); +} + +void findJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset, Vector<unsigned, 1>& out) +{ + Interpreter* interpreter = codeBlock->vm()->interpreter; + Instruction* instructionsBegin = codeBlock->instructions().begin(); + getJumpTargetsForBytecodeOffset(codeBlock, interpreter, instructionsBegin, bytecodeOffset, out); } } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h index 109c40cea..852413d77 100644 --- a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h +++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h @@ -30,8 +30,11 @@ namespace JSC { +// Return a sorted list of bytecode indices that are the destinations of jumps. void computePreciseJumpTargets(CodeBlock*, Vector<unsigned, 32>& out); +void findJumpTargetsForBytecodeOffset(CodeBlock*, unsigned bytecodeOffset, Vector<unsigned, 1>& out); + } // namespace JSC #endif // PreciseJumpTargets_h diff --git a/Source/JavaScriptCore/bytecode/PropertyCondition.cpp b/Source/JavaScriptCore/bytecode/PropertyCondition.cpp new file mode 100644 index 000000000..347b86f8b --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PropertyCondition.cpp @@ -0,0 +1,364 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ + +#include "config.h" +#include "PropertyCondition.h" + +#include "GetterSetter.h" +#include "JSCInlines.h" +#include "TrackedReferences.h" + +namespace JSC { + +static bool verbose = false; + +void PropertyCondition::dumpInContext(PrintStream& out, DumpContext* context) const +{ + if (!*this) { + out.print("<invalid>"); + return; + } + + out.print(m_kind, " of ", m_uid); + switch (m_kind) { + case Presence: + out.print(" at ", offset(), " with attributes ", attributes()); + return; + case Absence: + case AbsenceOfSetter: + out.print(" with prototype ", inContext(JSValue(prototype()), context)); + return; + case Equivalence: + out.print(" with ", inContext(requiredValue(), context)); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +void PropertyCondition::dump(PrintStream& out) const +{ + dumpInContext(out, nullptr); +} + +bool PropertyCondition::isStillValidAssumingImpurePropertyWatchpoint( + Structure* structure, JSObject* base) const +{ + if (verbose) { + dataLog( + "Determining validity of ", *this, " with structure ", pointerDump(structure), " and base ", + JSValue(base), " assuming impure property watchpoints are set.\n"); + } + + if (!*this) { + if (verbose) + dataLog("Invalid because unset.\n"); + return false; + } + + if (!structure->propertyAccessesAreCacheable()) { + if (verbose) + dataLog("Invalid because accesses are not cacheable.\n"); + return false; + } + + switch (m_kind) { + case Presence: { + unsigned currentAttributes; + PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes); + if (currentOffset != offset() || currentAttributes != attributes()) { + if (verbose) { + dataLog( + "Invalid because we need offset, attributes to be ", offset(), ", ", attributes(), + " but they are ", currentOffset, ", ", currentAttributes, "\n"); + } + return false; + } + return true; + } + + case Absence: { + if (structure->isDictionary()) { + if (verbose) + dataLog("Invalid because it's a dictionary.\n"); + return false; + } + + PropertyOffset currentOffset = structure->getConcurrently(uid()); + if (currentOffset != invalidOffset) { + if (verbose) + dataLog("Invalid because the property exists at offset: ", currentOffset, "\n"); + return false; + } + + if (structure->storedPrototypeObject() != prototype()) { + if (verbose) { + dataLog( + "Invalid because the prototype is ", structure->storedPrototype(), " even though " + "it should have been ", JSValue(prototype()), "\n"); + } + return false; + } + + return true; + } + + case AbsenceOfSetter: { + if (structure->isDictionary()) { + if (verbose) + dataLog("Invalid because it's a dictionary.\n"); + return false; + } + + unsigned currentAttributes; + PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes); + if (currentOffset != invalidOffset) { + if (currentAttributes & (Accessor | CustomAccessor)) { + if (verbose) { + dataLog( + "Invalid because we expected not to have a setter, but we have one at offset ", + currentOffset, " with attributes ", currentAttributes, "\n"); + } + return false; + } + } + + if (structure->storedPrototypeObject() != prototype()) { + if (verbose) { + dataLog( + "Invalid because the prototype is ", structure->storedPrototype(), " even though " + "it should have been ", JSValue(prototype()), "\n"); + } + return false; + } + + return true; + } + + case Equivalence: { + if (!base || base->structure() != structure) { + // Conservatively return false, since we cannot verify this one without having the + // object. 
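+                // (Editorial example with a hypothetical property: an Equivalence condition
+                // asserting base.foo == 42 can only be rechecked by loading foo from the
+                // actual base object; the structure proves where foo is stored, not what
+                // value it currently holds.)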
+ if (verbose) { + dataLog( + "Invalid because we don't have a base or the base has the wrong structure: ", + RawPointer(base), "\n"); + } + return false; + } + + // FIXME: This is somewhat racy, and maybe more risky than we want. + // https://bugs.webkit.org/show_bug.cgi?id=134641 + + PropertyOffset currentOffset = structure->getConcurrently(uid()); + if (currentOffset == invalidOffset) { + if (verbose) { + dataLog( + "Invalid because the base no longer appears to have ", uid(), " on its structure: ", + RawPointer(base), "\n"); + } + return false; + } + + JSValue currentValue = base->getDirect(currentOffset); + if (currentValue != requiredValue()) { + if (verbose) { + dataLog( + "Invalid because the value is ", currentValue, " but we require ", requiredValue(), + "\n"); + } + return false; + } + + return true; + } } + + RELEASE_ASSERT_NOT_REACHED(); + return false; +} + +bool PropertyCondition::validityRequiresImpurePropertyWatchpoint(Structure* structure) const +{ + if (!*this) + return false; + + switch (m_kind) { + case Presence: + case Absence: + case Equivalence: + return structure->needImpurePropertyWatchpoint(); + default: + return false; + } +} + +bool PropertyCondition::isStillValid(Structure* structure, JSObject* base) const +{ + if (!isStillValidAssumingImpurePropertyWatchpoint(structure, base)) + return false; + + // Currently we assume that an impure property can cause a property to appear, and can also + // "shadow" an existing JS property on the same object. Hence it affects both presence and + // absence. It doesn't affect AbsenceOfSetter because impure properties aren't ever setters. + switch (m_kind) { + case Absence: + if (structure->typeInfo().getOwnPropertySlotIsImpure() || structure->typeInfo().getOwnPropertySlotIsImpureForPropertyAbsence()) + return false; + break; + case Presence: + case Equivalence: + if (structure->typeInfo().getOwnPropertySlotIsImpure()) + return false; + break; + default: + break; + } + + return true; +} + +bool PropertyCondition::isWatchableWhenValid( + Structure* structure, WatchabilityEffort effort) const +{ + if (structure->transitionWatchpointSetHasBeenInvalidated()) + return false; + + switch (m_kind) { + case Equivalence: { + PropertyOffset offset = structure->getConcurrently(uid()); + + // This method should only be called when some variant of isValid returned true, which + // implies that we already confirmed that the structure knows of the property. We should + // also have verified that the Structure is a cacheable dictionary, which means we + // shouldn't have a TOCTOU race either.
+ RELEASE_ASSERT(offset != invalidOffset); + + WatchpointSet* set; + switch (effort) { + case MakeNoChanges: + set = structure->propertyReplacementWatchpointSet(offset); + break; + case EnsureWatchability: + set = structure->ensurePropertyReplacementWatchpointSet( + *Heap::heap(structure)->vm(), offset); + break; + } + + if (!set || !set->isStillValid()) + return false; + + break; + } + + default: + break; + } + + return true; +} + +bool PropertyCondition::isWatchableAssumingImpurePropertyWatchpoint( + Structure* structure, JSObject* base, WatchabilityEffort effort) const +{ + return isStillValidAssumingImpurePropertyWatchpoint(structure, base) + && isWatchableWhenValid(structure, effort); +} + +bool PropertyCondition::isWatchable( + Structure* structure, JSObject* base, WatchabilityEffort effort) const +{ + return isStillValid(structure, base) + && isWatchableWhenValid(structure, effort); +} + +bool PropertyCondition::isStillLive() const +{ + if (hasPrototype() && prototype() && !Heap::isMarked(prototype())) + return false; + + if (hasRequiredValue() + && requiredValue() + && requiredValue().isCell() + && !Heap::isMarked(requiredValue().asCell())) + return false; + + return true; +} + +void PropertyCondition::validateReferences(const TrackedReferences& tracked) const +{ + if (hasPrototype()) + tracked.check(prototype()); + + if (hasRequiredValue()) + tracked.check(requiredValue()); +} + +bool PropertyCondition::isValidValueForAttributes(JSValue value, unsigned attributes) +{ + bool attributesClaimAccessor = !!(attributes & Accessor); + bool valueClaimsAccessor = !!jsDynamicCast<GetterSetter*>(value); + return attributesClaimAccessor == valueClaimsAccessor; +} + +bool PropertyCondition::isValidValueForPresence(JSValue value) const +{ + return isValidValueForAttributes(value, attributes()); +} + +PropertyCondition PropertyCondition::attemptToMakeEquivalenceWithoutBarrier(JSObject* base) const +{ + Structure* structure = base->structure(); + if (!structure->isValidOffset(offset())) + return PropertyCondition(); + JSValue value = base->getDirect(offset()); + if (!isValidValueForPresence(value)) + return PropertyCondition(); + return equivalenceWithoutBarrier(uid(), value); +} + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream& out, JSC::PropertyCondition::Kind condition) +{ + switch (condition) { + case JSC::PropertyCondition::Presence: + out.print("Presence"); + return; + case JSC::PropertyCondition::Absence: + out.print("Absence"); + return; + case JSC::PropertyCondition::AbsenceOfSetter: + out.print("AbsenceOfSetter"); + return; + case JSC::PropertyCondition::Equivalence: + out.print("Equivalence"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF diff --git a/Source/JavaScriptCore/bytecode/PropertyCondition.h b/Source/JavaScriptCore/bytecode/PropertyCondition.h new file mode 100644 index 000000000..1d5568f8d --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PropertyCondition.h @@ -0,0 +1,338 @@ +/* + * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2.
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PropertyCondition_h +#define PropertyCondition_h + +#include "JSObject.h" +#include <wtf/HashMap.h> + +namespace JSC { + +class TrackedReferences; + +class PropertyCondition { +public: + enum Kind { + Presence, + Absence, + AbsenceOfSetter, + Equivalence // An adaptive watchpoint on this will be a pair of watchpoints, and when the structure transitions, we will set the replacement watchpoint on the new structure. + }; + + PropertyCondition() + : m_uid(nullptr) + , m_kind(Presence) + { + memset(&u, 0, sizeof(u)); + } + + PropertyCondition(WTF::HashTableDeletedValueType) + : m_uid(nullptr) + , m_kind(Absence) + { + memset(&u, 0, sizeof(u)); + } + + static PropertyCondition presenceWithoutBarrier(UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes) + { + PropertyCondition result; + result.m_uid = uid; + result.m_kind = Presence; + result.u.presence.offset = offset; + result.u.presence.attributes = attributes; + return result; + } + + static PropertyCondition presence( + VM&, JSCell*, UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes) + { + return presenceWithoutBarrier(uid, offset, attributes); + } + + // NOTE: The prototype is the storedPrototype not the prototypeForLookup. 
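+    // (Illustrative scenario with hypothetical objects: to cache the fact that o.foo misses,
+    // one records Absence("foo", proto) for o's structure and a matching condition for each
+    // structure further up the prototype chain; ObjectPropertyConditionSet is the usual
+    // container for such a chain.)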
+ static PropertyCondition absenceWithoutBarrier(UniquedStringImpl* uid, JSObject* prototype) + { + PropertyCondition result; + result.m_uid = uid; + result.m_kind = Absence; + result.u.absence.prototype = prototype; + return result; + } + + static PropertyCondition absence( + VM& vm, JSCell* owner, UniquedStringImpl* uid, JSObject* prototype) + { + if (owner) + vm.heap.writeBarrier(owner); + return absenceWithoutBarrier(uid, prototype); + } + + static PropertyCondition absenceOfSetterWithoutBarrier( + UniquedStringImpl* uid, JSObject* prototype) + { + PropertyCondition result; + result.m_uid = uid; + result.m_kind = AbsenceOfSetter; + result.u.absence.prototype = prototype; + return result; + } + + static PropertyCondition absenceOfSetter( + VM& vm, JSCell* owner, UniquedStringImpl* uid, JSObject* prototype) + { + if (owner) + vm.heap.writeBarrier(owner); + return absenceOfSetterWithoutBarrier(uid, prototype); + } + + static PropertyCondition equivalenceWithoutBarrier( + UniquedStringImpl* uid, JSValue value) + { + PropertyCondition result; + result.m_uid = uid; + result.m_kind = Equivalence; + result.u.equivalence.value = JSValue::encode(value); + return result; + } + + static PropertyCondition equivalence( + VM& vm, JSCell* owner, UniquedStringImpl* uid, JSValue value) + { + if (value.isCell() && owner) + vm.heap.writeBarrier(owner); + return equivalenceWithoutBarrier(uid, value); + } + + explicit operator bool() const { return m_uid || m_kind != Presence; } + + Kind kind() const { return m_kind; } + UniquedStringImpl* uid() const { return m_uid; } + + bool hasOffset() const { return !!*this && m_kind == Presence; }; + PropertyOffset offset() const + { + ASSERT(hasOffset()); + return u.presence.offset; + } + bool hasAttributes() const { return !!*this && m_kind == Presence; }; + unsigned attributes() const + { + ASSERT(hasAttributes()); + return u.presence.attributes; + } + + bool hasPrototype() const { return !!*this && (m_kind == Absence || m_kind == AbsenceOfSetter); } + JSObject* prototype() const + { + ASSERT(hasPrototype()); + return u.absence.prototype; + } + + bool hasRequiredValue() const { return !!*this && m_kind == Equivalence; } + JSValue requiredValue() const + { + ASSERT(hasRequiredValue()); + return JSValue::decode(u.equivalence.value); + } + + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; + + unsigned hash() const + { + unsigned result = WTF::PtrHash<UniquedStringImpl*>::hash(m_uid) + static_cast<unsigned>(m_kind); + switch (m_kind) { + case Presence: + result ^= u.presence.offset; + result ^= u.presence.attributes; + break; + case Absence: + case AbsenceOfSetter: + result ^= WTF::PtrHash<JSObject*>::hash(u.absence.prototype); + break; + case Equivalence: + result ^= EncodedJSValueHash::hash(u.equivalence.value); + break; + } + return result; + } + + bool operator==(const PropertyCondition& other) const + { + if (m_uid != other.m_uid) + return false; + if (m_kind != other.m_kind) + return false; + switch (m_kind) { + case Presence: + return u.presence.offset == other.u.presence.offset + && u.presence.attributes == other.u.presence.attributes; + case Absence: + case AbsenceOfSetter: + return u.absence.prototype == other.u.absence.prototype; + case Equivalence: + return u.equivalence.value == other.u.equivalence.value; + } + RELEASE_ASSERT_NOT_REACHED(); + return false; + } + + bool isHashTableDeletedValue() const + { + return !m_uid && m_kind == Absence; + } + + // Two conditions are compatible if they are identical or if they 
speak of different uids. If + // false is returned, you have to decide how to resolve the conflict - for example if there is + // a Presence and an Equivalence then in some cases you'll want the more general of the two + // while in other cases you'll want the more specific of the two. This will also return false + // for contradictions, like Presence and Absence on the same uid. By convention, invalid + // conditions aren't compatible with anything. + bool isCompatibleWith(const PropertyCondition& other) const + { + if (!*this || !other) + return false; + return *this == other || uid() != other.uid(); + } + + // Checks if the object's structure claims that the property won't be intercepted. + bool isStillValidAssumingImpurePropertyWatchpoint(Structure*, JSObject* base = nullptr) const; + + // Returns true if we need an impure property watchpoint to ensure validity even if + // isStillValidAssumingImpurePropertyWatchpoint() returned true. + bool validityRequiresImpurePropertyWatchpoint(Structure*) const; + + // Checks if the condition is still valid right now for the given object and structure. + // May conservatively return false, if the object and structure alone don't guarantee the + // condition. This happens for an Absence condition on an object that may have impure + // properties. If the object is not supplied, then a "true" return indicates that checking if + // an object has the given structure guarantees the condition still holds. If an object is + // supplied, then you may need to use some other watchpoints on the object to guarantee the + // condition in addition to the structure check. + bool isStillValid(Structure*, JSObject* base = nullptr) const; + + // In some cases, the condition is not watchable, but could be made watchable by enabling the + // appropriate watchpoint. For example, replacement watchpoints are enabled only when some + // access is cached on the property in some structure. This is mainly to save space for + // dictionary properties or properties that never get very hot. But, it's always safe to + // enable watching, provided that this is called from the main thread. + enum WatchabilityEffort { + // This is the default. It means that we don't change the state of any Structure or + // object, and implies that if the property happens not to be watchable then we don't make + // it watchable. This is mandatory if calling from a JIT thread. This is also somewhat + // preferable when first deciding whether to watch a condition for the first time (i.e. + // not from a watchpoint fire that causes us to see if we should adapt), since a + // watchpoint not being initialized for watching implies that maybe we don't know enough + // yet to make it profitable to watch -- as in, the thing being watched may not have + // stabilized yet. We prefer to only assume that a condition will hold if it has been + // known to hold for a while already. + MakeNoChanges, + + // Do what it takes to ensure that the property can be watched, if doing so has no + // user-observable effect. For now this just means that we will ensure that a property + // replacement watchpoint is enabled if it hadn't been enabled already. Do not use this + // from JIT threads, since the act of enabling watchpoints is not thread-safe. + EnsureWatchability + }; + + // This means that it's still valid and we could enforce validity by setting a transition + // watchpoint on the structure and possibly an impure property watchpoint.
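+    // (Minimal enforcement sketch, assuming caller-supplied watchpoint objects and a known
+    // property offset, and assuming the Structure APIs behave as used elsewhere in this commit:
+    //     structure->addTransitionWatchpoint(&transitionWatchpoint);
+    //     if (kind() == Equivalence)
+    //         structure->ensurePropertyReplacementWatchpointSet(vm, offset)->add(&replacementWatchpoint);
+    // )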
+ bool isWatchableAssumingImpurePropertyWatchpoint( + Structure*, JSObject* base = nullptr, WatchabilityEffort = MakeNoChanges) const; + + // This means that it's still valid and we could enforce validity by setting a transition + // watchpoint on the structure. + bool isWatchable( + Structure*, JSObject* base = nullptr, WatchabilityEffort = MakeNoChanges) const; + + bool watchingRequiresStructureTransitionWatchpoint() const + { + // Currently, this is required for all of our conditions. + return !!*this; + } + bool watchingRequiresReplacementWatchpoint() const + { + return !!*this && m_kind == Equivalence; + } + + // This means that the objects involved in this are still live. + bool isStillLive() const; + + void validateReferences(const TrackedReferences&) const; + + static bool isValidValueForAttributes(JSValue value, unsigned attributes); + + bool isValidValueForPresence(JSValue) const; + + PropertyCondition attemptToMakeEquivalenceWithoutBarrier(JSObject* base) const; + +private: + bool isWatchableWhenValid(Structure*, WatchabilityEffort) const; + + UniquedStringImpl* m_uid; + Kind m_kind; + union { + struct { + PropertyOffset offset; + unsigned attributes; + } presence; + struct { + JSObject* prototype; + } absence; + struct { + EncodedJSValue value; + } equivalence; + } u; +}; + +struct PropertyConditionHash { + static unsigned hash(const PropertyCondition& key) { return key.hash(); } + static bool equal( + const PropertyCondition& a, const PropertyCondition& b) + { + return a == b; + } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream&, JSC::PropertyCondition::Kind); + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::PropertyCondition> { + typedef JSC::PropertyConditionHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::PropertyCondition> : SimpleClassHashTraits<JSC::PropertyCondition> { }; + +} // namespace WTF + +#endif // PropertyCondition_h + diff --git a/Source/JavaScriptCore/bytecode/PutByIdFlags.cpp b/Source/JavaScriptCore/bytecode/PutByIdFlags.cpp new file mode 100644 index 000000000..f28090049 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PutByIdFlags.cpp @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "PutByIdFlags.h" + +#include "InferredType.h" +#include <wtf/CommaPrinter.h> +#include <wtf/PrintStream.h> +#include <wtf/StringPrintStream.h> + +namespace WTF { + +using namespace JSC; + +void printInternal(PrintStream& out, PutByIdFlags flags) { + CommaPrinter comma("|"); + if (flags & PutByIdIsDirect) + out.print(comma, "IsDirect"); + + InferredType::Kind kind = InferredType::kindForFlags(flags); + out.print(comma, kind); + if (InferredType::hasStructure(kind)) + out.print(":", bitwise_cast<int32_t>(decodeStructureID(flags))); +} + +} // namespace WTF + diff --git a/Source/JavaScriptCore/bytecode/PutByIdFlags.h b/Source/JavaScriptCore/bytecode/PutByIdFlags.h new file mode 100644 index 000000000..6ad364393 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PutByIdFlags.h @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PutByIdFlags_h +#define PutByIdFlags_h + +#include "StructureIDTable.h" + +namespace JSC { + +enum PutByIdFlags : intptr_t { + PutByIdNone = 0, + + // This flag indicates that the put_by_id is direct. That means that we store the property without + // checking if the prototype chain has a setter. + PutByIdIsDirect = 0x1, + PutByIdPersistentFlagsMask = 0x1, + + // NOTE: The values below must be in sync with what is in LowLevelInterpreter.asm. + + // Determining the required inferred type involves first checking the primary type mask, and then + // using that to figure out the meaning of the secondary mask: + // switch (flags & PutByIdPrimaryTypeMask) { + // case PutByIdPrimaryTypeSecondary: + // switch (flags & PutByIdSecondaryTypeMask) { + // ... 
+ // } + // break; + // case PutByIdPrimaryTypeObjectWithStructure: + // case PutByIdPrimaryTypeObjectWithStructureOrOther: + // StructureID structureID = decodeStructureID(flags); + // break; + // } + PutByIdPrimaryTypeMask = 0x6, + PutByIdPrimaryTypeSecondary = 0x0, // Need to check the secondary type mask for the type. + PutByIdPrimaryTypeObjectWithStructure = 0x2, // Secondary type has structure ID. + PutByIdPrimaryTypeObjectWithStructureOrOther = 0x4, // Secondary type has structure ID. + + PutByIdSecondaryTypeMask = -0x8, + PutByIdSecondaryTypeBottom = 0x0, + PutByIdSecondaryTypeBoolean = 0x8, + PutByIdSecondaryTypeOther = 0x10, + PutByIdSecondaryTypeInt32 = 0x18, + PutByIdSecondaryTypeNumber = 0x20, + PutByIdSecondaryTypeString = 0x28, + PutByIdSecondaryTypeSymbol = 0x30, + PutByIdSecondaryTypeObject = 0x38, + PutByIdSecondaryTypeObjectOrOther = 0x40, + PutByIdSecondaryTypeTop = 0x48 +}; + +inline PutByIdFlags encodeStructureID(StructureID id) +{ +#if USE(JSVALUE64) + return static_cast<PutByIdFlags>(static_cast<PutByIdFlags>(id) << 3); +#else + PutByIdFlags result = bitwise_cast<PutByIdFlags>(id); + ASSERT(!(result & ~PutByIdSecondaryTypeMask)); + return result; +#endif +} + +inline StructureID decodeStructureID(PutByIdFlags flags) +{ +#if USE(JSVALUE64) + return static_cast<StructureID>(flags >> 3); +#else + return bitwise_cast<StructureID>(flags & PutByIdSecondaryTypeMask); +#endif +} + +} // namespace JSC + +namespace WTF { + +class PrintStream; + +void printInternal(PrintStream&, JSC::PutByIdFlags); + +} // namespace WTF + +#endif // PutByIdFlags_h + diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp index 24a57eb50..3d066b9ae 100644 --- a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp +++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,193 +27,391 @@ #include "PutByIdStatus.h" #include "CodeBlock.h" +#include "ComplexGetStatus.h" #include "LLIntData.h" #include "LowLevelInterpreter.h" -#include "Operations.h" +#include "JSCInlines.h" +#include "PolymorphicAccess.h" #include "Structure.h" #include "StructureChain.h" +#include <wtf/ListDump.h> namespace JSC { -PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident) +bool PutByIdStatus::appendVariant(const PutByIdVariant& variant) +{ + for (unsigned i = 0; i < m_variants.size(); ++i) { + if (m_variants[i].attemptToMerge(variant)) + return true; + } + for (unsigned i = 0; i < m_variants.size(); ++i) { + if (m_variants[i].oldStructure().overlaps(variant.oldStructure())) + return false; + } + m_variants.append(variant); + return true; +} + +#if ENABLE(DFG_JIT) +bool PutByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex) +{ + return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache)) + || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache)); + +} +#endif + +PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid) { UNUSED_PARAM(profiledBlock); UNUSED_PARAM(bytecodeIndex); - UNUSED_PARAM(ident); -#if ENABLE(LLINT) + UNUSED_PARAM(uid); + + VM& vm = *profiledBlock->vm(); + Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex; - Structure* structure = instruction[4].u.structure.get(); - if (!structure) - return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset); + StructureID structureID = instruction[4].u.structureID; + if (!structureID) + return PutByIdStatus(NoInformation); - if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id) - || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_out_of_line)) { - PropertyOffset offset = structure->get(*profiledBlock->vm(), ident); + Structure* structure = vm.heap.structureIDTable().get(structureID); + + StructureID newStructureID = instruction[6].u.structureID; + if (!newStructureID) { + PropertyOffset offset = structure->getConcurrently(uid); if (!isValidOffset(offset)) - return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset); + return PutByIdStatus(NoInformation); - return PutByIdStatus(SimpleReplace, structure, 0, 0, offset); + return PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid)); } + + Structure* newStructure = vm.heap.structureIDTable().get(newStructureID); ASSERT(structure->transitionWatchpointSetHasBeenInvalidated()); - ASSERT(instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct) - || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal) - || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct_out_of_line) - || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal_out_of_line)); - - Structure* newStructure = instruction[6].u.structure.get(); - StructureChain* chain = instruction[7].u.structureChain.get(); - ASSERT(newStructure); - ASSERT(chain); - - PropertyOffset offset = newStructure->get(*profiledBlock->vm(), ident); + PropertyOffset offset = newStructure->getConcurrently(uid); if (!isValidOffset(offset)) - return 
PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset); + return PutByIdStatus(NoInformation); - return PutByIdStatus(SimpleTransition, structure, newStructure, chain, offset); -#else - return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset); -#endif + ObjectPropertyConditionSet conditionSet; + if (!(instruction[8].u.putByIdFlags & PutByIdIsDirect)) { + conditionSet = + generateConditionsForPropertySetterMissConcurrently( + *profiledBlock->vm(), profiledBlock->globalObject(), structure, uid); + if (!conditionSet.isValid()) + return PutByIdStatus(NoInformation); + } + + return PutByIdVariant::transition( + structure, newStructure, conditionSet, offset, newStructure->inferredTypeDescriptorFor(uid)); } -PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident) +PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid) { + ConcurrentJITLocker locker(profiledBlock->m_lock); + UNUSED_PARAM(profiledBlock); UNUSED_PARAM(bytecodeIndex); - UNUSED_PARAM(ident); -#if ENABLE(JIT) && ENABLE(VALUE_PROFILER) - if (!profiledBlock->numberOfStructureStubInfos()) - return computeFromLLInt(profiledBlock, bytecodeIndex, ident); - - if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex)) - return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset); + UNUSED_PARAM(uid); +#if ENABLE(DFG_JIT) + if (hasExitSite(locker, profiledBlock, bytecodeIndex)) + return PutByIdStatus(TakesSlowPath); - StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex); - if (!stubInfo.seen) - return computeFromLLInt(profiledBlock, bytecodeIndex, ident); + StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex)); + PutByIdStatus result = computeForStubInfo( + locker, profiledBlock, stubInfo, uid, + CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex)); + if (!result) + return computeFromLLInt(profiledBlock, bytecodeIndex, uid); - if (stubInfo.resetByGC) - return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset); + return result; +#else // ENABLE(JIT) + UNUSED_PARAM(map); + return PutByIdStatus(NoInformation); +#endif // ENABLE(JIT) +} - switch (stubInfo.accessType) { - case access_unset: - // If the JIT saw it but didn't optimize it, then assume that this takes slow path. - return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset); +#if ENABLE(JIT) +PutByIdStatus PutByIdStatus::computeForStubInfo(const ConcurrentJITLocker& locker, CodeBlock* baselineBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid) +{ + return computeForStubInfo( + locker, baselineBlock, stubInfo, uid, + CallLinkStatus::computeExitSiteData(locker, baselineBlock, codeOrigin.bytecodeIndex)); +} + +PutByIdStatus PutByIdStatus::computeForStubInfo( + const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, + UniquedStringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData) +{ + if (!stubInfo || !stubInfo->everConsidered) + return PutByIdStatus(); + + if (stubInfo->tookSlowPath) + return PutByIdStatus(TakesSlowPath); + + switch (stubInfo->cacheType) { + case CacheType::Unset: + // This means that we attempted to cache but failed for some reason. 
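// (Editor's note, inferred from the surrounding code rather than taken from
// the original source: the empty PutByIdStatus returned above when there is
// no stub info lets computeFor() fall back to computeFromLLInt(), whereas
// the TakesSlowPath returned below pins the site to the slow path.)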
+ return PutByIdStatus(TakesSlowPath); - case access_put_by_id_replace: { - PropertyOffset offset = stubInfo.u.putByIdReplace.baseObjectStructure->get( - *profiledBlock->vm(), ident); + case CacheType::PutByIdReplace: { + PropertyOffset offset = + stubInfo->u.byIdSelf.baseObjectStructure->getConcurrently(uid); if (isValidOffset(offset)) { - return PutByIdStatus( - SimpleReplace, - stubInfo.u.putByIdReplace.baseObjectStructure.get(), - 0, 0, - offset); + return PutByIdVariant::replace( + stubInfo->u.byIdSelf.baseObjectStructure.get(), offset, InferredType::Top); } - return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset); + return PutByIdStatus(TakesSlowPath); } - case access_put_by_id_transition_normal: - case access_put_by_id_transition_direct: { - ASSERT(stubInfo.u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated()); - PropertyOffset offset = stubInfo.u.putByIdTransition.structure->get( - *profiledBlock->vm(), ident); - if (isValidOffset(offset)) { - return PutByIdStatus( - SimpleTransition, - stubInfo.u.putByIdTransition.previousStructure.get(), - stubInfo.u.putByIdTransition.structure.get(), - stubInfo.u.putByIdTransition.chain.get(), - offset); + case CacheType::Stub: { + PolymorphicAccess* list = stubInfo->u.stub; + + PutByIdStatus result; + result.m_state = Simple; + + State slowPathState = TakesSlowPath; + for (unsigned i = 0; i < list->size(); ++i) { + const AccessCase& access = list->at(i); + if (access.doesCalls()) + slowPathState = MakesCalls; } - return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset); + + for (unsigned i = 0; i < list->size(); ++i) { + const AccessCase& access = list->at(i); + if (access.viaProxy()) + return PutByIdStatus(slowPathState); + + PutByIdVariant variant; + + switch (access.type()) { + case AccessCase::Replace: { + Structure* structure = access.structure(); + PropertyOffset offset = structure->getConcurrently(uid); + if (!isValidOffset(offset)) + return PutByIdStatus(slowPathState); + variant = PutByIdVariant::replace( + structure, offset, structure->inferredTypeDescriptorFor(uid)); + break; + } + + case AccessCase::Transition: { + PropertyOffset offset = + access.newStructure()->getConcurrently(uid); + if (!isValidOffset(offset)) + return PutByIdStatus(slowPathState); + ObjectPropertyConditionSet conditionSet = access.conditionSet(); + if (!conditionSet.structuresEnsureValidity()) + return PutByIdStatus(slowPathState); + variant = PutByIdVariant::transition( + access.structure(), access.newStructure(), conditionSet, offset, + access.newStructure()->inferredTypeDescriptorFor(uid)); + break; + } + + case AccessCase::Setter: { + Structure* structure = access.structure(); + + ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor( + structure, access.conditionSet(), uid); + + switch (complexGetStatus.kind()) { + case ComplexGetStatus::ShouldSkip: + continue; + + case ComplexGetStatus::TakesSlowPath: + return PutByIdStatus(slowPathState); + + case ComplexGetStatus::Inlineable: { + CallLinkInfo* callLinkInfo = access.callLinkInfo(); + ASSERT(callLinkInfo); + std::unique_ptr<CallLinkStatus> callLinkStatus = + std::make_unique<CallLinkStatus>( + CallLinkStatus::computeFor( + locker, profiledBlock, *callLinkInfo, callExitSiteData)); + + variant = PutByIdVariant::setter( + structure, complexGetStatus.offset(), complexGetStatus.conditionSet(), + WTFMove(callLinkStatus)); + } } + break; + } + + case AccessCase::CustomValueSetter: + case AccessCase::CustomAccessorSetter: + return PutByIdStatus(MakesCalls); + + 
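// (Editor's illustration, not part of the original source. In JavaScript
// terms, assuming plain objects, the cases above correspond roughly to:
//     o = { f: 1 }; o.f = 2;    // Replace: "f" already exists at a known offset.
//     o = {}; o.f = 1;          // Transition: o moves to a new Structure.
//     a setter for "f" on o or  // Setter: the store makes a call, tracked by
//     one of its prototypes     // the CallLinkStatus captured above.
// Each cacheable AccessCase becomes one PutByIdVariant via appendVariant();
// a Transition and a Replace with the same offset can even merge into one
// variant, see PutByIdVariant::attemptToMergeTransitionWithReplace() later
// in this patch.)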
default: + return PutByIdStatus(slowPathState); + } + + if (!result.appendVariant(variant)) + return PutByIdStatus(slowPathState); + } + + return result; } default: - return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset); + return PutByIdStatus(TakesSlowPath); } -#else // ENABLE(JIT) - return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset); -#endif // ENABLE(JIT) } +#endif -PutByIdStatus PutByIdStatus::computeFor(VM& vm, JSGlobalObject* globalObject, Structure* structure, Identifier& ident, bool isDirect) +PutByIdStatus PutByIdStatus::computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid) { - if (PropertyName(ident).asIndex() != PropertyName::NotAnIndex) - return PutByIdStatus(TakesSlowPath); - - if (structure->typeInfo().overridesGetOwnPropertySlot()) - return PutByIdStatus(TakesSlowPath); +#if ENABLE(DFG_JIT) + if (dfgBlock) { + CallLinkStatus::ExitSiteData exitSiteData; + { + ConcurrentJITLocker locker(baselineBlock->m_lock); + if (hasExitSite(locker, baselineBlock, codeOrigin.bytecodeIndex)) + return PutByIdStatus(TakesSlowPath); + exitSiteData = CallLinkStatus::computeExitSiteData( + locker, baselineBlock, codeOrigin.bytecodeIndex); + } + + PutByIdStatus result; + { + ConcurrentJITLocker locker(dfgBlock->m_lock); + result = computeForStubInfo( + locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData); + } + + // We use TakesSlowPath in some cases where the stub was unset. That's weird and + // it would be better not to do that. But it means that we have to defend + // ourselves here. + if (result.isSimple()) + return result; + } +#else + UNUSED_PARAM(dfgBlock); + UNUSED_PARAM(dfgMap); +#endif - if (!structure->propertyAccessesAreCacheable()) + return computeFor(baselineBlock, baselineMap, codeOrigin.bytecodeIndex, uid); +} + +PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, UniquedStringImpl* uid, bool isDirect) +{ + if (parseIndex(*uid)) return PutByIdStatus(TakesSlowPath); + + if (set.isEmpty()) + return PutByIdStatus(); - unsigned attributes; - JSCell* specificValue; - PropertyOffset offset = structure->get(vm, ident, attributes, specificValue); - if (isValidOffset(offset)) { - if (attributes & (Accessor | ReadOnly)) + PutByIdStatus result; + result.m_state = Simple; + for (unsigned i = 0; i < set.size(); ++i) { + Structure* structure = set[i]; + + if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType) return PutByIdStatus(TakesSlowPath); - if (specificValue) { - // We need the PutById slow path to verify that we're storing the right value into - // the specialized slot. + + if (!structure->propertyAccessesAreCacheable()) return PutByIdStatus(TakesSlowPath); + + unsigned attributes; + PropertyOffset offset = structure->getConcurrently(uid, attributes); + if (isValidOffset(offset)) { + if (attributes & CustomAccessor) + return PutByIdStatus(MakesCalls); + + if (attributes & (Accessor | ReadOnly)) + return PutByIdStatus(TakesSlowPath); + + WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset); + if (!replaceSet || replaceSet->isStillValid()) { + // When this executes, it'll create, and fire, this replacement watchpoint set. + // That means that this has probably never executed or that something fishy is + // going on. 
Also, we cannot create or fire the watchpoint set from the concurrent + // JIT thread, so even if we wanted to do this, we'd need to have a lazy thingy. + // So, better leave this alone and take slow path. + return PutByIdStatus(TakesSlowPath); + } + + PutByIdVariant variant = + PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid)); + if (!result.appendVariant(variant)) + return PutByIdStatus(TakesSlowPath); + continue; } - return PutByIdStatus(SimpleReplace, structure, 0, 0, offset); - } - // Our hypothesis is that we're doing a transition. Before we prove that this is really - // true, we want to do some sanity checks. + // Our hypothesis is that we're doing a transition. Before we prove that this is really + // true, we want to do some sanity checks. - // Don't cache put transitions on dictionaries. - if (structure->isDictionary()) - return PutByIdStatus(TakesSlowPath); + // Don't cache put transitions on dictionaries. + if (structure->isDictionary()) + return PutByIdStatus(TakesSlowPath); - // If the structure corresponds to something that isn't an object, then give up, since - // we don't want to be adding properties to strings. - if (structure->typeInfo().type() == StringType) - return PutByIdStatus(TakesSlowPath); + // If the structure corresponds to something that isn't an object, then give up, since + // we don't want to be adding properties to strings. + if (!structure->typeInfo().isObject()) + return PutByIdStatus(TakesSlowPath); + + ObjectPropertyConditionSet conditionSet; + if (!isDirect) { + conditionSet = generateConditionsForPropertySetterMissConcurrently( + globalObject->vm(), globalObject, structure, uid); + if (!conditionSet.isValid()) + return PutByIdStatus(TakesSlowPath); + } - if (!isDirect) { - // If the prototype chain has setters or read-only properties, then give up. - if (structure->prototypeChainMayInterceptStoreTo(vm, ident)) + // We only optimize if there is already a structure that the transition is cached to. + Structure* transition = + Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset); + if (!transition) return PutByIdStatus(TakesSlowPath); - - // If the prototype chain hasn't been normalized (i.e. there are proxies or dictionaries) - // then give up. The dictionary case would only happen if this structure has not been - // used in an optimized put_by_id transition. And really the only reason why we would - // bail here is that I don't really feel like having the optimizing JIT go and flatten - // dictionaries if we have evidence to suggest that those objects were never used as - // prototypes in a cacheable prototype access - i.e. there's a good chance that some of - // the other checks below will fail. - if (!isPrototypeChainNormalized(globalObject, structure)) + ASSERT(isValidOffset(offset)); + + bool didAppend = result.appendVariant( + PutByIdVariant::transition( + structure, transition, conditionSet, offset, + transition->inferredTypeDescriptorFor(uid))); + if (!didAppend) return PutByIdStatus(TakesSlowPath); } - // We only optimize if there is already a structure that the transition is cached to. - // Among other things, this allows us to guard against a transition with a specific - // value. - // - // - If we're storing a value that could be specific: this would only be a problem if - // the existing transition did have a specific value already, since if it didn't, - // then we would behave "as if" we were not storing a specific value. 
If it did - // have a specific value, then we'll know - the fact that we pass 0 for - // specificValue will tell us. - // - // - If we're not storing a value that could be specific: again, this would only be a - // problem if the existing transition did have a specific value, which we check for - // by passing 0 for the specificValue. - Structure* transition = Structure::addPropertyTransitionToExistingStructure(structure, ident, 0, 0, offset); - if (!transition) - return PutByIdStatus(TakesSlowPath); // This occurs in bizarre cases only. See above. - ASSERT(!transition->transitionDidInvolveSpecificValue()); - ASSERT(isValidOffset(offset)); - - return PutByIdStatus( - SimpleTransition, structure, transition, - structure->prototypeChain(vm, globalObject), offset); + return result; +} + +bool PutByIdStatus::makesCalls() const +{ + if (m_state == MakesCalls) + return true; + + if (m_state != Simple) + return false; + + for (unsigned i = m_variants.size(); i--;) { + if (m_variants[i].makesCalls()) + return true; + } + + return false; +} + +void PutByIdStatus::dump(PrintStream& out) const +{ + switch (m_state) { + case NoInformation: + out.print("(NoInformation)"); + return; + + case Simple: + out.print("(", listDump(m_variants), ")"); + return; + + case TakesSlowPath: + out.print("(TakesSlowPath)"); + return; + case MakesCalls: + out.print("(MakesCalls)"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); } } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.h b/Source/JavaScriptCore/bytecode/PutByIdStatus.h index 659e629d2..b0473472a 100644 --- a/Source/JavaScriptCore/bytecode/PutByIdStatus.h +++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,13 +26,15 @@ #ifndef PutByIdStatus_h #define PutByIdStatus_h -#include "PropertyOffset.h" -#include <wtf/NotFound.h> +#include "CallLinkStatus.h" +#include "ExitingJITType.h" +#include "PutByIdVariant.h" +#include "StructureStubInfo.h" +#include <wtf/text/StringImpl.h> namespace JSC { class CodeBlock; -class Identifier; class VM; class JSGlobalObject; class Structure; @@ -43,77 +45,70 @@ public: enum State { // It's uncached so we have no information. NoInformation, - // It's cached as a direct store into an object property for cases where the object - // already has the property. - SimpleReplace, - // It's cached as a transition from one structure that lacks the property to one that - // includes the property, and a direct store to this new property. - SimpleTransition, + // It's cached as a simple store of some kind. + Simple, // It's known to often take slow path. - TakesSlowPath + TakesSlowPath, + // It's known to take paths that make calls. 
+ MakesCalls }; PutByIdStatus() : m_state(NoInformation) - , m_oldStructure(0) - , m_newStructure(0) - , m_structureChain(0) - , m_offset(invalidOffset) { } explicit PutByIdStatus(State state) : m_state(state) - , m_oldStructure(0) - , m_newStructure(0) - , m_structureChain(0) - , m_offset(invalidOffset) { - ASSERT(m_state == NoInformation || m_state == TakesSlowPath); + ASSERT(m_state == NoInformation || m_state == TakesSlowPath || m_state == MakesCalls); } - PutByIdStatus( - State state, - Structure* oldStructure, - Structure* newStructure, - StructureChain* structureChain, - PropertyOffset offset) - : m_state(state) - , m_oldStructure(oldStructure) - , m_newStructure(newStructure) - , m_structureChain(structureChain) - , m_offset(offset) + PutByIdStatus(const PutByIdVariant& variant) + : m_state(Simple) { - ASSERT((m_state == NoInformation || m_state == TakesSlowPath) == !m_oldStructure); - ASSERT((m_state != SimpleTransition) == !m_newStructure); - ASSERT((m_state != SimpleTransition) == !m_structureChain); - ASSERT((m_state == NoInformation || m_state == TakesSlowPath) == (m_offset == invalidOffset)); + m_variants.append(variant); } - static PutByIdStatus computeFor(CodeBlock*, unsigned bytecodeIndex, Identifier&); - static PutByIdStatus computeFor(VM&, JSGlobalObject*, Structure*, Identifier&, bool isDirect); + static PutByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, UniquedStringImpl* uid); + static PutByIdStatus computeFor(JSGlobalObject*, const StructureSet&, UniquedStringImpl* uid, bool isDirect); + + static PutByIdStatus computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin, UniquedStringImpl* uid); + +#if ENABLE(JIT) + static PutByIdStatus computeForStubInfo(const ConcurrentJITLocker&, CodeBlock* baselineBlock, StructureStubInfo*, CodeOrigin, UniquedStringImpl* uid); +#endif State state() const { return m_state; } bool isSet() const { return m_state != NoInformation; } bool operator!() const { return m_state == NoInformation; } - bool isSimpleReplace() const { return m_state == SimpleReplace; } - bool isSimpleTransition() const { return m_state == SimpleTransition; } - bool takesSlowPath() const { return m_state == TakesSlowPath; } + bool isSimple() const { return m_state == Simple; } + bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls; } + bool makesCalls() const; - Structure* oldStructure() const { return m_oldStructure; } - Structure* newStructure() const { return m_newStructure; } - StructureChain* structureChain() const { return m_structureChain; } - PropertyOffset offset() const { return m_offset; } + size_t numVariants() const { return m_variants.size(); } + const Vector<PutByIdVariant, 1>& variants() const { return m_variants; } + const PutByIdVariant& at(size_t index) const { return m_variants[index]; } + const PutByIdVariant& operator[](size_t index) const { return at(index); } + + void dump(PrintStream&) const; private: - static PutByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, Identifier&); +#if ENABLE(DFG_JIT) + static bool hasExitSite(const ConcurrentJITLocker&, CodeBlock*, unsigned bytecodeIndex); +#endif +#if ENABLE(JIT) + static PutByIdStatus computeForStubInfo( + const ConcurrentJITLocker&, CodeBlock*, StructureStubInfo*, UniquedStringImpl* uid, + CallLinkStatus::ExitSiteData); +#endif + static PutByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid); + + bool appendVariant(const 
PutByIdVariant&); State m_state; - Structure* m_oldStructure; - Structure* m_newStructure; - StructureChain* m_structureChain; - PropertyOffset m_offset; + Vector<PutByIdVariant, 1> m_variants; }; } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp b/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp new file mode 100644 index 000000000..9904c625b --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp @@ -0,0 +1,249 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "PutByIdVariant.h" + +#include "CallLinkStatus.h" +#include "JSCInlines.h" +#include <wtf/ListDump.h> + +namespace JSC { + +PutByIdVariant::PutByIdVariant(const PutByIdVariant& other) + : PutByIdVariant() +{ + *this = other; +} + +PutByIdVariant& PutByIdVariant::operator=(const PutByIdVariant& other) +{ + m_kind = other.m_kind; + m_oldStructure = other.m_oldStructure; + m_newStructure = other.m_newStructure; + m_conditionSet = other.m_conditionSet; + m_offset = other.m_offset; + m_requiredType = other.m_requiredType; + if (other.m_callLinkStatus) + m_callLinkStatus = std::make_unique<CallLinkStatus>(*other.m_callLinkStatus); + else + m_callLinkStatus = nullptr; + return *this; +} + +PutByIdVariant PutByIdVariant::replace( + const StructureSet& structure, PropertyOffset offset, const InferredType::Descriptor& requiredType) +{ + PutByIdVariant result; + result.m_kind = Replace; + result.m_oldStructure = structure; + result.m_offset = offset; + result.m_requiredType = requiredType; + return result; +} + +PutByIdVariant PutByIdVariant::transition( + const StructureSet& oldStructure, Structure* newStructure, + const ObjectPropertyConditionSet& conditionSet, PropertyOffset offset, + const InferredType::Descriptor& requiredType) +{ + PutByIdVariant result; + result.m_kind = Transition; + result.m_oldStructure = oldStructure; + result.m_newStructure = newStructure; + result.m_conditionSet = conditionSet; + result.m_offset = offset; + result.m_requiredType = requiredType; + return result; +} + +PutByIdVariant PutByIdVariant::setter( + const StructureSet& structure, PropertyOffset offset, + const ObjectPropertyConditionSet& conditionSet, + std::unique_ptr<CallLinkStatus> callLinkStatus) +{ + PutByIdVariant result; + result.m_kind = Setter; + result.m_oldStructure = structure; + result.m_conditionSet = conditionSet; + result.m_offset = offset; + result.m_callLinkStatus = WTFMove(callLinkStatus); + result.m_requiredType = InferredType::Top; + return result; +} + +Structure* PutByIdVariant::oldStructureForTransition() const +{ + ASSERT(kind() == Transition); + ASSERT(m_oldStructure.size() <= 2); + for (unsigned i = m_oldStructure.size(); i--;) { + Structure* structure = m_oldStructure[i]; + if (structure != m_newStructure) + return structure; + } + RELEASE_ASSERT_NOT_REACHED(); + + return nullptr; +} + +bool PutByIdVariant::writesStructures() const +{ + switch (kind()) { + case Transition: + case Setter: + return true; + default: + return false; + } +} + +bool PutByIdVariant::reallocatesStorage() const +{ + switch (kind()) { + case Transition: + return oldStructureForTransition()->outOfLineCapacity() != newStructure()->outOfLineCapacity(); + case Setter: + return true; + default: + return false; + } +} + +bool PutByIdVariant::makesCalls() const +{ + return kind() == Setter; +} + +bool PutByIdVariant::attemptToMerge(const PutByIdVariant& other) +{ + if (m_offset != other.m_offset) + return false; + + if (m_requiredType != other.m_requiredType) + return false; + + switch (m_kind) { + case Replace: { + switch (other.m_kind) { + case Replace: { + ASSERT(m_conditionSet.isEmpty()); + ASSERT(other.m_conditionSet.isEmpty()); + + m_oldStructure.merge(other.m_oldStructure); + return true; + } + + case Transition: { + PutByIdVariant newVariant = other; + if (newVariant.attemptToMergeTransitionWithReplace(*this)) { + *this = newVariant; + return true; + } + return false; + } + + default: + return false; + } + } + + case Transition: + switch (other.m_kind) { + case Replace: + 
return attemptToMergeTransitionWithReplace(other); + + default: + return false; + } + + default: + return false; + } +} + +bool PutByIdVariant::attemptToMergeTransitionWithReplace(const PutByIdVariant& replace) +{ + ASSERT(m_kind == Transition); + ASSERT(replace.m_kind == Replace); + ASSERT(m_offset == replace.m_offset); + ASSERT(!replace.writesStructures()); + ASSERT(!replace.reallocatesStorage()); + ASSERT(replace.conditionSet().isEmpty()); + + // This sort of merging only works when we have one path along which we add a new field which + // transitions to structure S while the other path was already on structure S. This doesn't + // work if we need to reallocate anything or if the replace path is polymorphic. + + if (reallocatesStorage()) + return false; + + if (replace.m_oldStructure.onlyStructure() != m_newStructure) + return false; + + m_oldStructure.merge(m_newStructure); + return true; +} + +void PutByIdVariant::dump(PrintStream& out) const +{ + dumpInContext(out, 0); +} + +void PutByIdVariant::dumpInContext(PrintStream& out, DumpContext* context) const +{ + switch (kind()) { + case NotSet: + out.print("<empty>"); + return; + + case Replace: + out.print( + "<Replace: ", inContext(structure(), context), ", offset = ", offset(), ", ", + inContext(requiredType(), context), ">"); + return; + + case Transition: + out.print( + "<Transition: ", inContext(oldStructure(), context), " -> ", + pointerDumpInContext(newStructure(), context), ", [", + inContext(m_conditionSet, context), "], offset = ", offset(), ", ", + inContext(requiredType(), context), ">"); + return; + + case Setter: + out.print( + "<Setter: ", inContext(structure(), context), ", [", + inContext(m_conditionSet, context), "]"); + out.print(", offset = ", m_offset); + out.print(", call = ", *m_callLinkStatus); + out.print(">"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/PutByIdVariant.h b/Source/JavaScriptCore/bytecode/PutByIdVariant.h new file mode 100644 index 000000000..29cd08d03 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PutByIdVariant.h @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef PutByIdVariant_h +#define PutByIdVariant_h + +#include "ObjectPropertyConditionSet.h" +#include "PropertyOffset.h" +#include "StructureSet.h" + +namespace JSC { + +class CallLinkStatus; + +class PutByIdVariant { +public: + enum Kind { + NotSet, + Replace, + Transition, + Setter + }; + + PutByIdVariant() + : m_kind(NotSet) + , m_newStructure(nullptr) + , m_offset(invalidOffset) + { + } + + PutByIdVariant(const PutByIdVariant&); + PutByIdVariant& operator=(const PutByIdVariant&); + + static PutByIdVariant replace(const StructureSet&, PropertyOffset, const InferredType::Descriptor&); + + static PutByIdVariant transition( + const StructureSet& oldStructure, Structure* newStructure, + const ObjectPropertyConditionSet&, PropertyOffset, const InferredType::Descriptor&); + + static PutByIdVariant setter( + const StructureSet&, PropertyOffset, const ObjectPropertyConditionSet&, + std::unique_ptr<CallLinkStatus>); + + Kind kind() const { return m_kind; } + + bool isSet() const { return kind() != NotSet; } + bool operator!() const { return !isSet(); } + + const StructureSet& structure() const + { + ASSERT(kind() == Replace || kind() == Setter); + return m_oldStructure; + } + + const StructureSet& structureSet() const + { + return structure(); + } + + const StructureSet& oldStructure() const + { + ASSERT(kind() == Transition || kind() == Replace || kind() == Setter); + return m_oldStructure; + } + + StructureSet& oldStructure() + { + ASSERT(kind() == Transition || kind() == Replace || kind() == Setter); + return m_oldStructure; + } + + Structure* oldStructureForTransition() const; + + Structure* newStructure() const + { + ASSERT(kind() == Transition); + return m_newStructure; + } + + InferredType::Descriptor requiredType() const + { + return m_requiredType; + } + + bool writesStructures() const; + bool reallocatesStorage() const; + bool makesCalls() const; + + const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; } + + // We don't support intrinsics for Setters (it would be sweet if we did) but we need this for templated helpers. + Intrinsic intrinsic() const { return NoIntrinsic; } + + PropertyOffset offset() const + { + ASSERT(isSet()); + return m_offset; + } + + CallLinkStatus* callLinkStatus() const + { + ASSERT(kind() == Setter); + return m_callLinkStatus.get(); + } + + bool attemptToMerge(const PutByIdVariant& other); + + void dump(PrintStream&) const; + void dumpInContext(PrintStream&, DumpContext*) const; + +private: + bool attemptToMergeTransitionWithReplace(const PutByIdVariant& replace); + + Kind m_kind; + StructureSet m_oldStructure; + Structure* m_newStructure; + ObjectPropertyConditionSet m_conditionSet; + PropertyOffset m_offset; + InferredType::Descriptor m_requiredType; + std::unique_ptr<CallLinkStatus> m_callLinkStatus; +}; + +} // namespace JSC + +#endif // PutByIdVariant_h + diff --git a/Source/JavaScriptCore/bytecode/ReduceWhitespace.cpp b/Source/JavaScriptCore/bytecode/ReduceWhitespace.cpp index 1636cba1d..d1f25b01f 100644 --- a/Source/JavaScriptCore/bytecode/ReduceWhitespace.cpp +++ b/Source/JavaScriptCore/bytecode/ReduceWhitespace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,27 +26,29 @@ #include "config.h" #include "ReduceWhitespace.h" -#include <wtf/text/StringBuilder.h> -#include <wtf/text/WTFString.h> +#include <wtf/ASCIICType.h> +#include <wtf/StringPrintStream.h> namespace JSC { -String reduceWhitespace(const String& input) +CString reduceWhitespace(const CString& input) { - StringBuilder builder; + StringPrintStream out; + + const char* data = input.data(); for (unsigned i = 0; i < input.length();) { - if (isASCIISpace(input[i])) { - while (i < input.length() && isASCIISpace(input[i])) + if (isASCIISpace(data[i])) { + while (i < input.length() && isASCIISpace(data[i])) ++i; - builder.append(' '); + out.print(CharacterDump(' ')); continue; } - builder.append(input[i]); + out.print(CharacterDump(data[i])); ++i; } - return builder.toString(); + return out.toCString(); } } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/ReduceWhitespace.h b/Source/JavaScriptCore/bytecode/ReduceWhitespace.h index 383dd3798..121caf2c2 100644 --- a/Source/JavaScriptCore/bytecode/ReduceWhitespace.h +++ b/Source/JavaScriptCore/bytecode/ReduceWhitespace.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,12 +26,12 @@ #ifndef ReduceWhitespace_h #define ReduceWhitespace_h -#include <wtf/text/WTFString.h> +#include <wtf/text/CString.h> namespace JSC { // Replace all whitespace runs with a single space. -String reduceWhitespace(const String&); +CString reduceWhitespace(const CString&); } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h b/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h deleted file mode 100644 index 6763ff7c8..000000000 --- a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef ResolveGlobalStatus_h -#define ResolveGlobalStatus_h - -#include "JSCJSValue.h" -#include "PropertyOffset.h" -#include <wtf/NotFound.h> - -namespace JSC { - -class CodeBlock; -class Identifier; -struct ResolveOperation; -class Structure; - -class ResolveGlobalStatus { -public: - enum State { - NoInformation, - Simple, - TakesSlowPath - }; - - ResolveGlobalStatus() - : m_state(NoInformation) - , m_structure(0) - , m_offset(invalidOffset) - { - } - - ResolveGlobalStatus( - State state, Structure* structure = 0, PropertyOffset offset = invalidOffset, - JSValue specificValue = JSValue()) - : m_state(state) - , m_structure(structure) - , m_offset(offset) - , m_specificValue(specificValue) - { - } - - static ResolveGlobalStatus computeFor(CodeBlock*, int bytecodeIndex, ResolveOperation*, Identifier&); - - State state() const { return m_state; } - - bool isSet() const { return m_state != NoInformation; } - bool operator!() const { return !isSet(); } - bool isSimple() const { return m_state == Simple; } - bool takesSlowPath() const { return m_state == TakesSlowPath; } - - Structure* structure() const { return m_structure; } - PropertyOffset offset() const { return m_offset; } - JSValue specificValue() const { return m_specificValue; } - -private: - State m_state; - Structure* m_structure; - PropertyOffset m_offset; - JSValue m_specificValue; -}; // class ResolveGlobalStatus - -} // namespace JSC - -#endif // ResolveGlobalStatus_h - diff --git a/Source/JavaScriptCore/bytecode/ResolveOperation.h b/Source/JavaScriptCore/bytecode/ResolveOperation.h deleted file mode 100644 index 1543ef209..000000000 --- a/Source/JavaScriptCore/bytecode/ResolveOperation.h +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef ResolveOperation_h -#define ResolveOperation_h - -#include "PropertyOffset.h" -#include "WriteBarrier.h" - -#include <wtf/Vector.h> - -namespace JSC { - -class Structure; - -struct ResolveOperation { - typedef enum { - Fail, - SetBaseToUndefined, - ReturnScopeAsBase, - SetBaseToScope, - SetBaseToGlobal, - GetAndReturnScopedVar, - GetAndReturnGlobalVar, - GetAndReturnGlobalVarWatchable, - SkipTopScopeNode, - SkipScopes, - ReturnGlobalObjectAsBase, - GetAndReturnGlobalProperty, - CheckForDynamicEntriesBeforeGlobalScope - } ResolveOperationType; - - ResolveOperationType m_operation; - WriteBarrier<Structure> m_structure; - union { - PropertyOffset m_offset; - WriteBarrier<Unknown>* m_registerAddress; - int m_scopesToSkip; - int m_activationRegister; - }; - static ResolveOperation getAndReturnScopedVar(PropertyOffset offset) - { - ResolveOperation op; - op.m_operation = GetAndReturnScopedVar; - op.m_offset = offset; - return op; - } - static ResolveOperation checkForDynamicEntriesBeforeGlobalScope() - { - ResolveOperation op; - op.m_operation = CheckForDynamicEntriesBeforeGlobalScope; - return op; - } - - static ResolveOperation getAndReturnGlobalVar(WriteBarrier<Unknown>* registerAddress, bool couldBeWatched) - { - ResolveOperation op; - op.m_operation = couldBeWatched ? GetAndReturnGlobalVarWatchable : GetAndReturnGlobalVar; - op.m_registerAddress = registerAddress; - return op; - } - static ResolveOperation getAndReturnGlobalProperty() - { - ResolveOperation op; - op.m_operation = GetAndReturnGlobalProperty; - return op; - } - static ResolveOperation resolveFail() - { - ResolveOperation op; - op.m_operation = Fail; - return op; - } - static ResolveOperation skipTopScopeNode(int activationRegister) - { - ResolveOperation op; - op.m_operation = SkipTopScopeNode; - op.m_activationRegister = activationRegister; - return op; - } - static ResolveOperation skipScopes(int scopesToSkip) - { - ResolveOperation op; - op.m_operation = SkipScopes; - op.m_scopesToSkip = scopesToSkip; - return op; - } - static ResolveOperation returnGlobalObjectAsBase() - { - ResolveOperation op; - op.m_operation = ReturnGlobalObjectAsBase; - return op; - } - static ResolveOperation setBaseToGlobal() - { - ResolveOperation op; - op.m_operation = SetBaseToGlobal; - return op; - } - static ResolveOperation setBaseToUndefined() - { - ResolveOperation op; - op.m_operation = SetBaseToUndefined; - return op; - } - static ResolveOperation setBaseToScope() - { - ResolveOperation op; - op.m_operation = SetBaseToScope; - return op; - } - static ResolveOperation returnScopeAsBase() - { - ResolveOperation op; - op.m_operation = ReturnScopeAsBase; - return op; - } -}; - -typedef Vector<ResolveOperation> ResolveOperations; - -struct PutToBaseOperation { - PutToBaseOperation(bool isStrict) - : m_kind(Uninitialised) - , m_isDynamic(false) - , m_isStrict(isStrict) - , m_predicatePointer(0) - { - - } - enum Kind { Uninitialised, Generic, Readonly, GlobalVariablePut, GlobalVariablePutChecked, GlobalPropertyPut, VariablePut }; - union { - Kind m_kind : 8; - uint8_t m_kindAsUint8; - }; - bool m_isDynamic : 8; - bool m_isStrict : 8; - union { - bool* m_predicatePointer; - unsigned m_scopeDepth; - }; - WriteBarrier<Structure> m_structure; - union { - // Used for GlobalVariablePut - WriteBarrier<Unknown>* m_registerAddress; - - // Used for GlobalPropertyPut and VariablePut - struct { - PropertyOffset m_offset; - int32_t m_offsetInButterfly; - }; - }; -}; -} - -#endif // ResolveOperation_h diff --git 
a/Source/JavaScriptCore/bytecode/SamplingTool.cpp b/Source/JavaScriptCore/bytecode/SamplingTool.cpp index d18dbc1ff..f5bf2b72a 100644 --- a/Source/JavaScriptCore/bytecode/SamplingTool.cpp +++ b/Source/JavaScriptCore/bytecode/SamplingTool.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2009, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -32,6 +32,7 @@ #include "CodeBlock.h" #include "Interpreter.h" #include "Opcode.h" +#include "JSCInlines.h" #if !OS(WINDOWS) #include <unistd.h> @@ -284,7 +285,7 @@ void SamplingTool::doRun() #if ENABLE(CODEBLOCK_SAMPLING) if (CodeBlock* codeBlock = sample.codeBlock()) { - MutexLocker locker(m_scriptSampleMapMutex); + LockHolder locker(m_scriptSampleMapMutex); ScriptSampleRecord* record = m_scopeSampleMap->get(codeBlock->ownerExecutable()); ASSERT(record); record->sample(codeBlock, sample.vPC()); @@ -300,7 +301,7 @@ void SamplingTool::sample() void SamplingTool::notifyOfScope(VM& vm, ScriptExecutable* script) { #if ENABLE(CODEBLOCK_SAMPLING) - MutexLocker locker(m_scriptSampleMapMutex); + LockHolder locker(m_scriptSampleMapMutex); m_scopeSampleMap->set(script, adoptPtr(new ScriptSampleRecord(vm, script))); #else UNUSED_PARAM(vm); diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.h b/Source/JavaScriptCore/bytecode/SamplingTool.h index 516968a33..18e348377 100644 --- a/Source/JavaScriptCore/bytecode/SamplingTool.h +++ b/Source/JavaScriptCore/bytecode/SamplingTool.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2013, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
 *
@@ -35,6 +35,7 @@
 #include <wtf/Assertions.h>
 #include <wtf/Atomics.h>
 #include <wtf/HashMap.h>
+#include <wtf/Lock.h>
 #include <wtf/MainThread.h>
 #include <wtf/Spectrum.h>
 #include <wtf/Threading.h>
@@ -211,7 +212,7 @@ namespace JSC {
         unsigned m_size;
     };

-    typedef HashMap<ScriptExecutable*, OwnPtr<ScriptSampleRecord> > ScriptSampleRecordMap;
+    typedef HashMap<ScriptExecutable*, std::unique_ptr<ScriptSampleRecord>> ScriptSampleRecordMap;

     class SamplingThread {
     public:
@@ -227,6 +228,7 @@
     };

     class SamplingTool {
+        WTF_MAKE_FAST_ALLOCATED;
     public:
         friend struct CallRecord;

@@ -271,7 +273,7 @@
             , m_sampleCount(0)
             , m_opcodeSampleCount(0)
 #if ENABLE(CODEBLOCK_SAMPLING)
-            , m_scopeSampleMap(adoptPtr(new ScriptSampleRecordMap))
+            , m_scopeSampleMap(std::make_unique<ScriptSampleRecordMap>())
 #endif
         {
             memset(m_opcodeSamples, 0, sizeof(m_opcodeSamples));
@@ -337,8 +339,8 @@
         unsigned m_opcodeSamplesInCTIFunctions[numOpcodeIDs];

 #if ENABLE(CODEBLOCK_SAMPLING)
-        Mutex m_scriptSampleMapMutex;
-        OwnPtr<ScriptSampleRecordMap> m_scopeSampleMap;
+        Lock m_scriptSampleMapMutex;
+        std::unique_ptr<ScriptSampleRecordMap> m_scopeSampleMap;
 #endif
     };
diff --git a/Source/JavaScriptCore/bytecode/SpecialPointer.cpp b/Source/JavaScriptCore/bytecode/SpecialPointer.cpp
index 7789653f0..dc5a363b6 100644
--- a/Source/JavaScriptCore/bytecode/SpecialPointer.cpp
+++ b/Source/JavaScriptCore/bytecode/SpecialPointer.cpp
@@ -28,6 +28,7 @@

 #include "CodeBlock.h"
 #include "JSGlobalObject.h"
+#include "JSCInlines.h"

 namespace JSC {
diff --git a/Source/JavaScriptCore/bytecode/SpecialPointer.h b/Source/JavaScriptCore/bytecode/SpecialPointer.h
index c18a6e904..64fb23fcf 100644
--- a/Source/JavaScriptCore/bytecode/SpecialPointer.h
+++ b/Source/JavaScriptCore/bytecode/SpecialPointer.h
@@ -41,6 +41,11 @@ enum Pointer {
 };
 } // namespace Special

+enum class LinkTimeConstant {
+    DefinePropertyFunction,
+};
+const unsigned LinkTimeConstantCount = 1;
+
 inline bool pointerIsFunction(Special::Pointer pointer)
 {
     ASSERT_UNUSED(pointer, pointer < Special::TableSize);
diff --git a/Source/JavaScriptCore/bytecode/SpeculatedType.cpp b/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
index 0e33b650f..af67f4504 100644
--- a/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
+++ b/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2013, 2015-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 * its contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
* @@ -29,13 +29,13 @@ #include "config.h" #include "SpeculatedType.h" -#include "Arguments.h" +#include "DirectArguments.h" #include "JSArray.h" #include "JSFunction.h" -#include "Operations.h" +#include "JSCInlines.h" +#include "ScopedArguments.h" #include "StringObject.h" #include "ValueProfile.h" -#include <wtf/BoundsCheckedPointer.h> #include <wtf/StringPrintStream.h> namespace JSC { @@ -51,105 +51,161 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value) bool isTop = true; - if (value & SpecCellOther) - myOut.print("Othercell"); - else - isTop = false; + if ((value & SpecCell) == SpecCell) + myOut.print("Cell"); + else { + if ((value & SpecObject) == SpecObject) + myOut.print("Object"); + else { + if (value & SpecCellOther) + myOut.print("Othercell"); + else + isTop = false; - if (value & SpecObjectOther) - myOut.print("Otherobj"); - else - isTop = false; + if (value & SpecObjectOther) + myOut.print("Otherobj"); + else + isTop = false; - if (value & SpecFinalObject) - myOut.print("Final"); - else - isTop = false; + if (value & SpecFinalObject) + myOut.print("Final"); + else + isTop = false; - if (value & SpecArray) - myOut.print("Array"); - else - isTop = false; + if (value & SpecArray) + myOut.print("Array"); + else + isTop = false; - if (value & SpecInt8Array) - myOut.print("Int8array"); - else - isTop = false; + if (value & SpecInt8Array) + myOut.print("Int8array"); + else + isTop = false; - if (value & SpecInt16Array) - myOut.print("Int16array"); - else - isTop = false; + if (value & SpecInt16Array) + myOut.print("Int16array"); + else + isTop = false; - if (value & SpecInt32Array) - myOut.print("Int32array"); - else - isTop = false; + if (value & SpecInt32Array) + myOut.print("Int32array"); + else + isTop = false; - if (value & SpecUint8Array) - myOut.print("Uint8array"); - else - isTop = false; + if (value & SpecUint8Array) + myOut.print("Uint8array"); + else + isTop = false; - if (value & SpecUint8ClampedArray) - myOut.print("Uint8clampedarray"); - else - isTop = false; + if (value & SpecUint8ClampedArray) + myOut.print("Uint8clampedarray"); + else + isTop = false; - if (value & SpecUint16Array) - myOut.print("Uint16array"); - else - isTop = false; + if (value & SpecUint16Array) + myOut.print("Uint16array"); + else + isTop = false; - if (value & SpecUint32Array) - myOut.print("Uint32array"); - else - isTop = false; + if (value & SpecUint32Array) + myOut.print("Uint32array"); + else + isTop = false; - if (value & SpecFloat32Array) - myOut.print("Float32array"); - else - isTop = false; + if (value & SpecFloat32Array) + myOut.print("Float32array"); + else + isTop = false; - if (value & SpecFloat64Array) - myOut.print("Float64array"); - else - isTop = false; + if (value & SpecFloat64Array) + myOut.print("Float64array"); + else + isTop = false; - if (value & SpecFunction) - myOut.print("Function"); - else - isTop = false; + if (value & SpecFunction) + myOut.print("Function"); + else + isTop = false; - if (value & SpecArguments) - myOut.print("Arguments"); - else - isTop = false; + if (value & SpecDirectArguments) + myOut.print("Directarguments"); + else + isTop = false; - if (value & SpecString) - myOut.print("String"); - else - isTop = false; + if (value & SpecScopedArguments) + myOut.print("Scopedarguments"); + else + isTop = false; - if (value & SpecStringObject) - myOut.print("Stringobject"); - else - isTop = false; + if (value & SpecStringObject) + myOut.print("Stringobject"); + else + isTop = false; - if (value & SpecInt32) - myOut.print("Int"); - else - isTop = 
false;
+    if (value & SpecRegExpObject)
+        myOut.print("Regexpobject");
+    else
+        isTop = false;
+    }
+
+    if ((value & SpecString) == SpecString)
+        myOut.print("String");
+    else {
+        if (value & SpecStringIdent)
+            myOut.print("Stringident");
+        else
+            isTop = false;
+
+        if (value & SpecStringVar)
+            myOut.print("Stringvar");
+        else
+            isTop = false;
+    }
+
+    if (value & SpecSymbol)
+        myOut.print("Symbol");
+    else
+        isTop = false;
+    }

-    if (value & SpecDoubleReal)
-        myOut.print("Doublereal");
-    else
-        isTop = false;
+    if (value == SpecInt32)
+        myOut.print("Int32");
+    else {
+        if (value & SpecBoolInt32)
+            myOut.print("Boolint32");
+        else
+            isTop = false;
+
+        if (value & SpecNonBoolInt32)
+            myOut.print("Nonboolint32");
+        else
+            isTop = false;
+    }

-    if (value & SpecDoubleNaN)
-        myOut.print("Doublenan");
-    else
-        isTop = false;
+    if (value & SpecInt52)
+        myOut.print("Int52");
+
+    if ((value & SpecBytecodeDouble) == SpecBytecodeDouble)
+        myOut.print("Bytecodedouble");
+    else {
+        if (value & SpecInt52AsDouble)
+            myOut.print("Int52asdouble");
+        else
+            isTop = false;
+
+        if (value & SpecNonIntAsDouble)
+            myOut.print("Nonintasdouble");
+        else
+            isTop = false;
+
+        if (value & SpecDoublePureNaN)
+            myOut.print("Doublepurenan");
+        else
+            isTop = false;
+    }
+
+    if (value & SpecDoubleImpureNaN)
+        myOut.print("Doubleimpurenan");

     if (value & SpecBoolean)
         myOut.print("Bool");
@@ -178,6 +234,8 @@ static const char* speculationToAbbreviatedString(SpeculatedType prediction)
         return "<Final>";
     if (isArraySpeculation(prediction))
         return "<Array>";
+    if (isStringIdentSpeculation(prediction))
+        return "<StringIdent>";
     if (isStringSpeculation(prediction))
         return "<String>";
     if (isFunctionSpeculation(prediction))
@@ -198,26 +256,40 @@ static const char* speculationToAbbreviatedString(SpeculatedType prediction)
         return "<Float32array>";
     if (isFloat64ArraySpeculation(prediction))
         return "<Float64array>";
-    if (isArgumentsSpeculation(prediction))
-        return "<Arguments>";
+    if (isDirectArgumentsSpeculation(prediction))
+        return "<DirectArguments>";
+    if (isScopedArgumentsSpeculation(prediction))
+        return "<ScopedArguments>";
     if (isStringObjectSpeculation(prediction))
         return "<StringObject>";
+    if (isRegExpObjectSpeculation(prediction))
+        return "<RegExpObject>";
     if (isStringOrStringObjectSpeculation(prediction))
         return "<StringOrStringObject>";
     if (isObjectSpeculation(prediction))
         return "<Object>";
     if (isCellSpeculation(prediction))
         return "<Cell>";
+    if (isBoolInt32Speculation(prediction))
+        return "<BoolInt32>";
     if (isInt32Speculation(prediction))
         return "<Int32>";
+    if (isInt52AsDoubleSpeculation(prediction))
+        return "<Int52AsDouble>";
+    if (isInt52Speculation(prediction))
+        return "<Int52>";
+    if (isMachineIntSpeculation(prediction))
+        return "<MachineInt>";
     if (isDoubleSpeculation(prediction))
         return "<Double>";
-    if (isNumberSpeculation(prediction))
+    if (isFullNumberSpeculation(prediction))
         return "<Number>";
     if (isBooleanSpeculation(prediction))
         return "<Boolean>";
     if (isOtherSpeculation(prediction))
         return "<Other>";
+    if (isMiscSpeculation(prediction))
+        return "<Misc>";
     return "";
 }

@@ -226,49 +298,62 @@
 void dumpSpeculationAbbreviated(PrintStream& out, SpeculatedType value)
 {
     out.print(speculationToAbbreviatedString(value));
 }

+SpeculatedType speculationFromTypedArrayType(TypedArrayType type)
+{
+    switch (type) {
+    case TypeInt8:
+        return SpecInt8Array;
+    case TypeInt16:
+        return SpecInt16Array;
+    case TypeInt32:
+        return SpecInt32Array;
+    case TypeUint8:
+        return SpecUint8Array;
+    case TypeUint8Clamped:
+        return
SpecUint8ClampedArray; + case TypeUint16: + return SpecUint16Array; + case TypeUint32: + return SpecUint32Array; + case TypeFloat32: + return SpecFloat32Array; + case TypeFloat64: + return SpecFloat64Array; + case NotTypedArray: + case TypeDataView: + break; + } + RELEASE_ASSERT_NOT_REACHED(); + return SpecNone; +} + SpeculatedType speculationFromClassInfo(const ClassInfo* classInfo) { - if (classInfo == &JSFinalObject::s_info) + if (classInfo == JSFinalObject::info()) return SpecFinalObject; - if (classInfo == &JSArray::s_info) + if (classInfo == JSArray::info()) return SpecArray; - if (classInfo == &Arguments::s_info) - return SpecArguments; + if (classInfo == DirectArguments::info()) + return SpecDirectArguments; - if (classInfo == &StringObject::s_info) + if (classInfo == ScopedArguments::info()) + return SpecScopedArguments; + + if (classInfo == StringObject::info()) return SpecStringObject; + + if (classInfo == RegExpObject::info()) + return SpecRegExpObject; - if (classInfo->isSubClassOf(&JSFunction::s_info)) + if (classInfo->isSubClassOf(JSFunction::info())) return SpecFunction; - if (classInfo->typedArrayStorageType != TypedArrayNone) { - switch (classInfo->typedArrayStorageType) { - case TypedArrayInt8: - return SpecInt8Array; - case TypedArrayInt16: - return SpecInt16Array; - case TypedArrayInt32: - return SpecInt32Array; - case TypedArrayUint8: - return SpecUint8Array; - case TypedArrayUint8Clamped: - return SpecUint8ClampedArray; - case TypedArrayUint16: - return SpecUint16Array; - case TypedArrayUint32: - return SpecUint32Array; - case TypedArrayFloat32: - return SpecFloat32Array; - case TypedArrayFloat64: - return SpecFloat64Array; - default: - break; - } - } + if (isTypedView(classInfo->typedArrayStorageType)) + return speculationFromTypedArrayType(classInfo->typedArrayStorageType); - if (classInfo->isSubClassOf(&JSObject::s_info)) + if (classInfo->isSubClassOf(JSObject::info())) return SpecObjectOther; return SpecCellOther; @@ -278,11 +363,20 @@ SpeculatedType speculationFromStructure(Structure* structure) { if (structure->typeInfo().type() == StringType) return SpecString; + if (structure->typeInfo().type() == SymbolType) + return SpecSymbol; return speculationFromClassInfo(structure->classInfo()); } SpeculatedType speculationFromCell(JSCell* cell) { + if (JSString* string = jsDynamicCast<JSString*>(cell)) { + if (const StringImpl* impl = string->tryGetValueImpl()) { + if (impl->isAtomic()) + return SpecStringIdent; + } + return SpecStringVar; + } return speculationFromStructure(cell->structure()); } @@ -290,13 +384,18 @@ SpeculatedType speculationFromValue(JSValue value) { if (value.isEmpty()) return SpecEmpty; - if (value.isInt32()) - return SpecInt32; + if (value.isInt32()) { + if (value.asInt32() & ~1) + return SpecNonBoolInt32; + return SpecBoolInt32; + } if (value.isDouble()) { double number = value.asNumber(); - if (number == number) - return SpecDoubleReal; - return SpecDoubleNaN; + if (number != number) + return SpecDoublePureNaN; + if (value.isMachineInt()) + return SpecInt52AsDouble; + return SpecNonIntAsDouble; } if (value.isCell()) return speculationFromCell(value.asCell()); @@ -306,5 +405,168 @@ SpeculatedType speculationFromValue(JSValue value) return SpecOther; } +TypedArrayType typedArrayTypeFromSpeculation(SpeculatedType type) +{ + if (isInt8ArraySpeculation(type)) + return TypeInt8; + + if (isInt16ArraySpeculation(type)) + return TypeInt16; + + if (isInt32ArraySpeculation(type)) + return TypeInt32; + + if (isUint8ArraySpeculation(type)) + return 
TypeUint8; + + if (isUint8ClampedArraySpeculation(type)) + return TypeUint8Clamped; + + if (isUint16ArraySpeculation(type)) + return TypeUint16; + + if (isUint32ArraySpeculation(type)) + return TypeUint32; + + if (isFloat32ArraySpeculation(type)) + return TypeFloat32; + + if (isFloat64ArraySpeculation(type)) + return TypeFloat64; + + return NotTypedArray; +} + +SpeculatedType leastUpperBoundOfStrictlyEquivalentSpeculations(SpeculatedType type) +{ + if (type & SpecInteger) + type |= SpecInteger; + if (type & SpecString) + type |= SpecString; + return type; +} + +bool valuesCouldBeEqual(SpeculatedType a, SpeculatedType b) +{ + a = leastUpperBoundOfStrictlyEquivalentSpeculations(a); + b = leastUpperBoundOfStrictlyEquivalentSpeculations(b); + + // Anything could be equal to a string. + if (a & SpecString) + return true; + if (b & SpecString) + return true; + + // If both sides are definitely only objects, then equality is fairly sane. + if (isObjectSpeculation(a) && isObjectSpeculation(b)) + return !!(a & b); + + // If either side could be an object or not, then we could call toString or + // valueOf, which could return anything. + if (a & SpecObject) + return true; + if (b & SpecObject) + return true; + + // Neither side is an object or string, so the world is relatively sane. + return !!(a & b); +} + +SpeculatedType typeOfDoubleSum(SpeculatedType a, SpeculatedType b) +{ + SpeculatedType result = a | b; + // Impure NaN could become pure NaN during addition because addition may clear bits. + if (result & SpecDoubleImpureNaN) + result |= SpecDoublePureNaN; + // Values could overflow, or fractions could become integers. + if (result & SpecDoubleReal) + result |= SpecDoubleReal; + return result; +} + +SpeculatedType typeOfDoubleDifference(SpeculatedType a, SpeculatedType b) +{ + return typeOfDoubleSum(a, b); +} + +SpeculatedType typeOfDoubleProduct(SpeculatedType a, SpeculatedType b) +{ + return typeOfDoubleSum(a, b); +} + +static SpeculatedType polluteDouble(SpeculatedType value) +{ + // Impure NaN could become pure NaN because the operation could clear some bits. + if (value & SpecDoubleImpureNaN) + value |= SpecDoubleNaN; + // Values could overflow, fractions could become integers, or an error could produce + // PureNaN. + if (value & SpecDoubleReal) + value |= SpecDoubleReal | SpecDoublePureNaN; + return value; +} + +SpeculatedType typeOfDoubleQuotient(SpeculatedType a, SpeculatedType b) +{ + return polluteDouble(a | b); +} + +SpeculatedType typeOfDoubleMinMax(SpeculatedType a, SpeculatedType b) +{ + SpeculatedType result = a | b; + // Impure NaN could become pure NaN during min/max because the operation may clear bits. + if (result & SpecDoubleImpureNaN) + result |= SpecDoublePureNaN; + return result; +} + +SpeculatedType typeOfDoubleNegation(SpeculatedType value) +{ + // Impure NaN could become pure NaN because bits might get cleared. + if (value & SpecDoubleImpureNaN) + value |= SpecDoublePureNaN; + // We could get negative zero, which mixes SpecInt52AsDouble and SpecNonIntAsDouble. + // We could also overflow a large negative int into something that is no longer + // representable as an int. + if (value & SpecDoubleReal) + value |= SpecDoubleReal; + return value; +} + +SpeculatedType typeOfDoubleAbs(SpeculatedType value) +{ + return typeOfDoubleNegation(value); +} + +SpeculatedType typeOfDoubleRounding(SpeculatedType value) +{ + // We might lose bits, which leads to a NaN being purified.
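+ // Concretely: rounding may canonicalize an impure NaN's payload, so the result must admit
+ // SpecDoublePureNaN; and a non-integer like 2.5 rounds to a value that is Int52-representable,
+ // so a SpecNonIntAsDouble input must admit SpecInt52AsDouble. Both widenings happen below.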
+ if (value & SpecDoubleImpureNaN) + value |= SpecDoublePureNaN; + // We might lose bits, which leads to a value becoming integer-representable. + if (value & SpecNonIntAsDouble) + value |= SpecInt52AsDouble; + return value; +} + +SpeculatedType typeOfDoublePow(SpeculatedType xValue, SpeculatedType yValue) +{ + // Math.pow() always returns NaN if the exponent is NaN, unlike std::pow(). + // We always set a pure NaN in that case. + if (yValue & SpecDoubleNaN) + xValue |= SpecDoublePureNaN; + return polluteDouble(xValue); +} + +SpeculatedType typeOfDoubleBinaryOp(SpeculatedType a, SpeculatedType b) +{ + return polluteDouble(a | b); +} + +SpeculatedType typeOfDoubleUnaryOp(SpeculatedType value) +{ + return polluteDouble(value); +} + } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/SpeculatedType.h b/Source/JavaScriptCore/bytecode/SpeculatedType.h index 05788f0f1..1ebbeb1f5 100644 --- a/Source/JavaScriptCore/bytecode/SpeculatedType.h +++ b/Source/JavaScriptCore/bytecode/SpeculatedType.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -30,6 +30,7 @@ #define SpeculatedType_h #include "JSCJSValue.h" +#include "TypedArrayType.h" #include <wtf/PrintStream.h> namespace JSC { @@ -37,38 +38,58 @@ namespace JSC { class Structure; typedef uint32_t SpeculatedType; -static const SpeculatedType SpecNone = 0x00000000; // We don't know anything yet. -static const SpeculatedType SpecFinalObject = 0x00000001; // It's definitely a JSFinalObject. -static const SpeculatedType SpecArray = 0x00000002; // It's definitely a JSArray. -static const SpeculatedType SpecFunction = 0x00000008; // It's definitely a JSFunction or one of its subclasses. -static const SpeculatedType SpecInt8Array = 0x00000010; // It's definitely an Int8Array or one of its subclasses. -static const SpeculatedType SpecInt16Array = 0x00000020; // It's definitely an Int16Array or one of its subclasses. -static const SpeculatedType SpecInt32Array = 0x00000040; // It's definitely an Int32Array or one of its subclasses. -static const SpeculatedType SpecUint8Array = 0x00000080; // It's definitely an Uint8Array or one of its subclasses. -static const SpeculatedType SpecUint8ClampedArray = 0x00000100; // It's definitely an Uint8ClampedArray or one of its subclasses. -static const SpeculatedType SpecUint16Array = 0x00000200; // It's definitely an Uint16Array or one of its subclasses. -static const SpeculatedType SpecUint32Array = 0x00000400; // It's definitely an Uint32Array or one of its subclasses. -static const SpeculatedType SpecFloat32Array = 0x00000800; // It's definitely an Uint16Array or one of its subclasses. -static const SpeculatedType SpecFloat64Array = 0x00001000; // It's definitely an Uint16Array or one of its subclasses.
-static const SpeculatedType SpecArguments = 0x00002000; // It's definitely an Arguments object. -static const SpeculatedType SpecStringObject = 0x00004000; // It's definitely a StringObject. -static const SpeculatedType SpecObjectOther = 0x00008000; // It's definitely an object but not JSFinalObject, JSArray, or JSFunction. -static const SpeculatedType SpecObject = 0x0000ffff; // Bitmask used for testing for any kind of object prediction. -static const SpeculatedType SpecString = 0x00010000; // It's definitely a JSString. -static const SpeculatedType SpecCellOther = 0x00020000; // It's definitely a JSCell but not a subclass of JSObject and definitely not a JSString. -static const SpeculatedType SpecCell = 0x0003ffff; // It's definitely a JSCell. -static const SpeculatedType SpecInt32 = 0x00800000; // It's definitely an Int32. -static const SpeculatedType SpecDoubleReal = 0x01000000; // It's definitely a non-NaN double. -static const SpeculatedType SpecDoubleNaN = 0x02000000; // It's definitely a NaN. -static const SpeculatedType SpecDouble = 0x03000000; // It's either a non-NaN or a NaN double. -static const SpeculatedType SpecRealNumber = 0x01800000; // It's either an Int32 or a DoubleReal. -static const SpeculatedType SpecNumber = 0x03800000; // It's either an Int32 or a Double. -static const SpeculatedType SpecBoolean = 0x04000000; // It's definitely a Boolean. -static const SpeculatedType SpecOther = 0x08000000; // It's definitely none of the above. -static const SpeculatedType SpecTop = 0x0fffffff; // It can be any of the above. -static const SpeculatedType SpecEmpty = 0x10000000; // It's definitely an empty value marker. -static const SpeculatedType SpecEmptyOrTop = 0x1fffffff; // It can be any of the above. -static const SpeculatedType FixedIndexedStorageMask = SpecInt8Array | SpecInt16Array | SpecInt32Array | SpecUint8Array | SpecUint8ClampedArray | SpecUint16Array | SpecUint32Array | SpecFloat32Array | SpecFloat64Array; +static const SpeculatedType SpecNone = 0; // We don't know anything yet. +static const SpeculatedType SpecFinalObject = 1u << 0; // It's definitely a JSFinalObject. +static const SpeculatedType SpecArray = 1u << 1; // It's definitely a JSArray. +static const SpeculatedType SpecFunction = 1u << 2; // It's definitely a JSFunction. +static const SpeculatedType SpecInt8Array = 1u << 3; // It's definitely an Int8Array or one of its subclasses. +static const SpeculatedType SpecInt16Array = 1u << 4; // It's definitely an Int16Array or one of its subclasses. +static const SpeculatedType SpecInt32Array = 1u << 5; // It's definitely an Int32Array or one of its subclasses. +static const SpeculatedType SpecUint8Array = 1u << 6; // It's definitely an Uint8Array or one of its subclasses. +static const SpeculatedType SpecUint8ClampedArray = 1u << 7; // It's definitely an Uint8ClampedArray or one of its subclasses. +static const SpeculatedType SpecUint16Array = 1u << 8; // It's definitely an Uint16Array or one of its subclasses. +static const SpeculatedType SpecUint32Array = 1u << 9; // It's definitely an Uint32Array or one of its subclasses. +static const SpeculatedType SpecFloat32Array = 1u << 10; // It's definitely a Float32Array or one of its subclasses. +static const SpeculatedType SpecFloat64Array = 1u << 11; // It's definitely a Float64Array or one of its subclasses.
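+// Each leaf speculation above owns a single bit, so the compound speculations that follow are
+// plain bitwise ORs of leaves, and "could this be X" tests reduce to masking. A minimal sketch
+// of the idiom, using only names defined in this header:
+//
+//     SpeculatedType t = SpecInt8Array | SpecFloat64Array;
+//     bool couldBeTypedView = t && !(t & ~SpecTypedArrayView); // true: both bits lie inside the mask
+//     bool onlyFloat64 = (t == SpecFloat64Array);               // false: the Int8Array bit is set too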
+static const SpeculatedType SpecTypedArrayView = SpecInt8Array | SpecInt16Array | SpecInt32Array | SpecUint8Array | SpecUint8ClampedArray | SpecUint16Array | SpecUint32Array | SpecFloat32Array | SpecFloat64Array; +static const SpeculatedType SpecDirectArguments = 1u << 12; // It's definitely a DirectArguments object. +static const SpeculatedType SpecScopedArguments = 1u << 13; // It's definitely a ScopedArguments object. +static const SpeculatedType SpecStringObject = 1u << 14; // It's definitely a StringObject. +static const SpeculatedType SpecRegExpObject = 1u << 15; // It's definitely a RegExpObject (and not any subclass of RegExpObject). +static const SpeculatedType SpecObjectOther = 1u << 16; // It's definitely an object but not JSFinalObject, JSArray, or JSFunction. +static const SpeculatedType SpecObject = SpecFinalObject | SpecArray | SpecFunction | SpecTypedArrayView | SpecDirectArguments | SpecScopedArguments | SpecStringObject | SpecRegExpObject | SpecObjectOther; // Bitmask used for testing for any kind of object prediction. +static const SpeculatedType SpecStringIdent = 1u << 17; // It's definitely a JSString, and it's an identifier. +static const SpeculatedType SpecStringVar = 1u << 18; // It's definitely a JSString, and it's not an identifier. +static const SpeculatedType SpecString = SpecStringIdent | SpecStringVar; // It's definitely a JSString. +static const SpeculatedType SpecSymbol = 1u << 19; // It's definitely a Symbol. +static const SpeculatedType SpecCellOther = 1u << 20; // It's definitely a JSCell but not a subclass of JSObject and definitely not a JSString or a Symbol. FIXME: This shouldn't be part of heap-top or bytecode-top. https://bugs.webkit.org/show_bug.cgi?id=133078 +static const SpeculatedType SpecCell = SpecObject | SpecString | SpecSymbol | SpecCellOther; // It's definitely a JSCell. +static const SpeculatedType SpecBoolInt32 = 1u << 21; // It's definitely an Int32 with value 0 or 1. +static const SpeculatedType SpecNonBoolInt32 = 1u << 22; // It's definitely an Int32 with value other than 0 or 1. +static const SpeculatedType SpecInt32 = SpecBoolInt32 | SpecNonBoolInt32; // It's definitely an Int32. +static const SpeculatedType SpecInt52 = 1u << 23; // It's definitely an Int52 and we intend to unbox it. +static const SpeculatedType SpecMachineInt = SpecInt32 | SpecInt52; // It's something that we can do machine int arithmetic on. +static const SpeculatedType SpecInt52AsDouble = 1u << 24; // It's definitely an Int52 and it's inside a double. +static const SpeculatedType SpecInteger = SpecMachineInt | SpecInt52AsDouble; // It's definitely some kind of integer. +static const SpeculatedType SpecNonIntAsDouble = 1u << 25; // It's definitely not an Int52 but it's a real number and it's a double. +static const SpeculatedType SpecDoubleReal = SpecNonIntAsDouble | SpecInt52AsDouble; // It's definitely a non-NaN double. +static const SpeculatedType SpecDoublePureNaN = 1u << 26; // It's definitely a NaN that is safe to tag (i.e. pure). +static const SpeculatedType SpecDoubleImpureNaN = 1u << 27; // It's definitely a NaN that is unsafe to tag (i.e. impure). +static const SpeculatedType SpecDoubleNaN = SpecDoublePureNaN | SpecDoubleImpureNaN; // It's definitely some kind of NaN. +static const SpeculatedType SpecBytecodeDouble = SpecDoubleReal | SpecDoublePureNaN; // It's either a non-NaN or a NaN double, but it's definitely not impure NaN. +static const SpeculatedType SpecFullDouble = SpecDoubleReal | SpecDoubleNaN; // It's either a non-NaN or a NaN double.
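+// The numeric bits compose the same way. For example, a value profile that has seen both a
+// 0/1 integer and a pure-NaN double would carry (SpecBytecodeNumber is defined just below):
+//
+//     SpeculatedType t = SpecBoolInt32 | SpecDoublePureNaN;
+//     bool bytecodeNumber = t && !(t & ~SpecBytecodeNumber); // true: both bits are bytecode-visible numbers
+//     bool sawImpureNaN = !!(t & SpecDoubleImpureNaN);       // false: no untaggable NaN was seen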
+static const SpeculatedType SpecBytecodeRealNumber = SpecInt32 | SpecDoubleReal; // It's either an Int32 or a DoubleReal. +static const SpeculatedType SpecFullRealNumber = SpecMachineInt | SpecDoubleReal; // It's either an Int32, a DoubleReal, or an Int52. +static const SpeculatedType SpecBytecodeNumber = SpecInt32 | SpecBytecodeDouble; // It's either an Int32 or a Double, and the Double cannot be an impure NaN. +static const SpeculatedType SpecFullNumber = SpecMachineInt | SpecFullDouble; // It's either an Int32, Int52, or a Double, and the Double can be impure NaN. +static const SpeculatedType SpecBoolean = 1u << 28; // It's definitely a Boolean. +static const SpeculatedType SpecOther = 1u << 29; // It's definitely either Null or Undefined. +static const SpeculatedType SpecMisc = SpecBoolean | SpecOther; // It's definitely either a boolean, Null, or Undefined. +static const SpeculatedType SpecHeapTop = SpecCell | SpecBytecodeNumber | SpecMisc; // It can be any of the above, except for SpecInt52 and SpecDoubleImpureNaN. +static const SpeculatedType SpecPrimitive = SpecString | SpecSymbol | SpecBytecodeNumber | SpecMisc; // It's any non-Object JSValue. +static const SpeculatedType SpecEmpty = 1u << 30; // It's definitely an empty value marker. +static const SpeculatedType SpecBytecodeTop = SpecHeapTop | SpecEmpty; // It can be any of the above, except for SpecInt52 and SpecDoubleImpureNaN. Corresponds to what could be found in a bytecode local. +static const SpeculatedType SpecFullTop = SpecBytecodeTop | SpecFullNumber; // It can be anything that bytecode could see plus exotic encodings of numbers. typedef bool (*SpeculatedTypeChecker)(SpeculatedType); @@ -83,6 +104,16 @@ inline bool isCellSpeculation(SpeculatedType value) return !!(value & SpecCell) && !(value & ~SpecCell); } +inline bool isCellOrOtherSpeculation(SpeculatedType value) +{ + return !!value && !(value & ~(SpecCell | SpecOther)); +} + +inline bool isNotCellSpeculation(SpeculatedType value) +{ + return !(value & SpecCell) && value; +} + inline bool isObjectSpeculation(SpeculatedType value) { return !!(value & SpecObject) && !(value & ~SpecObject); @@ -103,14 +134,29 @@ inline bool isFinalObjectOrOtherSpeculation(SpeculatedType value) return !!(value & (SpecFinalObject | SpecOther)) && !(value & ~(SpecFinalObject | SpecOther)); } -inline bool isFixedIndexedStorageObjectSpeculation(SpeculatedType value) +inline bool isStringIdentSpeculation(SpeculatedType value) +{ + return value == SpecStringIdent; +} + +inline bool isNotStringVarSpeculation(SpeculatedType value) { - return !!value && (value & FixedIndexedStorageMask) == value; + return !(value & SpecStringVar); } inline bool isStringSpeculation(SpeculatedType value) { - return value == SpecString; + return !!value && (value & SpecString) == value; +} + +inline bool isStringOrOtherSpeculation(SpeculatedType value) +{ + return !!value && (value & (SpecString | SpecOther)) == value; +} + +inline bool isSymbolSpeculation(SpeculatedType value) +{ + return value == SpecSymbol; } inline bool isArraySpeculation(SpeculatedType value) @@ -168,9 +214,14 @@ inline bool isFloat64ArraySpeculation(SpeculatedType value) return value == SpecFloat64Array; } -inline bool isArgumentsSpeculation(SpeculatedType value) +inline bool isDirectArgumentsSpeculation(SpeculatedType value) { - return !!value && (value & SpecArguments) == value; + return value == SpecDirectArguments; +} + +inline bool isScopedArgumentsSpeculation(SpeculatedType value) +{ + return value == SpecScopedArguments; } inline bool
isActionableIntMutableArraySpeculation(SpeculatedType value) @@ -199,13 +250,14 @@ inline bool isActionableTypedMutableArraySpeculation(SpeculatedType value) inline bool isActionableMutableArraySpeculation(SpeculatedType value) { return isArraySpeculation(value) - || isArgumentsSpeculation(value) || isActionableTypedMutableArraySpeculation(value); } inline bool isActionableArraySpeculation(SpeculatedType value) { return isStringSpeculation(value) + || isDirectArgumentsSpeculation(value) + || isScopedArgumentsSpeculation(value) || isActionableMutableArraySpeculation(value); } @@ -224,49 +276,104 @@ inline bool isStringOrStringObjectSpeculation(SpeculatedType value) return !!value && !(value & ~(SpecString | SpecStringObject)); } +inline bool isRegExpObjectSpeculation(SpeculatedType value) +{ + return value == SpecRegExpObject; +} + +inline bool isBoolInt32Speculation(SpeculatedType value) +{ + return value == SpecBoolInt32; +} + inline bool isInt32Speculation(SpeculatedType value) { - return value == SpecInt32; + return value && !(value & ~SpecInt32); +} + +inline bool isInt32OrBooleanSpeculation(SpeculatedType value) +{ + return value && !(value & ~(SpecBoolean | SpecInt32)); } inline bool isInt32SpeculationForArithmetic(SpeculatedType value) { - return !(value & SpecDouble); + return !(value & (SpecFullDouble | SpecInt52)); } -inline bool isInt32SpeculationExpectingDefined(SpeculatedType value) +inline bool isInt32OrBooleanSpeculationForArithmetic(SpeculatedType value) { - return isInt32Speculation(value & ~SpecOther); + return !(value & (SpecFullDouble | SpecInt52)); +} + +inline bool isInt32OrBooleanSpeculationExpectingDefined(SpeculatedType value) +{ + return isInt32OrBooleanSpeculation(value & ~SpecOther); +} + +inline bool isInt52Speculation(SpeculatedType value) +{ + return value == SpecInt52; +} + +inline bool isMachineIntSpeculation(SpeculatedType value) +{ + return !!value && (value & SpecMachineInt) == value; +} + +inline bool isInt52AsDoubleSpeculation(SpeculatedType value) +{ + return value == SpecInt52AsDouble; +} + +inline bool isIntegerSpeculation(SpeculatedType value) +{ + return !!value && (value & SpecInteger) == value; } inline bool isDoubleRealSpeculation(SpeculatedType value) { - return value == SpecDoubleReal; + return !!value && (value & SpecDoubleReal) == value; } inline bool isDoubleSpeculation(SpeculatedType value) { - return !!value && (value & SpecDouble) == value; + return !!value && (value & SpecFullDouble) == value; } inline bool isDoubleSpeculationForArithmetic(SpeculatedType value) { - return !!(value & SpecDouble); + return !!(value & SpecFullDouble); +} + +inline bool isBytecodeRealNumberSpeculation(SpeculatedType value) +{ + return !!(value & SpecBytecodeRealNumber) && !(value & ~SpecBytecodeRealNumber); +} + +inline bool isFullRealNumberSpeculation(SpeculatedType value) +{ + return !!(value & SpecFullRealNumber) && !(value & ~SpecFullRealNumber); +} + +inline bool isBytecodeNumberSpeculation(SpeculatedType value) +{ + return !!(value & SpecBytecodeNumber) && !(value & ~SpecBytecodeNumber); } -inline bool isRealNumberSpeculation(SpeculatedType value) +inline bool isFullNumberSpeculation(SpeculatedType value) { - return !!(value & SpecRealNumber) && !(value & ~SpecRealNumber); + return !!(value & SpecFullNumber) && !(value & ~SpecFullNumber); } -inline bool isNumberSpeculation(SpeculatedType value) +inline bool isFullNumberOrBooleanSpeculation(SpeculatedType value) { - return !!(value & SpecNumber) && !(value & ~SpecNumber); + return value && !(value & 
~(SpecFullNumber | SpecBoolean)); } -inline bool isNumberSpeculationExpectingDefined(SpeculatedType value) +inline bool isFullNumberOrBooleanSpeculationExpectingDefined(SpeculatedType value) { - return isNumberSpeculation(value & ~SpecOther); + return isFullNumberOrBooleanSpeculation(value & ~SpecOther); } inline bool isBooleanSpeculation(SpeculatedType value) @@ -279,6 +386,11 @@ inline bool isOtherSpeculation(SpeculatedType value) return value == SpecOther; } +inline bool isMiscSpeculation(SpeculatedType value) +{ + return !!value && !(value & ~SpecMisc); +} + inline bool isOtherOrEmptySpeculation(SpeculatedType value) { return !value || value == SpecOther; @@ -289,6 +401,16 @@ inline bool isEmptySpeculation(SpeculatedType value) return value == SpecEmpty; } +inline bool isUntypedSpeculationForArithmetic(SpeculatedType value) +{ + return !!(value & ~(SpecFullNumber | SpecBoolean)); +} + +inline bool isUntypedSpeculationForBitOps(SpeculatedType value) +{ + return !!(value & ~(SpecFullNumber | SpecBoolean | SpecOther)); +} + void dumpSpeculation(PrintStream&, SpeculatedType); void dumpSpeculationAbbreviated(PrintStream&, SpeculatedType); @@ -323,6 +445,30 @@ SpeculatedType speculationFromStructure(Structure*); SpeculatedType speculationFromCell(JSCell*); SpeculatedType speculationFromValue(JSValue); +SpeculatedType speculationFromTypedArrayType(TypedArrayType); // only valid for typed views. +TypedArrayType typedArrayTypeFromSpeculation(SpeculatedType); + +SpeculatedType leastUpperBoundOfStrictlyEquivalentSpeculations(SpeculatedType); + +bool valuesCouldBeEqual(SpeculatedType, SpeculatedType); + +// Precise computation of the type of the result of a double computation after we +// already know that the inputs are doubles and that the result must be a double. Use +// the closest one of these that applies. +SpeculatedType typeOfDoubleSum(SpeculatedType, SpeculatedType); +SpeculatedType typeOfDoubleDifference(SpeculatedType, SpeculatedType); +SpeculatedType typeOfDoubleProduct(SpeculatedType, SpeculatedType); +SpeculatedType typeOfDoubleQuotient(SpeculatedType, SpeculatedType); +SpeculatedType typeOfDoubleMinMax(SpeculatedType, SpeculatedType); +SpeculatedType typeOfDoubleNegation(SpeculatedType); +SpeculatedType typeOfDoubleAbs(SpeculatedType); +SpeculatedType typeOfDoubleRounding(SpeculatedType); +SpeculatedType typeOfDoublePow(SpeculatedType, SpeculatedType); + +// This conservatively models the behavior of arbitrary double operations. +SpeculatedType typeOfDoubleBinaryOp(SpeculatedType, SpeculatedType); +SpeculatedType typeOfDoubleUnaryOp(SpeculatedType); + } // namespace JSC #endif // SpeculatedType_h diff --git a/Source/JavaScriptCore/bytecode/StructureSet.cpp b/Source/JavaScriptCore/bytecode/StructureSet.cpp new file mode 100644 index 000000000..40fea8da3 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/StructureSet.cpp @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "StructureSet.h" + +#include "DFGAbstractValue.h" +#include "TrackedReferences.h" +#include <wtf/CommaPrinter.h> + +namespace JSC { + +#if ENABLE(DFG_JIT) + +void StructureSet::filter(const DFG::StructureAbstractValue& other) +{ + genericFilter([&] (Structure* structure) -> bool { return other.contains(structure); }); +} + +void StructureSet::filter(SpeculatedType type) +{ + genericFilter( + [&] (Structure* structure) -> bool { + return type & speculationFromStructure(structure); + }); +} + +void StructureSet::filterArrayModes(ArrayModes arrayModes) +{ + genericFilter( + [&] (Structure* structure) -> bool { + return arrayModes & arrayModeFromStructure(structure); + }); +} + +void StructureSet::filter(const DFG::AbstractValue& other) +{ + filter(other.m_structure); + filter(other.m_type); + filterArrayModes(other.m_arrayModes); +} + +#endif // ENABLE(DFG_JIT) + +SpeculatedType StructureSet::speculationFromStructures() const +{ + SpeculatedType result = SpecNone; + forEach( + [&] (Structure* structure) { + mergeSpeculation(result, speculationFromStructure(structure)); + }); + return result; +} + +ArrayModes StructureSet::arrayModesFromStructures() const +{ + ArrayModes result = 0; + forEach( + [&] (Structure* structure) { + mergeArrayModes(result, asArrayModes(structure->indexingType())); + }); + return result; +} + +void StructureSet::dumpInContext(PrintStream& out, DumpContext* context) const +{ + CommaPrinter comma; + out.print("["); + forEach([&] (Structure* structure) { out.print(comma, inContext(*structure, context)); }); + out.print("]"); +} + +void StructureSet::dump(PrintStream& out) const +{ + dumpInContext(out, nullptr); +} + +void StructureSet::validateReferences(const TrackedReferences& trackedReferences) const +{ + forEach( + [&] (Structure* structure) { + trackedReferences.check(structure); + }); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/StructureSet.h b/Source/JavaScriptCore/bytecode/StructureSet.h index c95d3047b..df19ec538 100644 --- a/Source/JavaScriptCore/bytecode/StructureSet.h +++ b/Source/JavaScriptCore/bytecode/StructureSet.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,155 +27,60 @@ #define StructureSet_h #include "ArrayProfile.h" +#include "DumpContext.h" #include "SpeculatedType.h" #include "Structure.h" -#include <stdio.h> -#include <wtf/Vector.h> +#include <wtf/TinyPtrSet.h> namespace JSC { +class TrackedReferences; + namespace DFG { class StructureAbstractValue; +struct AbstractValue; } -class StructureSet { +class StructureSet : public TinyPtrSet<Structure*> { public: - StructureSet() { } - - StructureSet(Structure* structure) - { - m_structures.append(structure); - } - - void clear() - { - m_structures.clear(); - } - - void add(Structure* structure) - { - ASSERT(!contains(structure)); - m_structures.append(structure); - } - - bool addAll(const StructureSet& other) - { - bool changed = false; - for (size_t i = 0; i < other.size(); ++i) { - if (contains(other[i])) - continue; - add(other[i]); - changed = true; - } - return changed; - } + // I really want to do this: + // using TinyPtrSet::TinyPtrSet; + // + // But I can't because Windows. - void remove(Structure* structure) + StructureSet() { - for (size_t i = 0; i < m_structures.size(); ++i) { - if (m_structures[i] != structure) - continue; - - m_structures[i] = m_structures.last(); - m_structures.removeLast(); - return; - } } - bool contains(Structure* structure) const - { - for (size_t i = 0; i < m_structures.size(); ++i) { - if (m_structures[i] == structure) - return true; - } - return false; - } - - bool containsOnly(Structure* structure) const - { - if (size() != 1) - return false; - return singletonStructure() == structure; - } - - bool isSubsetOf(const StructureSet& other) const - { - for (size_t i = 0; i < m_structures.size(); ++i) { - if (!other.contains(m_structures[i])) - return false; - } - return true; - } - - bool isSupersetOf(const StructureSet& other) const - { - return other.isSubsetOf(*this); - } - - size_t size() const { return m_structures.size(); } - - // Call this if you know that the structure set must consist of exactly - // one structure. 
- Structure* singletonStructure() const + StructureSet(Structure* structure) + : TinyPtrSet(structure) { - ASSERT(m_structures.size() == 1); - return m_structures[0]; } - Structure* at(size_t i) const { return m_structures.at(i); } - - Structure* operator[](size_t i) const { return at(i); } - - Structure* last() const { return m_structures.last(); } - - SpeculatedType speculationFromStructures() const + ALWAYS_INLINE StructureSet(const StructureSet& other) + : TinyPtrSet(other) { - SpeculatedType result = SpecNone; - - for (size_t i = 0; i < m_structures.size(); ++i) - mergeSpeculation(result, speculationFromStructure(m_structures[i])); - - return result; } - ArrayModes arrayModesFromStructures() const + Structure* onlyStructure() const { - ArrayModes result = 0; - - for (size_t i = 0; i < m_structures.size(); ++i) - mergeArrayModes(result, asArrayModes(m_structures[i]->indexingType())); - - return result; + return onlyEntry(); } - bool operator==(const StructureSet& other) const - { - if (m_structures.size() != other.m_structures.size()) - return false; - - for (size_t i = 0; i < m_structures.size(); ++i) { - if (!other.contains(m_structures[i])) - return false; - } - - return true; - } +#if ENABLE(DFG_JIT) + void filter(const DFG::StructureAbstractValue&); + void filter(SpeculatedType); + void filterArrayModes(ArrayModes); + void filter(const DFG::AbstractValue&); +#endif // ENABLE(DFG_JIT) - void dump(FILE* out) - { - fprintf(out, "["); - for (size_t i = 0; i < m_structures.size(); ++i) { - if (i) - fprintf(out, ", "); - fprintf(out, "%p", m_structures[i]); - } - fprintf(out, "]"); - } + SpeculatedType speculationFromStructures() const; + ArrayModes arrayModesFromStructures() const; -private: - friend class DFG::StructureAbstractValue; + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; - Vector<Structure*, 2> m_structures; + void validateReferences(const TrackedReferences&) const; }; } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp index 5cfb3d1e8..d2bdd6a5a 100644 --- a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp +++ b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,6 +29,7 @@ #if ENABLE(JIT) #include "CodeBlock.h" +#include "JSCInlines.h" #include "StructureStubInfo.h" namespace JSC { @@ -36,42 +37,56 @@ namespace JSC { StructureStubClearingWatchpoint::~StructureStubClearingWatchpoint() { } StructureStubClearingWatchpoint* StructureStubClearingWatchpoint::push( + const ObjectPropertyCondition& key, WatchpointsOnStructureStubInfo& holder, - OwnPtr<StructureStubClearingWatchpoint>& head) + std::unique_ptr<StructureStubClearingWatchpoint>& head) { - head = adoptPtr(new StructureStubClearingWatchpoint(holder, head.release())); + head = std::make_unique<StructureStubClearingWatchpoint>(key, holder, WTFMove(head)); return head.get(); } -void StructureStubClearingWatchpoint::fireInternal() +void StructureStubClearingWatchpoint::fireInternal(const FireDetail&) { - // This will implicitly cause my own demise: stub reset removes all watchpoints. 
- // That works, because deleting a watchpoint removes it from the set's list, and - // the set's list traversal for firing is robust against the set changing. - m_holder.codeBlock()->resetStub(*m_holder.stubInfo()); + if (!m_key || !m_key.isWatchable(PropertyCondition::EnsureWatchability)) { + // This will implicitly cause my own demise: stub reset removes all watchpoints. + // That works, because deleting a watchpoint removes it from the set's list, and + // the set's list traversal for firing is robust against the set changing. + ConcurrentJITLocker locker(m_holder.codeBlock()->m_lock); + m_holder.stubInfo()->reset(m_holder.codeBlock()); + return; + } + + if (m_key.kind() == PropertyCondition::Presence) { + // If this was a presence condition, let's watch the property for replacements. This is profitable + // for the DFG, which will want the replacement set to be valid in order to do constant folding. + VM& vm = *Heap::heap(m_key.object())->vm(); + m_key.object()->structure()->startWatchingPropertyForReplacements(vm, m_key.offset()); + } + + m_key.object()->structure()->addTransitionWatchpoint(this); } WatchpointsOnStructureStubInfo::~WatchpointsOnStructureStubInfo() { } -StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::addWatchpoint() +StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::addWatchpoint(const ObjectPropertyCondition& key) { - return StructureStubClearingWatchpoint::push(*this, m_head); + return StructureStubClearingWatchpoint::push(key, *this, m_head); } StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint( - RefPtr<WatchpointsOnStructureStubInfo>& holderRef, CodeBlock* codeBlock, - StructureStubInfo* stubInfo) + std::unique_ptr<WatchpointsOnStructureStubInfo>& holderRef, CodeBlock* codeBlock, + StructureStubInfo* stubInfo, const ObjectPropertyCondition& key) { if (!holderRef) - holderRef = adoptRef(new WatchpointsOnStructureStubInfo(codeBlock, stubInfo)); + holderRef = std::make_unique<WatchpointsOnStructureStubInfo>(codeBlock, stubInfo); else { ASSERT(holderRef->m_codeBlock == codeBlock); ASSERT(holderRef->m_stubInfo == stubInfo); } - return holderRef->addWatchpoint(); + return holderRef->addWatchpoint(key); } } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h index 827e816ee..37668c3b9 100644 --- a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h +++ b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,57 +26,53 @@ #ifndef StructureStubClearingWatchpoint_h #define StructureStubClearingWatchpoint_h +#include "ObjectPropertyCondition.h" #include "Watchpoint.h" -#include <wtf/Platform.h> #if ENABLE(JIT) -#include <wtf/FastAllocBase.h> +#include <wtf/FastMalloc.h> #include <wtf/Noncopyable.h> -#include <wtf/OwnPtr.h> -#include <wtf/PassOwnPtr.h> -#include <wtf/RefCounted.h> -#include <wtf/RefPtr.h> namespace JSC { class CodeBlock; +class StructureStubInfo; class WatchpointsOnStructureStubInfo; -struct StructureStubInfo; class StructureStubClearingWatchpoint : public Watchpoint { WTF_MAKE_NONCOPYABLE(StructureStubClearingWatchpoint); WTF_MAKE_FAST_ALLOCATED; public: StructureStubClearingWatchpoint( - WatchpointsOnStructureStubInfo& holder) - : m_holder(holder) - { - } - - StructureStubClearingWatchpoint( + const ObjectPropertyCondition& key, WatchpointsOnStructureStubInfo& holder, - PassOwnPtr<StructureStubClearingWatchpoint> next) - : m_holder(holder) - , m_next(next) + std::unique_ptr<StructureStubClearingWatchpoint> next) + : m_key(key) + , m_holder(holder) + , m_next(WTFMove(next)) { } virtual ~StructureStubClearingWatchpoint(); static StructureStubClearingWatchpoint* push( + const ObjectPropertyCondition& key, WatchpointsOnStructureStubInfo& holder, - OwnPtr<StructureStubClearingWatchpoint>& head); + std::unique_ptr<StructureStubClearingWatchpoint>& head); protected: - void fireInternal(); + virtual void fireInternal(const FireDetail&) override; private: + ObjectPropertyCondition m_key; WatchpointsOnStructureStubInfo& m_holder; - OwnPtr<StructureStubClearingWatchpoint> m_next; + std::unique_ptr<StructureStubClearingWatchpoint> m_next; }; -class WatchpointsOnStructureStubInfo : public RefCounted<WatchpointsOnStructureStubInfo> { +class WatchpointsOnStructureStubInfo { + WTF_MAKE_NONCOPYABLE(WatchpointsOnStructureStubInfo); + WTF_MAKE_FAST_ALLOCATED; public: WatchpointsOnStructureStubInfo(CodeBlock* codeBlock, StructureStubInfo* stubInfo) : m_codeBlock(codeBlock) @@ -86,11 +82,11 @@ public: ~WatchpointsOnStructureStubInfo(); - StructureStubClearingWatchpoint* addWatchpoint(); + StructureStubClearingWatchpoint* addWatchpoint(const ObjectPropertyCondition& key); static StructureStubClearingWatchpoint* ensureReferenceAndAddWatchpoint( - RefPtr<WatchpointsOnStructureStubInfo>& holderRef, - CodeBlock*, StructureStubInfo*); + std::unique_ptr<WatchpointsOnStructureStubInfo>& holderRef, + CodeBlock*, StructureStubInfo*, const ObjectPropertyCondition& key); CodeBlock* codeBlock() const { return m_codeBlock; } StructureStubInfo* stubInfo() const { return m_stubInfo; } @@ -98,7 +94,7 @@ public: private: CodeBlock* m_codeBlock; StructureStubInfo* m_stubInfo; - OwnPtr<StructureStubClearingWatchpoint> m_head; + std::unique_ptr<StructureStubClearingWatchpoint> m_head; }; } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp index 70cf2fccb..5ffd743a6 100644 --- a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp +++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2014, 2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,97 +26,171 @@ #include "config.h" #include "StructureStubInfo.h" +#include "JSCellInlines.h" #include "JSObject.h" -#include "PolymorphicPutByIdList.h" - +#include "PolymorphicAccess.h" +#include "Repatch.h" namespace JSC { #if ENABLE(JIT) +StructureStubInfo::StructureStubInfo(AccessType accessType) + : callSiteIndex(UINT_MAX) + , accessType(accessType) + , cacheType(CacheType::Unset) + , countdown(1) // For a totally clear stub, we'll patch it after the first execution. + , repatchCount(0) + , numberOfCoolDowns(0) + , resetByGC(false) + , tookSlowPath(false) + , everConsidered(false) +{ +} + +StructureStubInfo::~StructureStubInfo() +{ +} + +void StructureStubInfo::initGetByIdSelf(CodeBlock* codeBlock, Structure* baseObjectStructure, PropertyOffset offset) +{ + cacheType = CacheType::GetByIdSelf; + + u.byIdSelf.baseObjectStructure.set( + *codeBlock->vm(), codeBlock, baseObjectStructure); + u.byIdSelf.offset = offset; +} + +void StructureStubInfo::initPutByIdReplace(CodeBlock* codeBlock, Structure* baseObjectStructure, PropertyOffset offset) +{ + cacheType = CacheType::PutByIdReplace; + + u.byIdSelf.baseObjectStructure.set( + *codeBlock->vm(), codeBlock, baseObjectStructure); + u.byIdSelf.offset = offset; +} + +void StructureStubInfo::initStub(CodeBlock*, std::unique_ptr<PolymorphicAccess> stub) +{ + cacheType = CacheType::Stub; + u.stub = stub.release(); +} + void StructureStubInfo::deref() { - switch (accessType) { - case access_get_by_id_self_list: { - PolymorphicAccessStructureList* polymorphicStructures = u.getByIdSelfList.structureList; - delete polymorphicStructures; + switch (cacheType) { + case CacheType::Stub: + delete u.stub; return; - } - case access_get_by_id_proto_list: { - PolymorphicAccessStructureList* polymorphicStructures = u.getByIdProtoList.structureList; - delete polymorphicStructures; + case CacheType::Unset: + case CacheType::GetByIdSelf: + case CacheType::PutByIdReplace: return; } - case access_put_by_id_list: - delete u.putByIdList.list; + + RELEASE_ASSERT_NOT_REACHED(); +} + +void StructureStubInfo::aboutToDie() +{ + switch (cacheType) { + case CacheType::Stub: + u.stub->aboutToDie(); return; - case access_get_by_id_self: - case access_get_by_id_proto: - case access_get_by_id_chain: - case access_put_by_id_transition_normal: - case access_put_by_id_transition_direct: - case access_put_by_id_replace: - case access_unset: - case access_get_by_id_generic: - case access_put_by_id_generic: - case access_get_array_length: - case access_get_string_length: - // These instructions don't have to release any allocated memory + case CacheType::Unset: + case CacheType::GetByIdSelf: + case CacheType::PutByIdReplace: return; - default: - RELEASE_ASSERT_NOT_REACHED(); } + + RELEASE_ASSERT_NOT_REACHED(); } -bool StructureStubInfo::visitWeakReferences() +MacroAssemblerCodePtr StructureStubInfo::addAccessCase( + CodeBlock* codeBlock, const Identifier& ident, std::unique_ptr<AccessCase> accessCase) { + VM& vm = *codeBlock->vm(); + + if (!accessCase) + return MacroAssemblerCodePtr(); + + if (cacheType == CacheType::Stub) + return u.stub->regenerateWithCase(vm, codeBlock, *this, ident, WTFMove(accessCase)); + + std::unique_ptr<PolymorphicAccess> access = std::make_unique<PolymorphicAccess>(); + + Vector<std::unique_ptr<AccessCase>> accessCases; + + std::unique_ptr<AccessCase> previousCase = + AccessCase::fromStructureStubInfo(vm, codeBlock, 
*this); + if (previousCase) + accessCases.append(WTFMove(previousCase)); + + accessCases.append(WTFMove(accessCase)); + + MacroAssemblerCodePtr result = + access->regenerateWithCases(vm, codeBlock, *this, ident, WTFMove(accessCases)); + + if (!result) + return MacroAssemblerCodePtr(); + + initStub(codeBlock, WTFMove(access)); + return result; +} + +void StructureStubInfo::reset(CodeBlock* codeBlock) +{ + if (cacheType == CacheType::Unset) + return; + + if (Options::verboseOSR()) { + // This can be called from GC destructor calls, so we don't try to do a full dump + // of the CodeBlock. + dataLog("Clearing structure cache (kind ", static_cast<int>(accessType), ") in ", RawPointer(codeBlock), ".\n"); + } + switch (accessType) { - case access_get_by_id_self: - if (!Heap::isMarked(u.getByIdSelf.baseObjectStructure.get())) - return false; - break; - case access_get_by_id_proto: - if (!Heap::isMarked(u.getByIdProto.baseObjectStructure.get()) - || !Heap::isMarked(u.getByIdProto.prototypeStructure.get())) - return false; - break; - case access_get_by_id_chain: - if (!Heap::isMarked(u.getByIdChain.baseObjectStructure.get()) - || !Heap::isMarked(u.getByIdChain.chain.get())) - return false; + case AccessType::Get: + resetGetByID(codeBlock, *this); break; - case access_get_by_id_self_list: { - PolymorphicAccessStructureList* polymorphicStructures = u.getByIdSelfList.structureList; - if (!polymorphicStructures->visitWeak(u.getByIdSelfList.listSize)) - return false; + case AccessType::Put: + resetPutByID(codeBlock, *this); break; - } - case access_get_by_id_proto_list: { - PolymorphicAccessStructureList* polymorphicStructures = u.getByIdProtoList.structureList; - if (!polymorphicStructures->visitWeak(u.getByIdProtoList.listSize)) - return false; + case AccessType::In: + resetIn(codeBlock, *this); break; } - case access_put_by_id_transition_normal: - case access_put_by_id_transition_direct: - if (!Heap::isMarked(u.putByIdTransition.previousStructure.get()) - || !Heap::isMarked(u.putByIdTransition.structure.get()) - || !Heap::isMarked(u.putByIdTransition.chain.get())) - return false; - break; - case access_put_by_id_replace: - if (!Heap::isMarked(u.putByIdReplace.baseObjectStructure.get())) - return false; + + deref(); + cacheType = CacheType::Unset; +} + +void StructureStubInfo::visitWeakReferences(CodeBlock* codeBlock) +{ + VM& vm = *codeBlock->vm(); + + switch (cacheType) { + case CacheType::GetByIdSelf: + case CacheType::PutByIdReplace: + if (Heap::isMarked(u.byIdSelf.baseObjectStructure.get())) + return; break; - case access_put_by_id_list: - if (!u.putByIdList.list->visitWeak()) - return false; + case CacheType::Stub: + if (u.stub->visitWeak(vm)) + return; break; default: - // The rest of the instructions don't require references, so there is no need to - // do anything. - break; + return; } - return true; + + reset(codeBlock); + resetByGC = true; +} + +bool StructureStubInfo::containsPC(void* pc) const +{ + if (cacheType != CacheType::Stub) + return false; + return u.stub->containsPC(pc); } #endif diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.h b/Source/JavaScriptCore/bytecode/StructureStubInfo.h index f5c39357c..40d362d44 100644 --- a/Source/JavaScriptCore/bytecode/StructureStubInfo.h +++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,298 +26,155 @@ #ifndef StructureStubInfo_h #define StructureStubInfo_h -#include <wtf/Platform.h> - -#if ENABLE(JIT) - #include "CodeOrigin.h" -#include "DFGRegisterSet.h" #include "Instruction.h" #include "JITStubRoutine.h" #include "MacroAssembler.h" +#include "ObjectPropertyConditionSet.h" #include "Opcode.h" -#include "PolymorphicAccessStructureList.h" +#include "Options.h" +#include "PolymorphicAccess.h" +#include "RegisterSet.h" #include "Structure.h" #include "StructureStubClearingWatchpoint.h" -#include <wtf/OwnPtr.h> namespace JSC { -class PolymorphicPutByIdList; - -enum AccessType { - access_get_by_id_self, - access_get_by_id_proto, - access_get_by_id_chain, - access_get_by_id_self_list, - access_get_by_id_proto_list, - access_put_by_id_transition_normal, - access_put_by_id_transition_direct, - access_put_by_id_replace, - access_put_by_id_list, - access_unset, - access_get_by_id_generic, - access_put_by_id_generic, - access_get_array_length, - access_get_string_length, -}; - -inline bool isGetByIdAccess(AccessType accessType) -{ - switch (accessType) { - case access_get_by_id_self: - case access_get_by_id_proto: - case access_get_by_id_chain: - case access_get_by_id_self_list: - case access_get_by_id_proto_list: - case access_get_by_id_generic: - case access_get_array_length: - case access_get_string_length: - return true; - default: - return false; - } -} - -inline bool isPutByIdAccess(AccessType accessType) -{ - switch (accessType) { - case access_put_by_id_transition_normal: - case access_put_by_id_transition_direct: - case access_put_by_id_replace: - case access_put_by_id_list: - case access_put_by_id_generic: - return true; - default: - return false; - } -} - -struct StructureStubInfo { - StructureStubInfo() - : accessType(access_unset) - , seen(false) - , resetByGC(false) - { - } - - void initGetByIdSelf(VM& vm, JSCell* owner, Structure* baseObjectStructure) - { - accessType = access_get_by_id_self; - - u.getByIdSelf.baseObjectStructure.set(vm, owner, baseObjectStructure); - } - - void initGetByIdProto(VM& vm, JSCell* owner, Structure* baseObjectStructure, Structure* prototypeStructure, bool isDirect) - { - accessType = access_get_by_id_proto; - - u.getByIdProto.baseObjectStructure.set(vm, owner, baseObjectStructure); - u.getByIdProto.prototypeStructure.set(vm, owner, prototypeStructure); - u.getByIdProto.isDirect = isDirect; - } - - void initGetByIdChain(VM& vm, JSCell* owner, Structure* baseObjectStructure, StructureChain* chain, unsigned count, bool isDirect) - { - accessType = access_get_by_id_chain; - - u.getByIdChain.baseObjectStructure.set(vm, owner, baseObjectStructure); - u.getByIdChain.chain.set(vm, owner, chain); - u.getByIdChain.count = count; - u.getByIdChain.isDirect = isDirect; - } +#if ENABLE(JIT) - void initGetByIdSelfList(PolymorphicAccessStructureList* structureList, int listSize) - { - accessType = access_get_by_id_self_list; +class PolymorphicAccess; - u.getByIdSelfList.structureList = structureList; - u.getByIdSelfList.listSize = listSize; - } +enum class AccessType : int8_t { + Get, + Put, + In +}; - void initGetByIdProtoList(PolymorphicAccessStructureList* structureList, int listSize) - { - accessType = access_get_by_id_proto_list; +enum class CacheType : int8_t { + Unset, + GetByIdSelf, + PutByIdReplace, + Stub +}; - u.getByIdProtoList.structureList = structureList; - u.getByIdProtoList.listSize = 
listSize; - } +class StructureStubInfo { + WTF_MAKE_NONCOPYABLE(StructureStubInfo); + WTF_MAKE_FAST_ALLOCATED; +public: + StructureStubInfo(AccessType); + ~StructureStubInfo(); - // PutById* + void initGetByIdSelf(CodeBlock*, Structure* baseObjectStructure, PropertyOffset); + void initPutByIdReplace(CodeBlock*, Structure* baseObjectStructure, PropertyOffset); + void initStub(CodeBlock*, std::unique_ptr<PolymorphicAccess>); - void initPutByIdTransition(VM& vm, JSCell* owner, Structure* previousStructure, Structure* structure, StructureChain* chain, bool isDirect) - { - if (isDirect) - accessType = access_put_by_id_transition_direct; - else - accessType = access_put_by_id_transition_normal; + MacroAssemblerCodePtr addAccessCase( + CodeBlock*, const Identifier&, std::unique_ptr<AccessCase>); - u.putByIdTransition.previousStructure.set(vm, owner, previousStructure); - u.putByIdTransition.structure.set(vm, owner, structure); - u.putByIdTransition.chain.set(vm, owner, chain); - } - - void initPutByIdReplace(VM& vm, JSCell* owner, Structure* baseObjectStructure) - { - accessType = access_put_by_id_replace; - - u.putByIdReplace.baseObjectStructure.set(vm, owner, baseObjectStructure); - } - - void initPutByIdList(PolymorphicPutByIdList* list) - { - accessType = access_put_by_id_list; - u.putByIdList.list = list; - } - - void reset() - { - deref(); - accessType = access_unset; - stubRoutine.clear(); - watchpoints.clear(); - } + void reset(CodeBlock*); void deref(); + void aboutToDie(); - bool visitWeakReferences(); + // Check if the stub has weak references that are dead. If it does, then it resets itself, + // either entirely or just enough to ensure that those dead pointers don't get used anymore. + void visitWeakReferences(CodeBlock*); - bool seenOnce() + ALWAYS_INLINE bool considerCaching() { - return seen; + everConsidered = true; + if (!countdown) { + // Check if we have been doing repatching too frequently. If so, then we should cool off + // for a while. + willRepatch(); + if (repatchCount > Options::repatchCountForCoolDown()) { + // We've been repatching too much, so don't do it now. + repatchCount = 0; + // The amount of time we require for cool-down depends on the number of times we've + // had to cool down in the past. The relationship is exponential. The max value we + // allow here is 2^8 - 2, since the slow paths may increment the count to indicate + // that they'd like to temporarily skip patching just this once.
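+ // For example, if Options::initialCoolDownCount() returned 20 (an illustrative value, not
+ // necessarily the shipped default), successive cool-downs would wait 20, 40, 80, 160, and
+ // then saturate at 254 (uint8_t max minus one) executions before the next repatch attempt.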
+ countdown = WTF::leftShiftWithSaturation( + static_cast<uint8_t>(Options::initialCoolDownCount()), + numberOfCoolDowns, + static_cast<uint8_t>(std::numeric_limits<uint8_t>::max() - 1)); + willCoolDown(); + return false; + } + return true; + } + countdown--; + return false; } - void setSeen() + ALWAYS_INLINE void willRepatch() { - seen = true; + WTF::incrementWithSaturation(repatchCount); } - - StructureStubClearingWatchpoint* addWatchpoint(CodeBlock* codeBlock) + + ALWAYS_INLINE void willCoolDown() { - return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint( - watchpoints, codeBlock, this); + WTF::incrementWithSaturation(numberOfCoolDowns); } - - unsigned bytecodeIndex; - int8_t accessType; - bool seen : 1; - bool resetByGC : 1; + CodeLocationCall callReturnLocation; -#if ENABLE(DFG_JIT) CodeOrigin codeOrigin; -#endif // ENABLE(DFG_JIT) + CallSiteIndex callSiteIndex; + + bool containsPC(void* pc) const; union { struct { - int8_t registersFlushed; - int8_t baseGPR; + WriteBarrierBase<Structure> baseObjectStructure; + PropertyOffset offset; + } byIdSelf; + PolymorphicAccess* stub; + } u; + + struct { + int8_t baseGPR; #if USE(JSVALUE32_64) - int8_t valueTagGPR; -#endif - int8_t valueGPR; - DFG::RegisterSetPOD usedRegisters; - int32_t deltaCallToDone; - int32_t deltaCallToStorageLoad; - int32_t deltaCallToStructCheck; - int32_t deltaCallToSlowCase; - int32_t deltaCheckImmToCall; -#if USE(JSVALUE64) - int32_t deltaCallToLoadOrStore; -#else - int32_t deltaCallToTagLoadOrStore; - int32_t deltaCallToPayloadLoadOrStore; + int8_t valueTagGPR; + int8_t baseTagGPR; #endif - } dfg; - struct { - union { - struct { - int16_t structureToCompare; - int16_t structureCheck; - int16_t propertyStorageLoad; + int8_t valueGPR; + RegisterSet usedRegisters; + int32_t deltaCallToDone; + int32_t deltaCallToJump; + int32_t deltaCallToSlowCase; + int32_t deltaCheckImmToCall; #if USE(JSVALUE64) - int16_t displacementLabel; + int32_t deltaCallToLoadOrStore; #else - int16_t displacementLabel1; - int16_t displacementLabel2; + int32_t deltaCallToTagLoadOrStore; + int32_t deltaCallToPayloadLoadOrStore; #endif - int16_t putResult; - int16_t coldPathBegin; - } get; - struct { - int16_t structureToCompare; - int16_t propertyStorageLoad; -#if USE(JSVALUE64) - int16_t displacementLabel; -#else - int16_t displacementLabel1; - int16_t displacementLabel2; -#endif - } put; - } u; - int16_t methodCheckProtoObj; - int16_t methodCheckProtoStructureToCompare; - int16_t methodCheckPutFunction; - } baseline; } patch; - union { - struct { - // It would be unwise to put anything here, as it will surely be overwritten. 
- } unset; - struct { - WriteBarrierBase<Structure> baseObjectStructure; - } getByIdSelf; - struct { - WriteBarrierBase<Structure> baseObjectStructure; - WriteBarrierBase<Structure> prototypeStructure; - bool isDirect; - } getByIdProto; - struct { - WriteBarrierBase<Structure> baseObjectStructure; - WriteBarrierBase<StructureChain> chain; - unsigned count : 31; - bool isDirect : 1; - } getByIdChain; - struct { - PolymorphicAccessStructureList* structureList; - int listSize; - } getByIdSelfList; - struct { - PolymorphicAccessStructureList* structureList; - int listSize; - } getByIdProtoList; - struct { - WriteBarrierBase<Structure> previousStructure; - WriteBarrierBase<Structure> structure; - WriteBarrierBase<StructureChain> chain; - } putByIdTransition; - struct { - WriteBarrierBase<Structure> baseObjectStructure; - } putByIdReplace; - struct { - PolymorphicPutByIdList* list; - } putByIdList; - } u; - - RefPtr<JITStubRoutine> stubRoutine; - CodeLocationCall callReturnLocation; - CodeLocationLabel hotPathBegin; - RefPtr<WatchpointsOnStructureStubInfo> watchpoints; + AccessType accessType; + CacheType cacheType; + uint8_t countdown; // We repatch only when this is zero. If not zero, we decrement. + uint8_t repatchCount; + uint8_t numberOfCoolDowns; + bool resetByGC : 1; + bool tookSlowPath : 1; + bool everConsidered : 1; }; -inline void* getStructureStubInfoReturnLocation(StructureStubInfo* structureStubInfo) +inline CodeOrigin getStructureStubInfoCodeOrigin(StructureStubInfo& structureStubInfo) { - return structureStubInfo->callReturnLocation.executableAddress(); + return structureStubInfo.codeOrigin; } -inline unsigned getStructureStubInfoBytecodeIndex(StructureStubInfo* structureStubInfo) -{ - return structureStubInfo->bytecodeIndex; -} +typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap; -} // namespace JSC +#else + +typedef HashMap<int, void*> StubInfoMap; #endif // ENABLE(JIT) +} // namespace JSC + #endif // StructureStubInfo_h diff --git a/Source/JavaScriptCore/bytecode/ToThisStatus.cpp b/Source/JavaScriptCore/bytecode/ToThisStatus.cpp new file mode 100644 index 000000000..23d1e0800 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ToThisStatus.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ToThisStatus.h" + +namespace JSC { + +ToThisStatus merge(ToThisStatus a, ToThisStatus b) +{ + switch (a) { + case ToThisOK: + return b; + case ToThisConflicted: + return ToThisConflicted; + case ToThisClearedByGC: + if (b == ToThisConflicted) + return ToThisConflicted; + return ToThisClearedByGC; + } + + RELEASE_ASSERT_NOT_REACHED(); + return ToThisConflicted; +} + +} // namespace JSC + +namespace WTF { + +using namespace JSC; + +void printInternal(PrintStream& out, ToThisStatus status) +{ + switch (status) { + case ToThisOK: + out.print("OK"); + return; + case ToThisConflicted: + out.print("Conflicted"); + return; + case ToThisClearedByGC: + out.print("ClearedByGC"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + diff --git a/Source/JavaScriptCore/bytecode/ToThisStatus.h b/Source/JavaScriptCore/bytecode/ToThisStatus.h new file mode 100644 index 000000000..55d707c0f --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ToThisStatus.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ToThisStatus_h +#define ToThisStatus_h + +#include <wtf/PrintStream.h> + +namespace JSC { + +enum ToThisStatus { + ToThisOK, + ToThisConflicted, + ToThisClearedByGC +}; + +ToThisStatus merge(ToThisStatus, ToThisStatus); + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream&, JSC::ToThisStatus); + +} // namespace WTF + +#endif // ToThisStatus_h + diff --git a/Source/JavaScriptCore/bytecode/TrackedReferences.cpp b/Source/JavaScriptCore/bytecode/TrackedReferences.cpp new file mode 100644 index 000000000..d98fa9759 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/TrackedReferences.cpp @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2015 Apple Inc. 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "TrackedReferences.h" + +#include "JSCInlines.h" +#include <wtf/CommaPrinter.h> + +namespace JSC { + +TrackedReferences::TrackedReferences() +{ +} + +TrackedReferences::~TrackedReferences() +{ +} + +void TrackedReferences::add(JSCell* cell) +{ + if (cell) + m_references.add(cell); +} + +void TrackedReferences::add(JSValue value) +{ + if (value.isCell()) + add(value.asCell()); +} + +void TrackedReferences::check(JSCell* cell) const +{ + if (!cell) + return; + + if (m_references.contains(cell)) + return; + + dataLog("Found untracked reference: ", RawPointer(cell), "\n"); + dataLog("All tracked references: ", *this, "\n"); + RELEASE_ASSERT_NOT_REACHED(); +} + +void TrackedReferences::check(JSValue value) const +{ + if (value.isCell()) + check(value.asCell()); +} + +void TrackedReferences::dump(PrintStream& out) const +{ + CommaPrinter comma; + for (JSCell* cell : m_references) + out.print(comma, RawPointer(cell)); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/TrackedReferences.h b/Source/JavaScriptCore/bytecode/TrackedReferences.h new file mode 100644 index 000000000..cc15e1ee7 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/TrackedReferences.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef TrackedReferences_h +#define TrackedReferences_h + +#include "JSCJSValue.h" +#include "JSCell.h" +#include <wtf/HashSet.h> +#include <wtf/PrintStream.h> + +namespace JSC { + +class TrackedReferences { +public: + TrackedReferences(); + ~TrackedReferences(); + + void add(JSCell*); + void add(JSValue); + + void check(JSCell*) const; + void check(JSValue) const; + + void dump(PrintStream&) const; + +private: + HashSet<JSCell*> m_references; +}; + +} // namespace JSC + +#endif // TrackedReferences_h + diff --git a/Source/JavaScriptCore/bytecode/TypeLocation.h b/Source/JavaScriptCore/bytecode/TypeLocation.h new file mode 100644 index 000000000..ec07656ee --- /dev/null +++ b/Source/JavaScriptCore/bytecode/TypeLocation.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2014 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef TypeLocation_h +#define TypeLocation_h + +#include "TypeSet.h" + +namespace JSC { + +enum TypeProfilerGlobalIDFlags { + TypeProfilerNeedsUniqueIDGeneration = -1, + TypeProfilerNoGlobalIDExists = -2, + TypeProfilerReturnStatement = -3 +}; + +typedef intptr_t GlobalVariableID; + +class TypeLocation { +public: + TypeLocation() + : m_lastSeenType(TypeNothing) + , m_divotForFunctionOffsetIfReturnStatement(UINT_MAX) + , m_instructionTypeSet(TypeSet::create()) + , m_globalTypeSet(nullptr) + { + } + + GlobalVariableID m_globalVariableID; + RuntimeType m_lastSeenType; + intptr_t m_sourceID; + unsigned m_divotStart; + unsigned m_divotEnd; + unsigned m_divotForFunctionOffsetIfReturnStatement; + RefPtr<TypeSet> m_instructionTypeSet; + RefPtr<TypeSet> m_globalTypeSet; +}; + +} //namespace JSC + +#endif //TypeLocation_h diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp index 8aba1ff8b..83d9054a3 100644 --- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp +++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All Rights Reserved. + * Copyright (C) 2012, 2013, 2015 Apple Inc. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,217 +31,92 @@ #include "ClassInfo.h" #include "CodeCache.h" #include "Executable.h" +#include "ExecutableInfo.h" +#include "FunctionOverrides.h" #include "JSString.h" -#include "Operations.h" +#include "JSCInlines.h" #include "Parser.h" #include "SourceProvider.h" #include "Structure.h" #include "SymbolTable.h" +#include "UnlinkedInstructionStream.h" +#include <wtf/DataLog.h> namespace JSC { -const ClassInfo UnlinkedFunctionExecutable::s_info = { "UnlinkedFunctionExecutable", 0, 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionExecutable) }; -const ClassInfo UnlinkedCodeBlock::s_info = { "UnlinkedCodeBlock", 0, 0, 0, CREATE_METHOD_TABLE(UnlinkedCodeBlock) }; -const ClassInfo UnlinkedGlobalCodeBlock::s_info = { "UnlinkedGlobalCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedGlobalCodeBlock) }; -const ClassInfo UnlinkedProgramCodeBlock::s_info = { "UnlinkedProgramCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedProgramCodeBlock) }; -const ClassInfo UnlinkedEvalCodeBlock::s_info = { "UnlinkedEvalCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedEvalCodeBlock) }; -const ClassInfo UnlinkedFunctionCodeBlock::s_info = { "UnlinkedFunctionCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionCodeBlock) }; - -static UnlinkedFunctionCodeBlock* generateFunctionCodeBlock(VM& vm, JSScope* scope, UnlinkedFunctionExecutable* executable, const SourceCode& source, CodeSpecializationKind kind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error) -{ - RefPtr<FunctionBodyNode> body = parse<FunctionBodyNode>(&vm, source, executable->parameters(), executable->name(), executable->isInStrictContext() ? 
JSParseStrict : JSParseNormal, JSParseFunctionCode, error); - - if (!body) { - ASSERT(error.m_type != ParserError::ErrorNone); - return 0; - } - - if (executable->forceUsesArguments()) - body->setUsesArguments(); - body->finishParsing(executable->parameters(), executable->name(), executable->functionNameIsInScopeToggle()); - executable->recordParse(body->features(), body->hasCapturedVariables(), body->lineNo(), body->lastLine()); - - UnlinkedFunctionCodeBlock* result = UnlinkedFunctionCodeBlock::create(&vm, FunctionCode, ExecutableInfo(body->needsActivation(), body->usesEval(), body->isStrictMode(), kind == CodeForConstruct)); - OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(vm, scope, body.get(), result, debuggerMode, profilerMode))); - error = generator->generate(); - body->destroyData(); - if (error.m_type != ParserError::ErrorNone) - return 0; - return result; -} - -unsigned UnlinkedCodeBlock::addOrFindConstant(JSValue v) -{ - unsigned numberOfConstants = numberOfConstantRegisters(); - for (unsigned i = 0; i < numberOfConstants; ++i) { - if (getConstant(FirstConstantRegisterIndex + i) == v) - return i; - } - return addConstant(v); -} - -UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM* vm, Structure* structure, const SourceCode& source, FunctionBodyNode* node) - : Base(*vm, structure) - , m_numCapturedVariables(node->capturedVariableCount()) - , m_forceUsesArguments(node->usesArguments()) - , m_isInStrictContext(node->isStrictMode()) - , m_hasCapturedVariables(node->hasCapturedVariables()) - , m_name(node->ident()) - , m_inferredName(node->inferredName()) - , m_parameters(node->parameters()) - , m_firstLineOffset(node->firstLine() - source.firstLine()) - , m_lineCount(node->lastLine() - node->firstLine()) - , m_functionStartOffset(node->functionStart() - source.startOffset()) - , m_functionStartColumn(node->startColumn()) - , m_startOffset(node->source().startOffset() - source.startOffset()) - , m_sourceLength(node->source().length()) - , m_features(node->features()) - , m_functionNameIsInScopeToggle(node->functionNameIsInScopeToggle()) -{ -} - -size_t UnlinkedFunctionExecutable::parameterCount() const -{ - return m_parameters->size(); -} - -void UnlinkedFunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor) -{ - UnlinkedFunctionExecutable* thisObject = jsCast<UnlinkedFunctionExecutable*>(cell); - ASSERT_GC_OBJECT_INHERITS(thisObject, &s_info); - COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag); - ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren()); - Base::visitChildren(thisObject, visitor); - visitor.append(&thisObject->m_codeBlockForCall); - visitor.append(&thisObject->m_codeBlockForConstruct); - visitor.append(&thisObject->m_nameValue); - visitor.append(&thisObject->m_symbolTableForCall); - visitor.append(&thisObject->m_symbolTableForConstruct); -} - -FunctionExecutable* UnlinkedFunctionExecutable::link(VM& vm, const SourceCode& source, size_t lineOffset, size_t sourceOffset) -{ - unsigned firstLine = lineOffset + m_firstLineOffset; - unsigned startOffset = sourceOffset + m_startOffset; - unsigned startColumn = m_functionStartColumn + 1; // startColumn should start from 1, not 0. 
- SourceCode code(source.provider(), startOffset, startOffset + m_sourceLength, firstLine, startColumn); - return FunctionExecutable::create(vm, code, this, firstLine, firstLine + m_lineCount, startColumn); -} - -UnlinkedFunctionExecutable* UnlinkedFunctionExecutable::fromGlobalCode(const Identifier& name, ExecState* exec, Debugger*, const SourceCode& source, JSObject** exception) -{ - ParserError error; - CodeCache* codeCache = exec->vm().codeCache(); - UnlinkedFunctionExecutable* executable = codeCache->getFunctionExecutableFromGlobalCode(exec->vm(), name, source, error); - - if (exec->lexicalGlobalObject()->hasDebugger()) - exec->lexicalGlobalObject()->debugger()->sourceParsed(exec, source.provider(), error.m_line, error.m_message); - - if (error.m_type != ParserError::ErrorNone) { - *exception = error.toErrorObject(exec->lexicalGlobalObject(), source); - return 0; - } - - return executable; -} - -UnlinkedFunctionCodeBlock* UnlinkedFunctionExecutable::codeBlockFor(VM& vm, JSScope* scope, const SourceCode& source, CodeSpecializationKind specializationKind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error) -{ - switch (specializationKind) { - case CodeForCall: - if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForCall.get()) - return codeBlock; - break; - case CodeForConstruct: - if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForConstruct.get()) - return codeBlock; - break; - } - - UnlinkedFunctionCodeBlock* result = generateFunctionCodeBlock(vm, scope, this, source, specializationKind, debuggerMode, profilerMode, error); - - if (error.m_type != ParserError::ErrorNone) - return 0; - - switch (specializationKind) { - case CodeForCall: - m_codeBlockForCall.set(vm, this, result); - m_symbolTableForCall.set(vm, this, result->symbolTable()); - break; - case CodeForConstruct: - m_codeBlockForConstruct.set(vm, this, result); - m_symbolTableForConstruct.set(vm, this, result->symbolTable()); - break; - } - return result; -} - -String UnlinkedFunctionExecutable::paramString() const -{ - FunctionParameters& parameters = *m_parameters; - StringBuilder builder; - for (size_t pos = 0; pos < parameters.size(); ++pos) { - if (!builder.isEmpty()) - builder.appendLiteral(", "); - builder.append(parameters.at(pos).string()); - } - return builder.toString(); -} +const ClassInfo UnlinkedCodeBlock::s_info = { "UnlinkedCodeBlock", 0, 0, CREATE_METHOD_TABLE(UnlinkedCodeBlock) }; +const ClassInfo UnlinkedGlobalCodeBlock::s_info = { "UnlinkedGlobalCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedGlobalCodeBlock) }; +const ClassInfo UnlinkedProgramCodeBlock::s_info = { "UnlinkedProgramCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedProgramCodeBlock) }; +const ClassInfo UnlinkedModuleProgramCodeBlock::s_info = { "UnlinkedModuleProgramCodeBlock", &Base::s_info, nullptr, CREATE_METHOD_TABLE(UnlinkedModuleProgramCodeBlock) }; +const ClassInfo UnlinkedEvalCodeBlock::s_info = { "UnlinkedEvalCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedEvalCodeBlock) }; +const ClassInfo UnlinkedFunctionCodeBlock::s_info = { "UnlinkedFunctionCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedFunctionCodeBlock) }; UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info) : Base(*vm, structure) , m_numVars(0) - , m_numCalleeRegisters(0) + , m_numCalleeLocals(0) , m_numParameters(0) - , m_vm(vm) - , m_argumentsRegister(-1) - , m_globalObjectRegister(-1) - , m_needsFullScopeChain(info.m_needsActivation) - , 
m_usesEval(info.m_usesEval) - , m_isNumericCompareFunction(false) - , m_isStrictMode(info.m_isStrictMode) - , m_isConstructor(info.m_isConstructor) + , m_globalObjectRegister(VirtualRegister()) + , m_usesEval(info.usesEval()) + , m_isStrictMode(info.isStrictMode()) + , m_isConstructor(info.isConstructor()) , m_hasCapturedVariables(false) + , m_isBuiltinFunction(info.isBuiltinFunction()) + , m_constructorKind(static_cast<unsigned>(info.constructorKind())) + , m_superBinding(static_cast<unsigned>(info.superBinding())) + , m_derivedContextType(static_cast<unsigned>(info.derivedContextType())) + , m_isArrowFunctionContext(info.isArrowFunctionContext()) + , m_isClassContext(info.isClassContext()) , m_firstLine(0) , m_lineCount(0) + , m_endColumn(UINT_MAX) + , m_parseMode(info.parseMode()) , m_features(0) , m_codeType(codeType) - , m_resolveOperationCount(0) - , m_putToBaseOperationCount(1) , m_arrayProfileCount(0) , m_arrayAllocationProfileCount(0) , m_objectAllocationProfileCount(0) , m_valueProfileCount(0) , m_llintCallLinkInfoCount(0) -#if ENABLE(BYTECODE_COMMENTS) - , m_bytecodeCommentIterator(0) -#endif { + for (auto& constantRegisterIndex : m_linkTimeConstants) + constantRegisterIndex = 0; + ASSERT(m_constructorKind == static_cast<unsigned>(info.constructorKind())); +} +VM* UnlinkedCodeBlock::vm() const +{ + return MarkedBlock::blockFor(this)->vm(); } void UnlinkedCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor) { UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell); - ASSERT_GC_OBJECT_INHERITS(thisObject, &s_info); - COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag); - ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren()); + ASSERT_GC_OBJECT_INHERITS(thisObject, info()); Base::visitChildren(thisObject, visitor); - visitor.append(&thisObject->m_symbolTable); for (FunctionExpressionVector::iterator ptr = thisObject->m_functionDecls.begin(), end = thisObject->m_functionDecls.end(); ptr != end; ++ptr) visitor.append(ptr); for (FunctionExpressionVector::iterator ptr = thisObject->m_functionExprs.begin(), end = thisObject->m_functionExprs.end(); ptr != end; ++ptr) visitor.append(ptr); visitor.appendValues(thisObject->m_constantRegisters.data(), thisObject->m_constantRegisters.size()); + if (thisObject->m_unlinkedInstructions) + visitor.reportExtraMemoryVisited(thisObject->m_unlinkedInstructions->sizeInBytes()); if (thisObject->m_rareData) { for (size_t i = 0, end = thisObject->m_rareData->m_regexps.size(); i != end; i++) visitor.append(&thisObject->m_rareData->m_regexps[i]); } } +size_t UnlinkedCodeBlock::estimatedSize(JSCell* cell) +{ + UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell); + size_t extraSize = thisObject->m_unlinkedInstructions ? 
thisObject->m_unlinkedInstructions->sizeInBytes() : 0; + return Base::estimatedSize(cell) + extraSize; +} + int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset) { - ASSERT(bytecodeOffset < instructions().size()); + ASSERT(bytecodeOffset < instructions().count()); int divot; int startOffset; int endOffset; @@ -251,10 +126,66 @@ int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset) return line; } +inline void UnlinkedCodeBlock::getLineAndColumn(ExpressionRangeInfo& info, + unsigned& line, unsigned& column) +{ + switch (info.mode) { + case ExpressionRangeInfo::FatLineMode: + info.decodeFatLineMode(line, column); + break; + case ExpressionRangeInfo::FatColumnMode: + info.decodeFatColumnMode(line, column); + break; + case ExpressionRangeInfo::FatLineAndColumnMode: { + unsigned fatIndex = info.position; + ExpressionRangeInfo::FatPosition& fatPos = m_rareData->m_expressionInfoFatPositions[fatIndex]; + line = fatPos.line; + column = fatPos.column; + break; + } + } // switch +} + +#ifndef NDEBUG +static void dumpLineColumnEntry(size_t index, const UnlinkedInstructionStream& instructionStream, unsigned instructionOffset, unsigned line, unsigned column) +{ + const auto& instructions = instructionStream.unpackForDebugging(); + OpcodeID opcode = instructions[instructionOffset].u.opcode; + const char* event = ""; + if (opcode == op_debug) { + switch (instructions[instructionOffset + 1].u.operand) { + case WillExecuteProgram: event = " WillExecuteProgram"; break; + case DidExecuteProgram: event = " DidExecuteProgram"; break; + case DidEnterCallFrame: event = " DidEnterCallFrame"; break; + case DidReachBreakpoint: event = " DidReachBreakpoint"; break; + case WillLeaveCallFrame: event = " WillLeaveCallFrame"; break; + case WillExecuteStatement: event = " WillExecuteStatement"; break; + } + } + dataLogF(" [%zu] pc %u @ line %u col %u : %s%s\n", index, instructionOffset, line, column, opcodeNames[opcode], event); +} + +void UnlinkedCodeBlock::dumpExpressionRangeInfo() +{ + Vector<ExpressionRangeInfo>& expressionInfo = m_expressionInfo; + + size_t size = m_expressionInfo.size(); + dataLogF("UnlinkedCodeBlock %p expressionRangeInfo[%zu] {\n", this, size); + for (size_t i = 0; i < size; i++) { + ExpressionRangeInfo& info = expressionInfo[i]; + unsigned line; + unsigned column; + getLineAndColumn(info, line, column); + dumpLineColumnEntry(i, instructions(), info.instructionOffset, line, column); + } + dataLog("}\n"); +} +#endif + void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) { - ASSERT(bytecodeOffset < instructions().size()); + ASSERT(bytecodeOffset < instructions().count()); if (!m_expressionInfo.size()) { startOffset = 0; @@ -284,22 +215,7 @@ void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset startOffset = info.startOffset; endOffset = info.endOffset; divot = info.divotPoint; - - switch (info.mode) { - case ExpressionRangeInfo::FatLineMode: - info.decodeFatLineMode(line, column); - break; - case ExpressionRangeInfo::FatColumnMode: - info.decodeFatColumnMode(line, column); - break; - case ExpressionRangeInfo::FatLineAndColumnMode: { - unsigned fatIndex = info.position; - ExpressionRangeInfo::FatPosition& fatPos = m_rareData->m_expressionInfoFatPositions[fatIndex]; - line = fatPos.line; - column = fatPos.column; - break; - } - } // switch + getLineAndColumn(info, line, column); } void 
UnlinkedCodeBlock::addExpressionInfo(unsigned instructionOffset, @@ -356,15 +272,53 @@ void UnlinkedCodeBlock::addExpressionInfo(unsigned instructionOffset, m_expressionInfo.append(info); } +bool UnlinkedCodeBlock::typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot) +{ + static const bool verbose = false; + if (!m_rareData) { + if (verbose) + dataLogF("Don't have assignment info for offset:%u\n", bytecodeOffset); + startDivot = UINT_MAX; + endDivot = UINT_MAX; + return false; + } + + auto iter = m_rareData->m_typeProfilerInfoMap.find(bytecodeOffset); + if (iter == m_rareData->m_typeProfilerInfoMap.end()) { + if (verbose) + dataLogF("Don't have assignment info for offset:%u\n", bytecodeOffset); + startDivot = UINT_MAX; + endDivot = UINT_MAX; + return false; + } + + RareData::TypeProfilerExpressionRange& range = iter->value; + startDivot = range.m_startDivot; + endDivot = range.m_endDivot; + return true; +} + +void UnlinkedCodeBlock::addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot) +{ + createRareDataIfNecessary(); + RareData::TypeProfilerExpressionRange range; + range.m_startDivot = startDivot; + range.m_endDivot = endDivot; + m_rareData->m_typeProfilerInfoMap.set(instructionOffset, range); +} + void UnlinkedProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor) { UnlinkedProgramCodeBlock* thisObject = jsCast<UnlinkedProgramCodeBlock*>(cell); - ASSERT_GC_OBJECT_INHERITS(thisObject, &s_info); - COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag); - ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren()); + ASSERT_GC_OBJECT_INHERITS(thisObject, info()); + Base::visitChildren(thisObject, visitor); +} + +void UnlinkedModuleProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor) +{ + UnlinkedModuleProgramCodeBlock* thisObject = jsCast<UnlinkedModuleProgramCodeBlock*>(cell); + ASSERT_GC_OBJECT_INHERITS(thisObject, info()); Base::visitChildren(thisObject, visitor); - for (size_t i = 0, end = thisObject->m_functionDeclarations.size(); i != end; i++) - visitor.append(&thisObject->m_functionDeclarations[i].second); } UnlinkedCodeBlock::~UnlinkedCodeBlock() @@ -376,6 +330,11 @@ void UnlinkedProgramCodeBlock::destroy(JSCell* cell) jsCast<UnlinkedProgramCodeBlock*>(cell)->~UnlinkedProgramCodeBlock(); } +void UnlinkedModuleProgramCodeBlock::destroy(JSCell* cell) +{ + jsCast<UnlinkedModuleProgramCodeBlock*>(cell)->~UnlinkedModuleProgramCodeBlock(); +} + void UnlinkedEvalCodeBlock::destroy(JSCell* cell) { jsCast<UnlinkedEvalCodeBlock*>(cell)->~UnlinkedEvalCodeBlock(); @@ -391,5 +350,18 @@ void UnlinkedFunctionExecutable::destroy(JSCell* cell) jsCast<UnlinkedFunctionExecutable*>(cell)->~UnlinkedFunctionExecutable(); } +void UnlinkedCodeBlock::setInstructions(std::unique_ptr<UnlinkedInstructionStream> instructions) +{ + ASSERT(instructions); + m_unlinkedInstructions = WTFMove(instructions); + Heap::heap(this)->reportExtraMemoryAllocated(m_unlinkedInstructions->sizeInBytes()); +} + +const UnlinkedInstructionStream& UnlinkedCodeBlock::instructions() const +{ + ASSERT(m_unlinkedInstructions.get()); + return *m_unlinkedInstructions; +} + } diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h index 634968313..f5b2b44a9 100644 --- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h +++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h @@ -1,5 +1,5 @@ /* - * 
Copyright (C) 2012, 2013 Apple Inc. All Rights Reserved. + * Copyright (C) 2012-2015 Apple Inc. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,36 +27,39 @@ #define UnlinkedCodeBlock_h #include "BytecodeConventions.h" -#include "CodeCache.h" #include "CodeSpecializationKind.h" #include "CodeType.h" +#include "ConstructAbility.h" #include "ExpressionRangeInfo.h" +#include "HandlerInfo.h" #include "Identifier.h" #include "JSCell.h" #include "JSString.h" -#include "LineInfo.h" #include "ParserModes.h" #include "RegExp.h" #include "SpecialPointer.h" -#include "SymbolTable.h" - +#include "UnlinkedFunctionExecutable.h" +#include "VariableEnvironment.h" +#include "VirtualRegister.h" +#include <wtf/FastBitVector.h> #include <wtf/RefCountedArray.h> #include <wtf/Vector.h> namespace JSC { class Debugger; -class FunctionBodyNode; +class FunctionMetadataNode; class FunctionExecutable; -class FunctionParameters; class JSScope; -struct ParserError; +class ParserError; class ScriptExecutable; class SourceCode; class SourceProvider; -class SharedSymbolTable; class UnlinkedCodeBlock; class UnlinkedFunctionCodeBlock; +class UnlinkedFunctionExecutable; +class UnlinkedInstructionStream; +struct ExecutableInfo; typedef unsigned UnlinkedValueProfile; typedef unsigned UnlinkedArrayProfile; @@ -64,130 +67,6 @@ typedef unsigned UnlinkedArrayAllocationProfile; typedef unsigned UnlinkedObjectAllocationProfile; typedef unsigned UnlinkedLLIntCallLinkInfo; -struct ExecutableInfo { - ExecutableInfo(bool needsActivation, bool usesEval, bool isStrictMode, bool isConstructor) - : m_needsActivation(needsActivation) - , m_usesEval(usesEval) - , m_isStrictMode(isStrictMode) - , m_isConstructor(isConstructor) - { - } - bool m_needsActivation; - bool m_usesEval; - bool m_isStrictMode; - bool m_isConstructor; -}; - -class UnlinkedFunctionExecutable : public JSCell { -public: - friend class CodeCache; - typedef JSCell Base; - static UnlinkedFunctionExecutable* create(VM* vm, const SourceCode& source, FunctionBodyNode* node) - { - UnlinkedFunctionExecutable* instance = new (NotNull, allocateCell<UnlinkedFunctionExecutable>(vm->heap)) UnlinkedFunctionExecutable(vm, vm->unlinkedFunctionExecutableStructure.get(), source, node); - instance->finishCreation(*vm); - return instance; - } - - const Identifier& name() const { return m_name; } - const Identifier& inferredName() const { return m_inferredName; } - JSString* nameValue() const { return m_nameValue.get(); } - SharedSymbolTable* symbolTable(CodeSpecializationKind kind) - { - return (kind == CodeForCall) ? 
m_symbolTableForCall.get() : m_symbolTableForConstruct.get(); - } - size_t parameterCount() const; - bool isInStrictContext() const { return m_isInStrictContext; } - FunctionNameIsInScopeToggle functionNameIsInScopeToggle() const { return m_functionNameIsInScopeToggle; } - - unsigned firstLineOffset() const { return m_firstLineOffset; } - unsigned lineCount() const { return m_lineCount; } - unsigned functionStartOffset() const { return m_functionStartOffset; } - unsigned functionStartColumn() const { return m_functionStartColumn; } - unsigned startOffset() const { return m_startOffset; } - unsigned sourceLength() { return m_sourceLength; } - - String paramString() const; - - UnlinkedFunctionCodeBlock* codeBlockFor(VM&, JSScope*, const SourceCode&, CodeSpecializationKind, DebuggerMode, ProfilerMode, ParserError&); - - static UnlinkedFunctionExecutable* fromGlobalCode(const Identifier&, ExecState*, Debugger*, const SourceCode&, JSObject** exception); - - FunctionExecutable* link(VM&, const SourceCode&, size_t lineOffset, size_t sourceOffset); - - void clearCodeForRecompilation() - { - m_symbolTableForCall.clear(); - m_symbolTableForConstruct.clear(); - m_codeBlockForCall.clear(); - m_codeBlockForConstruct.clear(); - } - - FunctionParameters* parameters() { return m_parameters.get(); } - - void recordParse(CodeFeatures features, bool hasCapturedVariables, int firstLine, int lastLine) - { - m_features = features; - m_hasCapturedVariables = hasCapturedVariables; - m_lineCount = lastLine - firstLine; - } - - bool forceUsesArguments() const { return m_forceUsesArguments; } - - CodeFeatures features() const { return m_features; } - bool hasCapturedVariables() const { return m_hasCapturedVariables; } - - static const bool needsDestruction = true; - static const bool hasImmortalStructure = true; - static void destroy(JSCell*); - -private: - UnlinkedFunctionExecutable(VM*, Structure*, const SourceCode&, FunctionBodyNode*); - WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForCall; - WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForConstruct; - - unsigned m_numCapturedVariables : 29; - bool m_forceUsesArguments : 1; - bool m_isInStrictContext : 1; - bool m_hasCapturedVariables : 1; - - Identifier m_name; - Identifier m_inferredName; - WriteBarrier<JSString> m_nameValue; - WriteBarrier<SharedSymbolTable> m_symbolTableForCall; - WriteBarrier<SharedSymbolTable> m_symbolTableForConstruct; - RefPtr<FunctionParameters> m_parameters; - unsigned m_firstLineOffset; - unsigned m_lineCount; - unsigned m_functionStartOffset; - unsigned m_functionStartColumn; - unsigned m_startOffset; - unsigned m_sourceLength; - - CodeFeatures m_features; - - FunctionNameIsInScopeToggle m_functionNameIsInScopeToggle; - -protected: - void finishCreation(VM& vm) - { - Base::finishCreation(vm); - m_nameValue.set(vm, this, jsString(&vm, name().string())); - } - - static void visitChildren(JSCell*, SlotVisitor&); - -public: - static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) - { - return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionExecutableType, StructureFlags), &s_info); - } - - static const unsigned StructureFlags = OverridesVisitChildren | JSCell::StructureFlags; - - static const ClassInfo s_info; -}; - struct UnlinkedStringJumpTable { typedef HashMap<RefPtr<StringImpl>, int32_t> StringOffsetTable; StringOffsetTable offsetTable; @@ -215,13 +94,6 @@ struct UnlinkedSimpleJumpTable { } }; -struct UnlinkedHandlerInfo { - uint32_t start; - uint32_t end; - uint32_t 
target; - uint32_t scopeDepth; -}; - struct UnlinkedInstruction { UnlinkedInstruction() { u.operand = 0; } UnlinkedInstruction(OpcodeID opcode) { u.opcode = opcode; } @@ -229,41 +101,43 @@ struct UnlinkedInstruction { union { OpcodeID opcode; int32_t operand; + unsigned index; } u; }; class UnlinkedCodeBlock : public JSCell { public: typedef JSCell Base; + static const unsigned StructureFlags = Base::StructureFlags; + static const bool needsDestruction = true; - static const bool hasImmortalStructure = true; enum { CallFunction, ApplyFunction }; bool isConstructor() const { return m_isConstructor; } bool isStrictMode() const { return m_isStrictMode; } bool usesEval() const { return m_usesEval; } - - bool needsFullScopeChain() const { return m_needsFullScopeChain; } - void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; } + SourceParseMode parseMode() const { return m_parseMode; } + bool isArrowFunction() const { return m_parseMode == SourceParseMode::ArrowFunctionMode; } + DerivedContextType derivedContextType() const { return static_cast<DerivedContextType>(m_derivedContextType); } + bool isArrowFunctionContext() const { return m_isArrowFunctionContext; } + bool isClassContext() const { return m_isClassContext; } void addExpressionInfo(unsigned instructionOffset, int divot, int startOffset, int endOffset, unsigned line, unsigned column); + void addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot); + bool hasExpressionInfo() { return m_expressionInfo.size(); } + const Vector<ExpressionRangeInfo>& expressionInfo() { return m_expressionInfo; } // Special registers - void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; } - void setActivationRegister(int activationRegister) { m_activationRegister = activationRegister; } - - void setArgumentsRegister(int argumentsRegister) { m_argumentsRegister = argumentsRegister; } - bool usesArguments() const { return m_argumentsRegister != -1; } - int argumentsRegister() const { return m_argumentsRegister; } + void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; } + void setScopeRegister(VirtualRegister scopeRegister) { m_scopeRegister = scopeRegister; } - - bool usesGlobalObject() const { return m_globalObjectRegister != -1; } - void setGlobalObjectRegister(int globalObjectRegister) { m_globalObjectRegister = globalObjectRegister; } - int globalObjectRegister() const { return m_globalObjectRegister; } + bool usesGlobalObject() const { return m_globalObjectRegister.isValid(); } + void setGlobalObjectRegister(VirtualRegister globalObjectRegister) { m_globalObjectRegister = globalObjectRegister; } + VirtualRegister globalObjectRegister() const { return m_globalObjectRegister; } // Parameter information void setNumParameters(int newValue) { m_numParameters = newValue; } @@ -274,7 +148,7 @@ public: { createRareDataIfNecessary(); unsigned size = m_rareData->m_regexps.size(); - m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_vm, this, r)); + m_rareData->m_regexps.append(WriteBarrier<RegExp>(*vm(), this, r)); return size; } unsigned numberOfRegExps() const @@ -292,19 +166,35 @@ public: const Identifier& identifier(int index) const { return m_identifiers[index]; } const Vector<Identifier>& identifiers() const { return m_identifiers; } - size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); } - unsigned addConstant(JSValue v) + unsigned addConstant(JSValue v, SourceCodeRepresentation 
sourceCodeRepresentation = SourceCodeRepresentation::Other) + { + unsigned result = m_constantRegisters.size(); + m_constantRegisters.append(WriteBarrier<Unknown>()); + m_constantRegisters.last().set(*vm(), this, v); + m_constantsSourceCodeRepresentation.append(sourceCodeRepresentation); + return result; + } + unsigned addConstant(LinkTimeConstant type) { unsigned result = m_constantRegisters.size(); + ASSERT(result); + unsigned index = static_cast<unsigned>(type); + ASSERT(index < LinkTimeConstantCount); + m_linkTimeConstants[index] = result; m_constantRegisters.append(WriteBarrier<Unknown>()); - m_constantRegisters.last().set(*m_vm, this, v); + m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other); return result; } - unsigned addOrFindConstant(JSValue); - const Vector<WriteBarrier<Unknown> >& constantRegisters() { return m_constantRegisters; } + unsigned registerIndexForLinkTimeConstant(LinkTimeConstant type) + { + unsigned index = static_cast<unsigned>(type); + ASSERT(index < LinkTimeConstantCount); + return m_linkTimeConstants[index]; + } + const Vector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; } const WriteBarrier<Unknown>& constantRegister(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex]; } ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; } - ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); } + const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; } // Jumps size_t numberOfJumpTargets() const { return m_jumpTargets.size(); } @@ -312,50 +202,44 @@ public: unsigned jumpTarget(int index) const { return m_jumpTargets[index]; } unsigned lastJumpTarget() const { return m_jumpTargets.last(); } - void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; } - bool isNumericCompareFunction() const { return m_isNumericCompareFunction; } + bool isBuiltinFunction() const { return m_isBuiltinFunction; } + + ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); } + SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); } void shrinkToFit() { m_jumpTargets.shrinkToFit(); m_identifiers.shrinkToFit(); m_constantRegisters.shrinkToFit(); + m_constantsSourceCodeRepresentation.shrinkToFit(); m_functionDecls.shrinkToFit(); m_functionExprs.shrinkToFit(); m_propertyAccessInstructions.shrinkToFit(); m_expressionInfo.shrinkToFit(); -#if ENABLE(BYTECODE_COMMENTS) - m_bytecodeComments.shrinkToFit(); -#endif if (m_rareData) { m_rareData->m_exceptionHandlers.shrinkToFit(); m_rareData->m_regexps.shrinkToFit(); m_rareData->m_constantBuffers.shrinkToFit(); - m_rareData->m_immediateSwitchJumpTables.shrinkToFit(); - m_rareData->m_characterSwitchJumpTables.shrinkToFit(); + m_rareData->m_switchJumpTables.shrinkToFit(); m_rareData->m_stringSwitchJumpTables.shrinkToFit(); m_rareData->m_expressionInfoFatPositions.shrinkToFit(); } } - unsigned numberOfInstructions() const { return m_unlinkedInstructions.size(); } - RefCountedArray<UnlinkedInstruction>& instructions() { return m_unlinkedInstructions; } - const RefCountedArray<UnlinkedInstruction>& instructions() const { return m_unlinkedInstructions; } + void setInstructions(std::unique_ptr<UnlinkedInstructionStream>); + const UnlinkedInstructionStream& 
instructions() const; int m_numVars; int m_numCapturedVars; - int m_numCalleeRegisters; + int m_numCalleeLocals; // Jump Tables - size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; } - UnlinkedSimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(UnlinkedSimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); } - UnlinkedSimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; } - - size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; } - UnlinkedSimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(UnlinkedSimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); } - UnlinkedSimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; } + size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; } + UnlinkedSimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(UnlinkedSimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); } + UnlinkedSimpleJumpTable& switchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; } size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; } UnlinkedStringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(UnlinkedStringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); } @@ -365,7 +249,7 @@ public: { unsigned size = m_functionDecls.size(); m_functionDecls.append(WriteBarrier<UnlinkedFunctionExecutable>()); - m_functionDecls.last().set(*m_vm, this, n); + m_functionDecls.last().set(*vm(), this, n); return size; } UnlinkedFunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); } @@ -374,7 +258,7 @@ public: { unsigned size = m_functionExprs.size(); m_functionExprs.append(WriteBarrier<UnlinkedFunctionExecutable>()); - m_functionExprs.last().set(*m_vm, this, n); + m_functionExprs.last().set(*vm(), this, n); return size; } UnlinkedFunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); } @@ -382,17 +266,10 @@ public: // Exception handling support size_t numberOfExceptionHandlers() const { return m_rareData ? 
m_rareData->m_exceptionHandlers.size() : 0; } - void addExceptionHandler(const UnlinkedHandlerInfo& hanler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(hanler); } + void addExceptionHandler(const UnlinkedHandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); } UnlinkedHandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; } - SharedSymbolTable* symbolTable() const { return m_symbolTable.get(); } - - VM* vm() const { return m_vm; } - - unsigned addResolve() { return m_resolveOperationCount++; } - unsigned numberOfResolveOperations() const { return m_resolveOperationCount; } - unsigned addPutToBase() { return m_putToBaseOperationCount++; } - unsigned numberOfPutToBaseOperations() const { return m_putToBaseOperationCount; } + VM* vm() const; UnlinkedArrayProfile addArrayProfile() { return m_arrayProfileCount++; } unsigned numberOfArrayProfiles() { return m_arrayProfileCount; } @@ -408,9 +285,8 @@ public: CodeType codeType() const { return m_codeType; } - int thisRegister() const { return m_thisRegister; } - int activationRegister() const { return m_activationRegister; } - + VirtualRegister thisRegister() const { return m_thisRegister; } + VirtualRegister scopeRegister() const { return m_scopeRegister; } void addPropertyAccessInstruction(unsigned propertyAccessInstruction) { @@ -443,36 +319,49 @@ public: return m_rareData->m_constantBuffers[index]; } - bool hasRareData() const { return m_rareData; } + bool hasRareData() const { return m_rareData.get(); } int lineNumberForBytecodeOffset(unsigned bytecodeOffset); void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column); - void recordParse(CodeFeatures features, bool hasCapturedVariables, unsigned firstLine, unsigned lineCount) + bool typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot); + + void recordParse(CodeFeatures features, bool hasCapturedVariables, unsigned firstLine, unsigned lineCount, unsigned endColumn) { m_features = features; m_hasCapturedVariables = hasCapturedVariables; m_firstLine = firstLine; m_lineCount = lineCount; + // For the UnlinkedCodeBlock, startColumn is always 0. + m_endColumn = endColumn; } CodeFeatures codeFeatures() const { return m_features; } bool hasCapturedVariables() const { return m_hasCapturedVariables; } unsigned firstLine() const { return m_firstLine; } unsigned lineCount() const { return m_lineCount; } + ALWAYS_INLINE unsigned startColumn() const { return 0; } + unsigned endColumn() const { return m_endColumn; } - PassRefPtr<CodeCache> codeCacheForEval() + void addOpProfileControlFlowBytecodeOffset(size_t offset) { - if (m_codeType == GlobalCode) - return m_vm->codeCache(); createRareDataIfNecessary(); - if (!m_rareData->m_evalCodeCache) - m_rareData->m_evalCodeCache = CodeCache::create(CodeCache::NonGlobalCodeCache); - return m_rareData->m_evalCodeCache.get(); + m_rareData->m_opProfileControlFlowBytecodeOffsets.append(offset); + } + const Vector<size_t>& opProfileControlFlowBytecodeOffsets() const + { + ASSERT(m_rareData); + return m_rareData->m_opProfileControlFlowBytecodeOffsets; + } + bool hasOpProfileControlFlowBytecodeOffsets() const + { + return m_rareData && !m_rareData->m_opProfileControlFlowBytecodeOffsets.isEmpty(); } + void dumpExpressionRangeInfo(); // For debugging purpose only. 
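A minimal caller-side sketch of the type-profiler query declared above, assuming only what this patch exposes; the dumpTypeProfilerRange helper itself is hypothetical and not part of the patch:

void dumpTypeProfilerRange(UnlinkedCodeBlock& codeBlock, unsigned bytecodeOffset)
{
    unsigned startDivot;
    unsigned endDivot;
    // Per the implementation earlier in this diff, the query returns false and
    // sets both divots to UINT_MAX when no range was recorded for this offset.
    if (!codeBlock.typeProfilerExpressionInfoForBytecodeOffset(bytecodeOffset, startDivot, endDivot)) {
        dataLogF("pc %u: no type profiler expression range\n", bytecodeOffset);
        return;
    }
    dataLogF("pc %u: expression range divots [%u, %u]\n", bytecodeOffset, startDivot, endDivot);
}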
+ protected: UnlinkedCodeBlock(VM*, Structure*, CodeType, const ExecutableInfo&); ~UnlinkedCodeBlock(); @@ -480,9 +369,6 @@ protected: void finishCreation(VM& vm) { Base::finishCreation(vm); - if (codeType() == GlobalCode) - return; - m_symbolTable.set(vm, this, SharedSymbolTable::create(vm)); } private: @@ -490,51 +376,50 @@ private: void createRareDataIfNecessary() { if (!m_rareData) - m_rareData = adoptPtr(new RareData); + m_rareData = std::make_unique<RareData>(); } - RefCountedArray<UnlinkedInstruction> m_unlinkedInstructions; + void getLineAndColumn(ExpressionRangeInfo&, unsigned& line, unsigned& column); int m_numParameters; - VM* m_vm; - - int m_thisRegister; - int m_argumentsRegister; - int m_activationRegister; - int m_globalObjectRegister; - - bool m_needsFullScopeChain : 1; - bool m_usesEval : 1; - bool m_isNumericCompareFunction : 1; - bool m_isStrictMode : 1; - bool m_isConstructor : 1; - bool m_hasCapturedVariables : 1; + + std::unique_ptr<UnlinkedInstructionStream> m_unlinkedInstructions; + + VirtualRegister m_thisRegister; + VirtualRegister m_scopeRegister; + VirtualRegister m_globalObjectRegister; + + unsigned m_usesEval : 1; + unsigned m_isStrictMode : 1; + unsigned m_isConstructor : 1; + unsigned m_hasCapturedVariables : 1; + unsigned m_isBuiltinFunction : 1; + unsigned m_constructorKind : 2; + unsigned m_superBinding : 1; + unsigned m_derivedContextType : 2; + unsigned m_isArrowFunctionContext : 1; + unsigned m_isClassContext : 1; unsigned m_firstLine; unsigned m_lineCount; + unsigned m_endColumn; + SourceParseMode m_parseMode; CodeFeatures m_features; CodeType m_codeType; Vector<unsigned> m_jumpTargets; + Vector<unsigned> m_propertyAccessInstructions; + // Constant Pools Vector<Identifier> m_identifiers; - Vector<WriteBarrier<Unknown> > m_constantRegisters; - typedef Vector<WriteBarrier<UnlinkedFunctionExecutable> > FunctionExpressionVector; + Vector<WriteBarrier<Unknown>> m_constantRegisters; + Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation; + typedef Vector<WriteBarrier<UnlinkedFunctionExecutable>> FunctionExpressionVector; FunctionExpressionVector m_functionDecls; FunctionExpressionVector m_functionExprs; + std::array<unsigned, LinkTimeConstantCount> m_linkTimeConstants; - WriteBarrier<SharedSymbolTable> m_symbolTable; - - Vector<unsigned> m_propertyAccessInstructions; - -#if ENABLE(BYTECODE_COMMENTS) - Vector<Comment> m_bytecodeComments; - size_t m_bytecodeCommentIterator; -#endif - - unsigned m_resolveOperationCount; - unsigned m_putToBaseOperationCount; unsigned m_arrayProfileCount; unsigned m_arrayAllocationProfileCount; unsigned m_objectAllocationProfileCount; @@ -548,31 +433,35 @@ public: Vector<UnlinkedHandlerInfo> m_exceptionHandlers; // Rare Constants - Vector<WriteBarrier<RegExp> > m_regexps; + Vector<WriteBarrier<RegExp>> m_regexps; // Buffers used for large array literals Vector<ConstantBuffer> m_constantBuffers; // Jump Tables - Vector<UnlinkedSimpleJumpTable> m_immediateSwitchJumpTables; - Vector<UnlinkedSimpleJumpTable> m_characterSwitchJumpTables; + Vector<UnlinkedSimpleJumpTable> m_switchJumpTables; Vector<UnlinkedStringJumpTable> m_stringSwitchJumpTables; - RefPtr<CodeCache> m_evalCodeCache; Vector<ExpressionRangeInfo::FatPosition> m_expressionInfoFatPositions; + + struct TypeProfilerExpressionRange { + unsigned m_startDivot; + unsigned m_endDivot; + }; + HashMap<unsigned, TypeProfilerExpressionRange> m_typeProfilerInfoMap; + Vector<size_t> m_opProfileControlFlowBytecodeOffsets; }; private: - OwnPtr<RareData> m_rareData; + 
std::unique_ptr<RareData> m_rareData; Vector<ExpressionRangeInfo> m_expressionInfo; protected: - - static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags; static void visitChildren(JSCell*, SlotVisitor&); + static size_t estimatedSize(JSCell*); public: - static const ClassInfo s_info; + DECLARE_INFO; }; class UnlinkedGlobalCodeBlock : public UnlinkedCodeBlock { @@ -585,12 +474,10 @@ protected: { } - static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags; - - static const ClassInfo s_info; + DECLARE_INFO; }; -class UnlinkedProgramCodeBlock : public UnlinkedGlobalCodeBlock { +class UnlinkedProgramCodeBlock final : public UnlinkedGlobalCodeBlock { private: friend class CodeCache; static UnlinkedProgramCodeBlock* create(VM* vm, const ExecutableInfo& info) @@ -602,47 +489,102 @@ private: public: typedef UnlinkedGlobalCodeBlock Base; + static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal; + static void destroy(JSCell*); - void addFunctionDeclaration(VM& vm, const Identifier& name, UnlinkedFunctionExecutable* functionExecutable) + void setVariableDeclarations(const VariableEnvironment& environment) { m_varDeclarations = environment; } + const VariableEnvironment& variableDeclarations() const { return m_varDeclarations; } + + void setLexicalDeclarations(const VariableEnvironment& environment) { m_lexicalDeclarations = environment; } + const VariableEnvironment& lexicalDeclarations() const { return m_lexicalDeclarations; } + + static void visitChildren(JSCell*, SlotVisitor&); + +private: + UnlinkedProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info) + : Base(vm, structure, GlobalCode, info) + { + } + + VariableEnvironment m_varDeclarations; + VariableEnvironment m_lexicalDeclarations; + +public: + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) { - m_functionDeclarations.append(std::make_pair(name, WriteBarrier<UnlinkedFunctionExecutable>(vm, this, functionExecutable))); + return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedProgramCodeBlockType, StructureFlags), info()); } - void addVariableDeclaration(const Identifier& name, bool isConstant) + DECLARE_INFO; +}; + +class UnlinkedModuleProgramCodeBlock final : public UnlinkedGlobalCodeBlock { +private: + friend class CodeCache; + static UnlinkedModuleProgramCodeBlock* create(VM* vm, const ExecutableInfo& info) { - m_varDeclarations.append(std::make_pair(name, isConstant)); + UnlinkedModuleProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedModuleProgramCodeBlock>(vm->heap)) UnlinkedModuleProgramCodeBlock(vm, vm->unlinkedModuleProgramCodeBlockStructure.get(), info); + instance->finishCreation(*vm); + return instance; } - typedef Vector<std::pair<Identifier, bool> > VariableDeclations; - typedef Vector<std::pair<Identifier, WriteBarrier<UnlinkedFunctionExecutable> > > FunctionDeclations; +public: + typedef UnlinkedGlobalCodeBlock Base; + static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal; - const VariableDeclations& variableDeclarations() const { return m_varDeclarations; } - const FunctionDeclations& functionDeclarations() const { return m_functionDeclarations; } + static void destroy(JSCell*); static void visitChildren(JSCell*, SlotVisitor&); + // This offset represents the constant register offset to the stored symbol table that represents the layout of the + // module environment. 
This symbol table is created by the bytecode generator, since the module environment includes + // the top-most lexical captured variables inside the module code. This means that, once the module environment is + // allocated and instantiated from this symbol table, it is tightly coupled with the specific unlinked module program + // code block and the stored symbol table. So before executing the module code, we should not clear the unlinked module + // program code block in the module executable. This requirement is met because the garbage collector only clears + // unlinked code in (1) unmarked executables and (2) function executables. + // + // Since the function code may be executed repeatedly and the environment of each function execution is different, + // the function code needs to allocate and instantiate the environment in the prologue of the function code. On the + // other hand, the module code is executed only once. So we can instantiate the module environment outside the module + // code. At that time, we construct the module environment by using the symbol table that is held by the module executable. + // The symbol table held by the executable is a clone of the one in the unlinked code block. Instantiating the module + // environment before executing and linking the module code is required to link the imported bindings between the modules. + // + // The unlinked module program code block only holds the pre-cloned symbol table in its constant register pool. It does + // not hold the instantiated module environment. So while a module environment requires its specific unlinked module + // program code block, one unlinked module code block can serve every module environment instantiated from it. There is + // a 1:N relation between the unlinked module code block and the module environments. So the + // unlinked module program code block can be cached. + // + // On the other hand, the linked code block for the module environment includes the resolved references to the imported + // bindings. An imported binding references another module environment, so the linked code block is tightly coupled + // with the specific set of module environments. Thus, the linked code block should not be cached. 
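To make the 1:N relationship above concrete, a hedged sketch of how the accessor declared just below might be consumed when instantiating one module environment; moduleEnvironmentSymbolTable is a hypothetical helper, and cloneScopePart() is assumed here as the cloning hook:

static SymbolTable* moduleEnvironmentSymbolTable(VM& vm, UnlinkedModuleProgramCodeBlock* unlinkedCodeBlock)
{
    // The unlinked block stores one pre-cloned symbol table in its constant pool.
    int offset = unlinkedCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset();
    SymbolTable* prototypeTable = jsCast<SymbolTable*>(unlinkedCodeBlock->constantRegister(offset).get());
    // Each module environment clones that table for itself, so many environments
    // can share the same cached unlinked module program code block.
    return prototypeTable->cloneScopePart(vm);
}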
+ int moduleEnvironmentSymbolTableConstantRegisterOffset() { return m_moduleEnvironmentSymbolTableConstantRegisterOffset; } + void setModuleEnvironmentSymbolTableConstantRegisterOffset(int offset) + { + m_moduleEnvironmentSymbolTableConstantRegisterOffset = offset; + } + private: - UnlinkedProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info) - : Base(vm, structure, GlobalCode, info) + UnlinkedModuleProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info) + : Base(vm, structure, ModuleCode, info) { } - VariableDeclations m_varDeclarations; - FunctionDeclations m_functionDeclarations; + int m_moduleEnvironmentSymbolTableConstantRegisterOffset { 0 }; public: static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) { - return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedProgramCodeBlockType, StructureFlags), &s_info); + return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedModuleProgramCodeBlockType, StructureFlags), info()); } - static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags; - - static const ClassInfo s_info; + DECLARE_INFO; }; -class UnlinkedEvalCodeBlock : public UnlinkedGlobalCodeBlock { +class UnlinkedEvalCodeBlock final : public UnlinkedGlobalCodeBlock { private: friend class CodeCache; @@ -655,6 +597,8 @@ private: public: typedef UnlinkedGlobalCodeBlock Base; + static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal; + static void destroy(JSCell*); const Identifier& variable(unsigned index) { return m_variables[index]; } @@ -676,16 +620,17 @@ private: public: static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) { - return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedEvalCodeBlockType, StructureFlags), &s_info); + return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedEvalCodeBlockType, StructureFlags), info()); } - static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags; - - static const ClassInfo s_info; + DECLARE_INFO; }; -class UnlinkedFunctionCodeBlock : public UnlinkedCodeBlock { +class UnlinkedFunctionCodeBlock final : public UnlinkedCodeBlock { public: + typedef UnlinkedCodeBlock Base; + static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal; + static UnlinkedFunctionCodeBlock* create(VM* vm, CodeType codeType, const ExecutableInfo& info) { UnlinkedFunctionCodeBlock* instance = new (NotNull, allocateCell<UnlinkedFunctionCodeBlock>(vm->heap)) UnlinkedFunctionCodeBlock(vm, vm->unlinkedFunctionCodeBlockStructure.get(), codeType, info); @@ -693,7 +638,6 @@ public: return instance; } - typedef UnlinkedCodeBlock Base; static void destroy(JSCell*); private: @@ -705,12 +649,10 @@ private: public: static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) { - return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionCodeBlockType, StructureFlags), &s_info); + return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionCodeBlockType, StructureFlags), info()); } - static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags; - - static const ClassInfo s_info; + DECLARE_INFO; }; } diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp new file mode 100644 index 000000000..7ad9d1042 --- /dev/null +++ 
b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp @@ -0,0 +1,225 @@ +/* + * Copyright (C) 2012, 2013, 2015 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "UnlinkedFunctionExecutable.h" + +#include "BytecodeGenerator.h" +#include "ClassInfo.h" +#include "CodeCache.h" +#include "Executable.h" +#include "ExecutableInfo.h" +#include "FunctionOverrides.h" +#include "JSCInlines.h" +#include "JSString.h" +#include "Parser.h" +#include "SourceProvider.h" +#include "Structure.h" +#include "SymbolTable.h" +#include "UnlinkedInstructionStream.h" +#include <wtf/DataLog.h> + +namespace JSC { + +static_assert(sizeof(UnlinkedFunctionExecutable) <= 256, "UnlinkedFunctionExecutable should fit in a 256-byte cell."); + +const ClassInfo UnlinkedFunctionExecutable::s_info = { "UnlinkedFunctionExecutable", 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionExecutable) }; + +static UnlinkedFunctionCodeBlock* generateUnlinkedFunctionCodeBlock( + VM& vm, UnlinkedFunctionExecutable* executable, const SourceCode& source, + CodeSpecializationKind kind, DebuggerMode debuggerMode, ProfilerMode profilerMode, + UnlinkedFunctionKind functionKind, ParserError& error, SourceParseMode parseMode) +{ + JSParserBuiltinMode builtinMode = executable->isBuiltinFunction() ? JSParserBuiltinMode::Builtin : JSParserBuiltinMode::NotBuiltin; + JSParserStrictMode strictMode = executable->isInStrictContext() ? 
JSParserStrictMode::Strict : JSParserStrictMode::NotStrict; + ASSERT(isFunctionParseMode(executable->parseMode())); + std::unique_ptr<FunctionNode> function = parse<FunctionNode>( + &vm, source, executable->name(), builtinMode, strictMode, executable->parseMode(), executable->superBinding(), error, nullptr); + + if (!function) { + ASSERT(error.isValid()); + return nullptr; + } + + function->finishParsing(executable->name(), executable->functionMode()); + executable->recordParse(function->features(), function->hasCapturedVariables()); + + bool isClassContext = executable->superBinding() == SuperBinding::Needed; + + UnlinkedFunctionCodeBlock* result = UnlinkedFunctionCodeBlock::create(&vm, FunctionCode, + ExecutableInfo(function->usesEval(), function->isStrictMode(), kind == CodeForConstruct, functionKind == UnlinkedBuiltinFunction, executable->constructorKind(), executable->superBinding(), parseMode, executable->derivedContextType(), false, isClassContext)); + + auto generator(std::make_unique<BytecodeGenerator>(vm, function.get(), result, debuggerMode, profilerMode, executable->parentScopeTDZVariables())); + error = generator->generate(); + if (error.isValid()) + return nullptr; + return result; +} + +UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM* vm, Structure* structure, const SourceCode& source, RefPtr<SourceProvider>&& sourceOverride, FunctionMetadataNode* node, UnlinkedFunctionKind kind, ConstructAbility constructAbility, VariableEnvironment& parentScopeTDZVariables, DerivedContextType derivedContextType) + : Base(*vm, structure) + , m_firstLineOffset(node->firstLine() - source.firstLine()) + , m_lineCount(node->lastLine() - node->firstLine()) + , m_unlinkedFunctionNameStart(node->functionNameStart() - source.startOffset()) + , m_unlinkedBodyStartColumn(node->startColumn()) + , m_unlinkedBodyEndColumn(m_lineCount ? 
node->endColumn() : node->endColumn() - node->startColumn()) + , m_startOffset(node->source().startOffset() - source.startOffset()) + , m_sourceLength(node->source().length()) + , m_parametersStartOffset(node->parametersStart()) + , m_typeProfilingStartOffset(node->functionKeywordStart()) + , m_typeProfilingEndOffset(node->startStartOffset() + node->source().length() - 1) + , m_parameterCount(node->parameterCount()) + , m_features(0) + , m_isInStrictContext(node->isInStrictContext()) + , m_hasCapturedVariables(false) + , m_isBuiltinFunction(kind == UnlinkedBuiltinFunction) + , m_constructAbility(static_cast<unsigned>(constructAbility)) + , m_constructorKind(static_cast<unsigned>(node->constructorKind())) + , m_functionMode(node->functionMode()) + , m_superBinding(static_cast<unsigned>(node->superBinding())) + , m_derivedContextType(static_cast<unsigned>(derivedContextType)) + , m_sourceParseMode(static_cast<unsigned>(node->parseMode())) + , m_name(node->ident()) + , m_inferredName(node->inferredName()) + , m_sourceOverride(WTFMove(sourceOverride)) +{ + ASSERT(m_constructorKind == static_cast<unsigned>(node->constructorKind())); + m_parentScopeTDZVariables.swap(parentScopeTDZVariables); +} + +void UnlinkedFunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor) +{ + UnlinkedFunctionExecutable* thisObject = jsCast<UnlinkedFunctionExecutable*>(cell); + ASSERT_GC_OBJECT_INHERITS(thisObject, info()); + Base::visitChildren(thisObject, visitor); + visitor.append(&thisObject->m_unlinkedCodeBlockForCall); + visitor.append(&thisObject->m_unlinkedCodeBlockForConstruct); + visitor.append(&thisObject->m_nameValue); +} + +FunctionExecutable* UnlinkedFunctionExecutable::link(VM& vm, const SourceCode& ownerSource, int overrideLineNumber) +{ + SourceCode source = m_sourceOverride ? SourceCode(m_sourceOverride) : ownerSource; + unsigned firstLine = source.firstLine() + m_firstLineOffset; + unsigned startOffset = source.startOffset() + m_startOffset; + unsigned lineCount = m_lineCount; + + // Adjust to one-based indexing. + bool startColumnIsOnFirstSourceLine = !m_firstLineOffset; + unsigned startColumn = m_unlinkedBodyStartColumn + (startColumnIsOnFirstSourceLine ? source.startColumn() : 1); + bool endColumnIsOnStartLine = !lineCount; + unsigned endColumn = m_unlinkedBodyEndColumn + (endColumnIsOnStartLine ? 
startColumn : 1); + + SourceCode code(source.provider(), startOffset, startOffset + m_sourceLength, firstLine, startColumn); + FunctionOverrides::OverrideInfo overrideInfo; + bool hasFunctionOverride = false; + + if (UNLIKELY(Options::functionOverrides())) { + hasFunctionOverride = FunctionOverrides::initializeOverrideFor(code, overrideInfo); + if (hasFunctionOverride) { + firstLine = overrideInfo.firstLine; + lineCount = overrideInfo.lineCount; + startColumn = overrideInfo.startColumn; + endColumn = overrideInfo.endColumn; + code = overrideInfo.sourceCode; + } + } + + FunctionExecutable* result = FunctionExecutable::create(vm, code, this, firstLine, firstLine + lineCount, startColumn, endColumn); + if (overrideLineNumber != -1) + result->setOverrideLineNumber(overrideLineNumber); + + if (UNLIKELY(hasFunctionOverride)) { + result->overrideParameterAndTypeProfilingStartEndOffsets( + overrideInfo.parametersStartOffset, + overrideInfo.typeProfilingStartOffset, + overrideInfo.typeProfilingEndOffset); + } + + return result; +} + +UnlinkedFunctionExecutable* UnlinkedFunctionExecutable::fromGlobalCode( + const Identifier& name, ExecState& exec, const SourceCode& source, + JSObject*& exception, int overrideLineNumber) +{ + ParserError error; + VM& vm = exec.vm(); + CodeCache* codeCache = vm.codeCache(); + UnlinkedFunctionExecutable* executable = codeCache->getFunctionExecutableFromGlobalCode(vm, name, source, error); + + auto& globalObject = *exec.lexicalGlobalObject(); + if (globalObject.hasDebugger()) + globalObject.debugger()->sourceParsed(&exec, source.provider(), error.line(), error.message()); + + if (error.isValid()) { + exception = error.toErrorObject(&globalObject, source, overrideLineNumber); + return nullptr; + } + + return executable; +} + +UnlinkedFunctionCodeBlock* UnlinkedFunctionExecutable::unlinkedCodeBlockFor( + VM& vm, const SourceCode& source, CodeSpecializationKind specializationKind, + DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error, SourceParseMode parseMode) +{ + switch (specializationKind) { + case CodeForCall: + if (UnlinkedFunctionCodeBlock* codeBlock = m_unlinkedCodeBlockForCall.get()) + return codeBlock; + break; + case CodeForConstruct: + if (UnlinkedFunctionCodeBlock* codeBlock = m_unlinkedCodeBlockForConstruct.get()) + return codeBlock; + break; + } + + UnlinkedFunctionCodeBlock* result = generateUnlinkedFunctionCodeBlock( + vm, this, source, specializationKind, debuggerMode, profilerMode, + isBuiltinFunction() ? UnlinkedBuiltinFunction : UnlinkedNormalFunction, + error, parseMode); + + if (error.isValid()) + return nullptr; + + switch (specializationKind) { + case CodeForCall: + m_unlinkedCodeBlockForCall.set(vm, this, result); + break; + case CodeForConstruct: + m_unlinkedCodeBlockForConstruct.set(vm, this, result); + break; + } + return result; +} + +void UnlinkedFunctionExecutable::setInvalidTypeProfilingOffsets() +{ + m_typeProfilingStartOffset = std::numeric_limits<unsigned>::max(); + m_typeProfilingEndOffset = std::numeric_limits<unsigned>::max(); +} + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h new file mode 100644 index 000000000..8a614db21 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h @@ -0,0 +1,190 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All Rights Reserved. 
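The one-based column adjustment in link() above is subtle: the unlinked start column is rebased on the source's own start column only when the body begins on the source's first line, and a one-line body stores its end column relative to its start column. A small self-contained check of just those two ternaries (the input values below are made up for illustration):

    // Standalone mirror of the column adjustment in link() above.
    // Only the two ternary expressions are copied; the inputs are invented.
    #include <cassert>

    unsigned adjustStartColumn(unsigned unlinkedBodyStartColumn, unsigned firstLineOffset, unsigned sourceStartColumn)
    {
        // If the body starts on the source's first line, columns are relative
        // to where the source itself starts; otherwise only the 0- to 1-based
        // shift is needed.
        bool startColumnIsOnFirstSourceLine = !firstLineOffset;
        return unlinkedBodyStartColumn + (startColumnIsOnFirstSourceLine ? sourceStartColumn : 1);
    }

    unsigned adjustEndColumn(unsigned unlinkedBodyEndColumn, unsigned lineCount, unsigned startColumn)
    {
        // A one-line body stored its end column relative to the start column.
        bool endColumnIsOnStartLine = !lineCount;
        return unlinkedBodyEndColumn + (endColumnIsOnStartLine ? startColumn : 1);
    }

    int main()
    {
        // Body starting 5 columns into the source's first line, source at column 5:
        assert(adjustStartColumn(5, 0, 5) == 10);
        // Multi-line body: only the 1-based shift applies to the end column.
        assert(adjustEndColumn(3, 2, 10) == 4);
        // One-line body: the end column is rebased on the computed start column.
        assert(adjustEndColumn(3, 0, 10) == 13);
    }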
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UnlinkedFunctionExecutable_h +#define UnlinkedFunctionExecutable_h + +#include "BytecodeConventions.h" +#include "CodeSpecializationKind.h" +#include "CodeType.h" +#include "ConstructAbility.h" +#include "ExecutableInfo.h" +#include "ExpressionRangeInfo.h" +#include "HandlerInfo.h" +#include "Identifier.h" +#include "JSCell.h" +#include "JSString.h" +#include "ParserModes.h" +#include "RegExp.h" +#include "SpecialPointer.h" +#include "VariableEnvironment.h" +#include "VirtualRegister.h" +#include <wtf/RefCountedArray.h> +#include <wtf/Vector.h> + +namespace JSC { + +class FunctionMetadataNode; +class FunctionExecutable; +class ParserError; +class SourceCode; +class SourceProvider; +class UnlinkedFunctionCodeBlock; + +enum UnlinkedFunctionKind { + UnlinkedNormalFunction, + UnlinkedBuiltinFunction, +}; + +class UnlinkedFunctionExecutable final : public JSCell { +public: + friend class CodeCache; + friend class VM; + + typedef JSCell Base; + static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal; + + static UnlinkedFunctionExecutable* create(VM* vm, const SourceCode& source, FunctionMetadataNode* node, UnlinkedFunctionKind unlinkedFunctionKind, ConstructAbility constructAbility, VariableEnvironment& parentScopeTDZVariables, DerivedContextType derivedContextType, RefPtr<SourceProvider>&& sourceOverride = nullptr) + { + UnlinkedFunctionExecutable* instance = new (NotNull, allocateCell<UnlinkedFunctionExecutable>(vm->heap)) + UnlinkedFunctionExecutable(vm, vm->unlinkedFunctionExecutableStructure.get(), source, WTFMove(sourceOverride), node, unlinkedFunctionKind, constructAbility, parentScopeTDZVariables, derivedContextType); + instance->finishCreation(*vm); + return instance; + } + + const Identifier& name() const { return m_name; } + const Identifier& inferredName() const { return m_inferredName; } + JSString* nameValue() const { return m_nameValue.get(); } + void setNameValue(VM& vm, JSString* nameValue) { m_nameValue.set(vm, this, nameValue); } + unsigned parameterCount() const { return m_parameterCount; }; + SourceParseMode parseMode() const { return static_cast<SourceParseMode>(m_sourceParseMode); }; + bool isInStrictContext() const { return m_isInStrictContext; } + FunctionMode 
functionMode() const { return static_cast<FunctionMode>(m_functionMode); } + ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); } + SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); } + + unsigned unlinkedFunctionNameStart() const { return m_unlinkedFunctionNameStart; } + unsigned unlinkedBodyStartColumn() const { return m_unlinkedBodyStartColumn; } + unsigned unlinkedBodyEndColumn() const { return m_unlinkedBodyEndColumn; } + unsigned startOffset() const { return m_startOffset; } + unsigned sourceLength() { return m_sourceLength; } + unsigned parametersStartOffset() const { return m_parametersStartOffset; } + unsigned typeProfilingStartOffset() const { return m_typeProfilingStartOffset; } + unsigned typeProfilingEndOffset() const { return m_typeProfilingEndOffset; } + void setInvalidTypeProfilingOffsets(); + + UnlinkedFunctionCodeBlock* unlinkedCodeBlockFor( + VM&, const SourceCode&, CodeSpecializationKind, DebuggerMode, ProfilerMode, + ParserError&, SourceParseMode); + + static UnlinkedFunctionExecutable* fromGlobalCode( + const Identifier&, ExecState&, const SourceCode&, JSObject*& exception, + int overrideLineNumber); + + JS_EXPORT_PRIVATE FunctionExecutable* link(VM&, const SourceCode&, int overrideLineNumber = -1); + + void clearCode() + { + m_unlinkedCodeBlockForCall.clear(); + m_unlinkedCodeBlockForConstruct.clear(); + } + + void recordParse(CodeFeatures features, bool hasCapturedVariables) + { + m_features = features; + m_hasCapturedVariables = hasCapturedVariables; + } + + CodeFeatures features() const { return m_features; } + bool hasCapturedVariables() const { return m_hasCapturedVariables; } + + static const bool needsDestruction = true; + static void destroy(JSCell*); + + bool isBuiltinFunction() const { return m_isBuiltinFunction; } + ConstructAbility constructAbility() const { return static_cast<ConstructAbility>(m_constructAbility); } + bool isClassConstructorFunction() const { return constructorKind() != ConstructorKind::None; } + const VariableEnvironment* parentScopeTDZVariables() const { return &m_parentScopeTDZVariables; } + + bool isArrowFunction() const { return parseMode() == SourceParseMode::ArrowFunctionMode; } + + JSC::DerivedContextType derivedContextType() const {return static_cast<JSC::DerivedContextType>(m_derivedContextType); } + +private: + UnlinkedFunctionExecutable(VM*, Structure*, const SourceCode&, RefPtr<SourceProvider>&& sourceOverride, FunctionMetadataNode*, UnlinkedFunctionKind, ConstructAbility, VariableEnvironment&, JSC::DerivedContextType); + + unsigned m_firstLineOffset; + unsigned m_lineCount; + unsigned m_unlinkedFunctionNameStart; + unsigned m_unlinkedBodyStartColumn; + unsigned m_unlinkedBodyEndColumn; + unsigned m_startOffset; + unsigned m_sourceLength; + unsigned m_parametersStartOffset; + unsigned m_typeProfilingStartOffset; + unsigned m_typeProfilingEndOffset; + unsigned m_parameterCount; + CodeFeatures m_features; + unsigned m_isInStrictContext : 1; + unsigned m_hasCapturedVariables : 1; + unsigned m_isBuiltinFunction : 1; + unsigned m_constructAbility: 1; + unsigned m_constructorKind : 2; + unsigned m_functionMode : 1; // FunctionMode + unsigned m_superBinding : 1; + unsigned m_derivedContextType: 2; + unsigned m_sourceParseMode : 4; // SourceParseMode + + WriteBarrier<UnlinkedFunctionCodeBlock> m_unlinkedCodeBlockForCall; + WriteBarrier<UnlinkedFunctionCodeBlock> m_unlinkedCodeBlockForConstruct; + + Identifier m_name; + Identifier m_inferredName; + 
WriteBarrier<JSString> m_nameValue; + RefPtr<SourceProvider> m_sourceOverride; + + VariableEnvironment m_parentScopeTDZVariables; + +protected: + void finishCreation(VM& vm) + { + Base::finishCreation(vm); + m_nameValue.set(vm, this, jsString(&vm, name().string())); + } + + static void visitChildren(JSCell*, SlotVisitor&); + +public: + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) + { + return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionExecutableType, StructureFlags), info()); + } + + DECLARE_EXPORT_INFO; +}; + +} // namespace JSC + +#endif // UnlinkedFunctionExecutable_h diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp new file mode 100644 index 000000000..6a300d3b4 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2014 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "UnlinkedInstructionStream.h" + +namespace JSC { + +static void append8(unsigned char*& ptr, unsigned char value) +{ + *(ptr++) = value; +} + +static void append32(unsigned char*& ptr, unsigned value) +{ + if (!(value & 0xffffffe0)) { + *(ptr++) = value; + return; + } + + if ((value & 0xffffffe0) == 0xffffffe0) { + *(ptr++) = (Negative5Bit << 5) | (value & 0x1f); + return; + } + + if ((value & 0xffffffe0) == 0x40000000) { + *(ptr++) = (ConstantRegister5Bit << 5) | (value & 0x1f); + return; + } + + if (!(value & 0xffffe000)) { + *(ptr++) = (Positive13Bit << 5) | ((value >> 8) & 0x1f); + *(ptr++) = value & 0xff; + return; + } + + if ((value & 0xffffe000) == 0xffffe000) { + *(ptr++) = (Negative13Bit << 5) | ((value >> 8) & 0x1f); + *(ptr++) = value & 0xff; + return; + } + + if ((value & 0xffffe000) == 0x40000000) { + *(ptr++) = (ConstantRegister13Bit << 5) | ((value >> 8) & 0x1f); + *(ptr++) = value & 0xff; + return; + } + + *(ptr++) = Full32Bit << 5; + *(ptr++) = value & 0xff; + *(ptr++) = (value >> 8) & 0xff; + *(ptr++) = (value >> 16) & 0xff; + *(ptr++) = (value >> 24) & 0xff; +} + +UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>& instructions) + : m_instructionCount(instructions.size()) +{ + Vector<unsigned char> buffer; + + // Reserve enough space up front so we never have to reallocate when appending. + buffer.resizeToFit(m_instructionCount * 5); + unsigned char* ptr = buffer.data(); + + const UnlinkedInstruction* instructionsData = instructions.data(); + for (unsigned i = 0; i < m_instructionCount;) { + const UnlinkedInstruction* pc = &instructionsData[i]; + OpcodeID opcode = pc[0].u.opcode; + append8(ptr, opcode); + + unsigned opLength = opcodeLength(opcode); + + for (unsigned j = 1; j < opLength; ++j) + append32(ptr, pc[j].u.index); + + i += opLength; + } + + buffer.shrink(ptr - buffer.data()); + m_data = RefCountedArray<unsigned char>(buffer); +} + +size_t UnlinkedInstructionStream::sizeInBytes() const +{ + return m_data.size() * sizeof(unsigned char); +} + +#ifndef NDEBUG +const RefCountedArray<UnlinkedInstruction>& UnlinkedInstructionStream::unpackForDebugging() const +{ + if (!m_unpackedInstructionsForDebugging.size()) { + m_unpackedInstructionsForDebugging = RefCountedArray<UnlinkedInstruction>(m_instructionCount); + + Reader instructionReader(*this); + for (unsigned i = 0; !instructionReader.atEnd(); ) { + const UnlinkedInstruction* pc = instructionReader.next(); + unsigned opLength = opcodeLength(pc[0].u.opcode); + for (unsigned j = 0; j < opLength; ++j) + m_unpackedInstructionsForDebugging[i++] = pc[j]; + } + } + + return m_unpackedInstructionsForDebugging; +} +#endif + +} + diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h new file mode 100644 index 000000000..a875e4906 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2014 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef UnlinkedInstructionStream_h +#define UnlinkedInstructionStream_h + +#include "UnlinkedCodeBlock.h" +#include <wtf/RefCountedArray.h> + +namespace JSC { + +class UnlinkedInstructionStream { + WTF_MAKE_FAST_ALLOCATED; +public: + explicit UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>&); + + unsigned count() const { return m_instructionCount; } + size_t sizeInBytes() const; + + class Reader { + public: + explicit Reader(const UnlinkedInstructionStream&); + + const UnlinkedInstruction* next(); + bool atEnd() const { return m_index == m_stream.m_data.size(); } + + private: + unsigned char read8(); + unsigned read32(); + + const UnlinkedInstructionStream& m_stream; + UnlinkedInstruction m_unpackedBuffer[16]; + unsigned m_index; + }; + +#ifndef NDEBUG + const RefCountedArray<UnlinkedInstruction>& unpackForDebugging() const; +#endif + +private: + friend class Reader; + +#ifndef NDEBUG + mutable RefCountedArray<UnlinkedInstruction> m_unpackedInstructionsForDebugging; +#endif + + RefCountedArray<unsigned char> m_data; + unsigned m_instructionCount; +}; + +// Unlinked instructions are packed in a simple stream format. +// +// The first byte is always the opcode. +// It's followed by an opcode-dependent number of argument values. 
+// The first 3 bits of each value determine the format: +// +// 5-bit positive integer (1 byte total) +// 5-bit negative integer (1 byte total) +// 13-bit positive integer (2 bytes total) +// 13-bit negative integer (2 bytes total) +// 5-bit constant register index, based at 0x40000000 (1 byte total) +// 13-bit constant register index, based at 0x40000000 (2 bytes total) +// 32-bit raw value (5 bytes total) + +enum PackedValueType { + Positive5Bit = 0, + Negative5Bit, + Positive13Bit, + Negative13Bit, + ConstantRegister5Bit, + ConstantRegister13Bit, + Full32Bit +}; + +ALWAYS_INLINE UnlinkedInstructionStream::Reader::Reader(const UnlinkedInstructionStream& stream) + : m_stream(stream) + , m_index(0) +{ +} + +ALWAYS_INLINE unsigned char UnlinkedInstructionStream::Reader::read8() +{ + return m_stream.m_data.data()[m_index++]; +} + +ALWAYS_INLINE unsigned UnlinkedInstructionStream::Reader::read32() +{ + const unsigned char* data = &m_stream.m_data.data()[m_index]; + unsigned char type = data[0] >> 5; + + switch (type) { + case Positive5Bit: + m_index++; + return data[0]; + case Negative5Bit: + m_index++; + return 0xffffffe0 | data[0]; + case Positive13Bit: + m_index += 2; + return ((data[0] & 0x1F) << 8) | data[1]; + case Negative13Bit: + m_index += 2; + return 0xffffe000 | ((data[0] & 0x1F) << 8) | data[1]; + case ConstantRegister5Bit: + m_index++; + return 0x40000000 | (data[0] & 0x1F); + case ConstantRegister13Bit: + m_index += 2; + return 0x40000000 | ((data[0] & 0x1F) << 8) | data[1]; + default: + ASSERT(type == Full32Bit); + m_index += 5; + return data[1] | data[2] << 8 | data[3] << 16 | data[4] << 24; + } +} + +ALWAYS_INLINE const UnlinkedInstruction* UnlinkedInstructionStream::Reader::next() +{ + m_unpackedBuffer[0].u.opcode = static_cast<OpcodeID>(read8()); + unsigned opLength = opcodeLength(m_unpackedBuffer[0].u.opcode); + for (unsigned i = 1; i < opLength; ++i) + m_unpackedBuffer[i].u.index = read32(); + return m_unpackedBuffer; +} + +} // namespace JSC + +#endif // UnlinkedInstructionStream_h diff --git a/Source/JavaScriptCore/bytecode/ValueProfile.cpp b/Source/JavaScriptCore/bytecode/ValueProfile.cpp new file mode 100644 index 000000000..876ce30e8 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ValueProfile.cpp @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC.
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ValueProfile.h" + +namespace WTF { + +using namespace JSC; + +void printInternal(PrintStream& out, const ResultProfile& profile) +{ + const char* separator = ""; + + if (!profile.didObserveNonInt32()) { + out.print("Int32"); + separator = "|"; + } else { + if (profile.didObserveNegZeroDouble()) { + out.print(separator, "NegZeroDouble"); + separator = "|"; + } + if (profile.didObserveNonNegZeroDouble()) { + out.print(separator, "NonNegZeroDouble"); + separator = "|"; + } + if (profile.didObserveNonNumber()) { + out.print(separator, "NonNumber"); + separator = "|"; + } + if (profile.didObserveInt32Overflow()) { + out.print(separator, "Int32Overflow"); + separator = "|"; + } + if (profile.didObserveInt52Overflow()) { + out.print(separator, "Int52Overflow"); + separator = "|"; + } + } + if (profile.specialFastPathCount()) { + out.print(" special fast path: "); + out.print(profile.specialFastPathCount()); + } +} + +} // namespace WTF diff --git a/Source/JavaScriptCore/bytecode/ValueProfile.h b/Source/JavaScriptCore/bytecode/ValueProfile.h index 028c1f696..48b47da7c 100644 --- a/Source/JavaScriptCore/bytecode/ValueProfile.h +++ b/Source/JavaScriptCore/bytecode/ValueProfile.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission.
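For reference, the operand packing implemented by append32 and Reader::read32 in UnlinkedInstructionStream above can be exercised in isolation. Below is a standalone round-trip sketch using the same tag values and width checks; the helper names and test values are invented, and JSC's ASSERT is replaced by plain assert:

    // Standalone round-trip of the 1/2/5-byte operand packing used by
    // UnlinkedInstructionStream above. Tags match PackedValueType; everything
    // else is illustrative only.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum Tag { Positive5Bit = 0, Negative5Bit, Positive13Bit, Negative13Bit,
               ConstantRegister5Bit, ConstantRegister13Bit, Full32Bit };

    static void pack32(std::vector<uint8_t>& out, uint32_t value)
    {
        if (!(value & 0xffffffe0)) {                     // fits in 5 bits (tag bits are 0)
            out.push_back(value);
        } else if ((value & 0xffffffe0) == 0xffffffe0) { // small negative
            out.push_back((Negative5Bit << 5) | (value & 0x1f));
        } else if ((value & 0xffffffe0) == 0x40000000) { // constant register, 5-bit index
            out.push_back((ConstantRegister5Bit << 5) | (value & 0x1f));
        } else if (!(value & 0xffffe000)) {              // fits in 13 bits
            out.push_back((Positive13Bit << 5) | ((value >> 8) & 0x1f));
            out.push_back(value & 0xff);
        } else if ((value & 0xffffe000) == 0xffffe000) { // 13-bit negative
            out.push_back((Negative13Bit << 5) | ((value >> 8) & 0x1f));
            out.push_back(value & 0xff);
        } else if ((value & 0xffffe000) == 0x40000000) { // constant register, 13-bit index
            out.push_back((ConstantRegister13Bit << 5) | ((value >> 8) & 0x1f));
            out.push_back(value & 0xff);
        } else {                                         // full 32-bit fallback, little-endian
            out.push_back(Full32Bit << 5);
            for (int shift = 0; shift < 32; shift += 8)
                out.push_back((value >> shift) & 0xff);
        }
    }

    static uint32_t unpack32(const uint8_t* data, size_t& index)
    {
        uint8_t type = data[index] >> 5;
        switch (type) {
        case Positive5Bit:          return data[index++];
        case Negative5Bit:          return 0xffffffe0 | data[index++];
        case ConstantRegister5Bit:  return 0x40000000 | (data[index++] & 0x1f);
        case Positive13Bit:         index += 2; return ((data[index - 2] & 0x1f) << 8) | data[index - 1];
        case Negative13Bit:         index += 2; return 0xffffe000 | ((data[index - 2] & 0x1f) << 8) | data[index - 1];
        case ConstantRegister13Bit: index += 2; return 0x40000000 | ((data[index - 2] & 0x1f) << 8) | data[index - 1];
        default:                    index += 5; return data[index - 4] | (data[index - 3] << 8)
                                                     | (data[index - 2] << 16) | ((uint32_t)data[index - 1] << 24);
        }
    }

    int main()
    {
        const uint32_t samples[] = { 7, 0xffffffe3, 0x40000005, 300, 0xffffe123, 0x40000123, 0xdeadbeef };
        for (uint32_t v : samples) {
            std::vector<uint8_t> buf;
            pack32(buf, v);
            size_t i = 0;
            assert(unpack32(buf.data(), i) == v);
            assert(i == buf.size());
        }
    }

Common small operands and constant-register indices thus cost one byte each, and only genuinely large values pay the full five bytes.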
* @@ -29,10 +29,7 @@ #ifndef ValueProfile_h #define ValueProfile_h -#include <wtf/Platform.h> - -#if ENABLE(VALUE_PROFILER) - +#include "ConcurrentJITLock.h" #include "Heap.h" #include "JSArray.h" #include "SpeculatedType.h" @@ -54,7 +51,6 @@ struct ValueProfileBase { : m_bytecodeOffset(-1) , m_prediction(SpecNone) , m_numberOfSamplesInPrediction(0) - , m_singletonValueIsTop(false) { for (unsigned i = 0; i < totalNumberOfBuckets; ++i) m_buckets[i] = JSValue::encode(JSValue()); @@ -64,7 +60,6 @@ struct ValueProfileBase { : m_bytecodeOffset(bytecodeOffset) , m_prediction(SpecNone) , m_numberOfSamplesInPrediction(0) - , m_singletonValueIsTop(false) { for (unsigned i = 0; i < totalNumberOfBuckets; ++i) m_buckets[i] = JSValue::encode(JSValue()); @@ -111,28 +106,18 @@ struct ValueProfileBase { return false; } - CString briefDescription() + CString briefDescription(const ConcurrentJITLocker& locker) { - computeUpdatedPrediction(); + computeUpdatedPrediction(locker); StringPrintStream out; - - if (m_singletonValueIsTop) - out.print("predicting ", SpeculationDump(m_prediction)); - else if (m_singletonValue) - out.print("predicting ", m_singletonValue); - + out.print("predicting ", SpeculationDump(m_prediction)); return out.toCString(); } void dump(PrintStream& out) { out.print("samples = ", totalNumberOfSamples(), " prediction = ", SpeculationDump(m_prediction)); - out.printf(", value = "); - if (m_singletonValueIsTop) - out.printf("TOP"); - else - out.print(m_singletonValue); bool first = true; for (unsigned i = 0; i < totalNumberOfBuckets; ++i) { JSValue value = JSValue::decode(m_buckets[i]); @@ -147,8 +132,9 @@ struct ValueProfileBase { } } - // Updates the prediction and returns the new one. - SpeculatedType computeUpdatedPrediction(OperationInProgress operation = NoOperation) + // Updates the prediction and returns the new one. Never call this from any thread + // that isn't executing the code. 
+ SpeculatedType computeUpdatedPrediction(const ConcurrentJITLocker&) { for (unsigned i = 0; i < totalNumberOfBuckets; ++i) { JSValue value = JSValue::decode(m_buckets[i]); @@ -158,23 +144,9 @@ struct ValueProfileBase { m_numberOfSamplesInPrediction++; mergeSpeculation(m_prediction, speculationFromValue(value)); - if (!m_singletonValueIsTop && !!value) { - if (!m_singletonValue) - m_singletonValue = value; - else if (m_singletonValue != value) - m_singletonValueIsTop = true; - } - m_buckets[i] = JSValue::encode(JSValue()); } - if (operation == Collection - && !m_singletonValueIsTop - && !!m_singletonValue - && m_singletonValue.isCell() - && !Heap::isMarked(m_singletonValue.asCell())) - m_singletonValueIsTop = true; - return m_prediction; } @@ -183,9 +155,6 @@ struct ValueProfileBase { SpeculatedType m_prediction; unsigned m_numberOfSamplesInPrediction; - bool m_singletonValueIsTop; - JSValue m_singletonValue; - EncodedJSValue m_buckets[totalNumberOfBuckets]; }; @@ -237,9 +206,65 @@ inline int getRareCaseProfileBytecodeOffset(RareCaseProfile* rareCaseProfile) return rareCaseProfile->m_bytecodeOffset; } +struct ResultProfile { +private: + static const int numberOfFlagBits = 5; + +public: + ResultProfile(int bytecodeOffset) + : m_bytecodeOffsetAndFlags(bytecodeOffset << numberOfFlagBits) + { + ASSERT(((bytecodeOffset << numberOfFlagBits) >> numberOfFlagBits) == bytecodeOffset); + } + + enum ObservedResults { + NonNegZeroDouble = 1 << 0, + NegZeroDouble = 1 << 1, + NonNumber = 1 << 2, + Int32Overflow = 1 << 3, + Int52Overflow = 1 << 4, + }; + + int bytecodeOffset() const { return m_bytecodeOffsetAndFlags >> numberOfFlagBits; } + unsigned specialFastPathCount() const { return m_specialFastPathCount; } + + bool didObserveNonInt32() const { return hasBits(NonNegZeroDouble | NegZeroDouble | NonNumber); } + bool didObserveDouble() const { return hasBits(NonNegZeroDouble | NegZeroDouble); } + bool didObserveNonNegZeroDouble() const { return hasBits(NonNegZeroDouble); } + bool didObserveNegZeroDouble() const { return hasBits(NegZeroDouble); } + bool didObserveNonNumber() const { return hasBits(NonNumber); } + bool didObserveInt32Overflow() const { return hasBits(Int32Overflow); } + bool didObserveInt52Overflow() const { return hasBits(Int52Overflow); } + + void setObservedNonNegZeroDouble() { setBit(NonNegZeroDouble); } + void setObservedNegZeroDouble() { setBit(NegZeroDouble); } + void setObservedNonNumber() { setBit(NonNumber); } + void setObservedInt32Overflow() { setBit(Int32Overflow); } + void setObservedInt52Overflow() { setBit(Int52Overflow); } + + void* addressOfFlags() { return &m_bytecodeOffsetAndFlags; } + void* addressOfSpecialFastPathCount() { return &m_specialFastPathCount; } + +private: + bool hasBits(int mask) const { return m_bytecodeOffsetAndFlags & mask; } + void setBit(int mask) { m_bytecodeOffsetAndFlags |= mask; } + + int m_bytecodeOffsetAndFlags; + unsigned m_specialFastPathCount { 0 }; +}; + +inline int getResultProfileBytecodeOffset(ResultProfile* profile) +{ + return profile->bytecodeOffset(); +} + } // namespace JSC -#endif // ENABLE(VALUE_PROFILER) +namespace WTF { + +void printInternal(PrintStream&, const JSC::ResultProfile&); + +} // namespace WTF #endif // ValueProfile_h diff --git a/Source/JavaScriptCore/bytecode/ValueRecovery.cpp b/Source/JavaScriptCore/bytecode/ValueRecovery.cpp new file mode 100644 index 000000000..9c083b04a --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ValueRecovery.cpp @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2011, 2013, 2015 Apple Inc. 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ValueRecovery.h" + +#include "CodeBlock.h" +#include "JSCInlines.h" + +namespace JSC { + +JSValue ValueRecovery::recover(ExecState* exec) const +{ + switch (technique()) { + case DisplacedInJSStack: + return exec->r(virtualRegister().offset()).jsValue(); + case Int32DisplacedInJSStack: + return jsNumber(exec->r(virtualRegister().offset()).unboxedInt32()); + case Int52DisplacedInJSStack: + return jsNumber(exec->r(virtualRegister().offset()).unboxedInt52()); + case StrictInt52DisplacedInJSStack: + return jsNumber(exec->r(virtualRegister().offset()).unboxedStrictInt52()); + case DoubleDisplacedInJSStack: + return jsNumber(exec->r(virtualRegister().offset()).unboxedDouble()); + case CellDisplacedInJSStack: + return exec->r(virtualRegister().offset()).unboxedCell(); + case BooleanDisplacedInJSStack: +#if USE(JSVALUE64) + return exec->r(virtualRegister().offset()).jsValue(); +#else + return jsBoolean(exec->r(virtualRegister().offset()).unboxedBoolean()); +#endif + case Constant: + return constant(); + default: + RELEASE_ASSERT_NOT_REACHED(); + return JSValue(); + } +} + +#if ENABLE(JIT) + +void ValueRecovery::dumpInContext(PrintStream& out, DumpContext* context) const +{ + switch (technique()) { + case InGPR: + out.print(gpr()); + return; + case UnboxedInt32InGPR: + out.print("int32(", gpr(), ")"); + return; + case UnboxedInt52InGPR: + out.print("int52(", gpr(), ")"); + return; + case UnboxedStrictInt52InGPR: + out.print("strictInt52(", gpr(), ")"); + return; + case UnboxedBooleanInGPR: + out.print("bool(", gpr(), ")"); + return; + case UnboxedCellInGPR: + out.print("cell(", gpr(), ")"); + return; + case InFPR: + out.print(fpr()); + return; + case UnboxedDoubleInFPR: + out.print("double(", fpr(), ")"); + return; +#if USE(JSVALUE32_64) + case InPair: + out.print("pair(", tagGPR(), ", ", payloadGPR(), ")"); + return; +#endif + case DisplacedInJSStack: + out.print("*", virtualRegister()); + return; + case Int32DisplacedInJSStack: + out.print("*int32(", virtualRegister(), ")"); + return; + case Int52DisplacedInJSStack: + out.print("*int52(", virtualRegister(), ")"); + return; + case StrictInt52DisplacedInJSStack: + out.print("*strictInt52(", virtualRegister(), ")"); + return; + case 
DoubleDisplacedInJSStack: + out.print("*double(", virtualRegister(), ")"); + return; + case CellDisplacedInJSStack: + out.print("*cell(", virtualRegister(), ")"); + return; + case BooleanDisplacedInJSStack: + out.print("*bool(", virtualRegister(), ")"); + return; + case DirectArgumentsThatWereNotCreated: + out.print("DirectArguments(", nodeID(), ")"); + return; + case ClonedArgumentsThatWereNotCreated: + out.print("ClonedArguments(", nodeID(), ")"); + return; + case Constant: + out.print("[", inContext(constant(), context), "]"); + return; + case DontKnow: + out.printf("!"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +void ValueRecovery::dump(PrintStream& out) const +{ + dumpInContext(out, 0); +} +#endif // ENABLE(JIT) + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/ValueRecovery.h b/Source/JavaScriptCore/bytecode/ValueRecovery.h index 77d5a1030..5f6ee9c72 100644 --- a/Source/JavaScriptCore/bytecode/ValueRecovery.h +++ b/Source/JavaScriptCore/bytecode/ValueRecovery.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,43 +26,49 @@ #ifndef ValueRecovery_h #define ValueRecovery_h +#include "DFGMinifiedID.h" #include "DataFormat.h" +#if ENABLE(JIT) +#include "GPRInfo.h" +#include "FPRInfo.h" +#include "Reg.h" +#endif #include "JSCJSValue.h" #include "MacroAssembler.h" #include "VirtualRegister.h" -#include <stdio.h> -#include <wtf/Platform.h> namespace JSC { +struct DumpContext; +struct InlineCallFrame; + // Describes how to recover a given bytecode virtual register at a given // code point. enum ValueRecoveryTechnique { - // It's already in the stack at the right location. - AlreadyInJSStack, - // It's already in the stack but unboxed. - AlreadyInJSStackAsUnboxedInt32, - AlreadyInJSStackAsUnboxedCell, - AlreadyInJSStackAsUnboxedBoolean, - AlreadyInJSStackAsUnboxedDouble, // It's in a register. InGPR, UnboxedInt32InGPR, + UnboxedInt52InGPR, + UnboxedStrictInt52InGPR, UnboxedBooleanInGPR, + UnboxedCellInGPR, #if USE(JSVALUE32_64) InPair, #endif InFPR, - UInt32InGPR, + UnboxedDoubleInFPR, // It's in the stack, but at a different location. DisplacedInJSStack, // It's in the stack, at a different location, and it's unboxed. Int32DisplacedInJSStack, + Int52DisplacedInJSStack, + StrictInt52DisplacedInJSStack, DoubleDisplacedInJSStack, CellDisplacedInJSStack, BooleanDisplacedInJSStack, - // It's an Arguments object. - ArgumentsThatWereNotCreated, + // It's an Arguments object. This arises because of the simplified arguments simplification done by the DFG. + DirectArgumentsThatWereNotCreated, + ClonedArgumentsThatWereNotCreated, // It's a constant. Constant, // Don't know how to recover it. 
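A much-reduced standalone model of the ValueRecovery idea shown above (hypothetical; the real class distinguishes many more formats and register kinds) is a technique-tagged union that records where a value lives plus a dump that switches on the tag:

    // Toy tagged union in the spirit of ValueRecovery: where does a value
    // live, and how do we print that location? Names are illustrative only.
    #include <cstdint>
    #include <iostream>

    struct Recovery {
        enum Technique { Constant, InRegister, DisplacedInStack, DontKnow } technique = DontKnow;
        union {
            int64_t constant;
            int registerIndex;
            int stackOffset;
        } source;

        static Recovery constantValue(int64_t value)
        {
            Recovery r; r.technique = Constant; r.source.constant = value; return r;
        }
        static Recovery inRegister(int index)
        {
            Recovery r; r.technique = InRegister; r.source.registerIndex = index; return r;
        }
        static Recovery displaced(int offset)
        {
            Recovery r; r.technique = DisplacedInStack; r.source.stackOffset = offset; return r;
        }

        void dump(std::ostream& out) const
        {
            switch (technique) {
            case Constant:         out << "[" << source.constant << "]"; return;
            case InRegister:       out << "%r" << source.registerIndex; return;
            case DisplacedInStack: out << "*" << source.stackOffset; return;
            case DontKnow:         out << "!"; return;
            }
        }
    };

    int main()
    {
        Recovery::constantValue(42).dump(std::cout); std::cout << "\n"; // [42]
        Recovery::displaced(-3).dump(std::cout);     std::cout << "\n"; // *-3
    }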
@@ -78,67 +84,43 @@ public: bool isSet() const { return m_technique != DontKnow; } bool operator!() const { return !isSet(); } - - static ValueRecovery alreadyInJSStack() - { - ValueRecovery result; - result.m_technique = AlreadyInJSStack; - return result; - } - - static ValueRecovery alreadyInJSStackAsUnboxedInt32() - { - ValueRecovery result; - result.m_technique = AlreadyInJSStackAsUnboxedInt32; - return result; - } - - static ValueRecovery alreadyInJSStackAsUnboxedCell() - { - ValueRecovery result; - result.m_technique = AlreadyInJSStackAsUnboxedCell; - return result; - } - - static ValueRecovery alreadyInJSStackAsUnboxedBoolean() - { - ValueRecovery result; - result.m_technique = AlreadyInJSStackAsUnboxedBoolean; - return result; - } - - static ValueRecovery alreadyInJSStackAsUnboxedDouble() + +#if ENABLE(JIT) + static ValueRecovery inRegister(Reg reg, DataFormat dataFormat) { - ValueRecovery result; - result.m_technique = AlreadyInJSStackAsUnboxedDouble; - return result; + if (reg.isGPR()) + return inGPR(reg.gpr(), dataFormat); + + ASSERT(reg.isFPR()); + return inFPR(reg.fpr(), dataFormat); } +#endif + + explicit operator bool() const { return isSet(); } static ValueRecovery inGPR(MacroAssembler::RegisterID gpr, DataFormat dataFormat) { ASSERT(dataFormat != DataFormatNone); #if USE(JSVALUE32_64) - ASSERT(dataFormat == DataFormatInteger || dataFormat == DataFormatCell || dataFormat == DataFormatBoolean); + ASSERT(dataFormat == DataFormatInt32 || dataFormat == DataFormatCell || dataFormat == DataFormatBoolean); #endif ValueRecovery result; - if (dataFormat == DataFormatInteger) + if (dataFormat == DataFormatInt32) result.m_technique = UnboxedInt32InGPR; + else if (dataFormat == DataFormatInt52) + result.m_technique = UnboxedInt52InGPR; + else if (dataFormat == DataFormatStrictInt52) + result.m_technique = UnboxedStrictInt52InGPR; else if (dataFormat == DataFormatBoolean) result.m_technique = UnboxedBooleanInGPR; + else if (dataFormat == DataFormatCell) + result.m_technique = UnboxedCellInGPR; else result.m_technique = InGPR; result.m_source.gpr = gpr; return result; } - static ValueRecovery uint32InGPR(MacroAssembler::RegisterID gpr) - { - ValueRecovery result; - result.m_technique = UInt32InGPR; - result.m_source.gpr = gpr; - return result; - } - #if USE(JSVALUE32_64) static ValueRecovery inPair(MacroAssembler::RegisterID tagGPR, MacroAssembler::RegisterID payloadGPR) { @@ -150,10 +132,14 @@ public: } #endif - static ValueRecovery inFPR(MacroAssembler::FPRegisterID fpr) + static ValueRecovery inFPR(MacroAssembler::FPRegisterID fpr, DataFormat dataFormat) { + ASSERT(dataFormat == DataFormatDouble || dataFormat & DataFormatJS); ValueRecovery result; - result.m_technique = InFPR; + if (dataFormat == DataFormatDouble) + result.m_technique = UnboxedDoubleInFPR; + else + result.m_technique = InFPR; result.m_source.fpr = fpr; return result; } @@ -162,10 +148,18 @@ public: { ValueRecovery result; switch (dataFormat) { - case DataFormatInteger: + case DataFormatInt32: result.m_technique = Int32DisplacedInJSStack; break; + case DataFormatInt52: + result.m_technique = Int52DisplacedInJSStack; + break; + + case DataFormatStrictInt52: + result.m_technique = StrictInt52DisplacedInJSStack; + break; + case DataFormatDouble: result.m_technique = DoubleDisplacedInJSStack; break; @@ -183,7 +177,7 @@ public: result.m_technique = DisplacedInJSStack; break; } - result.m_source.virtualReg = virtualReg; + result.m_source.virtualReg = virtualReg.offset(); return result; } @@ -195,50 +189,110 @@ public: return 
result; } - static ValueRecovery argumentsThatWereNotCreated() + static ValueRecovery directArgumentsThatWereNotCreated(DFG::MinifiedID id) { ValueRecovery result; - result.m_technique = ArgumentsThatWereNotCreated; + result.m_technique = DirectArgumentsThatWereNotCreated; + result.m_source.nodeID = id.bits(); return result; } + static ValueRecovery outOfBandArgumentsThatWereNotCreated(DFG::MinifiedID id) + { + ValueRecovery result; + result.m_technique = ClonedArgumentsThatWereNotCreated; + result.m_source.nodeID = id.bits(); + return result; + } + ValueRecoveryTechnique technique() const { return m_technique; } bool isConstant() const { return m_technique == Constant; } - - bool isInRegisters() const + + bool isInGPR() const { switch (m_technique) { case InGPR: case UnboxedInt32InGPR: case UnboxedBooleanInGPR: -#if USE(JSVALUE32_64) - case InPair: -#endif + case UnboxedCellInGPR: + case UnboxedInt52InGPR: + case UnboxedStrictInt52InGPR: + return true; + default: + return false; + } + } + + bool isInFPR() const + { + switch (m_technique) { case InFPR: + case UnboxedDoubleInFPR: return true; default: return false; } } - - bool isAlreadyInJSStack() const + + bool isInRegisters() const + { + return isInJSValueRegs() || isInGPR() || isInFPR(); + } + + bool isInJSStack() const { - switch (technique()) { - case AlreadyInJSStack: - case AlreadyInJSStackAsUnboxedInt32: - case AlreadyInJSStackAsUnboxedCell: - case AlreadyInJSStackAsUnboxedBoolean: - case AlreadyInJSStackAsUnboxedDouble: + switch (m_technique) { + case DisplacedInJSStack: + case Int32DisplacedInJSStack: + case Int52DisplacedInJSStack: + case StrictInt52DisplacedInJSStack: + case DoubleDisplacedInJSStack: + case CellDisplacedInJSStack: + case BooleanDisplacedInJSStack: return true; default: return false; } } + + DataFormat dataFormat() const + { + switch (m_technique) { + case InGPR: + case InFPR: + case DisplacedInJSStack: + case Constant: +#if USE(JSVALUE32_64) + case InPair: +#endif + return DataFormatJS; + case UnboxedInt32InGPR: + case Int32DisplacedInJSStack: + return DataFormatInt32; + case UnboxedInt52InGPR: + case Int52DisplacedInJSStack: + return DataFormatInt52; + case UnboxedStrictInt52InGPR: + case StrictInt52DisplacedInJSStack: + return DataFormatStrictInt52; + case UnboxedBooleanInGPR: + case BooleanDisplacedInJSStack: + return DataFormatBoolean; + case UnboxedCellInGPR: + case CellDisplacedInJSStack: + return DataFormatCell; + case UnboxedDoubleInFPR: + case DoubleDisplacedInJSStack: + return DataFormatDouble; + default: + return DataFormatNone; + } + } MacroAssembler::RegisterID gpr() const { - ASSERT(m_technique == InGPR || m_technique == UnboxedInt32InGPR || m_technique == UnboxedBooleanInGPR || m_technique == UInt32InGPR); + ASSERT(isInGPR()); return m_source.gpr; } @@ -254,94 +308,78 @@ public: ASSERT(m_technique == InPair); return m_source.pair.payloadGPR; } -#endif + + bool isInJSValueRegs() const + { + return m_technique == InPair; + } + +#if ENABLE(JIT) + JSValueRegs jsValueRegs() const + { + ASSERT(isInJSValueRegs()); + return JSValueRegs(tagGPR(), payloadGPR()); + } +#endif // ENABLE(JIT) +#else + bool isInJSValueRegs() const + { + return isInGPR(); + } +#endif // USE(JSVALUE32_64) MacroAssembler::FPRegisterID fpr() const { - ASSERT(m_technique == InFPR); + ASSERT(isInFPR()); return m_source.fpr; } VirtualRegister virtualRegister() const { - ASSERT(m_technique == DisplacedInJSStack || m_technique == Int32DisplacedInJSStack || m_technique == DoubleDisplacedInJSStack || m_technique == CellDisplacedInJSStack 
|| m_technique == BooleanDisplacedInJSStack); - return m_source.virtualReg; - } - - JSValue constant() const - { - ASSERT(m_technique == Constant); - return JSValue::decode(m_source.constant); + ASSERT(isInJSStack()); + return VirtualRegister(m_source.virtualReg); } - void dump(PrintStream& out) const + ValueRecovery withLocalsOffset(int offset) const { - switch (technique()) { - case AlreadyInJSStack: - out.printf("-"); - break; - case AlreadyInJSStackAsUnboxedInt32: - out.printf("(int32)"); - break; - case AlreadyInJSStackAsUnboxedCell: - out.printf("(cell)"); - break; - case AlreadyInJSStackAsUnboxedBoolean: - out.printf("(bool)"); - break; - case AlreadyInJSStackAsUnboxedDouble: - out.printf("(double)"); - break; - case InGPR: - out.printf("%%r%d", gpr()); - break; - case UnboxedInt32InGPR: - out.printf("int32(%%r%d)", gpr()); - break; - case UnboxedBooleanInGPR: - out.printf("bool(%%r%d)", gpr()); - break; - case UInt32InGPR: - out.printf("uint32(%%r%d)", gpr()); - break; - case InFPR: - out.printf("%%fr%d", fpr()); - break; -#if USE(JSVALUE32_64) - case InPair: - out.printf("pair(%%r%d, %%r%d)", tagGPR(), payloadGPR()); - break; -#endif + switch (m_technique) { case DisplacedInJSStack: - out.printf("*%d", virtualRegister()); - break; case Int32DisplacedInJSStack: - out.printf("*int32(%d)", virtualRegister()); - break; case DoubleDisplacedInJSStack: - out.printf("*double(%d)", virtualRegister()); - break; case CellDisplacedInJSStack: - out.printf("*cell(%d)", virtualRegister()); - break; case BooleanDisplacedInJSStack: - out.printf("*bool(%d)", virtualRegister()); - break; - case ArgumentsThatWereNotCreated: - out.printf("arguments"); - break; - case Constant: - out.print("[", constant(), "]"); - break; - case DontKnow: - out.printf("!"); - break; + case Int52DisplacedInJSStack: + case StrictInt52DisplacedInJSStack: { + ValueRecovery result; + result.m_technique = m_technique; + result.m_source.virtualReg = m_source.virtualReg + offset; + return result; + } + default: - out.printf("?%d", technique()); - break; + return *this; } } + JSValue constant() const + { + ASSERT(isConstant()); + return JSValue::decode(m_source.constant); + } + + DFG::MinifiedID nodeID() const + { + ASSERT(m_technique == DirectArgumentsThatWereNotCreated || m_technique == ClonedArgumentsThatWereNotCreated); + return DFG::MinifiedID::fromBits(m_source.nodeID); + } + + JSValue recover(ExecState*) const; + +#if ENABLE(JIT) + void dumpInContext(PrintStream& out, DumpContext* context) const; + void dump(PrintStream& out) const; +#endif + private: ValueRecoveryTechnique m_technique; union { @@ -353,8 +391,9 @@ private: MacroAssembler::RegisterID payloadGPR; } pair; #endif - VirtualRegister virtualReg; + int virtualReg; EncodedJSValue constant; + uintptr_t nodeID; } m_source; }; diff --git a/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp new file mode 100644 index 000000000..b483ab21c --- /dev/null +++ b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "VariableWriteFireDetail.h" + +#include "JSCInlines.h" + +namespace JSC { + +void VariableWriteFireDetail::dump(PrintStream& out) const +{ + out.print("Write to ", m_name, " in ", JSValue(m_object)); +} + +void VariableWriteFireDetail::touch(WatchpointSet* set, JSObject* object, const PropertyName& name) +{ + set->touch(VariableWriteFireDetail(object, name)); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/GlobalResolveInfo.h b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h index 99292b7f3..664f69cbb 100644 --- a/Source/JavaScriptCore/bytecode/GlobalResolveInfo.h +++ b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,33 +23,33 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef GlobalResolveInfo_h -#define GlobalResolveInfo_h +#ifndef VariableWriteFireDetail_h +#define VariableWriteFireDetail_h -#include "WriteBarrier.h" +#include "Watchpoint.h" namespace JSC { -struct GlobalResolveInfo { - GlobalResolveInfo() { } - - GlobalResolveInfo(unsigned bytecodeOffset) - : offset(0) - , bytecodeOffset(bytecodeOffset) +class JSObject; +class PropertyName; + +class VariableWriteFireDetail : public FireDetail { +public: + VariableWriteFireDetail(JSObject* object, const PropertyName& name) + : m_object(object) + , m_name(name) { } - WriteBarrier<Structure> structure; - PropertyOffset offset; - unsigned bytecodeOffset; // Only valid in old JIT code. This means nothing in the DFG. -}; + virtual void dump(PrintStream&) const override; + + JS_EXPORT_PRIVATE static void touch(WatchpointSet*, JSObject*, const PropertyName&); -inline unsigned getGlobalResolveInfoBytecodeOffset(GlobalResolveInfo* globalResolveInfo) -{ - return globalResolveInfo->bytecodeOffset; -} +private: + JSObject* m_object; + const PropertyName& m_name; +}; } // namespace JSC -#endif // GlobalResolveInfo_h - +#endif // VariableWriteFireDetail_h diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.cpp b/Source/JavaScriptCore/bytecode/VirtualRegister.cpp new file mode 100644 index 000000000..57cdb62c9 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/VirtualRegister.cpp @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2011, 2015 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "VirtualRegister.h" + +namespace JSC { + +void VirtualRegister::dump(PrintStream& out) const +{ + if (!isValid()) { + out.print("<invalid>"); + return; + } + + if (isHeader()) { + out.print("head", m_virtualRegister); + return; + } + + if (isConstant()) { + out.print("const", toConstantIndex()); + return; + } + + if (isArgument()) { + if (!toArgument()) + out.print("this"); + else + out.print("arg", toArgument()); + return; + } + + if (isLocal()) { + out.print("loc", toLocal()); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.h b/Source/JavaScriptCore/bytecode/VirtualRegister.h index a6dc8d565..613088ef6 100644 --- a/Source/JavaScriptCore/bytecode/VirtualRegister.h +++ b/Source/JavaScriptCore/bytecode/VirtualRegister.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,25 +26,105 @@ #ifndef VirtualRegister_h #define VirtualRegister_h -#include <wtf/Platform.h> +#include "CallFrame.h" + #include <wtf/PrintStream.h> namespace JSC { -// Type for a virtual register number (spill location). -// Using an enum to make this type-checked at compile time, to avert programmer errors. 
-enum VirtualRegister { InvalidVirtualRegister = -1 }; -COMPILE_ASSERT(sizeof(VirtualRegister) == sizeof(int), VirtualRegister_is_32bit); +inline bool operandIsLocal(int operand) +{ + return operand < 0; +} -} // namespace JSC +inline bool operandIsArgument(int operand) +{ + return operand >= 0; +} + + +class VirtualRegister { +public: + friend VirtualRegister virtualRegisterForLocal(int); + friend VirtualRegister virtualRegisterForArgument(int, int); + + VirtualRegister() + : m_virtualRegister(s_invalidVirtualRegister) + { } + + explicit VirtualRegister(int virtualRegister) + : m_virtualRegister(virtualRegister) + { } -namespace WTF { + bool isValid() const { return (m_virtualRegister != s_invalidVirtualRegister); } + bool isLocal() const { return operandIsLocal(m_virtualRegister); } + bool isArgument() const { return operandIsArgument(m_virtualRegister); } + bool isHeader() const { return m_virtualRegister >= 0 && m_virtualRegister < JSStack::ThisArgument; } + bool isConstant() const { return m_virtualRegister >= s_firstConstantRegisterIndex; } + int toLocal() const { ASSERT(isLocal()); return operandToLocal(m_virtualRegister); } + int toArgument() const { ASSERT(isArgument()); return operandToArgument(m_virtualRegister); } + int toConstantIndex() const { ASSERT(isConstant()); return m_virtualRegister - s_firstConstantRegisterIndex; } + int offset() const { return m_virtualRegister; } + int offsetInBytes() const { return m_virtualRegister * sizeof(Register); } -inline void printInternal(PrintStream& out, JSC::VirtualRegister value) + bool operator==(VirtualRegister other) const { return m_virtualRegister == other.m_virtualRegister; } + bool operator!=(VirtualRegister other) const { return m_virtualRegister != other.m_virtualRegister; } + bool operator<(VirtualRegister other) const { return m_virtualRegister < other.m_virtualRegister; } + bool operator>(VirtualRegister other) const { return m_virtualRegister > other.m_virtualRegister; } + bool operator<=(VirtualRegister other) const { return m_virtualRegister <= other.m_virtualRegister; } + bool operator>=(VirtualRegister other) const { return m_virtualRegister >= other.m_virtualRegister; } + + VirtualRegister operator+(int value) const + { + return VirtualRegister(offset() + value); + } + VirtualRegister operator-(int value) const + { + return VirtualRegister(offset() - value); + } + VirtualRegister operator+(VirtualRegister value) const + { + return VirtualRegister(offset() + value.offset()); + } + VirtualRegister operator-(VirtualRegister value) const + { + return VirtualRegister(offset() - value.offset()); + } + VirtualRegister& operator+=(int value) + { + return *this = *this + value; + } + VirtualRegister& operator-=(int value) + { + return *this = *this - value; + } + + void dump(PrintStream& out) const; + +private: + static const int s_invalidVirtualRegister = 0x3fffffff; + static const int s_firstConstantRegisterIndex = 0x40000000; + + static int localToOperand(int local) { return -1 - local; } + static int operandToLocal(int operand) { return -1 - operand; } + static int operandToArgument(int operand) { return operand - CallFrame::thisArgumentOffset(); } + static int argumentToOperand(int argument) { return argument + CallFrame::thisArgumentOffset(); } + + int m_virtualRegister; +}; + +COMPILE_ASSERT(sizeof(VirtualRegister) == sizeof(int), VirtualRegister_is_32bit); + +inline VirtualRegister virtualRegisterForLocal(int local) { - out.print(static_cast<int>(value)); + return 
VirtualRegister(VirtualRegister::localToOperand(local)); } -} // namespace WTF +inline VirtualRegister virtualRegisterForArgument(int argument, int offset = 0) +{ + return VirtualRegister(VirtualRegister::argumentToOperand(argument) + offset); +} + +} // namespace JSC #endif // VirtualRegister_h diff --git a/Source/JavaScriptCore/bytecode/Watchpoint.cpp b/Source/JavaScriptCore/bytecode/Watchpoint.cpp index 75dfe8a76..761c06744 100644 --- a/Source/JavaScriptCore/bytecode/Watchpoint.cpp +++ b/Source/JavaScriptCore/bytecode/Watchpoint.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,52 +26,103 @@ #include "config.h" #include "Watchpoint.h" -#include "LinkBuffer.h" +#include <wtf/CompilationThread.h> #include <wtf/PassRefPtr.h> namespace JSC { +void StringFireDetail::dump(PrintStream& out) const +{ + out.print(m_string); +} + Watchpoint::~Watchpoint() { - if (isOnList()) + if (isOnList()) { + // This will happen if we get destroyed before the set fires. That's totally a valid + // possibility. For example: + // + // CodeBlock has a Watchpoint on transition from structure S1. The transition never + // happens, but the CodeBlock gets destroyed because of GC. remove(); + } +} + +void Watchpoint::fire(const FireDetail& detail) +{ + RELEASE_ASSERT(!isOnList()); + fireInternal(detail); } -WatchpointSet::WatchpointSet(InitialWatchpointSetMode mode) - : m_isWatched(mode == InitializedWatching) - , m_isInvalidated(false) +WatchpointSet::WatchpointSet(WatchpointState state) + : m_state(state) + , m_setIsNotEmpty(false) { } WatchpointSet::~WatchpointSet() { - // Fire all watchpoints. This is necessary because it is possible, say with - // structure watchpoints, for the watchpoint set owner to die while the - // watchpoint owners are still live. - fireAllWatchpoints(); + // Remove all watchpoints, so that they don't try to remove themselves. Note that we + // don't fire watchpoints on deletion. We assume that any code that is interested in + // watchpoints already also separately has a mechanism to make sure that the code is + // either keeping the watchpoint set's owner alive, or does some weak reference thing. + while (!m_set.isEmpty()) + m_set.begin()->remove(); } void WatchpointSet::add(Watchpoint* watchpoint) { + ASSERT(!isCompilationThread()); + ASSERT(state() != IsInvalidated); if (!watchpoint) return; m_set.push(watchpoint); - m_isWatched = true; + m_setIsNotEmpty = true; + m_state = IsWatched; } -void WatchpointSet::notifyWriteSlow() +void WatchpointSet::fireAllSlow(const FireDetail& detail) { - ASSERT(m_isWatched); + ASSERT(state() == IsWatched); - fireAllWatchpoints(); - m_isWatched = false; - m_isInvalidated = true; + WTF::storeStoreFence(); + m_state = IsInvalidated; // Do this first. Needed for adaptive watchpoints. + fireAllWatchpoints(detail); + WTF::storeStoreFence(); } -void WatchpointSet::fireAllWatchpoints() +void WatchpointSet::fireAllSlow(const char* reason) { - while (!m_set.isEmpty()) - m_set.begin()->fire(); + fireAllSlow(StringFireDetail(reason)); +} + +void WatchpointSet::fireAllWatchpoints(const FireDetail& detail) +{ + // In case there are any adaptive watchpoints, we need to make sure that they see that this + // watchpoint has been already invalidated. 
+ RELEASE_ASSERT(hasBeenInvalidated()); + + while (!m_set.isEmpty()) { + Watchpoint* watchpoint = m_set.begin(); + ASSERT(watchpoint->isOnList()); + + // Removing the Watchpoint before firing it makes it possible to implement watchpoints + // that add themselves to a different set when they fire. This kind of "adaptive" + // watchpoint can be used to track some semantic property that is more fine-grained than + // what the set can convey. For example, we might care if a singleton object ever has a + // property called "foo". We can watch for this by checking if its Structure has "foo" and + // then watching its transitions. But then the watchpoint fires if any property is added. + // So, before the watchpoint decides to invalidate any code, it can check if it is + // possible to add itself to the transition watchpoint set of the singleton object's new + // Structure. + watchpoint->remove(); + ASSERT(m_set.begin() != watchpoint); + ASSERT(!watchpoint->isOnList()); + + watchpoint->fire(detail); + // After we fire the watchpoint, the watchpoint pointer may be a dangling pointer. That's + // fine, because we have no use for the pointer anymore. + } } void InlineWatchpointSet::add(Watchpoint* watchpoint) @@ -79,14 +130,17 @@ void InlineWatchpointSet::add(Watchpoint* watchpoint) { inflate()->add(watchpoint); } +void InlineWatchpointSet::fireAll(const char* reason) +{ + fireAll(StringFireDetail(reason)); +} + WatchpointSet* InlineWatchpointSet::inflateSlow() { ASSERT(isThin()); - WatchpointSet* fat = adoptRef(new WatchpointSet(InitializedBlind)).leakRef(); - if (m_data & IsInvalidatedFlag) - fat->m_isInvalidated = true; - if (m_data & IsWatchedFlag) - fat->m_isWatched = true; + ASSERT(!isCompilationThread()); + WatchpointSet* fat = adoptRef(new WatchpointSet(decodeState(m_data))).leakRef(); + WTF::storeStoreFence(); m_data = bitwise_cast<uintptr_t>(fat); return fat; } diff --git a/Source/JavaScriptCore/bytecode/Watchpoint.h b/Source/JavaScriptCore/bytecode/Watchpoint.h index e6fba93a9..869e908c8 100644 --- a/Source/JavaScriptCore/bytecode/Watchpoint.h +++ b/Source/JavaScriptCore/bytecode/Watchpoint.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,12 +26,48 @@ #ifndef Watchpoint_h #define Watchpoint_h -#include <wtf/RefCounted.h> +#include <wtf/Atomics.h> +#include <wtf/FastMalloc.h> +#include <wtf/Noncopyable.h> +#include <wtf/PrintStream.h> #include <wtf/SentinelLinkedList.h> +#include <wtf/ThreadSafeRefCounted.h> namespace JSC { +class FireDetail { + void* operator new(size_t) = delete; + +public: + FireDetail() + { + } + + virtual ~FireDetail() + { + } + + virtual void dump(PrintStream&) const = 0; +}; + +class StringFireDetail : public FireDetail { +public: + StringFireDetail(const char* string) + : m_string(string) + { + } + + virtual void dump(PrintStream& out) const override; + +private: + const char* m_string; +}; + +class WatchpointSet; + class Watchpoint : public BasicRawSentinelNode<Watchpoint> { + WTF_MAKE_NONCOPYABLE(Watchpoint); + WTF_MAKE_FAST_ALLOCATED; public: Watchpoint() { @@ -39,23 +75,59 @@ public: virtual ~Watchpoint(); - void fire() { fireInternal(); } - protected: - virtual void fireInternal() = 0; + virtual void fireInternal(const FireDetail&) = 0; + +private: + friend class WatchpointSet; + void fire(const FireDetail&); }; -enum InitialWatchpointSetMode { InitializedWatching, InitializedBlind }; +enum WatchpointState { + ClearWatchpoint, + IsWatched, + IsInvalidated +}; class InlineWatchpointSet; -class WatchpointSet : public RefCounted<WatchpointSet> { +class WatchpointSet : public ThreadSafeRefCounted<WatchpointSet> { + friend class LLIntOffsetsExtractor; public: - WatchpointSet(InitialWatchpointSetMode); - ~WatchpointSet(); + JS_EXPORT_PRIVATE WatchpointSet(WatchpointState); + JS_EXPORT_PRIVATE ~WatchpointSet(); // Note that this will not fire any of the watchpoints; if you need to know when a WatchpointSet dies then you need a separate mechanism for this. - bool isStillValid() const { return !m_isInvalidated; } - bool hasBeenInvalidated() const { return m_isInvalidated; } + // Fast way of getting the state, which only works from the main thread. + WatchpointState stateOnJSThread() const + { + return static_cast<WatchpointState>(m_state); + } + + // It is safe to call this from another thread. It may return an old + // state. Guarantees that if you *first* read the state() of the thing being + // watched and it returned IsWatched and *second* you actually read its + // value then it's safe to assume that if the state being watched changes + // then also the watchpoint state() will change to IsInvalidated. + WatchpointState state() const + { + WTF::loadLoadFence(); + WatchpointState result = static_cast<WatchpointState>(m_state); + WTF::loadLoadFence(); + return result; + } + + // It is safe to call this from another thread. It may return true + // even if the set actually had been invalidated, but that ought to happen + // only in the case of races, and should be rare. Guarantees that if you + // call this after observing something that must imply that the set is + // invalidated, then you will see this return false. This is ensured by + // issuing a load-load fence prior to querying the state. + bool isStillValid() const + { + return state() != IsInvalidated; + } + // Like isStillValid(), may be called from another thread. + bool hasBeenInvalidated() const { return !isStillValid(); } // As a convenience, this will ignore 0.
That's because code paths in the DFG // that create speculation watchpoints may choose to bail out if speculation @@ -67,27 +139,76 @@ public: // watchpoint would have fired. That's a pretty good indication that you // probably don't want to set watchpoints, since we typically don't want to // set watchpoints that we believe will actually be fired. - void startWatching() { m_isWatched = true; } + void startWatching() + { + ASSERT(m_state != IsInvalidated); + if (m_state == IsWatched) + return; + WTF::storeStoreFence(); + m_state = IsWatched; + WTF::storeStoreFence(); + } - void notifyWrite() + void fireAll(const FireDetail& detail) { - if (!m_isWatched) + if (LIKELY(m_state != IsWatched)) return; - notifyWriteSlow(); + fireAllSlow(detail); + } + + void fireAll(const char* reason) + { + if (LIKELY(m_state != IsWatched)) + return; + fireAllSlow(reason); + } + + void touch(const FireDetail& detail) + { + if (state() == ClearWatchpoint) + startWatching(); + else + fireAll(detail); + } + + void touch(const char* reason) + { + touch(StringFireDetail(reason)); + } + + void invalidate(const FireDetail& detail) + { + if (state() == IsWatched) + fireAll(detail); + m_state = IsInvalidated; + } + + void invalidate(const char* reason) + { + invalidate(StringFireDetail(reason)); + } + + bool isBeingWatched() const + { + return m_setIsNotEmpty; } - bool* addressOfIsWatched() { return &m_isWatched; } + int8_t* addressOfState() { return &m_state; } + static ptrdiff_t offsetOfState() { return OBJECT_OFFSETOF(WatchpointSet, m_state); } + int8_t* addressOfSetIsNotEmpty() { return &m_setIsNotEmpty; } - JS_EXPORT_PRIVATE void notifyWriteSlow(); // Call only if you've checked isWatched. + JS_EXPORT_PRIVATE void fireAllSlow(const FireDetail&); // Call only if you've checked isWatched. + JS_EXPORT_PRIVATE void fireAllSlow(const char* reason); // Ditto. private: - void fireAllWatchpoints(); + void fireAllWatchpoints(const FireDetail&); friend class InlineWatchpointSet; - - SentinelLinkedList<Watchpoint, BasicRawSentinelNode<Watchpoint> > m_set; - bool m_isWatched; - bool m_isInvalidated; + + int8_t m_state; + int8_t m_setIsNotEmpty; + + SentinelLinkedList<Watchpoint, BasicRawSentinelNode<Watchpoint>> m_set; }; // InlineWatchpointSet is a low-overhead, non-copyable watchpoint set in which @@ -112,8 +233,8 @@ private: class InlineWatchpointSet { WTF_MAKE_NONCOPYABLE(InlineWatchpointSet); public: - InlineWatchpointSet(InitialWatchpointSetMode mode) - : m_data((mode == InitializedWatching ? IsWatchedFlag : 0) | IsThinFlag) + InlineWatchpointSet(WatchpointState state) + : m_data(encodeState(state)) { } @@ -124,13 +245,37 @@ public: freeFat(); } + // Fast way of getting the state, which only works from the main thread. + WatchpointState stateOnJSThread() const + { + uintptr_t data = m_data; + if (isFat(data)) + return fat(data)->stateOnJSThread(); + return decodeState(data); + } + + // It is safe to call this from another thread. It may return a prior state, + // but that should be fine since you should only perform actions based on the + // state if you also add a watchpoint. + WatchpointState state() const + { + WTF::loadLoadFence(); + uintptr_t data = m_data; + WTF::loadLoadFence(); + if (isFat(data)) + return fat(data)->state(); + return decodeState(data); + } + + // It is safe to call this from another thread. It may return false + // even if the set actually had been invalidated, but that ought to happen + // only in the case of races, and should be rare. 
bool hasBeenInvalidated() const { - if (isFat()) - return fat()->hasBeenInvalidated(); - return m_data & IsInvalidatedFlag; + return state() == IsInvalidated; } + // Like hasBeenInvalidated(), may be called from another thread. bool isStillValid() const { return !hasBeenInvalidated(); @@ -144,38 +289,130 @@ public: fat()->startWatching(); return; } - m_data |= IsWatchedFlag; + ASSERT(decodeState(m_data) != IsInvalidated); + m_data = encodeState(IsWatched); } - void notifyWrite() + void fireAll(const FireDetail& detail) { if (isFat()) { - fat()->notifyWrite(); + fat()->fireAll(detail); return; } - if (!(m_data & IsWatchedFlag)) + if (decodeState(m_data) == ClearWatchpoint) return; - m_data |= IsInvalidatedFlag; + m_data = encodeState(IsInvalidated); + WTF::storeStoreFence(); + } + + void invalidate(const FireDetail& detail) + { + if (isFat()) + fat()->invalidate(detail); + else + m_data = encodeState(IsInvalidated); + } + + JS_EXPORT_PRIVATE void fireAll(const char* reason); + + void touch(const FireDetail& detail) + { + if (isFat()) { + fat()->touch(detail); + return; + } + uintptr_t data = m_data; + if (decodeState(data) == IsInvalidated) + return; + WTF::storeStoreFence(); + if (decodeState(data) == ClearWatchpoint) + m_data = encodeState(IsWatched); + else + m_data = encodeState(IsInvalidated); + WTF::storeStoreFence(); + } + + void touch(const char* reason) + { + touch(StringFireDetail(reason)); + } + + // Note that for any watchpoint that is visible from the DFG, it would be incorrect to write code like: + // + // if (w.isBeingWatched()) + // w.fireAll() + // + // Concurrently to this, the DFG could do: + // + // if (w.isStillValid()) + // perform optimizations; + // if (!w.isStillValid()) + // retry compilation; + // + // Note that the DFG algorithm is widespread, and sound, because fireAll() and invalidate() will leave + // the watchpoint in a !isStillValid() state. Hence, if fireAll() or invalidate() interleaved between + // the first isStillValid() check and the second one, then it would simply cause the DFG to retry + // compilation later. + // + // But, if you change some piece of state that the DFG might optimize for, but invalidate the + // watchpoint by doing: + // + // if (w.isBeingWatched()) + // w.fireAll() + // + // then the DFG would never know that you invalidated state between the two checks. + // + // There are two ways to work around this: + // + // - Call fireAll() without a isBeingWatched() check. Then, the DFG will know that the watchpoint has + // been invalidated when it does its second check. + // + // - Do not expose the watchpoint set to the DFG directly, and have your own way of validating whether + // the assumptions that the DFG thread used are still valid when the DFG code is installed. 
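The comment block above is the crux of the concurrency story: fireAll() and invalidate() leave the set permanently in a !isStillValid() state, so a compiler thread that re-checks validity after optimizing is guaranteed to observe any interleaved invalidation. A minimal, self-contained sketch of that double-check pattern follows; std::atomic stands in for WTF's explicit fences, and all names are hypothetical rather than JSC's:

    #include <atomic>

    // Simplified stand-in for WatchpointSet: once invalidated, never valid again.
    enum class State { Clear, Watched, Invalidated };

    struct SketchSet {
        std::atomic<State> state { State::Watched };

        bool isStillValid() const { return state.load(std::memory_order_acquire) != State::Invalidated; }
        void fireAll() { state.store(State::Invalidated, std::memory_order_release); } // one-way transition
    };

    // DFG-style compilation thread: check, speculate, then re-check before installing code.
    bool compileSpeculatively(SketchSet& set)
    {
        if (!set.isStillValid())
            return false;          // assumption already dead; compile conservatively
        // ... emit optimized code that relies on the watched condition ...
        return set.isStillValid(); // false here means a write raced in; retry compilation
    }

Because the only transition is Watched to Invalidated, a stale read can at worst cost a wasted recompilation, never a wrong installation.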
+ bool isBeingWatched() const + { + if (isFat()) + return fat()->isBeingWatched(); + return false; } private: static const uintptr_t IsThinFlag = 1; - static const uintptr_t IsInvalidatedFlag = 2; - static const uintptr_t IsWatchedFlag = 4; + static const uintptr_t StateMask = 6; + static const uintptr_t StateShift = 1; + + static bool isThin(uintptr_t data) { return data & IsThinFlag; } + static bool isFat(uintptr_t data) { return !isThin(data); } + + static WatchpointState decodeState(uintptr_t data) + { + ASSERT(isThin(data)); + return static_cast<WatchpointState>((data & StateMask) >> StateShift); + } - bool isThin() const { return m_data & IsThinFlag; } - bool isFat() const { return !isThin(); }; + static uintptr_t encodeState(WatchpointState state) + { + return (static_cast<uintptr_t>(state) << StateShift) | IsThinFlag; + } + + bool isThin() const { return isThin(m_data); } + bool isFat() const { return isFat(m_data); }; + + static WatchpointSet* fat(uintptr_t data) + { + return bitwise_cast<WatchpointSet*>(data); + } WatchpointSet* fat() { ASSERT(isFat()); - return bitwise_cast<WatchpointSet*>(m_data); + return fat(m_data); } const WatchpointSet* fat() const { ASSERT(isFat()); - return bitwise_cast<WatchpointSet*>(m_data); + return fat(m_data); } WatchpointSet* inflate()
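The tail of this hunk shows the thin/fat trick: bit 0 of m_data tags the word as either an inline state ("thin") or a pointer to a heap-allocated WatchpointSet ("fat"), which works because the allocation is at least 2-byte aligned and so never has bit 0 set. A compilable sketch of just the bit layout, reusing the constants above but otherwise hypothetical code:

    #include <cassert>
    #include <cstdint>

    enum State : uintptr_t { ClearWatchpoint = 0, IsWatched = 1, IsInvalidated = 2 };

    constexpr uintptr_t IsThinFlag = 1; // bit 0: set => inline state, clear => pointer
    constexpr uintptr_t StateMask  = 6; // bits 1-2 hold the state
    constexpr uintptr_t StateShift = 1;

    constexpr uintptr_t encodeState(State s) { return (static_cast<uintptr_t>(s) << StateShift) | IsThinFlag; }
    constexpr State decodeState(uintptr_t d) { return static_cast<State>((d & StateMask) >> StateShift); }

    int main()
    {
        uintptr_t data = encodeState(IsWatched);
        assert(data & IsThinFlag);                 // still thin: no Watchpoint registered yet
        assert(decodeState(data) == IsWatched);

        data = encodeState(IsInvalidated);         // firing a thin set just rewrites two bits
        assert(decodeState(data) == IsInvalidated);
        return 0;
    }

inflateSlow() in Watchpoint.cpp above is then just the other direction: allocate the fat set seeded with decodeState(m_data), fence, and overwrite m_data with the pointer.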
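Further up, the VirtualRegister.h hunk in this diff encodes operands by sign: locals map to negative offsets via -1 - local, while arguments map to non-negative offsets past the call-frame header. A small round-trip sketch; the header size constant here is an assumption for illustration, where JSC derives it from CallFrame::thisArgumentOffset():

    #include <cassert>

    constexpr int thisArgumentOffset = 6; // assumed call-frame header size; JSC computes this

    constexpr int localToOperand(int local)       { return -1 - local; }
    constexpr int operandToLocal(int operand)     { return -1 - operand; }
    constexpr int argumentToOperand(int argument) { return argument + thisArgumentOffset; }
    constexpr int operandToArgument(int operand)  { return operand - thisArgumentOffset; }

    int main()
    {
        assert(localToOperand(0) == -1);                            // loc0 sits just below the header
        assert(operandToLocal(localToOperand(7)) == 7);             // local index round-trips
        assert(argumentToOperand(0) == thisArgumentOffset);         // arg0 is |this|
        assert(operandToArgument(argumentToOperand(3)) == 3);       // argument index round-trips
        assert(localToOperand(5) < 0 && argumentToOperand(5) >= 0); // sign alone distinguishes them
        return 0;
    }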